diff --git a/.clang-format b/.clang-format index bcf60344f9f5b07b072d50c9bbd52f173f38ef69..b62836419ea3e2d19de323071132fa26135c2f10 100644 --- a/.clang-format +++ b/.clang-format @@ -441,8 +441,11 @@ ForEachMacros: - 'inet_lhash2_for_each_icsk' - 'inet_lhash2_for_each_icsk_continue' - 'inet_lhash2_for_each_icsk_rcu' + - 'interval_tree_for_each_double_span' + - 'interval_tree_for_each_span' - 'intlist__for_each_entry' - 'intlist__for_each_entry_safe' + - 'iopt_for_each_contig_area' - 'kcore_copy__for_each_phdr' - 'key_for_each' - 'key_for_each_safe' diff --git a/CREDITS b/CREDITS index 198f675c419e37d2b963646b4fb12937979329cd..4e302a459ddf069ce3b59b3dd7bbebcd8476ef46 100644 --- a/CREDITS +++ b/CREDITS @@ -1439,6 +1439,10 @@ N: Justin Guyett E: jguyett@andrew.cmu.edu D: via-rhine net driver hacking +N: Nitin Gupta +E: ngupta@vflare.org +D: zsmalloc memory allocator and zram block device driver + N: Danny ter Haar E: dth@cistron.nl D: /proc/cpuinfo, reboot on panic , kernel pre-patch tester ;) diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd index 8e2c2c405db22d411b7f86f8c0af634b09a3f36a..3becc9a82bdf6fbb28012e84763811dd6c091450 100644 --- a/Documentation/ABI/stable/sysfs-driver-dma-idxd +++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd @@ -22,6 +22,7 @@ Date: Oct 25, 2019 KernelVersion: 5.6.0 Contact: dmaengine@vger.kernel.org Description: The largest number of work descriptors in a batch. + It's not visible when the device does not support batch. What: /sys/bus/dsa/devices/dsa/max_work_queues_size Date: Oct 25, 2019 @@ -49,6 +50,8 @@ Description: The total number of read buffers supported by this device. The read buffers represent resources within the DSA implementation, and these resources are allocated by engines to support operations. See DSA spec v1.2 9.2.4 Total Read Buffers. + It's not visible when the device does not support Read Buffer + allocation control. What: /sys/bus/dsa/devices/dsa/max_transfer_size Date: Oct 25, 2019 @@ -122,6 +125,8 @@ Contact: dmaengine@vger.kernel.org Description: The maximum number of read buffers that may be in use at one time by operations that access low bandwidth memory in the device. See DSA spec v1.2 9.2.8 GENCFG on Global Read Buffer Limit. + It's not visible when the device does not support Read Buffer + allocation control. What: /sys/bus/dsa/devices/dsa/cmd_status Date: Aug 28, 2020 @@ -205,6 +210,7 @@ KernelVersion: 5.10.0 Contact: dmaengine@vger.kernel.org Description: The max batch size for this workqueue. Cannot exceed device max batch size. Configurable parameter. + It's not visible when the device does not support batch. What: /sys/bus/dsa/devices/wq./ats_disable Date: Nov 13, 2020 @@ -250,6 +256,8 @@ KernelVersion: 5.17.0 Contact: dmaengine@vger.kernel.org Description: Enable the use of global read buffer limit for the group. See DSA spec v1.2 9.2.18 GRPCFG Use Global Read Buffer Limit. + It's not visible when the device does not support Read Buffer + allocation control. What: /sys/bus/dsa/devices/group./read_buffers_allowed Date: Dec 10, 2021 @@ -258,6 +266,8 @@ Contact: dmaengine@vger.kernel.org Description: Indicates max number of read buffers that may be in use at one time by all engines in the group. See DSA spec v1.2 9.2.18 GRPCFG Read Buffers Allowed. + It's not visible when the device does not support Read Buffer + allocation control. 
What: /sys/bus/dsa/devices/group./read_buffers_reserved Date: Dec 10, 2021 @@ -266,6 +276,8 @@ Contact: dmaengine@vger.kernel.org Description: Indicates the number of Read Buffers reserved for the use of engines in the group. See DSA spec v1.2 9.2.18 GRPCFG Read Buffers Reserved. + It's not visible when the device does not support Read Buffer + allocation control. What: /sys/bus/dsa/devices/group./desc_progress_limit Date: Sept 14, 2022 diff --git a/Documentation/ABI/stable/sysfs-driver-speakup b/Documentation/ABI/stable/sysfs-driver-speakup index dc2a6ba1674b87f903553431b493010b0effed29..bcb6831aa114db7640d468cbd7dce2e60de0da97 100644 --- a/Documentation/ABI/stable/sysfs-driver-speakup +++ b/Documentation/ABI/stable/sysfs-driver-speakup @@ -35,6 +35,15 @@ Description: This controls cursor delay when using arrow keys. When a characters. Set this to a higher value to adjust for the delay and better synchronisation between cursor position and speech. +What: /sys/accessibility/speakup/cur_phonetic +KernelVersion: 6.2 +Contact: speakup@linux-speakup.org +Description: This allows speakup to speak letters phonetically when arrowing through + a word letter by letter. This doesn't affect the spelling when typing + the characters. When cur_phonetic=1, speakup will speak characters + phonetically when arrowing over a letter. When cur_phonetic=0, speakup + will speak letters normally. + What: /sys/accessibility/speakup/delimiters KernelVersion: 2.6 Contact: speakup@linux-speakup.org diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc index 611b23e6488d964d81dee626b0fc2a6f5b279fb3..f00cff6d8c5cb48d1eabe94c335fd6ac3c834494 100644 --- a/Documentation/ABI/testing/configfs-usb-gadget-uvc +++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc @@ -197,7 +197,7 @@ Description: Specific MJPEG format descriptors read-only bmaControls this format's data for bmaControls in the streaming header - bmInterfaceFlags specifies interlace information, + bmInterlaceFlags specifies interlace information, read-only bAspectRatioY the X dimension of the picture aspect ratio, read-only @@ -253,7 +253,7 @@ Description: Specific uncompressed format descriptors read-only bmaControls this format's data for bmaControls in the streaming header - bmInterfaceFlags specifies interlace information, + bmInterlaceFlags specifies interlace information, read-only bAspectRatioY the X dimension of the picture aspect ratio, read-only diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs index c915bf17b2932632aa5be1b8fd475a6efb03230d..85f6d04f528b6fcbf57326117477e5dab9e686e1 100644 --- a/Documentation/ABI/testing/debugfs-driver-habanalabs +++ b/Documentation/ABI/testing/debugfs-driver-habanalabs @@ -91,6 +91,13 @@ Description: Enables the root user to set the device to specific state. Valid values are "disable", "enable", "suspend", "resume". User can read this property to see the valid values +What: /sys/kernel/debug/habanalabs/hl/device_release_watchdog_timeout +Date: Oct 2022 +KernelVersion: 6.2 +Contact: ttayar@habana.ai +Description: The watchdog timeout value in seconds for a device release upon + certain error cases, after which the device is reset.
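A minimal usage sketch for the watchdog entry above; the device index (``hl0``) and the 30-second value are illustrative assumptions, not values taken from the driver documentation::

	# set the device-release watchdog timeout to 30 seconds
	echo 30 > /sys/kernel/debug/habanalabs/hl0/device_release_watchdog_timeout
	# read back the current value
	cat /sys/kernel/debug/habanalabs/hl0/device_release_watchdog_timeout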
+ What: /sys/kernel/debug/habanalabs/hl/dma_size Date: Apr 2021 KernelVersion: 5.13 diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram index 14b2bf2e5105ced6872477b0b85e6f24a76f1d08..628a00fb20a95185870e6acdef03d78cd194d817 100644 --- a/Documentation/ABI/testing/sysfs-block-zram +++ b/Documentation/ABI/testing/sysfs-block-zram @@ -137,3 +137,17 @@ Description: The writeback_limit file is read-write and specifies the maximum amount of writeback ZRAM can do. The limit could be changed in run time. + +What: /sys/block/zram/recomp_algorithm +Date: November 2022 +Contact: Sergey Senozhatsky +Description: + The recomp_algorithm file is read-write and allows setting + or showing secondary compression algorithms. + +What: /sys/block/zram/recompress +Date: November 2022 +Contact: Sergey Senozhatsky +Description: + The recompress file is write-only and triggers re-compression + with secondary compression algorithms. diff --git a/Documentation/ABI/testing/sysfs-bus-coreboot b/Documentation/ABI/testing/sysfs-bus-coreboot new file mode 100644 index 0000000000000000000000000000000000000000..9c5accecc47092f9e2fd288ca7f5dac5c54870e5 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-coreboot @@ -0,0 +1,45 @@ +What: /sys/bus/coreboot +Date: August 2022 +Contact: Jack Rosenthal +Description: + The coreboot bus provides a variety of virtual devices used to + access data structures created by the Coreboot BIOS. + +What: /sys/bus/coreboot/devices/cbmem- +Date: August 2022 +Contact: Jack Rosenthal +Description: + CBMEM is a downwards-growing memory region created by Coreboot, + and contains tagged data structures to be shared with payloads + in the boot process and the OS. Each CBMEM entry is given a + directory in /sys/bus/coreboot/devices based on its id. + A list of ids known to Coreboot can be found in the coreboot + source tree at + ``src/commonlib/bsd/include/commonlib/bsd/cbmem_id.h``. + +What: /sys/bus/coreboot/devices/cbmem-/address +Date: August 2022 +Contact: Jack Rosenthal +Description: + This is the physical memory address that the CBMEM entry's data + begins at, in hexadecimal (e.g., ``0x76ffe000``). + +What: /sys/bus/coreboot/devices/cbmem-/size +Date: August 2022 +Contact: Jack Rosenthal +Description: + This is the size of the CBMEM entry's data, in hexadecimal + (e.g., ``0x1234``). + +What: /sys/bus/coreboot/devices/cbmem-/mem +Date: August 2022 +Contact: Jack Rosenthal +Description: + A file exposing read/write access to the entry's data. Note + that this file does not support mmap(), as coreboot + does not guarantee that the data will be page-aligned. + + The mode of this file is 0600. While there shouldn't be + anything security-sensitive contained in CBMEM, read access + requires root privileges given this is exposing a small subset + of physical memory. diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130 b/Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130 new file mode 100644 index 0000000000000000000000000000000000000000..f24ed6687e900ec5213cf0769ebdf65a0b65ae3d --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130 @@ -0,0 +1,46 @@ +What: /sys/bus/iio/devices/iio:deviceX/in_voltage-voltage_filter_mode_available +KernelVersion: 6.2 +Contact: linux-iio@vger.kernel.org +Description: + Reading returns a list with the possible filter modes. + + * "sinc4" - Sinc 4. Excellent noise performance. Long + 1st conversion time. No natural 50/60Hz rejection. + + * "sinc4+sinc1" - Sinc4 + averaging by 8.
Low 1st conversion + time. + + * "sinc3" - Sinc3. Moderate 1st conversion time. + Good noise performance. + + * "sinc3+rej60" - Sinc3 + 60Hz rejection. At a sampling + frequency of 50Hz, achieves simultaneous 50Hz and 60Hz + rejection. + + * "sinc3+sinc1" - Sinc3 + averaging by 8. Low 1st conversion + time. Best used with a sampling frequency of at least + 216.19Hz. + + * "sinc3+pf1" - Sinc3 + Post Filter 1. 53dB rejection @ + 50Hz, 58dB rejection @ 60Hz. + + * "sinc3+pf2" - Sinc3 + Post Filter 2. 70dB rejection @ + 50Hz, 70dB rejection @ 60Hz. + + * "sinc3+pf3" - Sinc3 + Post Filter 3. 99dB rejection @ + 50Hz, 103dB rejection @ 60Hz. + + * "sinc3+pf4" - Sinc3 + Post Filter 4. 103dB rejection @ + 50Hz, 109dB rejection @ 60Hz. + +What: /sys/bus/iio/devices/iio:deviceX/in_voltageY-voltageZ_filter_mode +KernelVersion: 6.2 +Contact: linux-iio@vger.kernel.org +Description: + Set the filter mode of the differential channel. When the filter + mode changes, the in_voltageY-voltageZ_sampling_frequency and + in_voltageY-voltageZ_sampling_frequency_available attributes + might also change to accommodate the new filter mode. + If the current sampling frequency is out of range for the new + filter mode, the sampling frequency will be changed to the + closest valid one. diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-max11410 b/Documentation/ABI/testing/sysfs-bus-iio-adc-max11410 new file mode 100644 index 0000000000000000000000000000000000000000..2a53c6b373603d3a2e2ebc5a1156fe2f09bd8623 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-iio-adc-max11410 @@ -0,0 +1,13 @@ +What: /sys/bus/iio/devices/iio:deviceX/in_voltage_filterY_notch_en +Date: September 2022 +KernelVersion: 6.0 +Contact: linux-iio@vger.kernel.org +Description: + Enable or disable a notch filter. + +What: /sys/bus/iio/devices/iio:deviceX/in_voltage_filterY_notch_center +Date: September 2022 +KernelVersion: 6.0 +Contact: linux-iio@vger.kernel.org +Description: + Center frequency of the notch filter in Hz. 
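A rough usage sketch tying the AD4130 filter-mode attributes above together; the device and channel indices are assumptions, not values taken from the documentation::

	# list the supported filter modes
	cat /sys/bus/iio/devices/iio:device0/in_voltage-voltage_filter_mode_available
	# pick simultaneous 50Hz/60Hz rejection for differential channel 0-1
	echo sinc3+rej60 > /sys/bus/iio/devices/iio:device0/in_voltage0-voltage1_filter_mode
	# the set of valid sampling frequencies may change with the filter mode
	cat /sys/bus/iio/devices/iio:device0/in_voltage0-voltage1_sampling_frequency_available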
diff --git a/Documentation/ABI/testing/sysfs-bus-platform-devices-ampere-smpro b/Documentation/ABI/testing/sysfs-bus-platform-devices-ampere-smpro new file mode 100644 index 0000000000000000000000000000000000000000..ca93c215ef995e3619f79f7624065269955a55e1 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-platform-devices-ampere-smpro @@ -0,0 +1,312 @@ +What: /sys/bus/platform/devices/smpro-errmon.*/error_[core|mem|pcie|other]_[ce|ue] +KernelVersion: 6.1 +Contact: Quan Nguyen +Description: + (RO) Contains the 48-byte Ampere (Vendor-Specific) Error Record printed + in hex format according to the table below: + + +--------+---------------+-------------+------------------------------------------------------------+ + | Offset | Field | Size (byte) | Description | + +--------+---------------+-------------+------------------------------------------------------------+ + | 00 | Error Type | 1 | See :ref:`the table below ` for details | + +--------+---------------+-------------+------------------------------------------------------------+ + | 01 | Subtype | 1 | See :ref:`the table below ` for details | + +--------+---------------+-------------+------------------------------------------------------------+ + | 02 | Instance | 2 | See :ref:`the table below ` for details | + +--------+---------------+-------------+------------------------------------------------------------+ + | 04 | Error status | 4 | See ARM RAS specification for details | + +--------+---------------+-------------+------------------------------------------------------------+ + | 08 | Error Address | 8 | See ARM RAS specification for details | + +--------+---------------+-------------+------------------------------------------------------------+ + | 16 | Error Misc 0 | 8 | See ARM RAS specification for details | + +--------+---------------+-------------+------------------------------------------------------------+ + | 24 | Error Misc 1 | 8 | See ARM RAS specification for details | + +--------+---------------+-------------+------------------------------------------------------------+ + | 32 | Error Misc 2 | 8 | See ARM RAS specification for details | + +--------+---------------+-------------+------------------------------------------------------------+ + | 40 | Error Misc 3 | 8 | See ARM RAS specification for details | + +--------+---------------+-------------+------------------------------------------------------------+ + + The table below defines the value of error types, their subtype, subcomponent and instance: + + .. 
_smpro-error-types: + + +-----------------+------------+----------+----------------+----------------------------------------+ + | Error Group | Error Type | Sub type | Sub component | Instance | + +-----------------+------------+----------+----------------+----------------------------------------+ + | CPM (core) | 0 | 0 | Snoop-Logic | CPM # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | CPM (core) | 0 | 2 | Armv8 Core 1 | CPM # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | MCU (mem) | 1 | 1 | ERR1 | MCU # \| SLOT << 11 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | MCU (mem) | 1 | 2 | ERR2 | MCU # \| SLOT << 11 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | MCU (mem) | 1 | 3 | ERR3 | MCU # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | MCU (mem) | 1 | 4 | ERR4 | MCU # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | MCU (mem) | 1 | 5 | ERR5 | MCU # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | MCU (mem) | 1 | 6 | ERR6 | MCU # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | MCU (mem) | 1 | 7 | Link Error | MCU # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | Mesh (other) | 2 | 0 | Cross Point | X \| (Y << 5) \| NS <<11 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | Mesh (other) | 2 | 1 | Home Node(IO) | X \| (Y << 5) \| NS <<11 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | Mesh (other) | 2 | 2 | Home Node(Mem) | X \| (Y << 5) \| NS <<11 \| device<<12 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | Mesh (other) | 2 | 4 | CCIX Node | X \| (Y << 5) \| NS <<11 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | 2P Link (other) | 3 | 0 | N/A | Altra 2P Link # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | GIC (other) | 5 | 0 | ERR0 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | GIC (other) | 5 | 1 | ERR1 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | GIC (other) | 5 | 2 | ERR2 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | GIC (other) | 5 | 3 | ERR3 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | GIC (other) | 5 | 4 | ERR4 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | GIC (other) | 5 | 5 | ERR5 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | GIC (other) | 5 | 6 | ERR6 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | GIC (other) | 5 | 7 | ERR7 | 0 | + 
+-----------------+------------+----------+----------------+----------------------------------------+ + | GIC (other) | 5 | 8 | ERR8 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | GIC (other) | 5 | 9 | ERR9 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | GIC (other) | 5 | 10 | ERR10 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | GIC (other) | 5 | 11 | ERR11 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | GIC (other) | 5 | 12 | ERR12 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | GIC (other) | 5 | 13-21 | ERR13 | RC # + 1 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | SMMU (other) | 6 | TCU | 100 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | SMMU (other) | 6 | TBU0 | 0 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | SMMU (other) | 6 | TBU1 | 1 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | SMMU (other) | 6 | TBU2 | 2 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | SMMU (other) | 6 | TBU3 | 3 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | SMMU (other) | 6 | TBU4 | 4 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | SMMU (other) | 6 | TBU5 | 5 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | SMMU (other) | 6 | TBU6 | 6 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | SMMU (other) | 6 | TBU7 | 7 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | SMMU (other) | 6 | TBU8 | 8 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | SMMU (other) | 6 | TBU9 | 9 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | PCIe AER (pcie) | 7 | Root | 0 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | PCIe AER (pcie) | 7 | Device | 1 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | PCIe RC (pcie) | 8 | RCA HB | 0 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | PCIe RC (pcie) | 8 | RCB HB | 1 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | PCIe RC (pcie) | 8 | RASDP | 8 | RC # | + +-----------------+------------+----------+----------------+----------------------------------------+ + | OCM (other) | 9 | ERR0 | 0 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | OCM (other) | 9 | ERR1 | 1 | 0 | + 
+-----------------+------------+----------+----------------+----------------------------------------+ + | OCM (other) | 9 | ERR2 | 2 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | SMpro (other) | 10 | ERR0 | 0 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | SMpro (other) | 10 | ERR1 | 1 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | SMpro (other) | 10 | MPA_ERR | 2 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | PMpro (other) | 11 | ERR0 | 0 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | PMpro (other) | 11 | ERR1 | 1 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + | PMpro (other) | 11 | MPA_ERR | 2 | 0 | + +-----------------+------------+----------+----------------+----------------------------------------+ + + Example:: + + # cat error_other_ue + 880807001e004010401040101500000001004010401040100c0000000000000000000000000000000000000000000000 + + The details of each sysfs entry are as below: + + +-------------+---------------------------------------------------------+----------------------------------+ + | Error | Sysfs entry | Description (when triggered) | + +-------------+---------------------------------------------------------+----------------------------------+ + | Core's CE | /sys/bus/platform/devices/smpro-errmon.*/error_core_ce | Core has CE error | + +-------------+---------------------------------------------------------+----------------------------------+ + | Core's UE | /sys/bus/platform/devices/smpro-errmon.*/error_core_ue | Core has UE error | + +-------------+---------------------------------------------------------+----------------------------------+ + | Memory's CE | /sys/bus/platform/devices/smpro-errmon.*/error_mem_ce | Memory has CE error | + +-------------+---------------------------------------------------------+----------------------------------+ + | Memory's UE | /sys/bus/platform/devices/smpro-errmon.*/error_mem_ue | Memory has UE error | + +-------------+---------------------------------------------------------+----------------------------------+ + | PCIe's CE | /sys/bus/platform/devices/smpro-errmon.*/error_pcie_ce | any PCIe controller has CE error | + +-------------+---------------------------------------------------------+----------------------------------+ + | PCIe's UE | /sys/bus/platform/devices/smpro-errmon.*/error_pcie_ue | any PCIe controller has UE error | + +-------------+---------------------------------------------------------+----------------------------------+ + | Other's CE | /sys/bus/platform/devices/smpro-errmon.*/error_other_ce | any other CE error | + +-------------+---------------------------------------------------------+----------------------------------+ + | Other's UE | /sys/bus/platform/devices/smpro-errmon.*/error_other_ue | any other UE error | + +-------------+---------------------------------------------------------+----------------------------------+ + + UE: Uncorrectable Error + CE: Correctable Error + + For details, see section `3.3 Ampere (Vendor-Specific) Error Record Formats, + Altra Family RAS Supplement`.
+ + +What: /sys/bus/platform/devices/smpro-errmon.*/overflow_[core|mem|pcie|other]_[ce|ue] +KernelVersion: 6.1 +Contact: Quan Nguyen +Description: + (RO) Return the overflow status of each type of HW error reported: + + - 0 : No overflow + - 1 : There is an overflow and the oldest HW errors are dropped + + The details of each sysfs entry are as below: + + +-------------+-----------------------------------------------------------+---------------------------------------+ + | Overflow | Sysfs entry | Description | + +-------------+-----------------------------------------------------------+---------------------------------------+ + | Core's CE | /sys/bus/platform/devices/smpro-errmon.*/overflow_core_ce | Core CE error overflow | + +-------------+-----------------------------------------------------------+---------------------------------------+ + | Core's UE | /sys/bus/platform/devices/smpro-errmon.*/overflow_core_ue | Core UE error overflow | + +-------------+-----------------------------------------------------------+---------------------------------------+ + | Memory's CE | /sys/bus/platform/devices/smpro-errmon.*/overflow_mem_ce | Memory CE error overflow | + +-------------+-----------------------------------------------------------+---------------------------------------+ + | Memory's UE | /sys/bus/platform/devices/smpro-errmon.*/overflow_mem_ue | Memory UE error overflow | + +-------------+-----------------------------------------------------------+---------------------------------------+ + | PCIe's CE | /sys/bus/platform/devices/smpro-errmon.*/overflow_pcie_ce | any PCIe controller CE error overflow | + +-------------+-----------------------------------------------------------+---------------------------------------+ + | PCIe's UE | /sys/bus/platform/devices/smpro-errmon.*/overflow_pcie_ue | any PCIe controller UE error overflow | + +-------------+-----------------------------------------------------------+---------------------------------------+ + | Other's CE | /sys/bus/platform/devices/smpro-errmon.*/overflow_other_ce| any other CE error overflow | + +-------------+-----------------------------------------------------------+---------------------------------------+ + | Other's UE | /sys/bus/platform/devices/smpro-errmon.*/overflow_other_ue| other UE error overflow | + +-------------+-----------------------------------------------------------+---------------------------------------+ + + where: + + - UE: Uncorrectable Error + - CE: Correctable Error + +What: /sys/bus/platform/devices/smpro-errmon.*/[error|warn]_[smpro|pmpro] +KernelVersion: 6.1 +Contact: Quan Nguyen +Description: + (RO) Contains the internal firmware error/warning printed in hex format.
+ + The details of each sysfs entry are as below: + + +---------------+------------------------------------------------------+--------------------------+ + | Error | Sysfs entry | Description | + +---------------+------------------------------------------------------+--------------------------+ + | SMpro error | /sys/bus/platform/devices/smpro-errmon.*/error_smpro | system has SMpro error | + +---------------+------------------------------------------------------+--------------------------+ + | SMpro warning | /sys/bus/platform/devices/smpro-errmon.*/warn_smpro | system has SMpro warning | + +---------------+------------------------------------------------------+--------------------------+ + | PMpro error | /sys/bus/platform/devices/smpro-errmon.*/error_pmpro | system has PMpro error | + +---------------+------------------------------------------------------+--------------------------+ + | PMpro warning | /sys/bus/platform/devices/smpro-errmon.*/warn_pmpro | system has PMpro warning | + +---------------+------------------------------------------------------+--------------------------+ + + For details, see section `5.10 RAS Internal Error Register Definitions, + Altra Family Soc BMC Interface Specification`. + +What: /sys/bus/platform/devices/smpro-errmon.*/event_[vrd_warn_fault|vrd_hot|dimm_hot] +KernelVersion: 6.1 +Contact: Quan Nguyen +Description: + (RO) Contains the detailed information in case of VRD/DIMM warning/hot events + in hex format as below:: + + AAAA + + where: + + - ``AAAA``: The event detail information data + + The details of each sysfs entry are as below: + + +---------------+---------------------------------------------------------------+---------------------+ + | Event | Sysfs entry | Description | + +---------------+---------------------------------------------------------------+---------------------+ + | VRD HOT | /sys/bus/platform/devices/smpro-errmon.*/event_vrd_hot | VRD Hot | + +---------------+---------------------------------------------------------------+---------------------+ + | VR Warn/Fault | /sys/bus/platform/devices/smpro-errmon.*/event_vrd_warn_fault | VR Warning or Fault | + +---------------+---------------------------------------------------------------+---------------------+ + | DIMM HOT | /sys/bus/platform/devices/smpro-errmon.*/event_dimm_hot | DIMM Hot | + +---------------+---------------------------------------------------------------+---------------------+ + + For more details, see section `5.7 GPI Status Registers, + Altra Family Soc BMC Interface Specification`. + +What: /sys/bus/platform/devices/smpro-misc.*/boot_progress +KernelVersion: 6.1 +Contact: Quan Nguyen +Description: + (RO) Contains the boot stage information in hex in the format below:: + + AABBCCCCCCCC + + where: + + - ``AA`` : The boot stage + + - 00: SMpro firmware booting + - 01: PMpro firmware booting + - 02: ATF BL1 firmware booting + - 03: DDR initialization + - 04: DDR training report status + - 05: ATF BL2 firmware booting + - 06: ATF BL31 firmware booting + - 07: ATF BL32 firmware booting + - 08: UEFI firmware booting + - 09: OS booting + + - ``BB`` : Boot status + + - 00: Not started + - 01: Started + - 02: Completed without error + - 03: Failed. + + - ``CCCCCCCC``: Boot status information defined for each boot stage + + For details, see section `5.11 Boot Stage Register Definitions` + and section `6. Processor Boot Progress Codes, Altra Family Soc BMC + Interface Specification`.
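A hedged decoding sketch for ``boot_progress``; the platform-device instance name and the value shown are illustrative assumptions, not values from the specification::

	# read the boot progress (instance name assumed)
	cat /sys/bus/platform/devices/smpro-misc.2.auto/boot_progress
	# an example value 080200000000 decodes, per the table above, as:
	#   AA=08             -> UEFI firmware booting
	#   BB=02             -> completed without error
	#   CCCCCCCC=00000000 -> stage-specific status information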
+ + +What: /sys/bus/platform/devices/smpro-misc*/soc_power_limit +KernelVersion: 6.1 +Contact: Quan Nguyen +Description: + (RW) Contains the desired SoC power limit in Watts. + Writes to this sysfs set the desired SoC power limit (W). + Reads from this file return the current SoC power limit (W). + The value ranges: + + - Minimum: 120 W + - Maximum: Socket TDP power diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb index 568103d3376ee7912fea0705e0f71633de372814..545c2dd97ed02dc8475387117ec0e065ed01c984 100644 --- a/Documentation/ABI/testing/sysfs-bus-usb +++ b/Documentation/ABI/testing/sysfs-bus-usb @@ -264,6 +264,17 @@ Description: attached to the port will not be detected, initialized, or enumerated. +What: /sys/bus/usb/devices/...//port/early_stop +Date: Sep 2022 +Contact: Ray Chi +Description: + Some USB hosts have watchdog mechanisms so that the device + may enter ramdump if it takes a long time during port initialization. + This attribute limits each port to two initialization attempts so that + port initialization fails quickly. In addition, if a port + which is marked with early_stop has failed to initialize, it will ignore + all future connections until this attribute is cleared. + What: /sys/bus/usb/devices/.../power/usb2_lpm_l1_timeout Date: May 2013 Contact: Mathias Nyman diff --git a/Documentation/ABI/testing/sysfs-class-bdi b/Documentation/ABI/testing/sysfs-class-bdi index 6d2a2fc189dd614d02469b2080fe0585f0f867df..0d2abd88a18cbd3add725d20e85c942a13c682d8 100644 --- a/Documentation/ABI/testing/sysfs-class-bdi +++ b/Documentation/ABI/testing/sysfs-class-bdi @@ -44,6 +44,21 @@ Description: (read-write) +What: /sys/class/bdi//min_ratio_fine +Date: November 2022 +Contact: Stefan Roesch +Description: + Under normal circumstances each device is given a part of the + total write-back cache that relates to its current average + writeout speed in relation to the other devices. + + The 'min_ratio_fine' parameter allows assigning a minimum reserve + of the write-back cache to a particular device. The value is + expressed as part of 1 million. For example, this is useful for + providing a minimum QoS. + + (read-write) + What: /sys/class/bdi//max_ratio Date: January 2008 Contact: Peter Zijlstra @@ -55,6 +70,59 @@ Description: mount that is prone to get stuck, or a FUSE mount which cannot be trusted to play fair. + (read-write) + +What: /sys/class/bdi//max_ratio_fine +Date: November 2022 +Contact: Stefan Roesch +Description: + Allows limiting a particular device to use not more than the + given value of the write-back cache. The value is given as part + of 1 million. This is useful in situations where we want to avoid + one device taking all or most of the write-back cache. For example + in case of an NFS mount that is prone to get stuck, or a FUSE mount + which cannot be trusted to play fair. + + (read-write) + +What: /sys/class/bdi//min_bytes +Date: October 2022 +Contact: Stefan Roesch +Description: + Under normal circumstances each device is given a part of the + total write-back cache that relates to its current average + writeout speed in relation to the other devices. + + The 'min_bytes' parameter allows assigning a minimum + portion of the write-back cache to a particular device, + expressed in bytes. + For example, this is useful for providing a minimum QoS.
+ + (read-write) + +What: /sys/class/bdi//max_bytes +Date: October 2022 +Contact: Stefan Roesch +Description: + Allows limiting a particular device to use not more than the + given 'max_bytes' of the write-back cache. This is useful in + situations where we want to avoid one device taking all or + most of the write-back cache. For example in case of an NFS + mount that is prone to get stuck, a FUSE mount which cannot be + trusted to play fair, or an nbd device. + + (read-write) + +What: /sys/class/bdi//strict_limit +Date: October 2022 +Contact: Stefan Roesch +Description: + Forces per-BDI checks for the share of the given device in the write-back + cache even before the global background dirty limit is reached. This + is useful in situations where the global limit is much higher than + affordable for a given relatively slow (or untrusted) device. Turning + strictlimit on has no visible effect if max_ratio is equal to 100%. + (read-write) What: /sys/class/bdi//stable_pages_required Date: January 2008 diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index 483639fb727b203e405b125a21611aaca2cfd487..9e3756625a8194d92761ee3e286c84aa273e5fa5 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -99,6 +99,12 @@ Description: Controls the issue rate of discard commands that consist of small checkpoint is triggered, and issued during the checkpoint. By default, it is disabled with 0. +What: /sys/fs/f2fs//max_ordered_discard +Date: October 2022 +Contact: "Yangtao Li" +Description: Controls the maximum ordered discard, the unit size is one block (4KB). + It is set to 16 by default. + What: /sys/fs/f2fs//max_discard_request Date: December 2021 Contact: "Konstantin Vyshetsky" @@ -132,7 +138,8 @@ Contact: "Chao Yu" Description: Controls discard granularity of inner discard thread. Inner thread will not issue discards with size that is smaller than granularity. The unit size is one block(4KB), now only support configuring - in range of [1, 512]. Default value is 4(=16KB). + in range of [1, 512]. Default value is 16. + For small devices, default value is 1. What: /sys/fs/f2fs//umount_discard_timeout Date: January 2019 @@ -235,7 +242,7 @@ Description: Shows total written kbytes issued to disk. What: /sys/fs/f2fs//features Date: July 2017 Contact: "Jaegeuk Kim" -Description: /feature_list/ +Description: /feature_list/> Shows all enabled features in current device. Supported features: encryption, blkzoned, extra_attr, projquota, inode_checksum, @@ -592,10 +599,10 @@ Description: With "mode=fragment:block" mount options, we can scatter block allo in the length of 1.. by turns. This value can be set between 1..512 and the default value is 4. -What: /sys/fs/f2fs//gc_urgent_high_remaining -Date: December 2021 -Contact: "Daeho Jeong" -Description: You can set the trial count limit for GC urgent high mode with this value. +What: /sys/fs/f2fs//gc_remaining_trials +Date: October 2022 +Contact: "Yangtao Li" +Description: You can set the trial count limit for GC urgent and idle mode with this value. If GC thread gets to the limit, the mode will turn back to GC normal mode. By default, the value is zero, which means there is no limit like before. @@ -634,3 +641,31 @@ Date: July 2022 Contact: "Daeho Jeong" Description: Show the accumulated total revoked atomic write block count after boot. If you write "0" here, you can initialize to "0".
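Returning to the writeback-cache knobs documented in ``sysfs-class-bdi`` above, a hedged tuning sketch; the device name (``8:0``) and all values are assumptions, not recommendations::

	# reserve 5% (50000 parts per million) of the writeback cache
	echo 50000 > /sys/class/bdi/8:0/min_ratio_fine
	# cap this device at 1 GiB of the writeback cache
	echo 1073741824 > /sys/class/bdi/8:0/max_bytes
	# enforce the per-BDI share even below the global background dirty limit
	echo 1 > /sys/class/bdi/8:0/strict_limit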
+ +What: /sys/fs/f2fs//gc_mode +Date: October 2022 +Contact: "Yangtao Li" +Description: Show the current gc_mode as a string. + This is a read-only entry. + +What: /sys/fs/f2fs//discard_urgent_util +Date: November 2022 +Contact: "Yangtao Li" +Description: When space utilization exceeds this, do background DISCARD aggressively. + DISCARD is issued forcibly in each period of min_discard_issue_time when the number + of discards is not 0, and the discard granularity is set to 1. + Default: 80 + +What: /sys/fs/f2fs//hot_data_age_threshold +Date: November 2022 +Contact: "Ping Xiong" +Description: When DATA SEPARATION is on, it controls the age threshold used to indicate + the data blocks as hot. By default it is initialized to 262144 blocks + (equal to 1GB). + +What: /sys/fs/f2fs//warm_data_age_threshold +Date: November 2022 +Contact: "Ping Xiong" +Description: When DATA SEPARATION is on, it controls the age threshold used to indicate + the data blocks as warm. By default it is initialized to 2621440 blocks + (equal to 10GB). diff --git a/Documentation/ABI/testing/sysfs-kernel-cpu_byteorder b/Documentation/ABI/testing/sysfs-kernel-cpu_byteorder new file mode 100644 index 0000000000000000000000000000000000000000..f0e6ac1b53566f4201f0961065998ca9e2496b6b --- /dev/null +++ b/Documentation/ABI/testing/sysfs-kernel-cpu_byteorder @@ -0,0 +1,12 @@ +What: /sys/kernel/cpu_byteorder +Date: February 2023 +KernelVersion: 6.2 +Contact: Thomas Weißschuh +Description: + The endianness of the running kernel. + + Access: Read + + Valid values: + "little", "big" +Users: util-linux diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-damon b/Documentation/ABI/testing/sysfs-kernel-mm-damon index 08b9df3235609ad4966aa17977c821fea693b824..13397b8536926780fc0f967099b6e193d6b23f50 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-damon +++ b/Documentation/ABI/testing/sysfs-kernel-mm-damon @@ -27,6 +27,10 @@ Description: Writing 'on' or 'off' to this file makes the kdamond starts or makes the kdamond reads the user inputs in the sysfs files except 'state' again. Writing 'update_schemes_stats' to the file updates contents of schemes stats files of the kdamond. + Writing 'update_schemes_tried_regions' to the file updates + the contents of the 'tried_regions' directory of every scheme + directory of this kdamond. Writing 'clear_schemes_tried_regions' + to the file removes the contents of the 'tried_regions' directory. What: /sys/kernel/mm/damon/admin/kdamonds//pid Date: Mar 2022 @@ -283,3 +287,31 @@ Date: Mar 2022 Contact: SeongJae Park Description: Reading this file returns the number of the exceed events of the scheme's quotas. + +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//tried_regions//start +Date: Oct 2022 +Contact: SeongJae Park +Description: Reading this file returns the start address of a memory region + that the corresponding DAMON-based Operation Scheme's action has + tried to be applied to. + +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//tried_regions//end +Date: Oct 2022 +Contact: SeongJae Park +Description: Reading this file returns the end address of a memory region + that the corresponding DAMON-based Operation Scheme's action has + tried to be applied to. + +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//tried_regions//nr_accesses +Date: Oct 2022 +Contact: SeongJae Park +Description: Reading this file returns the 'nr_accesses' of a memory region + that the corresponding DAMON-based Operation Scheme's action has + tried to be applied to.
+ +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//tried_regions//age +Date: Oct 2022 +Contact: SeongJae Park +Description: Reading this file returns the 'age' of a memory region that + the corresponding DAMON-based Operation Scheme's action has tried + to be applied to. diff --git a/Documentation/ABI/testing/sysfs-kernel-oops_count b/Documentation/ABI/testing/sysfs-kernel-oops_count new file mode 100644 index 0000000000000000000000000000000000000000..156cca9dbc960628c07458c9ec52d686cc551385 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-kernel-oops_count @@ -0,0 +1,6 @@ +What: /sys/kernel/oops_count +Date: November 2022 +KernelVersion: 6.2.0 +Contact: Linux Kernel Hardening List +Description: + Shows how many times the system has Oopsed since last boot. diff --git a/Documentation/ABI/testing/sysfs-kernel-warn_count b/Documentation/ABI/testing/sysfs-kernel-warn_count new file mode 100644 index 0000000000000000000000000000000000000000..08f083d2fd51bf59bd8a6b8dd4abe77af3be6a35 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-kernel-warn_count @@ -0,0 +1,6 @@ +What: /sys/kernel/warn_count +Date: November 2022 +KernelVersion: 6.2.0 +Contact: Linux Kernel Hardening List +Description: + Shows how many times the system has Warned since last boot. diff --git a/Documentation/admin-guide/blockdev/zram.rst b/Documentation/admin-guide/blockdev/zram.rst index c73b16930449e6eb1efe62aa8ab774c113db6343..e4551579cb128e15c91b0461f4e899d944553ead 100644 --- a/Documentation/admin-guide/blockdev/zram.rst +++ b/Documentation/admin-guide/blockdev/zram.rst @@ -348,8 +348,13 @@ this can be accomplished with:: echo huge_idle > /sys/block/zramX/writeback +If a user chooses to writeback only incompressible pages (pages that none of +the algorithms can compress) this can be accomplished with:: + + echo incompressible > /sys/block/zramX/writeback + If an admin wants to write a specific page in zram device to the backing device, -they could write a page index into the interface. +they could write a page index into the interface:: echo "page_index=1251" > /sys/block/zramX/writeback @@ -401,6 +406,87 @@ budget in next setting is user's job. If admin wants to measure writeback count in a certain period, they could know it via /sys/block/zram0/bd_stat's 3rd column. +recompression +------------- + +With CONFIG_ZRAM_MULTI_COMP, zram can recompress pages using alternative +(secondary) compression algorithms. The basic idea is that an alternative +compression algorithm can provide a better compression ratio at the price of +(potentially) slower compression/decompression speeds. An alternative compression +algorithm can, for example, be more successful at compressing huge pages (those +that the default algorithm failed to compress). Another application is idle page +recompression - pages that are cold and sit in memory can be recompressed +using a more effective algorithm and, hence, reduce zsmalloc memory usage. + +With CONFIG_ZRAM_MULTI_COMP, zram supports up to 4 compression algorithms: +one primary and up to 3 secondary ones. The primary zram compressor is explained +in "3) Select compression algorithm"; secondary algorithms are configured +using the recomp_algorithm device attribute. + +Example:: + + #show supported recompression algorithms + cat /sys/block/zramX/recomp_algorithm + #1: lzo lzo-rle lz4 lz4hc [zstd] + #2: lzo lzo-rle lz4 [lz4hc] zstd + +Alternative compression algorithms are sorted by priority.
In the example +above, zstd is used as the first alternative algorithm, which has priority +of 1, while lz4hc is configured as a compression algorithm with priority 2. +An alternative compression algorithm's priority is provided during algorithm +configuration:: + + #select zstd recompression algorithm, priority 1 + echo "algo=zstd priority=1" > /sys/block/zramX/recomp_algorithm + + #select deflate recompression algorithm, priority 2 + echo "algo=deflate priority=2" > /sys/block/zramX/recomp_algorithm + +Another device attribute that CONFIG_ZRAM_MULTI_COMP enables is recompress, +which controls recompression. + +Examples:: + + #IDLE pages recompression is activated by `idle` mode + echo "type=idle" > /sys/block/zramX/recompress + + #HUGE pages recompression is activated by `huge` mode + echo "type=huge" > /sys/block/zram0/recompress + + #HUGE_IDLE pages recompression is activated by `huge_idle` mode + echo "type=huge_idle" > /sys/block/zramX/recompress + +The number of idle pages can be significant, so user-space can pass a size +threshold (in bytes) to the recompress knob: zram will recompress only pages +of equal or greater size:: + + #recompress all pages larger than 3000 bytes + echo "threshold=3000" > /sys/block/zramX/recompress + + #recompress idle pages larger than 2000 bytes + echo "type=idle threshold=2000" > /sys/block/zramX/recompress + +Recompression of idle pages requires memory tracking. + +During re-compression, for every page that matches the re-compression criteria, +ZRAM iterates the list of registered alternative compression algorithms in +order of their priorities. ZRAM stops either when re-compression was +successful (re-compressed object is smaller in size than the original one) +and matches re-compression criteria (e.g. size threshold) or when there are +no secondary algorithms left to try. If none of the secondary algorithms can +successfully re-compress the page, such a page is marked as incompressible, +so ZRAM will not attempt to re-compress it in the future. + +This re-compression behaviour, when it iterates through the list of +registered compression algorithms, increases our chances of finding the +algorithm that successfully compresses a particular page. Sometimes, however, +it is convenient (and sometimes even necessary) to limit recompression to +only one particular algorithm so that it will not try any other algorithms. +This can be achieved by providing an algo=NAME parameter:: + + #use zstd algorithm only (if registered) + echo "type=huge algo=zstd" > /sys/block/zramX/recompress + memory tracking =============== @@ -411,9 +497,11 @@ pages of the process with*pagemap. If you enable the feature, you could see block state via /sys/kernel/debug/zram/zram0/block_state". The output is as follows:: - 300 75.033841 .wh. - 301 63.806904 s... - 302 63.806919 ..hi + 300 75.033841 .wh... + 301 63.806904 s..... + 302 63.806919 ..hi.. + 303 62.801919 ....r. + 304 146.781902 ..hi.n First column zram's block index.
@@ -430,6 +518,10 @@ Third column huge page i: idle page + r: + recompressed page (secondary compression algorithm) + n: + none of the algorithms (including secondary ones) could compress it First line of above example says 300th block is accessed at 75.033841sec and the block's state is huge so it is written back to the backing diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst index 5b86245450bdc4e695770ab728949617e87d5ec6..60370f2c67b99c5541fb7de4cc99878833580174 100644 --- a/Documentation/admin-guide/cgroup-v1/memory.rst +++ b/Documentation/admin-guide/cgroup-v1/memory.rst @@ -543,7 +543,8 @@ inactive_anon # of bytes of anonymous and swap cache memory on inactive LRU list. active_anon # of bytes of anonymous and swap cache memory on active LRU list. -inactive_file # of bytes of file-backed memory on inactive LRU list. +inactive_file # of bytes of file-backed memory and MADV_FREE anonymous memory + (LazyFree pages) on inactive LRU list. active_file # of bytes of file-backed memory on active LRU list. unevictable # of bytes of memory that cannot be reclaimed (mlocked etc). =============== =============================================================== diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index dc254a3cb95686e67a7335bad3101313e97eedd9..c8ae7c897f142a329e9f3ac6c496435460ef1c8b 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1245,17 +1245,13 @@ PAGE_SIZE multiple when read back. This is a simple interface to trigger memory reclaim in the target cgroup. - This file accepts a single key, the number of bytes to reclaim. - No nested keys are currently supported. + This file accepts a string which contains the number of bytes to + reclaim. Example:: echo "1G" > memory.reclaim - The interface can be later extended with nested keys to - configure the reclaim behavior. For example, specify the - type of memory to reclaim from (anon, file, ..). - Please note that the kernel can over or under reclaim from the target cgroup. If less bytes are reclaimed than the specified amount, -EAGAIN is returned. @@ -1267,6 +1263,13 @@ PAGE_SIZE multiple when read back. This means that the networking layer will not adapt based on reclaim induced by memory.reclaim. + This file also allows the user to specify the nodes to reclaim from, + via the 'nodes=' key, for example:: + + echo "1G nodes=0,1" > memory.reclaim + + The above instructs the kernel to reclaim memory from nodes 0,1. + memory.peak A read-only single value file which exists on non-root cgroups. @@ -1488,12 +1491,18 @@ PAGE_SIZE multiple when read back.
pgscan_direct (npn) Amount of scanned pages directly (in an inactive LRU list) + pgscan_khugepaged (npn) + Amount of scanned pages by khugepaged (in an inactive LRU list) + pgsteal_kswapd (npn) Amount of reclaimed pages by kswapd pgsteal_direct (npn) Amount of reclaimed pages directly + pgsteal_khugepaged (npn) + Amount of reclaimed pages by khugepaged + pgfault (npn) Total number of page faults incurred diff --git a/Documentation/admin-guide/cifs/usage.rst b/Documentation/admin-guide/cifs/usage.rst index 3766bf8a1c20ed9bdc0d99770a90e312fd470062..ed3b8dc854ecf215b770b25b68209761997eb2c7 100644 --- a/Documentation/admin-guide/cifs/usage.rst +++ b/Documentation/admin-guide/cifs/usage.rst @@ -858,7 +858,7 @@ CIFS kernel module parameters These module parameters can be specified or modified either during the time of module loading or during the runtime by using the interface:: - /proc/module/cifs/parameters/ + /sys/module/cifs/parameters/ i.e.:: diff --git a/Documentation/admin-guide/kdump/vmcoreinfo.rst b/Documentation/admin-guide/kdump/vmcoreinfo.rst index 6726f439958ca0bad0d0fe26a3fe8034ccbdbaef..86fd884928700bd02b8673375e60ddde5f408521 100644 --- a/Documentation/admin-guide/kdump/vmcoreinfo.rst +++ b/Documentation/admin-guide/kdump/vmcoreinfo.rst @@ -595,3 +595,32 @@ X2TLB ----- Indicates whether the crashed kernel enabled SH extended mode. + +RISCV64 +======= + +VA_BITS +------- + +The maximum number of bits for virtual addresses. Used to compute the +virtual memory ranges. + +PAGE_OFFSET +----------- + +Indicates the virtual kernel start address of the direct-mapped RAM region. + +phys_ram_base +------------- + +Indicates the start physical RAM address. + +MODULES_VADDR|MODULES_END|VMALLOC_START|VMALLOC_END|VMEMMAP_START|VMEMMAP_END|KERNEL_LINK_ADDR +---------------------------------------------------------------------------------------------- + +Used to get the correct ranges: + + * MODULES_VADDR ~ MODULES_END : Kernel module space. + * VMALLOC_START ~ VMALLOC_END : vmalloc() / ioremap() space. + * VMEMMAP_START ~ VMEMMAP_END : vmemmap space, used for struct page array. + * KERNEL_LINK_ADDR : start address of Kernel link and BPF diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index fb388c6c8c607e647ec2ea72f22c5446ddd1e647..6cfa6e3996cf75ee6bb1e8b399daa4d5701ab92b 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1050,6 +1050,11 @@ them frequently to increase the rate of SLB faults on kernel addresses. + stress_hpt [PPC] + Limits the number of kernel HPT entries in the hash + page table to increase the rate of hash page table + faults on kernel addresses. + disable= [IPV6] See Documentation/networking/ipv6.rst. @@ -2308,7 +2313,13 @@ Provide an override to the IOAPIC-ID<->DEVICE-ID mapping provided in the IVRS ACPI table. By default, PCI segment is 0, and can be omitted. - For example: + + For example, to map IOAPIC-ID decimal 10 to + PCI segment 0x1 and PCI device 00:14.0, + write the parameter as: + ivrs_ioapic=10@0001:00:14.0 + + Deprecated formats: * To map IOAPIC-ID decimal 10 to PCI device 00:14.0 write the parameter as: ivrs_ioapic[10]=00:14.0 @@ -2320,7 +2331,13 @@ Provide an override to the HPET-ID<->DEVICE-ID mapping provided in the IVRS ACPI table. By default, PCI segment is 0, and can be omitted. 
- For example: + For example, to map HPET-ID decimal 10 to + PCI segment 0x1 and PCI device 00:14.0, + write the parameter as: + ivrs_hpet=10@0001:00:14.0 + + Deprecated formats: * To map HPET-ID decimal 0 to PCI device 00:14.0 write the parameter as: ivrs_hpet[0]=00:14.0 @@ -2331,15 +2348,20 @@ ivrs_acpihid [HW,X86-64] Provide an override to the ACPI-HID:UID<->DEVICE-ID mapping provided in the IVRS ACPI table. + By default, PCI segment is 0, and can be omitted. For example, to map UART-HID:UID AMD0020:0 to PCI segment 0x1 and PCI device ID 00:14.5, write the parameter as: - ivrs_acpihid[0001:00:14.5]=AMD0020:0 + ivrs_acpihid=AMD0020:0@0001:00:14.5 - By default, PCI segment is 0, and can be omitted. - For example, PCI device 00:14.5 write the parameter as: + Deprecated formats: + * To map UART-HID:UID AMD0020:0 to PCI segment 0, + PCI device ID 00:14.5, write the parameter as: ivrs_acpihid[00:14.5]=AMD0020:0 + * To map UART-HID:UID AMD0020:0 to PCI segment 0x1 and + PCI device ID 00:14.5, write the parameter as: + ivrs_acpihid[0001:00:14.5]=AMD0020:0 js= [HW,JOY] Analog joystick See Documentation/input/joydev/joystick.rst. @@ -6266,6 +6288,25 @@ See also Documentation/trace/ftrace.rst "trace options" section. + trace_trigger=[trigger-list] + [FTRACE] Add an event trigger on specific events. + Set a trigger on top of a specific event, with an optional + filter. + + The format is "trace_trigger=.[ if ],..." + More than one trigger may be specified, comma delimited. + + For example: + + trace_trigger="sched_switch.stacktrace if prev_state == 2" + + The above will enable the "stacktrace" trigger on the "sched_switch" + event but only trigger it if the "prev_state" of the "sched_switch" + event is "2" (TASK_UNINTERRUPTIBLE). + + See also "Event triggers" in Documentation/trace/events.rst + + traceoff_on_warning [FTRACE] enable this option to disable tracing when a warning is hit. This turns off "tracing_on". Tracing can diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst index b47b0cbbd491d35b13df1a4806509672d15a7130..1a5b6b71efa188524d95dc5f01dcfadd1ff92667 100644 --- a/Documentation/admin-guide/mm/damon/usage.rst +++ b/Documentation/admin-guide/mm/damon/usage.rst @@ -88,6 +88,9 @@ comma (","). :: │ │ │ │ │ │ │ weights/sz_permil,nr_accesses_permil,age_permil │ │ │ │ │ │ │ watermarks/metric,interval_us,high,mid,low │ │ │ │ │ │ │ stats/nr_tried,sz_tried,nr_applied,sz_applied,qt_exceeds + │ │ │ │ │ │ │ tried_regions/ + │ │ │ │ │ │ │ │ 0/start,end,nr_accesses,age + │ │ │ │ │ │ │ │ ... │ │ │ │ │ │ ... │ │ │ │ ... │ │ ... @@ -125,7 +128,14 @@ in the state. Writing ``commit`` to the ``state`` file makes kdamond reads the user inputs in the sysfs files except ``state`` file again. Writing ``update_schemes_stats`` to ``state`` file updates the contents of stats files for each DAMON-based operation scheme of the kdamond. For details of the -stats, please refer to :ref:`stats section `. +stats, please refer to :ref:`stats section `. Writing +``update_schemes_tried_regions`` to ``state`` file updates the DAMON-based +operation scheme action tried regions directory for each DAMON-based operation +scheme of the kdamond. Writing ``clear_schemes_tried_regions`` to ``state`` +file clears the DAMON-based operation scheme action tried regions directory for +each DAMON-based operation scheme of the kdamond. For details of the +DAMON-based operation scheme action tried regions directory, please refer to +:ref:`tried_regions section `.
If the state is ``on``, reading ``pid`` shows the pid of the kdamond thread. @@ -166,6 +176,8 @@ You can set and get what type of monitoring operations DAMON will use for the context by writing one of the keywords listed in ``avail_operations`` file and reading from the ``operations`` file. +.. _sysfs_monitoring_attrs: + contexts/<N>/monitoring_attrs/ ------------------------------ @@ -235,6 +247,9 @@ In each region directory, you will find two files (``start`` and ``end``). You can set and get the start and end addresses of the initial monitoring target region by writing to and reading from the files, respectively. +Each region should not overlap with others. ``end`` of directory ``N`` should +be equal to or smaller than ``start`` of directory ``N+1``. + contexts/<N>/schemes/ --------------------- @@ -252,8 +267,9 @@ to ``N-1``. Each directory represents each DAMON-based operation scheme. schemes/<N>/ ------------ -In each scheme directory, four directories (``access_pattern``, ``quotas``, -``watermarks``, and ``stats``) and one file (``action``) exist. +In each scheme directory, five directories (``access_pattern``, ``quotas``, +``watermarks``, ``stats``, and ``tried_regions``) and one file (``action``) +exist. The ``action`` file is for setting and getting what action you want to apply to memory regions having specific access pattern of the interest. The keywords @@ -348,6 +364,32 @@ should ask DAMON sysfs interface to update the content of the files for the stats by writing a special keyword, ``update_schemes_stats`` to the relevant ``kdamonds/<N>/state`` file. +.. _sysfs_schemes_tried_regions: + +schemes/<N>/tried_regions/ +-------------------------- + +When a special keyword, ``update_schemes_tried_regions``, is written to the +relevant ``kdamonds/<N>/state`` file, DAMON creates directories named as +integers starting from ``0`` under this directory. Each directory contains +files exposing detailed information about each of the memory regions that the +corresponding scheme's ``action`` has tried to be applied to, during the next +:ref:`aggregation interval <sysfs_monitoring_attrs>`. The information includes +the address range, ``nr_accesses``, and ``age`` of the region. + +The directories will be removed when another special keyword, +``clear_schemes_tried_regions``, is written to the relevant +``kdamonds/<N>/state`` file. + +tried_regions/<N>/ +------------------ + +In each region directory, you will find four files (``start``, ``end``, +``nr_accesses``, and ``age``). Reading the files will show the start and end +addresses, ``nr_accesses``, and ``age`` of the region that the corresponding +DAMON-based operation scheme ``action`` has tried to be applied to. + Example ~~~~~~~ @@ -465,8 +507,9 @@ regions in case of physical memory monitoring. Therefore, users should set the monitoring target regions by themselves. In such cases, users can explicitly set the initial monitoring target regions -as they want, by writing proper values to the ``init_regions`` file. Each line -of the input should represent one region in below form.:: +as they want, by writing proper values to the ``init_regions`` file. The input +should be a sequence of three integers separated by white spaces that represent +one region in the below form:: <target idx> <start address> <end address> @@ -481,9 +524,9 @@ ranges, ``20-40`` and ``50-100`` as that of pid 4242, which is the second one # cd <debugfs>/damon # cat target_ids 42 4242 - # echo "0 1 100 - 0 100 200 - 1 20 40 + # echo "0 1 100 \ + 0 100 200 \ + 1 20 40 \ 1 50 100" > init_regions Note that this sets the initial monitoring target regions only.
In case of diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index 39e7a2d46e1e4c2152eabc6002010e3d68e7acf3..46e3d62c0eea84d1c6c382800d092e561672823b 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -436,8 +436,8 @@ ignore-unaligned-usertrap On architectures where unaligned accesses cause traps, and where this feature is supported (``CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN``; -currently, ``arc`` and ``ia64``), controls whether all unaligned traps -are logged. +currently, ``arc``, ``ia64`` and ``loongarch``), controls whether all +unaligned traps are logged. = ============================================================= 0 Log all unaligned accesses. @@ -670,6 +670,15 @@ This is the default behavior. an oops event is detected. +oops_limit +========== + +Number of kernel oopses after which the kernel should panic when +``panic_on_oops`` is not set. Setting this to 0 disables checking +the count. Setting this to 1 has the same effect as setting +``panic_on_oops=1``. The default value is 10000. + + osrelease, ostype & version =========================== @@ -1483,8 +1492,8 @@ unaligned-trap On architectures where unaligned accesses cause traps, and where this feature is supported (``CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW``; currently, -``arc`` and ``parisc``), controls whether unaligned traps are caught -and emulated (instead of failing). +``arc``, ``parisc`` and ``loongarch``), controls whether unaligned traps +are caught and emulated (instead of failing). = ======================================================== 0 Do not emulate unaligned accesses. @@ -1526,6 +1535,16 @@ entry will default to 2 instead of 0. 2 Unprivileged calls to ``bpf()`` are disabled = ============================================================= + +warn_limit +========== + +Number of kernel warnings after which the kernel should panic when +``panic_on_warn`` is not set. Setting this to 0 disables checking +the warning count. Setting this to 1 has the same effect as setting +``panic_on_warn=1``. The default value is 0. + + watchdog ======== diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index ec5f889d76819fd2fae733997d61ff1c33114ace..808ade4cc008ac7c41b0a13e5685fe5afae1dfe3 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -120,8 +120,6 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A710 | #2224489 | ARM64_ERRATUM_2224489 | +----------------+-----------------+-----------------+-----------------------------+ -| ARM | Cortex-A715 | #2645198 | ARM64_ERRATUM_2645198 | -+----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-X2 | #2119858 | ARM64_ERRATUM_2119858 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-X2 | #2224489 | ARM64_ERRATUM_2224489 | diff --git a/Documentation/bpf/map_sk_storage.rst b/Documentation/bpf/map_sk_storage.rst index 047e16c8aaa8c2c8da48a14a16aae1c2385f303b..4e9d23ab9ecdfcc77d3a208e88c76ba99b9e9701 100644 --- a/Documentation/bpf/map_sk_storage.rst +++ b/Documentation/bpf/map_sk_storage.rst @@ -34,13 +34,12 @@ bpf_sk_storage_get() void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags) -Socket-local storage can be retrieved using the ``bpf_sk_storage_get()`` -helper. 
The helper gets the storage from ``sk`` that is associated with ``map``. -If the ``BPF_LOCAL_STORAGE_GET_F_CREATE`` flag is used then -``bpf_sk_storage_get()`` will create the storage for ``sk`` if it does not -already exist. ``value`` can be used together with -``BPF_LOCAL_STORAGE_GET_F_CREATE`` to initialize the storage value, otherwise it -will be zero initialized. Returns a pointer to the storage on success, or +Socket-local storage for ``map`` can be retrieved from socket ``sk`` using the +``bpf_sk_storage_get()`` helper. If the ``BPF_LOCAL_STORAGE_GET_F_CREATE`` +flag is used then ``bpf_sk_storage_get()`` will create the storage for ``sk`` +if it does not already exist. ``value`` can be used together with +``BPF_LOCAL_STORAGE_GET_F_CREATE`` to initialize the storage value, otherwise +it will be zero initialized. Returns a pointer to the storage on success, or ``NULL`` in case of failure. .. note:: @@ -54,9 +53,9 @@ bpf_sk_storage_delete() long bpf_sk_storage_delete(struct bpf_map *map, void *sk) -Socket-local storage can be deleted using the ``bpf_sk_storage_delete()`` -helper. The helper deletes the storage from ``sk`` that is identified by -``map``. Returns ``0`` on success, or negative error in case of failure. +Socket-local storage for ``map`` can be deleted from socket ``sk`` using the +``bpf_sk_storage_delete()`` helper. Returns ``0`` on success, or negative +error in case of failure. User space ---------- @@ -68,16 +67,20 @@ bpf_map_update_elem() int bpf_map_update_elem(int map_fd, const void *key, const void *value, __u64 flags) -Socket-local storage for the socket identified by ``key`` belonging to -``map_fd`` can be added or updated using the ``bpf_map_update_elem()`` libbpf -function. ``key`` must be a pointer to a valid ``fd`` in the user space -program. The ``flags`` parameter can be used to control the update behaviour: +Socket-local storage for map ``map_fd`` can be added or updated locally to a +socket using the ``bpf_map_update_elem()`` libbpf function. The socket is +identified by a `socket` ``fd`` stored in the pointer ``key``. The pointer +``value`` has the data to be added or updated to the socket ``fd``. The type +and size of ``value`` should be the same as the value type of the map +definition. -- ``BPF_ANY`` will create storage for ``fd`` or update existing storage. -- ``BPF_NOEXIST`` will create storage for ``fd`` only if it did not already - exist, otherwise the call will fail with ``-EEXIST``. -- ``BPF_EXIST`` will update existing storage for ``fd`` if it already exists, - otherwise the call will fail with ``-ENOENT``. +The ``flags`` parameter can be used to control the update behaviour: + +- ``BPF_ANY`` will create storage for `socket` ``fd`` or update existing storage. +- ``BPF_NOEXIST`` will create storage for `socket` ``fd`` only if it did not + already exist, otherwise the call will fail with ``-EEXIST``. +- ``BPF_EXIST`` will update existing storage for `socket` ``fd`` if it already + exists, otherwise the call will fail with ``-ENOENT``. Returns ``0`` on success, or negative error in case of failure. @@ -88,10 +91,10 @@ bpf_map_lookup_elem() int bpf_map_lookup_elem(int map_fd, const void *key, void *value) -Socket-local storage for the socket identified by ``key`` belonging to -``map_fd`` can be retrieved using the ``bpf_map_lookup_elem()`` libbpf -function. ``key`` must be a pointer to a valid ``fd`` in the user space -program. Returns ``0`` on success, or negative error in case of failure. 
+Socket-local storage for map ``map_fd`` can be retrieved from a socket using +the ``bpf_map_lookup_elem()`` libbpf function. The storage is retrieved from +the socket identified by a `socket` ``fd`` stored in the pointer +``key``. Returns ``0`` on success, or negative error in case of failure. bpf_map_delete_elem() ~~~~~~~~~~~~~~~~~~~~~ @@ -100,9 +103,10 @@ int bpf_map_delete_elem(int map_fd, const void *key) -Socket-local storage for the socket identified by ``key`` belonging to -``map_fd`` can be deleted using the ``bpf_map_delete_elem()`` libbpf -function. Returns ``0`` on success, or negative error in case of failure. +Socket-local storage for map ``map_fd`` can be deleted from a socket using the +``bpf_map_delete_elem()`` libbpf function. The storage is deleted from the +socket identified by a `socket` ``fd`` stored in the pointer ``key``. Returns +``0`` on success, or negative error in case of failure. Examples ======== diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst index c9412eb855523c8cb1c47976734f1df948bbb952..62f961610773d68560b35217a9cb75879ce7b158 100644 --- a/Documentation/core-api/kernel-api.rst +++ b/Documentation/core-api/kernel-api.rst @@ -36,6 +36,9 @@ String Conversions String Manipulation ------------------- +.. kernel-doc:: include/linux/fortify-string.h + :internal: + .. kernel-doc:: lib/string.c :export: diff --git a/Documentation/crypto/devel-algos.rst b/Documentation/crypto/devel-algos.rst index f225a953ab4b9fb4891833872a1748d4c58d80a2..3506899ef83e394cd24463dbfcd170496c44ab2d 100644 --- a/Documentation/crypto/devel-algos.rst +++ b/Documentation/crypto/devel-algos.rst @@ -172,7 +172,7 @@ Here are schematics of how these functions are called when operated from other part of the kernel. Note that the .setkey() call might happen before or after any of these schematics happen, but must not happen during any of these are in-flight. Please note that calling .init() -followed immediately by .finish() is also a perfectly valid +followed immediately by .final() is also a perfectly valid transformation. :: diff --git a/Documentation/crypto/userspace-if.rst b/Documentation/crypto/userspace-if.rst index b45dabbf69d685d937817498e8a422f933cca86f..f80f243e227e623e5ee89a5615745cdcee7a66f5 100644 --- a/Documentation/crypto/userspace-if.rst +++ b/Documentation/crypto/userspace-if.rst @@ -131,9 +131,9 @@ from the kernel crypto API. If the buffer is too small for the message digest, the flag MSG_TRUNC is set by the kernel. In order to set a message digest key, the calling application must use -the setsockopt() option of ALG_SET_KEY. If the key is not set the HMAC -operation is performed without the initial HMAC state change caused by -the key. +the setsockopt() option of ALG_SET_KEY or ALG_SET_KEY_BY_KEY_SERIAL. If the +key is not set the HMAC operation is performed without the initial HMAC state +change caused by the key. Symmetric Cipher API -------------------- @@ -382,6 +382,15 @@ mentioned optname: - the RNG cipher type to provide the seed +- ALG_SET_KEY_BY_KEY_SERIAL -- Setting the key via keyring key_serial_t. + This operation behaves the same as ALG_SET_KEY. The decrypted + data is copied from a keyring key, and used as the + key for symmetric encryption. + + The passed in key_serial_t must have the KEY_(POS|USR|GRP|OTH)_SEARCH + permission set, otherwise -EPERM is returned. Supports key types: user, + logon, encrypted, and trusted.
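As a rough sketch of the ALG_SET_KEY_BY_KEY_SERIAL flow just described, the serial of an existing keyring key is passed as the setsockopt() value. The cipher name and the serial below are placeholders, the fallback macro values are assumptions for builds against older uapi headers, and a real serial would come from add_key(2) or keyctl(1)::

    /* Hedged sketch: set an skcipher key from a keyring key by serial. */
    #include <linux/if_alg.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef SOL_ALG
    #define SOL_ALG 279                    /* assumed, for older headers */
    #endif
    #ifndef ALG_SET_KEY_BY_KEY_SERIAL
    #define ALG_SET_KEY_BY_KEY_SERIAL 7    /* assumed, for older headers */
    #endif

    int main(void)
    {
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type = "skcipher",
                    .salg_name = "cbc(aes)",
            };
            int32_t serial = 0x123456;     /* placeholder key_serial_t */
            int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);

            if (tfm < 0 || bind(tfm, (struct sockaddr *)&sa, sizeof(sa))) {
                    perror("af_alg");
                    return 1;
            }
            /* The key serial itself is the option value. */
            if (setsockopt(tfm, SOL_ALG, ALG_SET_KEY_BY_KEY_SERIAL,
                           &serial, sizeof(serial)) < 0)
                    perror("ALG_SET_KEY_BY_KEY_SERIAL");
            close(tfm);
            return 0;
    }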
+ - ALG_SET_AEAD_AUTHSIZE -- Setting the authentication tag size for AEAD ciphers. For a encryption operation, the authentication tag of the given size will be generated. For a decryption operation, the diff --git a/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml b/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml index 8051a75c2c79acbfdb82b0f87a685ce697809446..162a39dab218237ae7222650e5c279eaa6da0261 100644 --- a/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml +++ b/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/bcm/bcm2835.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom BCM2711/BCM2835 Platforms Device Tree Bindings +title: Broadcom BCM2711/BCM2835 Platforms maintainers: - Eric Anholt diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.yaml index c6032435743574d9341778c87d348a3a3a2d0a00..f2bcac0096b7306ff4b52b997e7d5a2d263fc0c7 100644 --- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.yaml +++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/bcm/brcm,bcm11351.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom BCM11351 device tree bindings +title: Broadcom BCM11351 maintainers: - Florian Fainelli diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.yaml index b3020757380f610f70169b86d41f34cbe8785397..cf4e254e32f199b864df3555178ce27204e1b545 100644 --- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.yaml +++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/bcm/brcm,bcm21664.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom BCM21664 device tree bindings +title: Broadcom BCM21664 maintainers: - Florian Fainelli diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.yaml index 37f3a6fcde760a67857bc61e6321b0a4b01b9208..eafec29ba7abd27cecde6e6bc462f09c9fda286d 100644 --- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.yaml +++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/bcm/brcm,bcm23550.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom BCM23550 device tree bindings +title: Broadcom BCM23550 maintainers: - Florian Fainelli diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.yaml index 52b575c405998823f496259885f6f42691846b24..454b0e93245d824ace4f01568506bf2d2410fabc 100644 --- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.yaml +++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/bcm/brcm,bcm4708.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom BCM4708 device tree bindings +title: Broadcom BCM4708 description: Broadcom BCM4708/47081/4709/47094/53012 Wi-Fi/network SoCs based diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcmbca.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,bcmbca.yaml index 84866e29cab0e6b50c959b37a5f225f107434a55..07892cbdd23c523498d4eabb7cedd22ffcb78b0d 100644 --- 
a/Documentation/devicetree/bindings/arm/bcm/brcm,bcmbca.yaml +++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcmbca.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/bcm/brcm,bcmbca.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom Broadband SoC device tree bindings +title: Broadcom Broadband SoC description: Broadcom Broadband SoCs include family of high performance DSL/PON/Wireless diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.yaml index 432ccf990f9e8e75c5528a7e444dc36b8e02050c..a0a3f32db54ea0737821e93e82d5c0a695aa52b2 100644 --- a/Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.yaml +++ b/Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/bcm/brcm,cygnus.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom Cygnus device tree bindings +title: Broadcom Cygnus maintainers: - Ray Jui diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,hr2.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,hr2.yaml index 294948399f82545fa84c817271ee665f763ebc0f..cc6add0e933a04697c130fd958c9ca94542a5778 100644 --- a/Documentation/devicetree/bindings/arm/bcm/brcm,hr2.yaml +++ b/Documentation/devicetree/bindings/arm/bcm/brcm,hr2.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/bcm/brcm,hr2.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom Hurricane 2 device tree bindings +title: Broadcom Hurricane 2 description: Broadcom Hurricane 2 family of SoCs are used for switching control. These SoCs diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,ns2.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,ns2.yaml index c4847abbecd8e792b216e45f08e6893fa250867d..6696598eca0e903c391c4e0537ca91d284c94230 100644 --- a/Documentation/devicetree/bindings/arm/bcm/brcm,ns2.yaml +++ b/Documentation/devicetree/bindings/arm/bcm/brcm,ns2.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/bcm/brcm,ns2.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom North Star 2 (NS2) device tree bindings +title: Broadcom North Star 2 (NS2) maintainers: - Ray Jui diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,nsp.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,nsp.yaml index 7d184ba7d180b01f26b424c74980b9d33206a70a..a43b2d4d936b368dee66a37cd6cbecdad8a1790b 100644 --- a/Documentation/devicetree/bindings/arm/bcm/brcm,nsp.yaml +++ b/Documentation/devicetree/bindings/arm/bcm/brcm,nsp.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/bcm/brcm,nsp.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom Northstar Plus device tree bindings +title: Broadcom Northstar Plus description: Broadcom Northstar Plus family of SoCs are used for switching control diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,stingray.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,stingray.yaml index c638e04ebae0fd1e4bf0243c56a08e11577a4de0..c6ccb78aab0a505e414a1c3e8221bded08f91832 100644 --- a/Documentation/devicetree/bindings/arm/bcm/brcm,stingray.yaml +++ b/Documentation/devicetree/bindings/arm/bcm/brcm,stingray.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/bcm/brcm,stingray.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom Stingray device tree bindings +title: Broadcom Stingray maintainers: - Ray Jui diff --git 
a/Documentation/devicetree/bindings/arm/bcm/brcm,vulcan-soc.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,vulcan-soc.yaml index 4eba182abd5358921123606703ab1ae445448cae..3f441352fbf06484c5b3bdf583d1f7f453b81203 100644 --- a/Documentation/devicetree/bindings/arm/bcm/brcm,vulcan-soc.yaml +++ b/Documentation/devicetree/bindings/arm/bcm/brcm,vulcan-soc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/bcm/brcm,vulcan-soc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom Vulcan device tree bindings +title: Broadcom Vulcan maintainers: - Robert Richter diff --git a/Documentation/devicetree/bindings/arm/cci-control-port.yaml b/Documentation/devicetree/bindings/arm/cci-control-port.yaml index c9114866213f341c2b4343e6e6618565293b8704..c29d250a6d7724c1f4bd063a7792586a6b0fc94d 100644 --- a/Documentation/devicetree/bindings/arm/cci-control-port.yaml +++ b/Documentation/devicetree/bindings/arm/cci-control-port.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/cci-control-port.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: CCI Interconnect Bus Masters binding +title: CCI Interconnect Bus Masters maintainers: - Lorenzo Pieralisi diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml index 7dd84f8f8e4fb02d59fc3a36bd64567aefe98f49..01b5a9c689a29fcf436db9ed827ad6396deaf175 100644 --- a/Documentation/devicetree/bindings/arm/cpus.yaml +++ b/Documentation/devicetree/bindings/arm/cpus.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/cpus.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ARM CPUs bindings +title: ARM CPUs maintainers: - Lorenzo Pieralisi diff --git a/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.yaml b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.yaml index 9a426110a14a9c4d0c2d33efcdd07dbd3c950a31..d4dc0749f9fd1f257c05854fcc91a49498fd7102 100644 --- a/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.yaml +++ b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/firmware/linaro,optee-tz.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: OP-TEE Device Tree Bindings +title: OP-TEE maintainers: - Jens Wiklander diff --git a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.yaml b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.yaml index b38458022946f0d3610efcd83d55821e46623f27..540876322040a69865f3b4816df9220ce21461e5 100644 --- a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.yaml +++ b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/hisilicon/hisilicon.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Hisilicon Platforms Device Tree Bindings +title: Hisilicon Platforms maintainers: - Wei Xu diff --git a/Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml b/Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml index 5cbcacaeb441e20a831a9c79b59af60055849271..ff378d5cbd32ae38e5db0aa0009830f27ca14469 100644 --- a/Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml +++ b/Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/keystone/ti,k3-sci-common.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Common K3 TI-SCI bindings +title: Common K3 TI-SCI 
maintainers: - Nishanth Menon diff --git a/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml b/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml index 34f5f877d44412c1b61c58ab7a269ff3745f4f28..91b96065f7dfc2a2e8e61f4a1ab8b898fff90afe 100644 --- a/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml +++ b/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/keystone/ti,sci.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: TI-SCI controller device node bindings +title: TI-SCI controller maintainers: - Nishanth Menon diff --git a/Documentation/devicetree/bindings/arm/marvell/armada-7k-8k.yaml b/Documentation/devicetree/bindings/arm/marvell/armada-7k-8k.yaml index e9bf3054529f1297d383e4b5cd801c9cecb284df..52d78521e4124c73104e5995f836906f18bf87d2 100644 --- a/Documentation/devicetree/bindings/arm/marvell/armada-7k-8k.yaml +++ b/Documentation/devicetree/bindings/arm/marvell/armada-7k-8k.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/marvell/armada-7k-8k.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Marvell Armada 7K/8K Platforms Device Tree Bindings +title: Marvell Armada 7K/8K Platforms maintainers: - Gregory CLEMENT diff --git a/Documentation/devicetree/bindings/arm/mrvl/mrvl.yaml b/Documentation/devicetree/bindings/arm/mrvl/mrvl.yaml index d581161361547d12c0cfc02f5d50b1fd3ff369d7..4c43eaf3632e4ec8e7d9aeac62f7204e2af4405a 100644 --- a/Documentation/devicetree/bindings/arm/mrvl/mrvl.yaml +++ b/Documentation/devicetree/bindings/arm/mrvl/mrvl.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/mrvl/mrvl.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Marvell Platforms Device Tree Bindings +title: Marvell Platforms maintainers: - Lubomir Rintel diff --git a/Documentation/devicetree/bindings/arm/mstar/mstar.yaml b/Documentation/devicetree/bindings/arm/mstar/mstar.yaml index 8892eb6bd3ef7b9206aa919be7faf38334188824..937059fcc7b3bce4151185900099ca52be5a82d1 100644 --- a/Documentation/devicetree/bindings/arm/mstar/mstar.yaml +++ b/Documentation/devicetree/bindings/arm/mstar/mstar.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/mstar/mstar.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: MStar platforms device tree bindings +title: MStar platforms maintainers: - Daniel Palmer diff --git a/Documentation/devicetree/bindings/arm/npcm/npcm.yaml b/Documentation/devicetree/bindings/arm/npcm/npcm.yaml index 43409e5721d59bd0d6da4156b5e0a231120c5c3b..6871483947c51910b0e6d80180d0cec38ecba68b 100644 --- a/Documentation/devicetree/bindings/arm/npcm/npcm.yaml +++ b/Documentation/devicetree/bindings/arm/npcm/npcm.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/npcm/npcm.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NPCM Platforms Device Tree Bindings +title: NPCM Platforms maintainers: - Jonathan Neuschäfer diff --git a/Documentation/devicetree/bindings/arm/nxp/lpc32xx.yaml b/Documentation/devicetree/bindings/arm/nxp/lpc32xx.yaml index 214c97bc30634d21c3769991afe635df4fe4be58..f1bd6f50e726d8a997522f32b72e8e0c8068ba5f 100644 --- a/Documentation/devicetree/bindings/arm/nxp/lpc32xx.yaml +++ b/Documentation/devicetree/bindings/arm/nxp/lpc32xx.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/nxp/lpc32xx.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NXP LPC32xx Platforms Device Tree Bindings +title: NXP LPC32xx Platforms maintainers: - Roland Stigge diff 
--git a/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml b/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml index aa1d4afbc5105fb6b4b5dad4ed1fb2a4b0c64ccf..5a428a885760c556494d89e069b3c1baca650852 100644 --- a/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml +++ b/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/socionext/milbeaut.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Milbeaut platforms device tree bindings +title: Milbeaut platforms maintainers: - Taichi Sugaya diff --git a/Documentation/devicetree/bindings/arm/socionext/uniphier.yaml b/Documentation/devicetree/bindings/arm/socionext/uniphier.yaml index c2cea1c90f3cc30eef7fda8e698d4fcc4e8c1bab..3e7f3d927ec7bb65d7c106ad5f3689cfc1a4b60b 100644 --- a/Documentation/devicetree/bindings/arm/socionext/uniphier.yaml +++ b/Documentation/devicetree/bindings/arm/socionext/uniphier.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/socionext/uniphier.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Socionext UniPhier platform device tree bindings +title: Socionext UniPhier platform maintainers: - Masahiro Yamada diff --git a/Documentation/devicetree/bindings/arm/sp810.yaml b/Documentation/devicetree/bindings/arm/sp810.yaml index bc8e524aa90ad5eab594064a4bcd323cfc7ad638..c9094e5ec56573c172bc0bc48b53a44620bd36cd 100644 --- a/Documentation/devicetree/bindings/arm/sp810.yaml +++ b/Documentation/devicetree/bindings/arm/sp810.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/sp810.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ARM Versatile Express SP810 System Controller bindings +title: ARM Versatile Express SP810 System Controller maintainers: - Andre Przywara diff --git a/Documentation/devicetree/bindings/arm/sprd/sprd.yaml b/Documentation/devicetree/bindings/arm/sprd/sprd.yaml index 2c12e571394b82454b3b6795ca866d85535f7c53..eaa67b8e0d6c7437e73ba9e5c891be56ef873842 100644 --- a/Documentation/devicetree/bindings/arm/sprd/sprd.yaml +++ b/Documentation/devicetree/bindings/arm/sprd/sprd.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/arm/sprd/sprd.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Unisoc platforms device tree bindings +title: Unisoc platforms maintainers: - Orson Zhai diff --git a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml index ecb28e90fd1152d902f923b359d738a139a56d63..2297ad3f4774c42fad29fbad069f76168efafac6 100644 --- a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml +++ b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/arm/stm32/st,mlahb.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: STMicroelectronics STM32 ML-AHB interconnect bindings +title: STMicroelectronics STM32 ML-AHB interconnect maintainers: - Fabien Dessenne diff --git a/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml b/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml index 6f846d69c5e16609d7d37af3601b43ee063e48f1..b2b156cc160aed42d438a16342add10be56617ec 100644 --- a/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml +++ b/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/arm/stm32/st,stm32-syscon.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: 
STMicroelectronics STM32 Platforms System Controller bindings +title: STMicroelectronics STM32 Platforms System Controller maintainers: - Alexandre Torgue diff --git a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml index 44f5c5855af8c9138c4143892880770bd788c9eb..13e34241145b43c07ae31adc5d4edf5a41d01792 100644 --- a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml +++ b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/stm32/stm32.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 Platforms Device Tree Bindings +title: STMicroelectronics STM32 Platforms maintainers: - Alexandre Torgue diff --git a/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun6i-a31-cpuconfig.yaml b/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun6i-a31-cpuconfig.yaml index f3878e0b3cc48160d1df49b97816936b9c8322d6..d805c4508b4eae4f07e334a29b17280ffbec35eb 100644 --- a/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun6i-a31-cpuconfig.yaml +++ b/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun6i-a31-cpuconfig.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/sunxi/allwinner,sun6i-a31-cpuconfig.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Allwinner CPU Configuration Controller Device Tree Bindings +title: Allwinner CPU Configuration Controller maintainers: - Chen-Yu Tsai diff --git a/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun9i-a80-prcm.yaml b/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun9i-a80-prcm.yaml index 668aadbfe4c0bf660299f3d47bdc7956c3290283..644f391afb321513e8379cdb4870bf9a00393af4 100644 --- a/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun9i-a80-prcm.yaml +++ b/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun9i-a80-prcm.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/sunxi/allwinner,sun9i-a80-prcm.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Allwinner A80 PRCM Device Tree Bindings +title: Allwinner A80 PRCM maintainers: - Chen-Yu Tsai diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra-ccplex-cluster.yaml b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra-ccplex-cluster.yaml index 869c266e7ebc95365a2571112fe46a70f0e09488..6089a96eae4ff16b27df331ab7d0ea4992aef4ce 100644 --- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra-ccplex-cluster.yaml +++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra-ccplex-cluster.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/arm/tegra/nvidia,tegra-ccplex-cluster.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: NVIDIA Tegra CPU COMPLEX CLUSTER area device tree bindings +title: NVIDIA Tegra CPU COMPLEX CLUSTER area maintainers: - Sumit Gupta diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra194-cbb.yaml b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra194-cbb.yaml index debb2b0c801378dd9c740eccbe609f96623c98cb..dd3a4770c6a1fd738b39e7cda54463825bebe55e 100644 --- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra194-cbb.yaml +++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra194-cbb.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/arm/tegra/nvidia,tegra194-cbb.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: NVIDIA Tegra194 CBB 1.0 bindings +title: NVIDIA Tegra194 CBB 1.0 maintainers: - Sumit Gupta diff --git 
a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra234-cbb.yaml b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra234-cbb.yaml index 7b1fe50ffbe0bd997312db47792dfff254607a59..44184ee01449f804c5a571cc3497812a5dd6a75e 100644 --- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra234-cbb.yaml +++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra234-cbb.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/arm/tegra/nvidia,tegra234-cbb.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: NVIDIA Tegra CBB 2.0 bindings +title: NVIDIA Tegra CBB 2.0 maintainers: - Sumit Gupta diff --git a/Documentation/devicetree/bindings/arm/ti/k3.yaml b/Documentation/devicetree/bindings/arm/ti/k3.yaml index 09e6845ff24393b62414aeb4c428f16e4e425239..203faab80142bbd4bca7edc22164b81bc733cd8c 100644 --- a/Documentation/devicetree/bindings/arm/ti/k3.yaml +++ b/Documentation/devicetree/bindings/arm/ti/k3.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/ti/k3.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Texas Instruments K3 Multicore SoC architecture device tree bindings +title: Texas Instruments K3 Multicore SoC architecture maintainers: - Nishanth Menon diff --git a/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml b/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml index c022d325fc08b3db313cd020dccc95f631e9dc5e..1656d1a4476f614d0aca3dc6cc82ce0f43ca5224 100644 --- a/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml +++ b/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/ti/ti,davinci.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Texas Instruments DaVinci Platforms Device Tree Bindings +title: Texas Instruments DaVinci Platforms maintainers: - Sekhar Nori diff --git a/Documentation/devicetree/bindings/arm/vexpress-config.yaml b/Documentation/devicetree/bindings/arm/vexpress-config.yaml index 09e1adf5ca7a0b78f47e8cf01884c49708d75f91..b74380da3198b2363197a36a147e13256878d7ea 100644 --- a/Documentation/devicetree/bindings/arm/vexpress-config.yaml +++ b/Documentation/devicetree/bindings/arm/vexpress-config.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/vexpress-config.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ARM Versatile Express configuration bus bindings +title: ARM Versatile Express configuration bus maintainers: - Andre Przywara diff --git a/Documentation/devicetree/bindings/arm/vexpress-sysreg.yaml b/Documentation/devicetree/bindings/arm/vexpress-sysreg.yaml index f04db802a732194d9bd76efa23b794ab48789880..be6e3b5425690d767ef6aa3104f081798697c4c3 100644 --- a/Documentation/devicetree/bindings/arm/vexpress-sysreg.yaml +++ b/Documentation/devicetree/bindings/arm/vexpress-sysreg.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/arm/vexpress-sysreg.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ARM Versatile Express system registers bindings +title: ARM Versatile Express system registers maintainers: - Andre Przywara diff --git a/Documentation/devicetree/bindings/ata/allwinner,sun4i-a10-ahci.yaml b/Documentation/devicetree/bindings/ata/allwinner,sun4i-a10-ahci.yaml index cb530b46beff57b679d5db015e481aebc2be031b..2011bd03cdcd9f0f159f80b4344102b723c5f178 100644 --- a/Documentation/devicetree/bindings/ata/allwinner,sun4i-a10-ahci.yaml +++ b/Documentation/devicetree/bindings/ata/allwinner,sun4i-a10-ahci.yaml @@ -4,7 +4,7 @@ $id: 
http://devicetree.org/schemas/ata/allwinner,sun4i-a10-ahci.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Allwinner A10 AHCI SATA Controller bindings +title: Allwinner A10 AHCI SATA Controller maintainers: - Chen-Yu Tsai diff --git a/Documentation/devicetree/bindings/ata/allwinner,sun8i-r40-ahci.yaml b/Documentation/devicetree/bindings/ata/allwinner,sun8i-r40-ahci.yaml index e6b42a113ff17e2f2e6d7caaa73a9a93ff4bc031..a2afe2ad60638bf182610100ff647ffb8f1f44f8 100644 --- a/Documentation/devicetree/bindings/ata/allwinner,sun8i-r40-ahci.yaml +++ b/Documentation/devicetree/bindings/ata/allwinner,sun8i-r40-ahci.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/ata/allwinner,sun8i-r40-ahci.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Allwinner R40 AHCI SATA Controller bindings +title: Allwinner R40 AHCI SATA Controller maintainers: - Chen-Yu Tsai diff --git a/Documentation/devicetree/bindings/bus/fsl,imx8qxp-pixel-link-msi-bus.yaml b/Documentation/devicetree/bindings/bus/fsl,imx8qxp-pixel-link-msi-bus.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b568d0ce438d2117e776c91061bd250647dcd6c3 --- /dev/null +++ b/Documentation/devicetree/bindings/bus/fsl,imx8qxp-pixel-link-msi-bus.yaml @@ -0,0 +1,232 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/bus/fsl,imx8qxp-pixel-link-msi-bus.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale i.MX8qxp Pixel Link Medium Speed Interconnect (MSI) Bus + +maintainers: + - Liu Ying + +description: | + i.MX8qxp pixel link MSI bus is used to control settings of PHYs, I/Os + sitting together with the PHYs. It is not the same as the MSI bus coming + from i.MX8 System Controller Unit (SCU) which is used to control power, + clock and reset through the i.MX8 Distributed Slave System Controller (DSC). + + i.MX8qxp pixel link MSI bus is a simple memory-mapped bus. Two input clocks, + that is, MSI clock and AHB clock, need to be enabled so that peripherals + connected to the bus can be accessed. Also, the bus is part of a power + domain. The power domain needs to be enabled before the peripherals can + be accessed. + + Peripherals in i.MX8qm/qxp imaging, LVDS, MIPI DSI and HDMI TX subsystems, + like I2C controller, PWM controller, MIPI DSI controller and Control and + Status Registers (CSR) module, are accessed through the bus. + + The i.MX System Controller Firmware (SCFW) owns and uses the i.MX8qm/qxp + pixel link MSI bus controller and does not allow SCFW user to control it. + So, the controller's registers cannot be accessed by SCFW user. Hence, + the interrupts generated by the controller don't make any sense from SCFW + user's point of view. + +allOf: + - $ref: simple-pm-bus.yaml# + +# We need a select here so we don't match all nodes with 'simple-pm-bus'. 
+select: + properties: + compatible: + contains: + enum: + - fsl,imx8qxp-display-pixel-link-msi-bus + - fsl,imx8qm-display-pixel-link-msi-bus + required: + - compatible + +properties: + compatible: + items: + - enum: + - fsl,imx8qxp-display-pixel-link-msi-bus + - fsl,imx8qm-display-pixel-link-msi-bus + - const: simple-pm-bus + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + items: + - description: master gated clock from system + - description: AHB clock + + clock-names: + items: + - const: msi + - const: ahb + +patternProperties: + "^.*@[0-9a-f]+$": + description: Devices attached to the bus + type: object + properties: + reg: + maxItems: 1 + + required: + - reg + +required: + - compatible + - reg + - clocks + - clock-names + - power-domains + +unevaluatedProperties: false + +examples: + - | + #include + #include + bus@56200000 { + compatible = "fsl,imx8qxp-display-pixel-link-msi-bus", "simple-pm-bus"; + reg = <0x56200000 0x20000>; + #address-cells = <1>; + #size-cells = <1>; + interrupt-parent = <&dc0_irqsteer>; + interrupts = <320>; + ranges; + clocks = <&dc0_disp_ctrl_link_mst0_lpcg IMX_LPCG_CLK_4>, + <&dc0_disp_ctrl_link_mst0_lpcg IMX_LPCG_CLK_4>; + clock-names = "msi", "ahb"; + power-domains = <&pd IMX_SC_R_DC_0>; + + syscon@56221000 { + compatible = "fsl,imx8qxp-mipi-lvds-csr", "syscon", "simple-mfd"; + reg = <0x56221000 0x1000>; + clocks = <&mipi_lvds_0_di_mipi_lvds_regs_lpcg IMX_LPCG_CLK_4>; + clock-names = "ipg"; + + pxl2dpi { + compatible = "fsl,imx8qxp-pxl2dpi"; + fsl,sc-resource = ; + power-domains = <&pd IMX_SC_R_MIPI_0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + mipi_lvds_0_pxl2dpi_dc0_pixel_link0: endpoint@0 { + reg = <0>; + remote-endpoint = <&dc0_pixel_link0_mipi_lvds_0_pxl2dpi>; + }; + + mipi_lvds_0_pxl2dpi_dc0_pixel_link1: endpoint@1 { + reg = <1>; + remote-endpoint = <&dc0_pixel_link1_mipi_lvds_0_pxl2dpi>; + }; + }; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + mipi_lvds_0_pxl2dpi_mipi_lvds_0_ldb_ch0: endpoint@0 { + reg = <0>; + remote-endpoint = <&mipi_lvds_0_ldb_ch0_mipi_lvds_0_pxl2dpi>; + }; + + mipi_lvds_0_pxl2dpi_mipi_lvds_0_ldb_ch1: endpoint@1 { + reg = <1>; + remote-endpoint = <&mipi_lvds_0_ldb_ch1_mipi_lvds_0_pxl2dpi>; + }; + }; + }; + }; + + ldb { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,imx8qxp-ldb"; + clocks = <&clk IMX_SC_R_LVDS_0 IMX_SC_PM_CLK_MISC2>, + <&clk IMX_SC_R_LVDS_0 IMX_SC_PM_CLK_BYPASS>; + clock-names = "pixel", "bypass"; + power-domains = <&pd IMX_SC_R_LVDS_0>; + + channel@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + phys = <&mipi_lvds_0_phy>; + phy-names = "lvds_phy"; + + port@0 { + reg = <0>; + + mipi_lvds_0_ldb_ch0_mipi_lvds_0_pxl2dpi: endpoint { + remote-endpoint = <&mipi_lvds_0_pxl2dpi_mipi_lvds_0_ldb_ch0>; + }; + }; + + port@1 { + reg = <1>; + + /* ... */ + }; + }; + + channel@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + phys = <&mipi_lvds_0_phy>; + phy-names = "lvds_phy"; + + port@0 { + reg = <0>; + + mipi_lvds_0_ldb_ch1_mipi_lvds_0_pxl2dpi: endpoint { + remote-endpoint = <&mipi_lvds_0_pxl2dpi_mipi_lvds_0_ldb_ch1>; + }; + }; + + port@1 { + reg = <1>; + + /* ... 
*/ + }; + }; + }; + }; + + clock-controller@56223004 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x56223004 0x4>; + #clock-cells = <1>; + clocks = <&mipi_lvds_0_ipg_clk>; + clock-indices = ; + clock-output-names = "mipi_lvds_0_di_mipi_lvds_regs_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_MIPI_0>; + }; + + phy@56228300 { + compatible = "fsl,imx8qxp-mipi-dphy"; + reg = <0x56228300 0x100>; + clocks = <&clk IMX_SC_R_LVDS_0 IMX_SC_PM_CLK_PHY>; + clock-names = "phy_ref"; + #phy-cells = <0>; + fsl,syscon = <&mipi_lvds_0_csr>; + power-domains = <&pd IMX_SC_R_MIPI_0>; + }; + }; diff --git a/Documentation/devicetree/bindings/bus/ti-sysc.yaml b/Documentation/devicetree/bindings/bus/ti-sysc.yaml index fced4082b047ba5b54fdf459dbac4cfe0940b731..f089634f9466a5f2b62485b5042bfb78d93ef51a 100644 --- a/Documentation/devicetree/bindings/bus/ti-sysc.yaml +++ b/Documentation/devicetree/bindings/bus/ti-sysc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/bus/ti-sysc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Texas Instruments interconnect target module binding +title: Texas Instruments interconnect target module maintainers: - Tony Lindgren diff --git a/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml b/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml index 983033fe5b177c2cb536e660634d39b1123a8b5b..5e942bccf27787d7029f76fc1a284232fb7f279d 100644 --- a/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml +++ b/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/adi,axi-clkgen.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for Analog Devices AXI clkgen pcore clock generator +title: Analog Devices AXI clkgen pcore clock generator maintainers: - Lars-Peter Clausen diff --git a/Documentation/devicetree/bindings/clock/calxeda.yaml b/Documentation/devicetree/bindings/clock/calxeda.yaml index a34cbf3c9aaf583bbea2e84d447d207ef7ab1acd..a88fbe20fef16d5c8accbea965ec31ebc5f5e5c1 100644 --- a/Documentation/devicetree/bindings/clock/calxeda.yaml +++ b/Documentation/devicetree/bindings/clock/calxeda.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/calxeda.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Device Tree Clock bindings for Calxeda highbank platform +title: Calxeda highbank platform Clock Controller description: | This binding covers the Calxeda SoC internal peripheral and bus clocks diff --git a/Documentation/devicetree/bindings/clock/cirrus,cs2000-cp.yaml b/Documentation/devicetree/bindings/clock/cirrus,cs2000-cp.yaml index 82836086cac1492e808a9043d8d2aa40bb7c1780..d416c374e8534f26026a55db871826a094227170 100644 --- a/Documentation/devicetree/bindings/clock/cirrus,cs2000-cp.yaml +++ b/Documentation/devicetree/bindings/clock/cirrus,cs2000-cp.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/cirrus,cs2000-cp.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding CIRRUS LOGIC Fractional-N Clock Synthesizer & Clock Multiplier +title: CIRRUS LOGIC Fractional-N Clock Synthesizer & Clock Multiplier maintainers: - Kuninori Morimoto diff --git a/Documentation/devicetree/bindings/clock/fixed-clock.yaml b/Documentation/devicetree/bindings/clock/fixed-clock.yaml index b657ecd0ef1c39b5a740046addd0687ed39c8a23..b0a4fb8256e2ffd90deae3e949dd2553b17e3cbf 100644 --- a/Documentation/devicetree/bindings/clock/fixed-clock.yaml +++ b/Documentation/devicetree/bindings/clock/fixed-clock.yaml @@ -4,7 +4,7 @@ $id: 
http://devicetree.org/schemas/clock/fixed-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for simple fixed-rate clock sources +title: Simple fixed-rate clock sources maintainers: - Michael Turquette diff --git a/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml b/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml index 0b02378a3a0c8882c79a58786d716f568ea0b586..8f71ab3004706a4ea60d18795a68509d8709c13d 100644 --- a/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml +++ b/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/fixed-factor-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for simple fixed factor rate clock sources +title: Simple fixed factor rate clock sources maintainers: - Michael Turquette diff --git a/Documentation/devicetree/bindings/clock/fixed-mmio-clock.yaml b/Documentation/devicetree/bindings/clock/fixed-mmio-clock.yaml index 1453ac849a6593ea257fa1268ccab172c7abc446..e22fc272d0231454c6201cfd2b9bbea730712beb 100644 --- a/Documentation/devicetree/bindings/clock/fixed-mmio-clock.yaml +++ b/Documentation/devicetree/bindings/clock/fixed-mmio-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/fixed-mmio-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for simple memory mapped IO fixed-rate clock sources +title: Simple memory mapped IO fixed-rate clock sources description: This binding describes a fixed-rate clock for which the frequency can diff --git a/Documentation/devicetree/bindings/clock/fsl,plldig.yaml b/Documentation/devicetree/bindings/clock/fsl,plldig.yaml index 9ac716dfa602f0b3155db43d43c646e47fa85c7b..88dd9c18db922a677a6c2f8a506bfaf8c4849806 100644 --- a/Documentation/devicetree/bindings/clock/fsl,plldig.yaml +++ b/Documentation/devicetree/bindings/clock/fsl,plldig.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/fsl,plldig.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NXP QorIQ Layerscape LS1028A Display PIXEL Clock Binding +title: NXP QorIQ Layerscape LS1028A Display PIXEL Clock maintainers: - Wen He diff --git a/Documentation/devicetree/bindings/clock/fsl,sai-clock.yaml b/Documentation/devicetree/bindings/clock/fsl,sai-clock.yaml index fc3bdfdc091abeb99521f24073d0feb9a51201d5..3bca9d11c148fa05862612a560bf4fd4dfadf1f3 100644 --- a/Documentation/devicetree/bindings/clock/fsl,sai-clock.yaml +++ b/Documentation/devicetree/bindings/clock/fsl,sai-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/fsl,sai-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Freescale SAI bitclock-as-a-clock binding +title: Freescale SAI bitclock-as-a-clock maintainers: - Michael Walle diff --git a/Documentation/devicetree/bindings/clock/fsl,scu-clk.yaml b/Documentation/devicetree/bindings/clock/fsl,scu-clk.yaml index f2c48460a39978cbde3c7e3eccf7b411c26f176c..36d4cfc3c2f8da237d6be702f197c8b97a4f0ea9 100644 --- a/Documentation/devicetree/bindings/clock/fsl,scu-clk.yaml +++ b/Documentation/devicetree/bindings/clock/fsl,scu-clk.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/fsl,scu-clk.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: i.MX SCU Client Device Node - Clock bindings based on SCU Message Protocol +title: i.MX SCU Client Device Node - Clock Controller Based on SCU Message Protocol maintainers: - Abel Vesa diff --git 
a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml index f9ba9864d8b5282bab591e0c5b62b62cf6360804..61b246cf5e72aa4719db682ef47c44adfa13b6bd 100644 --- a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml +++ b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/idt,versaclock5.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for IDT VersaClock 5 and 6 programmable I2C clock generators +title: IDT VersaClock 5 and 6 programmable I2C clock generators description: | The IDT VersaClock 5 and VersaClock 6 are programmable I2C diff --git a/Documentation/devicetree/bindings/clock/imx1-clock.yaml b/Documentation/devicetree/bindings/clock/imx1-clock.yaml index 56f524780b1a656f3720a2924e0040e2a95830a1..7ade4c32aff3776dd9959e719da96e8dc9ac73d7 100644 --- a/Documentation/devicetree/bindings/clock/imx1-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx1-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx1-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX1 CPUs +title: Freescale i.MX1 CPUs Clock Controller maintainers: - Alexander Shiyan diff --git a/Documentation/devicetree/bindings/clock/imx21-clock.yaml b/Documentation/devicetree/bindings/clock/imx21-clock.yaml index e2d50544700a2079878c8a380f2eb580d9874739..79cc843703ec1b0036b4f38bbdf529c6b7b5a724 100644 --- a/Documentation/devicetree/bindings/clock/imx21-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx21-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx21-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX21 +title: Freescale i.MX21 Clock Controller maintainers: - Alexander Shiyan diff --git a/Documentation/devicetree/bindings/clock/imx23-clock.yaml b/Documentation/devicetree/bindings/clock/imx23-clock.yaml index 7e890ab9c77d220dd60daa6347431dda92270c2e..5e71c9219500a0df776db54329d5682fc203fa9a 100644 --- a/Documentation/devicetree/bindings/clock/imx23-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx23-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx23-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX23 +title: Freescale i.MX23 Clock Controller maintainers: - Shawn Guo diff --git a/Documentation/devicetree/bindings/clock/imx25-clock.yaml b/Documentation/devicetree/bindings/clock/imx25-clock.yaml index 1792e138984b9f79e03350e68541f6756b444250..c626a158590e21e9cd886453bd97567d378315d2 100644 --- a/Documentation/devicetree/bindings/clock/imx25-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx25-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx25-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX25 +title: Freescale i.MX25 Clock Controller maintainers: - Sascha Hauer diff --git a/Documentation/devicetree/bindings/clock/imx27-clock.yaml b/Documentation/devicetree/bindings/clock/imx27-clock.yaml index 99925aa22a4c8e6d97194ad36529e344ea91aac6..71d78a0b551fc95446d7a739b1e6a9fd0191486e 100644 --- a/Documentation/devicetree/bindings/clock/imx27-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx27-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx27-clock.yaml# $schema: 
http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX27 +title: Freescale i.MX27 Clock Controller maintainers: - Fabio Estevam diff --git a/Documentation/devicetree/bindings/clock/imx28-clock.yaml b/Documentation/devicetree/bindings/clock/imx28-clock.yaml index a542d680b1ca8e0cf140ec08ecc4e9360fd3cd8c..4aaad7b9c66e99adb1f83e78aca910a8b2f64041 100644 --- a/Documentation/devicetree/bindings/clock/imx28-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx28-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx28-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX28 +title: Freescale i.MX28 Clock Controller maintainers: - Shawn Guo diff --git a/Documentation/devicetree/bindings/clock/imx31-clock.yaml b/Documentation/devicetree/bindings/clock/imx31-clock.yaml index 168c8ada5e81c7266e9bbc666eabec4b7d6ba83f..50a8498eef8a29d268a8f7f4bd43fde0785468d6 100644 --- a/Documentation/devicetree/bindings/clock/imx31-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx31-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx31-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX31 +title: Freescale i.MX31 Clock Controller maintainers: - Fabio Estevam diff --git a/Documentation/devicetree/bindings/clock/imx35-clock.yaml b/Documentation/devicetree/bindings/clock/imx35-clock.yaml index 6415bb6a8d041b7c1831f531311c1c7256aaaad6..c063369de3ec778d6f9493571f038908b512e639 100644 --- a/Documentation/devicetree/bindings/clock/imx35-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx35-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx35-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX35 +title: Freescale i.MX35 Clock Controller maintainers: - Steffen Trumtrar diff --git a/Documentation/devicetree/bindings/clock/imx5-clock.yaml b/Documentation/devicetree/bindings/clock/imx5-clock.yaml index c0e19ff92c7673a8e35906c53edd29a6ad1e1a10..423c0142c1d33e481da192314f255e88b8bc30ef 100644 --- a/Documentation/devicetree/bindings/clock/imx5-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx5-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx5-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX5 +title: Freescale i.MX5 Clock Controller maintainers: - Fabio Estevam diff --git a/Documentation/devicetree/bindings/clock/imx6q-clock.yaml b/Documentation/devicetree/bindings/clock/imx6q-clock.yaml index 4f4637eddb8b7445b7b09b02349724104658dcc8..bae4fcb3aacc67f6e58597a54c0dbfbd540c05f8 100644 --- a/Documentation/devicetree/bindings/clock/imx6q-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx6q-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx6q-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX6 Quad +title: Freescale i.MX6 Quad Clock Controller maintainers: - Anson Huang diff --git a/Documentation/devicetree/bindings/clock/imx6sl-clock.yaml b/Documentation/devicetree/bindings/clock/imx6sl-clock.yaml index b83c8f43d664c52f09d988ff0c66bd103e0dd419..c85ff6ea3d245c0256c6e930c386dd6ef58f3a3a 100644 --- a/Documentation/devicetree/bindings/clock/imx6sl-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx6sl-clock.yaml @@ -4,7 +4,7 @@ $id: 
http://devicetree.org/schemas/clock/imx6sl-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX6 SoloLite +title: Freescale i.MX6 SoloLite Clock Controller maintainers: - Anson Huang diff --git a/Documentation/devicetree/bindings/clock/imx6sll-clock.yaml b/Documentation/devicetree/bindings/clock/imx6sll-clock.yaml index 484894a4b23f4feb4bae4e6d77b08eebb6fa5743..6b549ed1493c3caab9157f541ba0f95fd12612a8 100644 --- a/Documentation/devicetree/bindings/clock/imx6sll-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx6sll-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx6sll-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX6 SLL +title: Freescale i.MX6 SLL Clock Controller maintainers: - Anson Huang diff --git a/Documentation/devicetree/bindings/clock/imx6sx-clock.yaml b/Documentation/devicetree/bindings/clock/imx6sx-clock.yaml index e6c795657c24d2ddaa6ef93e09528bb7e22fe6b2..55dcad18b7c6d25d52aede343b3c0d8c6b773673 100644 --- a/Documentation/devicetree/bindings/clock/imx6sx-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx6sx-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx6sx-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX6 SoloX +title: Freescale i.MX6 SoloX Clock Controller maintainers: - Anson Huang diff --git a/Documentation/devicetree/bindings/clock/imx6ul-clock.yaml b/Documentation/devicetree/bindings/clock/imx6ul-clock.yaml index 6a51a3f51cd98a2ad60cbc4f32c95bd3c49e8557..be54d4df5afa285503c4596cf6e3a998aeb81bab 100644 --- a/Documentation/devicetree/bindings/clock/imx6ul-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx6ul-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx6ul-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX6 UltraLite +title: Freescale i.MX6 UltraLite Clock Controller maintainers: - Anson Huang diff --git a/Documentation/devicetree/bindings/clock/imx7d-clock.yaml b/Documentation/devicetree/bindings/clock/imx7d-clock.yaml index cefb61db01a82f201023e3724415514eaedfe538..e7d8427e49579c3dc7b62680a36c60cc59285ad0 100644 --- a/Documentation/devicetree/bindings/clock/imx7d-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx7d-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx7d-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX7 Dual +title: Freescale i.MX7 Dual Clock Controller maintainers: - Frank Li diff --git a/Documentation/devicetree/bindings/clock/imx7ulp-pcc-clock.yaml b/Documentation/devicetree/bindings/clock/imx7ulp-pcc-clock.yaml index 739c3378f8c8fffc85d42e4c85a6d03838beaf3e..76842038f52eac77dabf45780ee1a0e7ed66cab2 100644 --- a/Documentation/devicetree/bindings/clock/imx7ulp-pcc-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx7ulp-pcc-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx7ulp-pcc-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX7ULP Peripheral Clock Control (PCC) modules +title: Freescale i.MX7ULP Peripheral Clock Control (PCC) modules Clock Controller maintainers: - A.s. 
Dong diff --git a/Documentation/devicetree/bindings/clock/imx7ulp-scg-clock.yaml b/Documentation/devicetree/bindings/clock/imx7ulp-scg-clock.yaml index d06344d7e34fe4adc59fafc767a94d6d05aba8a8..5e25bc6d1372e68e89db9c12904d125fb32bdc68 100644 --- a/Documentation/devicetree/bindings/clock/imx7ulp-scg-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx7ulp-scg-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx7ulp-scg-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MX7ULP System Clock Generation (SCG) modules +title: Freescale i.MX7ULP System Clock Generation (SCG) modules Clock Controller maintainers: - A.s. Dong diff --git a/Documentation/devicetree/bindings/clock/imx8m-clock.yaml b/Documentation/devicetree/bindings/clock/imx8m-clock.yaml index 458c7645ee6838fc879f68f41332bd38ff0569b4..e4c4cadec501ada6f90df6b26f23dccb1f884fe8 100644 --- a/Documentation/devicetree/bindings/clock/imx8m-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx8m-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx8m-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NXP i.MX8M Family Clock Control Module Binding +title: NXP i.MX8M Family Clock Control Module maintainers: - Anson Huang diff --git a/Documentation/devicetree/bindings/clock/imx8qxp-lpcg.yaml b/Documentation/devicetree/bindings/clock/imx8qxp-lpcg.yaml index cb80105b3c7081e90d45a14d9854f2c51448449b..b207f95361b205474a9bb9400410b70eda014a72 100644 --- a/Documentation/devicetree/bindings/clock/imx8qxp-lpcg.yaml +++ b/Documentation/devicetree/bindings/clock/imx8qxp-lpcg.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx8qxp-lpcg.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NXP i.MX8QXP LPCG (Low-Power Clock Gating) Clock bindings +title: NXP i.MX8QXP LPCG (Low-Power Clock Gating) Clock maintainers: - Aisheng Dong diff --git a/Documentation/devicetree/bindings/clock/imx8ulp-cgc-clock.yaml b/Documentation/devicetree/bindings/clock/imx8ulp-cgc-clock.yaml index 71f7186b135ba2fe840980a9b6468f219c5c6343..68a60cdc19af39670f20cefbc8fd3bb05a643bb3 100644 --- a/Documentation/devicetree/bindings/clock/imx8ulp-cgc-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx8ulp-cgc-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx8ulp-cgc-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NXP i.MX8ULP Clock Generation & Control(CGC) Module Binding +title: NXP i.MX8ULP Clock Generation & Control(CGC) Module maintainers: - Jacky Bai diff --git a/Documentation/devicetree/bindings/clock/imx8ulp-pcc-clock.yaml b/Documentation/devicetree/bindings/clock/imx8ulp-pcc-clock.yaml index 00612725bf8b196e69438f58e05ddfcd3b3caef7..d0b0792fe7bab0f5cfae931810fbeb8775fd870e 100644 --- a/Documentation/devicetree/bindings/clock/imx8ulp-pcc-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx8ulp-pcc-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx8ulp-pcc-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NXP i.MX8ULP Peripheral Clock Controller(PCC) Module Binding +title: NXP i.MX8ULP Peripheral Clock Controller(PCC) Module maintainers: - Jacky Bai diff --git a/Documentation/devicetree/bindings/clock/imx93-clock.yaml b/Documentation/devicetree/bindings/clock/imx93-clock.yaml index 21a06194e4a3a480855f097cd6016bf56641416f..ccb53c6b96c11965350a8f489d6be715cd3e5497 100644 --- 
a/Documentation/devicetree/bindings/clock/imx93-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx93-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imx93-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NXP i.MX93 Clock Control Module Binding +title: NXP i.MX93 Clock Control Module maintainers: - Peng Fan diff --git a/Documentation/devicetree/bindings/clock/imxrt1050-clock.yaml b/Documentation/devicetree/bindings/clock/imxrt1050-clock.yaml index 03fc5c1a293999dd0aaffdceae65341baa72bb0e..777af4aad4b26426ec6295754b111403d0746509 100644 --- a/Documentation/devicetree/bindings/clock/imxrt1050-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imxrt1050-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/imxrt1050-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for Freescale i.MXRT +title: Freescale i.MXRT Clock Controller maintainers: - Giulio Benetti diff --git a/Documentation/devicetree/bindings/clock/ingenic,cgu.yaml b/Documentation/devicetree/bindings/clock/ingenic,cgu.yaml index df256ebcd36636f059a532a1d9caf70d2357ee5f..9e733b10c392f4998311bebc59a7c8b405e73e2b 100644 --- a/Documentation/devicetree/bindings/clock/ingenic,cgu.yaml +++ b/Documentation/devicetree/bindings/clock/ingenic,cgu.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/ingenic,cgu.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs CGU devicetree bindings +title: Ingenic SoCs CGU description: | The CGU in an Ingenic SoC provides all the clocks generated on-chip. It diff --git a/Documentation/devicetree/bindings/clock/intel,agilex.yaml b/Documentation/devicetree/bindings/clock/intel,agilex.yaml index cf5a9eb803e6261965cfae7d816b936aac6fd8be..3745ba8dbd7630f3977e98a9c256add1166d1bfb 100644 --- a/Documentation/devicetree/bindings/clock/intel,agilex.yaml +++ b/Documentation/devicetree/bindings/clock/intel,agilex.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/intel,agilex.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Intel SoCFPGA Agilex platform clock controller binding +title: Intel SoCFPGA Agilex platform clock controller maintainers: - Dinh Nguyen diff --git a/Documentation/devicetree/bindings/clock/intel,cgu-lgm.yaml b/Documentation/devicetree/bindings/clock/intel,cgu-lgm.yaml index f3e1a700a2ca548899d117d2b51f539220ce75b7..76609a390429a436b8fbea026407899e24d0d6ea 100644 --- a/Documentation/devicetree/bindings/clock/intel,cgu-lgm.yaml +++ b/Documentation/devicetree/bindings/clock/intel,cgu-lgm.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/intel,cgu-lgm.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Intel Lightning Mountain SoC's Clock Controller(CGU) Binding +title: Intel Lightning Mountain SoC's Clock Controller(CGU) maintainers: - Rahul Tanwar diff --git a/Documentation/devicetree/bindings/clock/intel,easic-n5x.yaml b/Documentation/devicetree/bindings/clock/intel,easic-n5x.yaml index 8f45976e946ee3451eca7e76c17421c24359c96a..e000116a51a44419f51b556bab09f814e78c4ea7 100644 --- a/Documentation/devicetree/bindings/clock/intel,easic-n5x.yaml +++ b/Documentation/devicetree/bindings/clock/intel,easic-n5x.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/intel,easic-n5x.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Intel SoCFPGA eASIC N5X platform clock controller binding +title: Intel SoCFPGA eASIC N5X platform clock controller maintainers: - Dinh Nguyen 
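Editorial note: most hunks in this run are mechanical title cleanups, but the Ingenic CGU description retained above ("provides all the clocks generated on-chip") describes how the hardware is actually used, so a consumer sketch may help. Everything below is illustrative only: the unit addresses, clock indices, and the consumer compatible are invented for this note and are not taken from any schema in this series.

    /* Hypothetical DTS sketch: a peripheral consuming two CGU outputs. */
    cgu: clock-controller@10000000 {
        compatible = "ingenic,jz4770-cgu";
        reg = <0x10000000 0x100>;
        #clock-cells = <1>;                 /* consumers pass a single index */
    };

    uart0: serial@10030000 {
        compatible = "ingenic,jz4770-uart"; /* illustrative consumer */
        reg = <0x10030000 0x100>;
        clocks = <&cgu 23>, <&cgu 24>;      /* module and baud clocks; indices invented */
        clock-names = "module", "baud";
    };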
diff --git a/Documentation/devicetree/bindings/clock/intel,stratix10.yaml b/Documentation/devicetree/bindings/clock/intel,stratix10.yaml index f506e3db9782f21cab5c56bd601806971ec5e659..b4a8be2134001b40f7e247850b28227ab2cba03b 100644 --- a/Documentation/devicetree/bindings/clock/intel,stratix10.yaml +++ b/Documentation/devicetree/bindings/clock/intel,stratix10.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/intel,stratix10.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Intel SoCFPGA Stratix10 platform clock controller binding +title: Intel SoCFPGA Stratix10 platform clock controller maintainers: - Dinh Nguyen diff --git a/Documentation/devicetree/bindings/clock/microchip,mpfs-clkcfg.yaml b/Documentation/devicetree/bindings/clock/microchip,mpfs-clkcfg.yaml index b2ce78722247cd4b6a8fef78ac2abd6dc1bc8233..e4e1c31267d2a18c30239715a98ff13e112fc9a7 100644 --- a/Documentation/devicetree/bindings/clock/microchip,mpfs-clkcfg.yaml +++ b/Documentation/devicetree/bindings/clock/microchip,mpfs-clkcfg.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/microchip,mpfs-clkcfg.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Microchip PolarFire Clock Control Module Binding +title: Microchip PolarFire Clock Control Module maintainers: - Daire McNamara diff --git a/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml b/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml index 6d39344d2b705a76eaab1753f9f345feb3130a28..0af1c569eb32888adb36292a756e618d1e2e619d 100644 --- a/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml +++ b/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/milbeaut-clock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Milbeaut SoCs Clock Controller Binding +title: Milbeaut SoCs Clock Controller maintainers: - Taichi Sugaya diff --git a/Documentation/devicetree/bindings/clock/nuvoton,npcm845-clk.yaml b/Documentation/devicetree/bindings/clock/nuvoton,npcm845-clk.yaml index 771db2ddf02692a4211696e72142b782b3d072a5..b901ca13cd25daee03a839fed5e6e091562436b3 100644 --- a/Documentation/devicetree/bindings/clock/nuvoton,npcm845-clk.yaml +++ b/Documentation/devicetree/bindings/clock/nuvoton,npcm845-clk.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/nuvoton,npcm845-clk.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Nuvoton NPCM8XX Clock Controller Binding +title: Nuvoton NPCM8XX Clock Controller maintainers: - Tomer Maimon diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc-sc8280xp.yaml b/Documentation/devicetree/bindings/clock/qcom,dispcc-sc8280xp.yaml index 28c13237059fa1aa74a5f8937c65e276c1257c19..3cb996b2c9d5958246e9e1ceda3b9ced8af7560b 100644 --- a/Documentation/devicetree/bindings/clock/qcom,dispcc-sc8280xp.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,dispcc-sc8280xp.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/qcom,dispcc-sc8280xp.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Display Clock & Reset Controller Binding for SC8280XP +title: Qualcomm Display Clock & Reset Controller on SC8280XP maintainers: - Bjorn Andersson diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc.yaml index 1ab416c83c8d8ef10ab4839073e770baaa73801f..7129fbcf2b6c553c3ea8942f1ee72b935baf061b 100644 --- a/Documentation/devicetree/bindings/clock/qcom,gcc.yaml +++ 
b/Documentation/devicetree/bindings/clock/qcom,gcc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/qcom,gcc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Global Clock & Reset Controller Common Bindings +title: Qualcomm Global Clock & Reset Controller Common Properties maintainers: - Stephen Boyd diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml index fccb91e78e499a67ebeb796f1113208b046ae307..cf25ba0419e214fc3fbe4d8e18ce645acc3231e7 100644 --- a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/qcom,rpmhcc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Technologies, Inc. RPMh Clocks Bindings +title: Qualcomm Technologies, Inc. RPMh Clocks maintainers: - Taniya Das diff --git a/Documentation/devicetree/bindings/clock/renesas,9series.yaml b/Documentation/devicetree/bindings/clock/renesas,9series.yaml index 102eb95cb3fcd24351c7ff790b3f455d0c5b0f63..6b6cec3fba528f943c069722488ca40e7e3bbd68 100644 --- a/Documentation/devicetree/bindings/clock/renesas,9series.yaml +++ b/Documentation/devicetree/bindings/clock/renesas,9series.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/renesas,9series.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for Renesas 9-series I2C PCIe clock generators +title: Renesas 9-series I2C PCIe clock generators description: | The Renesas 9-series are I2C PCIe clock generators providing diff --git a/Documentation/devicetree/bindings/clock/renesas,versaclock7.yaml b/Documentation/devicetree/bindings/clock/renesas,versaclock7.yaml index 8d4eb4475fc8b27ee0059e5fd41b3fedd7c2771b..b339f1f9f072cee4353b2733131584fdf6059d1a 100644 --- a/Documentation/devicetree/bindings/clock/renesas,versaclock7.yaml +++ b/Documentation/devicetree/bindings/clock/renesas,versaclock7.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/renesas,versaclock7.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Renesas Versaclock7 Programmable Clock Device Tree Bindings +title: Renesas Versaclock7 Programmable Clock maintainers: - Alex Helms diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3568-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3568-cru.yaml index fc7546f521c55f2189690b19b187c276c90ac3cc..f809c289445e6d5540707fdf42c4b2fdbdea1720 100644 --- a/Documentation/devicetree/bindings/clock/rockchip,rk3568-cru.yaml +++ b/Documentation/devicetree/bindings/clock/rockchip,rk3568-cru.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/rockchip,rk3568-cru.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ROCKCHIP rk3568 Family Clock Control Module Binding +title: ROCKCHIP rk3568 Family Clock Control Module maintainers: - Elaine Zhang diff --git a/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml index 242fe922b035ee202af13c8aa1e9f5d9395b9225..5194be0b410e42be7b7e35fab6cca257d5e16f9f 100644 --- a/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml +++ b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/st,stm32mp1-rcc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Reset Clock Controller Binding +title: STMicroelectronics STM32MP1 Reset 
Clock Controller maintainers: - Gabriel Fernandez diff --git a/Documentation/devicetree/bindings/clock/ti,lmk04832.yaml b/Documentation/devicetree/bindings/clock/ti,lmk04832.yaml index bd8173848253fdb926b06614b3abb0e75325f228..73d17830f165e5a17f0234894b189da2f824b141 100644 --- a/Documentation/devicetree/bindings/clock/ti,lmk04832.yaml +++ b/Documentation/devicetree/bindings/clock/ti,lmk04832.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/ti,lmk04832.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Clock bindings for the Texas Instruments LMK04832 +title: Texas Instruments LMK04832 Clock Controller maintainers: - Liam Beguin diff --git a/Documentation/devicetree/bindings/clock/ti,sci-clk.yaml b/Documentation/devicetree/bindings/clock/ti,sci-clk.yaml index 0e370289a0530b23a93e582e1c077035151039f8..63d9763416965e373fce93cacdb31b5412c3e41f 100644 --- a/Documentation/devicetree/bindings/clock/ti,sci-clk.yaml +++ b/Documentation/devicetree/bindings/clock/ti,sci-clk.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/ti,sci-clk.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: TI-SCI clock controller node bindings +title: TI-SCI clock controller maintainers: - Nishanth Menon diff --git a/Documentation/devicetree/bindings/clock/ti/ti,clksel.yaml b/Documentation/devicetree/bindings/clock/ti/ti,clksel.yaml index c56f911fff4754673d7c234cf43c637d10344b6f..d525f96cf244b613627c492f441e9abf0f3ca554 100644 --- a/Documentation/devicetree/bindings/clock/ti/ti,clksel.yaml +++ b/Documentation/devicetree/bindings/clock/ti/ti,clksel.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/clock/ti/ti,clksel.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for TI clksel clock +title: TI clksel clock maintainers: - Tony Lindgren diff --git a/Documentation/devicetree/bindings/cpu/idle-states.yaml b/Documentation/devicetree/bindings/cpu/idle-states.yaml index fa4d4142ac93737bf1e9bb12206d533f8d0a74c1..b8cc826c95013dcecd5eb63e1f1636e2ddd314c2 100644 --- a/Documentation/devicetree/bindings/cpu/idle-states.yaml +++ b/Documentation/devicetree/bindings/cpu/idle-states.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/cpu/idle-states.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Idle states binding description +title: Idle states maintainers: - Lorenzo Pieralisi diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek-hw.yaml b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek-hw.yaml index 9cd42a64b13efece241da4d8d38eb1b283de02d7..d0aecde2b89b1896c01ea8ae24f26032d8075a11 100644 --- a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek-hw.yaml +++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek-hw.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/cpufreq/cpufreq-mediatek-hw.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: MediaTek's CPUFREQ Bindings +title: MediaTek's CPUFREQ maintainers: - Hector Yuan diff --git a/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml b/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml index 3c00ad09eeaab6fc5c79f160cfe01910e97de34a..9c086eac6ca71ebcf0e7128e2a51bc2ff943a355 100644 --- a/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml +++ b/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/cpufreq/qcom-cpufreq-nvmem.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm 
Technologies, Inc. NVMEM CPUFreq bindings +title: Qualcomm Technologies, Inc. NVMEM CPUFreq maintainers: - Ilia Lin diff --git a/Documentation/devicetree/bindings/crypto/rockchip,rk3288-crypto.yaml b/Documentation/devicetree/bindings/crypto/rockchip,rk3288-crypto.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f1a9da8bff7a7506c97b26fb902e47efb8f487cf --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/rockchip,rk3288-crypto.yaml @@ -0,0 +1,127 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/crypto/rockchip,rk3288-crypto.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Rockchip Electronics Security Accelerator + +maintainers: + - Heiko Stuebner + +properties: + compatible: + enum: + - rockchip,rk3288-crypto + - rockchip,rk3328-crypto + - rockchip,rk3399-crypto + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + minItems: 3 + maxItems: 4 + + clock-names: + minItems: 3 + maxItems: 4 + + resets: + minItems: 1 + maxItems: 3 + + reset-names: + minItems: 1 + maxItems: 3 + +allOf: + - if: + properties: + compatible: + contains: + const: rockchip,rk3288-crypto + then: + properties: + clocks: + minItems: 4 + clock-names: + items: + - const: aclk + - const: hclk + - const: sclk + - const: apb_pclk + resets: + maxItems: 1 + reset-names: + items: + - const: crypto-rst + - if: + properties: + compatible: + contains: + const: rockchip,rk3328-crypto + then: + properties: + clocks: + maxItems: 3 + clock-names: + items: + - const: hclk_master + - const: hclk_slave + - const: sclk + resets: + maxItems: 1 + reset-names: + items: + - const: crypto-rst + - if: + properties: + compatible: + contains: + const: rockchip,rk3399-crypto + then: + properties: + clocks: + maxItems: 3 + clock-names: + items: + - const: hclk_master + - const: hclk_slave + - const: sclk + resets: + minItems: 3 + reset-names: + items: + - const: master + - const: slave + - const: crypto-rst + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - resets + - reset-names + +additionalProperties: false + +examples: + - | + #include + #include + crypto@ff8a0000 { + compatible = "rockchip,rk3288-crypto"; + reg = <0xff8a0000 0x4000>; + interrupts = ; + clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>, + <&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>; + clock-names = "aclk", "hclk", "sclk", "apb_pclk"; + resets = <&cru SRST_CRYPTO>; + reset-names = "crypto-rst"; + }; diff --git a/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt b/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt deleted file mode 100644 index 5e2ba385b8c9b9a6118af61c36c2a4e0b8463287..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt +++ /dev/null @@ -1,28 +0,0 @@ -Rockchip Electronics And Security Accelerator - -Required properties: -- compatible: Should be "rockchip,rk3288-crypto" -- reg: Base physical address of the engine and length of memory mapped - region -- interrupts: Interrupt number -- clocks: Reference to the clocks about crypto -- clock-names: "aclk" used to clock data - "hclk" used to clock data - "sclk" used to clock crypto accelerator - "apb_pclk" used to clock dma -- resets: Must contain an entry for each entry in reset-names. - See ../reset/reset.txt for details. -- reset-names: Must include the name "crypto-rst". 
- -Examples: - - crypto: cypto-controller@ff8a0000 { - compatible = "rockchip,rk3288-crypto"; - reg = <0xff8a0000 0x4000>; - interrupts = ; - clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>, - <&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>; - clock-names = "aclk", "hclk", "sclk", "apb_pclk"; - resets = <&cru SRST_CRYPTO>; - reset-names = "crypto-rst"; - }; diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-crc.yaml b/Documentation/devicetree/bindings/crypto/st,stm32-crc.yaml index b72e4858f9aabe2b3b82c1e436552efb168f9bee..50b2c2e0c3cd839bf633dfef190af0fdabc22a91 100644 --- a/Documentation/devicetree/bindings/crypto/st,stm32-crc.yaml +++ b/Documentation/devicetree/bindings/crypto/st,stm32-crc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/crypto/st,stm32-crc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 CRC bindings +title: STMicroelectronics STM32 CRC maintainers: - Lionel Debieve diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-cryp.yaml b/Documentation/devicetree/bindings/crypto/st,stm32-cryp.yaml index ed23bf94a8e00a5e11ca763ee35a779e29698928..0ddeb8a9a7a018bda6769f03e05f6373a348e8ae 100644 --- a/Documentation/devicetree/bindings/crypto/st,stm32-cryp.yaml +++ b/Documentation/devicetree/bindings/crypto/st,stm32-cryp.yaml @@ -4,7 +4,11 @@ $id: http://devicetree.org/schemas/crypto/st,stm32-cryp.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 CRYP bindings +title: STMicroelectronics STM32 CRYP + +description: The STM32 CRYP block is built on the CRYP block found in + the STn8820 SoC introduced in 2007, and subsequently used in the U8500 + SoC in 2010. maintainers: - Lionel Debieve @@ -12,6 +16,8 @@ maintainers: properties: compatible: enum: + - st,stn8820-cryp + - stericsson,ux500-cryp - st,stm32f756-cryp - st,stm32mp1-cryp @@ -27,6 +33,19 @@ properties: resets: maxItems: 1 + dmas: + items: + - description: mem2cryp DMA channel + - description: cryp2mem DMA channel + + dma-names: + items: + - const: mem2cryp + - const: cryp2mem + + power-domains: + maxItems: 1 + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml b/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml index 10ba94792d95123c3e5abcf2689f2c8129f630ce..4ccb335e8063c03aad062af159a60609a2413233 100644 --- a/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml +++ b/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/crypto/st,stm32-hash.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 HASH bindings +title: STMicroelectronics STM32 HASH maintainers: - Lionel Debieve diff --git a/Documentation/devicetree/bindings/display/arm,hdlcd.yaml b/Documentation/devicetree/bindings/display/arm,hdlcd.yaml index a2670258c48d3001b9a27fd20ebf4dc69d1bb51a..9a30e9005e8a22b238a1c50e7f44f2a8366d2a9d 100644 --- a/Documentation/devicetree/bindings/display/arm,hdlcd.yaml +++ b/Documentation/devicetree/bindings/display/arm,hdlcd.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/arm,hdlcd.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Arm HDLCD display controller binding +title: Arm HDLCD display controller maintainers: - Liviu Dudau diff --git a/Documentation/devicetree/bindings/display/arm,malidp.yaml b/Documentation/devicetree/bindings/display/arm,malidp.yaml index 
2a17ec6fc97c02a8e2a9fae82cbef3b9d6928f31..91812573fd0800fa401cb58657a67226c58398a0 100644 --- a/Documentation/devicetree/bindings/display/arm,malidp.yaml +++ b/Documentation/devicetree/bindings/display/arm,malidp.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/arm,malidp.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Arm Mali Display Processor (Mali-DP) binding +title: Arm Mali Display Processor (Mali-DP) maintainers: - Liviu Dudau diff --git a/Documentation/devicetree/bindings/display/bridge/anx6345.yaml b/Documentation/devicetree/bindings/display/bridge/anx6345.yaml index 1c0406c38fe549ac5fbc1d98950363262496dec3..9bf2cbcea69fd87b21a1a14fd29d1bf30fc8780c 100644 --- a/Documentation/devicetree/bindings/display/bridge/anx6345.yaml +++ b/Documentation/devicetree/bindings/display/bridge/anx6345.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/bridge/anx6345.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Analogix ANX6345 eDP Transmitter Device Tree Bindings +title: Analogix ANX6345 eDP Transmitter maintainers: - Torsten Duwe diff --git a/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml b/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml index bb6289c7d375e79928edcc1f8d649f9849754919..b0589fa16736aeaf6e871361868ec3f943ce4e06 100644 --- a/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml +++ b/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/display/bridge/chrontel,ch7033.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Chrontel CH7033 Video Encoder Device Tree Bindings +title: Chrontel CH7033 Video Encoder maintainers: - Lubomir Rintel diff --git a/Documentation/devicetree/bindings/display/bridge/ingenic,jz4780-hdmi.yaml b/Documentation/devicetree/bindings/display/bridge/ingenic,jz4780-hdmi.yaml index 89490fdffeb078bd1298f4ff756be06dd8851c81..0b27df429bdce0b7e080ab291ee0c4c6b51f8e1f 100644 --- a/Documentation/devicetree/bindings/display/bridge/ingenic,jz4780-hdmi.yaml +++ b/Documentation/devicetree/bindings/display/bridge/ingenic,jz4780-hdmi.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/bridge/ingenic,jz4780-hdmi.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for Ingenic JZ4780 HDMI Transmitter +title: Ingenic JZ4780 HDMI Transmitter maintainers: - H. 
Nikolaus Schaller diff --git a/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml index dcb1336ee2a56e398ce1df321294693db3dc2a3f..958a073f4ff779a1752e6eda101c9abd43b9a1a6 100644 --- a/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml +++ b/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/bridge/intel,keembay-dsi.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Devicetree bindings for Intel Keem Bay mipi dsi controller +title: Intel Keem Bay mipi dsi controller maintainers: - Anitha Chrisanthus diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml index 833d11b2303a7d11a4f831522c16feb6991e7070..b697c42399ea3a1713ddea1a01868eb286276252 100644 --- a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml +++ b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/bridge/ite,it6505.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ITE it6505 Device Tree Bindings +title: ITE it6505 maintainers: - Allen Chen diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml index 1b2185be92cdd32ce67f447f021f55862006a552..d3454da1247a0fc75fde3edb6368bdfd3e2db4d9 100644 --- a/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml +++ b/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/bridge/ite,it66121.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ITE it66121 HDMI bridge Device Tree Bindings +title: ITE it66121 HDMI bridge maintainers: - Phong LE diff --git a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml index 8ab156e0a8cfd76cf66730e9a478f325d3790f35..28811aff2c5ad5bbf9acc48e06d49f50374d36f3 100644 --- a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml +++ b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/bridge/ps8640.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: MIPI DSI to eDP Video Format Converter Device Tree Bindings +title: MIPI DSI to eDP Video Format Converter maintainers: - Nicolas Boichat diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml index ed280053ec62bd91c76f7b85c763c918b267fd69..140927884418f24d50ad2f373325a044711a41d9 100644 --- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml +++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/bridge/toshiba,tc358767.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Toshiba TC358767 eDP bridge bindings +title: Toshiba TC358767 eDP bridge maintainers: - Andrey Gusakov diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml index 10471c6c1ff91b2681cec910da55ab1bdf2bdf9a..d879c700594a6e08cdc0fd720440ae5d45d21425 100644 --- 
a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml +++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/bridge/toshiba,tc358775.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Toshiba TC358775 DSI to LVDS bridge bindings +title: Toshiba TC358775 DSI to LVDS bridge maintainers: - Vinay Simha BN diff --git a/Documentation/devicetree/bindings/display/fsl,lcdif.yaml b/Documentation/devicetree/bindings/display/fsl,lcdif.yaml index f449cfc76789961b18ed71857c917b1affdeda10..75b4efd70ba85e4e0593eb6f616595810833ede3 100644 --- a/Documentation/devicetree/bindings/display/fsl,lcdif.yaml +++ b/Documentation/devicetree/bindings/display/fsl,lcdif.yaml @@ -99,7 +99,6 @@ allOf: maxItems: 3 required: - clock-names - - power-domains - if: not: properties: @@ -114,6 +113,19 @@ allOf: maxItems: 1 clock-names: maxItems: 1 + - if: + properties: + compatible: + contains: + enum: + - fsl,imx6sl-lcdif + - fsl,imx6sx-lcdif + - fsl,imx8mm-lcdif + - fsl,imx8mn-lcdif + - fsl,imx8mp-lcdif + then: + required: + - power-domains examples: - | @@ -128,6 +140,7 @@ examples: <&clks IMX6SX_CLK_LCDIF_APB>, <&clks IMX6SX_CLK_DISPLAY_AXI>; clock-names = "pix", "axi", "disp_axi"; + power-domains = <&pd_disp>; port { endpoint { diff --git a/Documentation/devicetree/bindings/display/ingenic,ipu.yaml b/Documentation/devicetree/bindings/display/ingenic,ipu.yaml index 3f93def2c5a20365697fd7d93fe56ceb4c140b50..319bd7c88fe3c472a4154432f146c563d256c85b 100644 --- a/Documentation/devicetree/bindings/display/ingenic,ipu.yaml +++ b/Documentation/devicetree/bindings/display/ingenic,ipu.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/ingenic,ipu.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs Image Processing Unit (IPU) devicetree bindings +title: Ingenic SoCs Image Processing Unit (IPU) maintainers: - Paul Cercueil diff --git a/Documentation/devicetree/bindings/display/ingenic,lcd.yaml b/Documentation/devicetree/bindings/display/ingenic,lcd.yaml index c0bb02fb49f4b23057d01c4af699772f1daff7c5..6d4c00f3fcc886b167756a5824c654cf1a17e210 100644 --- a/Documentation/devicetree/bindings/display/ingenic,lcd.yaml +++ b/Documentation/devicetree/bindings/display/ingenic,lcd.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/ingenic,lcd.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs LCD controller devicetree bindings +title: Ingenic SoCs LCD controller maintainers: - Paul Cercueil diff --git a/Documentation/devicetree/bindings/display/intel,keembay-display.yaml b/Documentation/devicetree/bindings/display/intel,keembay-display.yaml index bc6622b010cabf282b8c58ca140a3bd4945ea8aa..2cf54ecc707aaa0a2f16cff67cfb0cf19236ef06 100644 --- a/Documentation/devicetree/bindings/display/intel,keembay-display.yaml +++ b/Documentation/devicetree/bindings/display/intel,keembay-display.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/intel,keembay-display.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Devicetree bindings for Intel Keem Bay display controller +title: Intel Keem Bay display controller maintainers: - Anitha Chrisanthus diff --git a/Documentation/devicetree/bindings/display/intel,keembay-msscam.yaml b/Documentation/devicetree/bindings/display/intel,keembay-msscam.yaml index a222b52d8b8ff6b00edd707ef07e492f04954663..cc7e1f318fe43c869eea67472d6579a4c0cd44aa 100644 --- 
a/Documentation/devicetree/bindings/display/intel,keembay-msscam.yaml +++ b/Documentation/devicetree/bindings/display/intel,keembay-msscam.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/intel,keembay-msscam.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Devicetree bindings for Intel Keem Bay MSSCAM +title: Intel Keem Bay MSSCAM maintainers: - Anitha Chrisanthus diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,cec.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,cec.yaml index 66288b9f0aa6e25103c4ca4a7acd44cc722090d4..080cf321209ea6777237f298f3c487abee9f12c4 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,cec.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,cec.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/mediatek/mediatek,cec.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Mediatek HDMI CEC Controller Device Tree Bindings +title: Mediatek HDMI CEC Controller maintainers: - CK Hu diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.yaml index b18d6a57c6e1b09acbc527d602673a8613c3dcfc..4707b60238b07b2d910da1c3e43cfbc8aa9e0df9 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/mediatek/mediatek,dsi.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: MediaTek DSI Controller Device Tree Bindings +title: MediaTek DSI Controller maintainers: - Chun-Kuang Hu diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi-ddc.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi-ddc.yaml index b6fcdfb99ab2c03010ce1f14b5b911606e26bb86..bd8f7b8ae0ff253a36a573677dde3bd0d3294273 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi-ddc.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi-ddc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/mediatek/mediatek,hdmi-ddc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Mediatek HDMI DDC Device Tree Bindings +title: Mediatek HDMI DDC maintainers: - CK Hu diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.yaml index bdaf0b51e68cdc06887696dd99a4d237037f101b..8afdd67d678096d0bf7fd9583beb061c99541fa4 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/mediatek/mediatek,hdmi.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Mediatek HDMI Encoder Device Tree Bindings +title: Mediatek HDMI Encoder maintainers: - CK Hu diff --git a/Documentation/devicetree/bindings/display/msm/gmu.yaml b/Documentation/devicetree/bindings/display/msm/gmu.yaml index 67fdeeabae0cc398e1a9e66898df8f6b550290a6..ab14e81cb05069a0f6aff9db03602533f06e2d02 100644 --- a/Documentation/devicetree/bindings/display/msm/gmu.yaml +++ b/Documentation/devicetree/bindings/display/msm/gmu.yaml @@ -6,7 +6,7 @@ $id: "http://devicetree.org/schemas/display/msm/gmu.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Devicetree bindings for the GMU attached to 
certain Adreno GPUs +title: GMU attached to certain Adreno GPUs maintainers: - Rob Clark diff --git a/Documentation/devicetree/bindings/display/msm/gpu.yaml b/Documentation/devicetree/bindings/display/msm/gpu.yaml index ec4b1a75f46ac4843db94d965830ec8614b8d230..c5f49842dc7b566b886d293386768350a8001911 100644 --- a/Documentation/devicetree/bindings/display/msm/gpu.yaml +++ b/Documentation/devicetree/bindings/display/msm/gpu.yaml @@ -5,7 +5,7 @@ $id: "http://devicetree.org/schemas/display/msm/gpu.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Devicetree bindings for the Adreno or Snapdragon GPUs +title: Adreno or Snapdragon GPUs maintainers: - Rob Clark diff --git a/Documentation/devicetree/bindings/display/panel/display-timings.yaml b/Documentation/devicetree/bindings/display/panel/display-timings.yaml index 6d30575819d3b25c43c19f043882ba1ad2ce1f31..dc5f7e36e30bc82b339430fcacdc8cccae3e756b 100644 --- a/Documentation/devicetree/bindings/display/panel/display-timings.yaml +++ b/Documentation/devicetree/bindings/display/panel/display-timings.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/panel/display-timings.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: display timings bindings +title: display timings maintainers: - Thierry Reding diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml index a4154b51043e1b8b3c66f88a2c1ea8bbcdf21ff8..90e323e19edb7f051983b10484c6addd647a1dc8 100644 --- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml +++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/panel/ilitek,ili9163.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ilitek ILI9163 display panels device tree bindings +title: Ilitek ILI9163 display panels maintainers: - Daniel Mack diff --git a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml index 2329d9610f83210e256e20a78d43bca87f60798e..9f97598efdfab5f0a44ef12e5a0d9a30b4470559 100644 --- a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml +++ b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/panel/olimex,lcd-olinuxino.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for Olimex Ltd. LCD-OLinuXino bridge panel. +title: Olimex Ltd. LCD-OLinuXino bridge panel. 
maintainers: - Stefan Mavrodiev diff --git a/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml b/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml index fcc50db6a812c8e431cea5a54125bf08aaac2371..c77ee034310a33ee4460d5a55423efc93f786263 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/panel/panel-lvds.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Generic LVDS Display Panel Device Tree Bindings +title: Generic LVDS Display Panel maintainers: - Lad Prabhakar diff --git a/Documentation/devicetree/bindings/display/panel/panel-timing.yaml b/Documentation/devicetree/bindings/display/panel/panel-timing.yaml index 229e3b36ee29aa4bca1ad8990e3743e1a9b74521..0d317e61edd8f486e557d2d9bd55fcbad670771e 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-timing.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-timing.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/panel/panel-timing.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: panel timing bindings +title: panel timing maintainers: - Thierry Reding diff --git a/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml b/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml index 076b057b4af54a33ec57d264071a69ebb18c21ed..481ef051df1e294756a56cb5fcc6cbedae1a6c7f 100644 --- a/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml +++ b/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/display/panel/visionox,rm69299.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Visionox model RM69299 Panels Device Tree Bindings. 
+title: Visionox model RM69299 Panels maintainers: - Harigovindan P diff --git a/Documentation/devicetree/bindings/dma/dma-common.yaml b/Documentation/devicetree/bindings/dma/dma-common.yaml index ad06d36af208e9c32175d889f6e4787692294e6c..ea700f8ee6c6aa8ce3a59dba470a863057af378a 100644 --- a/Documentation/devicetree/bindings/dma/dma-common.yaml +++ b/Documentation/devicetree/bindings/dma/dma-common.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/dma/dma-common.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: DMA Engine Generic Binding +title: DMA Engine Common Properties maintainers: - Vinod Koul diff --git a/Documentation/devicetree/bindings/dma/dma-controller.yaml b/Documentation/devicetree/bindings/dma/dma-controller.yaml index 6d3727267fa8013f495007041966bd4a3074b2de..538ebadff652730d7a800ede2c483fdb599e11aa 100644 --- a/Documentation/devicetree/bindings/dma/dma-controller.yaml +++ b/Documentation/devicetree/bindings/dma/dma-controller.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/dma/dma-controller.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: DMA Controller Generic Binding +title: DMA Controller Common Properties maintainers: - Vinod Koul diff --git a/Documentation/devicetree/bindings/dma/dma-router.yaml b/Documentation/devicetree/bindings/dma/dma-router.yaml index 4b817f5dc30e7c0227a4716103953772566e6353..f8d8c3c88bcc9d42483f19e8d4b15551bc90067d 100644 --- a/Documentation/devicetree/bindings/dma/dma-router.yaml +++ b/Documentation/devicetree/bindings/dma/dma-router.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/dma/dma-router.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: DMA Router Generic Binding +title: DMA Router Common Properties maintainers: - Vinod Koul diff --git a/Documentation/devicetree/bindings/dma/ingenic,dma.yaml b/Documentation/devicetree/bindings/dma/ingenic,dma.yaml index 3b0b3b919af8c2f2b42e5ca0774891baba42cce2..fd5b0a8eaed80a7f87775cdf4fc6a5805e2bf92c 100644 --- a/Documentation/devicetree/bindings/dma/ingenic,dma.yaml +++ b/Documentation/devicetree/bindings/dma/ingenic,dma.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/dma/ingenic,dma.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs DMA Controller DT bindings +title: Ingenic SoCs DMA Controller maintainers: - Paul Cercueil @@ -18,6 +18,7 @@ properties: - enum: - ingenic,jz4740-dma - ingenic,jz4725b-dma + - ingenic,jz4755-dma - ingenic,jz4760-dma - ingenic,jz4760-bdma - ingenic,jz4760-mdma diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml b/Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml index c8894476b6abf95a6183c1fb0dfbc78fb80ba6cb..851bd50ee67fe0020f4ac34356e47cce7cce8463 100644 --- a/Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml +++ b/Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml @@ -39,7 +39,7 @@ properties: Should contain all of the per-channel DMA interrupts in ascending order with respect to the DMA channel index. minItems: 1 - maxItems: 31 + maxItems: 32 resets: maxItems: 1 @@ -52,6 +52,9 @@ properties: dma-coherent: true + dma-channel-mask: + maxItems: 1 + required: - compatible - reg @@ -60,6 +63,7 @@ required: - reset-names - "#dma-cells" - iommus + - dma-channel-mask additionalProperties: false @@ -108,5 +112,6 @@ examples: #dma-cells = <1>; iommus = <&smmu TEGRA186_SID_GPCDMA_0>; dma-coherent; + dma-channel-mask = <0xfffffffe>; }; ... 
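Editorial note on the tegra186-gpc-dma hunk above: interrupts may now cover all 32 channels, and dma-channel-mask becomes required. Per the common dma-controller schema (retitled to "Common Properties" in this same series), the mask is a bitmap of channels the OS may allocate, so the example's <0xfffffffe> withholds channel 0, presumably because it is reserved (for example, for firmware); treat that rationale as an assumption. A client then requests channels through the usual dmas/dma-names pairing. The sketch below is hypothetical, with an invented node name, address, compatible, and request number.

    /* Hypothetical GPCDMA client using the generic dmas/dma-names properties. */
    spi@3210000 {
        compatible = "vendor,example-spi";  /* placeholder compatible */
        reg = <0x3210000 0x1000>;
        dmas = <&gpcdma 15>, <&gpcdma 15>;  /* request line 15, invented */
        dma-names = "rx", "tx";
    };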
diff --git a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml index eabf8a76d3a05a6a88a6fecefdae5c4b00ada7a4..e7ba1c47a88eacce37ee4e3e42f6a5de955d8569 100644 --- a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml +++ b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml @@ -18,14 +18,24 @@ allOf: properties: compatible: - enum: - - qcom,sc7280-gpi-dma - - qcom,sdm845-gpi-dma - - qcom,sm6350-gpi-dma - - qcom,sm8150-gpi-dma - - qcom,sm8250-gpi-dma - - qcom,sm8350-gpi-dma - - qcom,sm8450-gpi-dma + oneOf: + - enum: + - qcom,sdm845-gpi-dma + - qcom,sm6350-gpi-dma + - items: + - enum: + - qcom,sc7280-gpi-dma + - qcom,sm6115-gpi-dma + - qcom,sm6375-gpi-dma + - qcom,sm8350-gpi-dma + - qcom,sm8450-gpi-dma + - const: qcom,sm6350-gpi-dma + - items: + - enum: + - qcom,sdm670-gpi-dma + - qcom,sm8150-gpi-dma + - qcom,sm8250-gpi-dma + - const: qcom,sdm845-gpi-dma reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml b/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml index 55faab6a468ebf3105d9efd49e3b167aac782deb..158c791d7caa6fb20fd68c28fbb206a795a53650 100644 --- a/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml +++ b/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/dma/st,stm32-dma.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 DMA Controller bindings +title: STMicroelectronics STM32 DMA Controller description: | The STM32 DMA is a general-purpose direct memory access controller capable of diff --git a/Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml b/Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml index 1e1d8549b7ef407626c760f87c00409d4c3a0fd9..3e0b82d277cac255b7738199c400f092e5eacb5c 100644 --- a/Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml +++ b/Documentation/devicetree/bindings/dma/st,stm32-dmamux.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/dma/st,stm32-dmamux.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 DMA MUX (DMA request router) bindings +title: STMicroelectronics STM32 DMA MUX (DMA request router) maintainers: - Amelie Delaunay diff --git a/Documentation/devicetree/bindings/dma/st,stm32-mdma.yaml b/Documentation/devicetree/bindings/dma/st,stm32-mdma.yaml index 00cfa391365210780d8ac8925538485211603ea9..08a59bd69a2f48ae33fee4cfec4a924677500fb4 100644 --- a/Documentation/devicetree/bindings/dma/st,stm32-mdma.yaml +++ b/Documentation/devicetree/bindings/dma/st,stm32-mdma.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/dma/st,stm32-mdma.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 MDMA Controller bindings +title: STMicroelectronics STM32 MDMA Controller description: | The STM32 MDMA is a general-purpose direct memory access controller capable of diff --git a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml index 08627d91e607de649034d8028547b892ecac9e0f..a702d2c2ff8d22e38df3693ec17e4e2699632e50 100644 --- a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml +++ b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml @@ -6,7 +6,7 @@ $id: http://devicetree.org/schemas/dma/ti/k3-bcdma.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Texas Instruments K3 DMSS BCDMA Device Tree Bindings +title: Texas Instruments K3 DMSS BCDMA maintainers: - Peter Ujfalusi 
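Editorial note on the qcom,gpi hunk above: it replaces a flat compatible enum with oneOf groups, so newer SoCs declare a SoC-specific string plus a baseline fallback that existing drivers already match (qcom,sm6350-gpi-dma or qcom,sdm845-gpi-dma). In a DTS that simply means two strings in the compatible property. A minimal sketch follows, with placeholder reg and interrupts values (the real ones are board-specific) and the #dma-cells value assumed from the existing schema.

    /* Hypothetical node: SM8450 GPI DMA falling back to the SM6350 match. */
    dma-controller@800000 {
        compatible = "qcom,sm8450-gpi-dma", "qcom,sm6350-gpi-dma";
        reg = <0x800000 0x60000>;   /* placeholder */
        interrupts = <0 588 4>;     /* placeholder */
        #dma-cells = <3>;           /* unchanged by this hunk; assumed from qcom,gpi.yaml */
    };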
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml index 507d16d84adeb49687c2cf0c32ccb768225a00ba..a69f62f854d8c3e8d4084c4aa2c6e0f6cccd5819 100644 --- a/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml +++ b/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml @@ -6,7 +6,7 @@ $id: http://devicetree.org/schemas/dma/ti/k3-pktdma.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Texas Instruments K3 DMSS PKTDMA Device Tree Bindings +title: Texas Instruments K3 DMSS PKTDMA maintainers: - Peter Ujfalusi diff --git a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml index 6a09bbf83d4629215b62fba92c4eeaa2520b1e9f..7ff428ad3aaed0279a27edd86a1395a213d7e886 100644 --- a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml +++ b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml @@ -6,7 +6,7 @@ $id: http://devicetree.org/schemas/dma/ti/k3-udma.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Texas Instruments K3 NAVSS Unified DMA Device Tree Bindings +title: Texas Instruments K3 NAVSS Unified DMA maintainers: - Peter Ujfalusi diff --git a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml index 2a595b18ff6c72158c7c9a6804c1e897a5dc613b..825294e3f0e8348d8687f5288fa987ef63862399 100644 --- a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml +++ b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/dma/xilinx/xlnx,zynqmp-dpdma.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Xilinx ZynqMP DisplayPort DMA Controller Device Tree Bindings +title: Xilinx ZynqMP DisplayPort DMA Controller description: | These bindings describe the DMA engine included in the Xilinx ZynqMP diff --git a/Documentation/devicetree/bindings/edac/dmc-520.yaml b/Documentation/devicetree/bindings/edac/dmc-520.yaml index 3b6842e92d1b9e01df6233098ea75b06c8cf2725..84db3966662ac63a48c43bedb5cb3e38a3e6fc81 100644 --- a/Documentation/devicetree/bindings/edac/dmc-520.yaml +++ b/Documentation/devicetree/bindings/edac/dmc-520.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/edac/dmc-520.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ARM DMC-520 EDAC bindings +title: ARM DMC-520 EDAC maintainers: - Lei Wang diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml index d14e0accbda81d4b18cc172839ac993dddbe2e04..84af0d5f52aaff3e35b2eb418c2a7d097899e456 100644 --- a/Documentation/devicetree/bindings/eeprom/at24.yaml +++ b/Documentation/devicetree/bindings/eeprom/at24.yaml @@ -10,6 +10,9 @@ title: I2C EEPROMs compatible with Atmel's AT24 maintainers: - Bartosz Golaszewski +allOf: + - $ref: /schemas/nvmem/nvmem.yaml + select: properties: compatible: @@ -183,7 +186,7 @@ required: - compatible - reg -additionalProperties: false +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/eeprom/at25.yaml b/Documentation/devicetree/bindings/eeprom/at25.yaml index 8b1c997caac12d82bb8b13cdc3c6eeb201811004..0f5a8ef996d33b3caaf4eb4af1d2cafe4c342c67 100644 --- a/Documentation/devicetree/bindings/eeprom/at25.yaml +++ b/Documentation/devicetree/bindings/eeprom/at25.yaml @@ -104,6 +104,7 @@ required: allOf: - $ref: /schemas/spi/spi-peripheral-props.yaml# + - $ref: 
/schemas/nvmem/nvmem.yaml - if: properties: compatible: diff --git a/Documentation/devicetree/bindings/eeprom/microchip,93lc46b.yaml b/Documentation/devicetree/bindings/eeprom/microchip,93lc46b.yaml index 0c2f5ddb79c5af83aa91f7746b414c6326404ccc..144e86ce5c0a3274ffa496059819e192e08b55eb 100644 --- a/Documentation/devicetree/bindings/eeprom/microchip,93lc46b.yaml +++ b/Documentation/devicetree/bindings/eeprom/microchip,93lc46b.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/eeprom/microchip,93lc46b.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Microchip 93xx46 SPI compatible EEPROM family dt bindings +title: Microchip 93xx46 SPI compatible EEPROM family maintainers: - Cory Tusar @@ -47,6 +47,7 @@ required: allOf: - $ref: /schemas/spi/spi-peripheral-props.yaml# + - $ref: /schemas/nvmem/nvmem.yaml unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/example-schema.yaml b/Documentation/devicetree/bindings/example-schema.yaml index 8e1a8b19d42942ad1e30c7757be9aaf8ba1db85e..dfcf4c27d44a3e997fbecdb97a52289146f1039f 100644 --- a/Documentation/devicetree/bindings/example-schema.yaml +++ b/Documentation/devicetree/bindings/example-schema.yaml @@ -11,7 +11,7 @@ $id: http://devicetree.org/schemas/example-schema.yaml# # $schema is the meta-schema this schema should be validated with. $schema: http://devicetree.org/meta-schemas/core.yaml# -title: An example schema annotated with jsonschema details +title: An Example Device maintainers: - Rob Herring diff --git a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml index 1c0388da6721ad24b741b25debaa67eafaddcc6c..176796931a22fb2f326a8d1f8e05353f1c18bbfd 100644 --- a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml +++ b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/firmware/arm,scmi.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: System Control and Management Interface (SCMI) Message Protocol bindings +title: System Control and Management Interface (SCMI) Message Protocol maintainers: - Sudeep Holla diff --git a/Documentation/devicetree/bindings/firmware/arm,scpi.yaml b/Documentation/devicetree/bindings/firmware/arm,scpi.yaml index 1f9322925e7c9f2d018c8d9ad520602eab35633d..241317239ffcf45a8a5fa5fdf3db54ce0a271a1e 100644 --- a/Documentation/devicetree/bindings/firmware/arm,scpi.yaml +++ b/Documentation/devicetree/bindings/firmware/arm,scpi.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/firmware/arm,scpi.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: System Control and Power Interface (SCPI) Message Protocol bindings +title: System Control and Power Interface (SCPI) Message Protocol maintainers: - Sudeep Holla diff --git a/Documentation/devicetree/bindings/firmware/qemu,fw-cfg-mmio.yaml b/Documentation/devicetree/bindings/firmware/qemu,fw-cfg-mmio.yaml index fcf0011b8e6db9cf9d4d5cf3d6742cafdf49ff63..3faae32366656cb82aaad54405d0e7a329932ce5 100644 --- a/Documentation/devicetree/bindings/firmware/qemu,fw-cfg-mmio.yaml +++ b/Documentation/devicetree/bindings/firmware/qemu,fw-cfg-mmio.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/firmware/qemu,fw-cfg-mmio.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: QEMU Firmware Configuration bindings +title: QEMU Firmware Configuration maintainers: - Rob Herring diff --git a/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml 
b/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4fb05eb84e2af4488d06fc46345e24fb3f958af0 --- /dev/null +++ b/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml @@ -0,0 +1,81 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/fpga/lattice,sysconfig.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Lattice Slave SPI sysCONFIG FPGA manager + +maintainers: + - Ivan Bornyakov + +description: | + The Lattice sysCONFIG port, which is used for FPGA configuration, among others, + has a Slave Serial Peripheral Interface. Only full reconfiguration is + supported. + + Programming of the ECP5 is done by writing an uncompressed bitstream image in .bit + format into the FPGA's SRAM configuration memory. + +properties: + compatible: + enum: + - lattice,sysconfig-ecp5 + + reg: + maxItems: 1 + + program-gpios: + description: + A GPIO line connected to PROGRAMN (active low) pin of the device. + Initiates configuration sequence. + maxItems: 1 + + init-gpios: + description: + A GPIO line connected to INITN (active low) pin of the device. + Indicates that the FPGA is ready to be configured. + maxItems: 1 + + done-gpios: + description: + A GPIO line connected to DONE (active high) pin of the device. + Indicates that the configuration sequence is complete. + maxItems: 1 + +required: + - compatible + - reg + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml + + - if: + properties: + compatible: + contains: + const: lattice,sysconfig-ecp5 + then: + properties: + spi-max-frequency: + maximum: 60000000 + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + spi { + #address-cells = <1>; + #size-cells = <0>; + + fpga-mgr@0 { + compatible = "lattice,sysconfig-ecp5"; + reg = <0>; + spi-max-frequency = <20000000>; + program-gpios = <&gpio3 4 GPIO_ACTIVE_LOW>; + init-gpios = <&gpio3 3 GPIO_ACTIVE_LOW>; + done-gpios = <&gpio3 2 GPIO_ACTIVE_HIGH>; + }; + }; diff --git a/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml b/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml index f32e09ef937c023dcf19b13a73950bbbb9591e5b..10e56cf306dbab5829fa9941d9b257c5bcc18a78 100644 --- a/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml +++ b/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml @@ -35,7 +35,7 @@ properties: gpio-line-names: description: strings describing the names of each gpio line. minItems: 1 - maxItems: 100 + maxItems: 144 "#gpio-cells": const: 2 diff --git a/Documentation/devicetree/bindings/gpio/gpio-latch.yaml b/Documentation/devicetree/bindings/gpio/gpio-latch.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1ed82a2cebdaa446fc2466cd6362399845ce6ce9 --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/gpio-latch.yaml @@ -0,0 +1,94 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/gpio/gpio-latch.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: GPIO latch controller + +maintainers: + - Sascha Hauer + +description: | + This binding describes a GPIO multiplexer based on latches connected to + other GPIOs, like this: + + CLK0 ----------------------. ,--------. + CLK1 -------------------.
`--------|> #0 | + | | | + OUT0 ----------------+--|-----------|D0 Q0|-----|< + OUT1 --------------+-|--|-----------|D1 Q1|-----|< + OUT2 ------------+-|-|--|-----------|D2 Q2|-----|< + OUT3 ----------+-|-|-|--|-----------|D3 Q3|-----|< + OUT4 --------+-|-|-|-|--|-----------|D4 Q4|-----|< + OUT5 ------+-|-|-|-|-|--|-----------|D5 Q5|-----|< + OUT6 ----+-|-|-|-|-|-|--|-----------|D6 Q6|-----|< + OUT7 --+-|-|-|-|-|-|-|--|-----------|D7 Q7|-----|< + | | | | | | | | | `--------' + | | | | | | | | | + | | | | | | | | | ,--------. + | | | | | | | | `-----------|> #1 | + | | | | | | | | | | + | | | | | | | `--------------|D0 Q0|-----|< + | | | | | | `----------------|D1 Q1|-----|< + | | | | | `------------------|D2 Q2|-----|< + | | | | `--------------------|D3 Q3|-----|< + | | | `----------------------|D4 Q4|-----|< + | | `------------------------|D5 Q5|-----|< + | `--------------------------|D6 Q6|-----|< + `----------------------------|D7 Q7|-----|< + `--------' + + The number of clk-gpios and latched-gpios is not fixed. The actual number + of latches and the number of inputs per latch are derived from + the number of GPIOs given in the corresponding device tree properties. + +properties: + compatible: + const: gpio-latch + "#gpio-cells": + const: 2 + + clk-gpios: + description: Array of GPIOs to be used to clock a latch + + latched-gpios: + description: Array of GPIOs to be used as inputs per latch + + setup-duration-ns: + description: Delay in nanoseconds to wait after the latch inputs have been + set up + + clock-duration-ns: + description: Delay in nanoseconds to wait between clock output changes + + gpio-controller: true + + gpio-line-names: true + +required: + - compatible + - "#gpio-cells" + - gpio-controller + - clk-gpios + - latched-gpios + +additionalProperties: false + +examples: + - | + gpio-latch { + #gpio-cells = <2>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_di_do_leds>; + compatible = "gpio-latch"; + gpio-controller; + setup-duration-ns = <100>; + clock-duration-ns = <100>; + + clk-gpios = <&gpio3 7 0>, <&gpio3 8 0>; + latched-gpios = <&gpio3 21 0>, <&gpio3 22 0>, + <&gpio3 23 0>, <&gpio3 24 0>, + <&gpio3 25 0>, <&gpio3 26 0>, + <&gpio3 27 0>, <&gpio3 28 0>; + }; diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca9570.yaml b/Documentation/devicetree/bindings/gpio/gpio-pca9570.yaml index 1acaa0a3d35afe43a7dd294663ea698ccc8a5ae8..48bf414aa50ea1922a5f3b194807a80249c26cdc 100644 --- a/Documentation/devicetree/bindings/gpio/gpio-pca9570.yaml +++ b/Documentation/devicetree/bindings/gpio/gpio-pca9570.yaml @@ -12,6 +12,7 @@ maintainers: properties: compatible: enum: + - dlg,slg7xl45106 - nxp,pca9570 - nxp,pca9571 diff --git a/Documentation/devicetree/bindings/gpio/gpio-tpic2810.yaml b/Documentation/devicetree/bindings/gpio/gpio-tpic2810.yaml index cb8a5c376e1e28fbb15894a7fe0722b686071de7..157969bc4c46325d5518d16644a59788edc41c75 100644 --- a/Documentation/devicetree/bindings/gpio/gpio-tpic2810.yaml +++ b/Documentation/devicetree/bindings/gpio/gpio-tpic2810.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/gpio/gpio-tpic2810.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: TPIC2810 GPIO controller bindings +title: TPIC2810 GPIO controller maintainers: - Aswath Govindraju diff --git a/Documentation/devicetree/bindings/gpio/hisilicon,ascend910-gpio.yaml b/Documentation/devicetree/bindings/gpio/hisilicon,ascend910-gpio.yaml new file mode 100644 index 0000000000000000000000000000000000000000..735d97d645a09f460ade8f6cb272a959d3327ae5 ---
/dev/null +++ b/Documentation/devicetree/bindings/gpio/hisilicon,ascend910-gpio.yaml @@ -0,0 +1,56 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/gpio/hisilicon,ascend910-gpio.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: HiSilicon common GPIO controller + +maintainers: + - Jay Fang + +description: + The HiSilicon common GPIO controller can be used for many different + types of SoC such as Huawei Ascend AI series chips. + +properties: + compatible: + const: hisilicon,ascend910-gpio + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + gpio-controller: true + + "#gpio-cells": + const: 2 + + ngpios: + minimum: 1 + maximum: 32 + +required: + - compatible + - reg + - interrupts + - gpio-controller + - "#gpio-cells" + - ngpios + +additionalProperties: false + +examples: + - | + #include + + gpio@840d0000 { + compatible = "hisilicon,ascend910-gpio"; + reg = <0x840d0000 0x1000>; + ngpios = <32>; + gpio-controller; + #gpio-cells = <2>; + interrupts = ; + }; diff --git a/Documentation/devicetree/bindings/gpio/ti,omap-gpio.yaml b/Documentation/devicetree/bindings/gpio/ti,omap-gpio.yaml index 7087e4a5013f67b91f11ebb5f8ae04a4d4fa2a60..bd721c83905924613d96e03242261e4e21492f6c 100644 --- a/Documentation/devicetree/bindings/gpio/ti,omap-gpio.yaml +++ b/Documentation/devicetree/bindings/gpio/ti,omap-gpio.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/gpio/ti,omap-gpio.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: OMAP GPIO controller bindings +title: OMAP GPIO controller maintainers: - Grygorii Strashko diff --git a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml index 217c42874f41818bd304d0cbb69ccb5b7fb919a7..dae55b8a267b081291c6e3a93e1a700ace1cd8c2 100644 --- a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml +++ b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/gpu/brcm,bcm-v3d.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom V3D GPU Bindings +title: Broadcom V3D GPU maintainers: - Eric Anholt diff --git a/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra210-nvdec.yaml b/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra210-nvdec.yaml index 3cf862976448384b49fce8a060c1bb0deb10bd59..ed9554c837ef730e1c7516545d5e2ef2421735c0 100644 --- a/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra210-nvdec.yaml +++ b/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra210-nvdec.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/gpu/host1x/nvidia,tegra210-nvdec.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Device tree binding for NVIDIA Tegra NVDEC +title: NVIDIA Tegra NVDEC description: | NVDEC is the hardware video decoder present on NVIDIA Tegra210 diff --git a/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra210-nvenc.yaml b/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra210-nvenc.yaml index e63ae1a008183a4736b6dbf511fdb4c6d050c4f1..8199e5fa82117850f74232340906dd3e237d5a9e 100644 --- a/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra210-nvenc.yaml +++ b/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra210-nvenc.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/gpu/host1x/nvidia,tegra210-nvenc.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Device tree binding for NVIDIA Tegra NVENC +title: NVIDIA Tegra 
NVENC description: | NVENC is the hardware video encoder present on NVIDIA Tegra210 diff --git a/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra210-nvjpg.yaml b/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra210-nvjpg.yaml index 8647404d67e4f7f3debfc51dc205da2a892b2c71..895fb346ac72cb225b87c20bb4b70164b9b7f928 100644 --- a/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra210-nvjpg.yaml +++ b/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra210-nvjpg.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/gpu/host1x/nvidia,tegra210-nvjpg.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Device tree binding for NVIDIA Tegra NVJPG +title: NVIDIA Tegra NVJPG description: | NVJPG is the hardware JPEG decoder and encoder present on NVIDIA Tegra210 diff --git a/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra234-nvdec.yaml b/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra234-nvdec.yaml index 7cc2dd525a96d4a18b0bf523e234797dee059646..4bdc19a2bccfbe11f6285d108f9188e3baf82ea9 100644 --- a/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra234-nvdec.yaml +++ b/Documentation/devicetree/bindings/gpu/host1x/nvidia,tegra234-nvdec.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/gpu/host1x/nvidia,tegra234-nvdec.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Device tree binding for NVIDIA Tegra234 NVDEC +title: NVIDIA Tegra234 NVDEC description: | NVDEC is the hardware video decoder present on NVIDIA Tegra210 diff --git a/Documentation/devicetree/bindings/gpu/vivante,gc.yaml b/Documentation/devicetree/bindings/gpu/vivante,gc.yaml index 93e7244cdc0eb5a0f64a3c434bf471a3a928cb45..b1b10ea70ad9ad4692266451f42bb12e9248f313 100644 --- a/Documentation/devicetree/bindings/gpu/vivante,gc.yaml +++ b/Documentation/devicetree/bindings/gpu/vivante,gc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/gpu/vivante,gc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Vivante GPU Bindings +title: Vivante GPU description: Vivante GPU core devices diff --git a/Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.yaml b/Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.yaml index b18c616035a8bc0b49a212b7b86e10395da97778..829d1fdf4c674e0321897c0224c8d11d48520f43 100644 --- a/Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.yaml +++ b/Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/hwlock/st,stm32-hwspinlock.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 Hardware Spinlock bindings +title: STMicroelectronics STM32 Hardware Spinlock maintainers: - Fabien Dessenne diff --git a/Documentation/devicetree/bindings/hwmon/moortec,mr75203.yaml b/Documentation/devicetree/bindings/hwmon/moortec,mr75203.yaml index d0d54974920875aadac57f133be8417aa498bef7..ae4f68d4e69667d876e1456ce7b12ff25b1e482b 100644 --- a/Documentation/devicetree/bindings/hwmon/moortec,mr75203.yaml +++ b/Documentation/devicetree/bindings/hwmon/moortec,mr75203.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/hwmon/moortec,mr75203.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Moortec Semiconductor MR75203 PVT Controller bindings +title: Moortec Semiconductor MR75203 PVT Controller maintainers: - Rahul Tanwar diff --git a/Documentation/devicetree/bindings/hwmon/ntc-thermistor.yaml b/Documentation/devicetree/bindings/hwmon/ntc-thermistor.yaml index 
3d3b139a91a2f95a422bb45c546a39cc5681da2c..6a1920712fb9f6fcb87307070c7ab33c2043102f 100644 --- a/Documentation/devicetree/bindings/hwmon/ntc-thermistor.yaml +++ b/Documentation/devicetree/bindings/hwmon/ntc-thermistor.yaml @@ -6,7 +6,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: NTC thermistor temperature sensors maintainers: - - Naveen Krishna Chatradhi - Linus Walleij description: | diff --git a/Documentation/devicetree/bindings/i2c/hisilicon,ascend910-i2c.yaml b/Documentation/devicetree/bindings/i2c/hisilicon,ascend910-i2c.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7d7a8de7bcd89027154b247ae1a2d0620ae2d37a --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/hisilicon,ascend910-i2c.yaml @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/i2c/hisilicon,ascend910-i2c.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: HiSilicon common I2C controller + +maintainers: + - Yicong Yang + +description: + The HiSilicon common I2C controller can be used for many different + types of SoC such as Huawei Ascend AI series chips. + +allOf: + - $ref: /schemas/i2c/i2c-controller.yaml# + +properties: + compatible: + const: hisilicon,ascend910-i2c + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + maxItems: 1 + + clock-frequency: + default: 400000 + + i2c-sda-falling-time-ns: + default: 343 + + i2c-scl-falling-time-ns: + default: 203 + + i2c-sda-hold-time-ns: + default: 830 + + i2c-scl-rising-time-ns: + default: 365 + + i2c-digital-filter-width-ns: + default: 0 + +required: + - compatible + - reg + - interrupts + +unevaluatedProperties: false + +examples: + - | + #include + + i2c@38b0000 { + compatible = "hisilicon,ascend910-i2c"; + reg = <0x38b0000 0x10000>; + interrupts = ; + i2c-sda-falling-time-ns = <56>; + i2c-scl-falling-time-ns = <56>; + i2c-sda-hold-time-ns = <56>; + i2c-scl-rising-time-ns = <56>; + i2c-digital-filter; + i2c-digital-filter-width-ns = <0x0>; + clocks = <&alg_clk>; + clock-frequency = <400000>; + }; diff --git a/Documentation/devicetree/bindings/i2c/i2c-gpio.yaml b/Documentation/devicetree/bindings/i2c/i2c-gpio.yaml index fd040284561f89b70e8c3037e85acc2453cb2c82..e0d76d5eb1031c4fb64d520dab3f7787870fe4b5 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-gpio.yaml +++ b/Documentation/devicetree/bindings/i2c/i2c-gpio.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/i2c/i2c-gpio.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for GPIO bitbanged I2C +title: GPIO bitbanged I2C maintainers: - Wolfram Sang diff --git a/Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml index 4e730fb7be5679127937f512d4b3005dfcbd7316..421563bf576cd5ca064a617f6d29c9c8508b1f45 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml +++ b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml @@ -23,6 +23,7 @@ properties: - const: mediatek,mt6577-i2c - const: mediatek,mt6589-i2c - const: mediatek,mt7622-i2c + - const: mediatek,mt7986-i2c - const: mediatek,mt8168-i2c - const: mediatek,mt8173-i2c - const: mediatek,mt8183-i2c diff --git a/Documentation/devicetree/bindings/i2c/i2c-pxa.yaml b/Documentation/devicetree/bindings/i2c/i2c-pxa.yaml index 015885dd02d32e8493419fa436380b41729695b7..31386a8d7684547261e95bb3d864e77869fd0b0e 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-pxa.yaml +++ b/Documentation/devicetree/bindings/i2c/i2c-pxa.yaml 
@@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/i2c/i2c-pxa.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Marvell MMP I2C controller bindings +title: Marvell MMP I2C controller maintainers: - Rob Herring diff --git a/Documentation/devicetree/bindings/i2c/ingenic,i2c.yaml b/Documentation/devicetree/bindings/i2c/ingenic,i2c.yaml index af6d64a6da6e6cb95f968ddba63b23fe658cce6c..b61fdc9548d871f0ec647922de972a7fed845e71 100644 --- a/Documentation/devicetree/bindings/i2c/ingenic,i2c.yaml +++ b/Documentation/devicetree/bindings/i2c/ingenic,i2c.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/i2c/ingenic,i2c.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs I2C controller devicetree bindings +title: Ingenic SoCs I2C controller maintainers: - Paul Cercueil diff --git a/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml b/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml index 93c164aa00dafc43b558c223bd06461b08c34cc9..984fc1ed3ec6ae2ef84a66331280908dc2a29034 100644 --- a/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml +++ b/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml @@ -19,6 +19,7 @@ properties: - const: allwinner,sun6i-a31-i2c - items: - enum: + - allwinner,suniv-f1c100s-i2c - allwinner,sun8i-a23-i2c - allwinner,sun8i-a83t-i2c - allwinner,sun8i-v536-i2c diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-geni-qcom.yaml b/Documentation/devicetree/bindings/i2c/qcom,i2c-geni-qcom.yaml index 0e7ed00562e21bcaa9409bc365957ec90ed60c9e..f5f7dc8f325cb28b8829ff76acdf13fa1264afd8 100644 --- a/Documentation/devicetree/bindings/i2c/qcom,i2c-geni-qcom.yaml +++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-geni-qcom.yaml @@ -10,18 +10,19 @@ maintainers: - Andy Gross - Bjorn Andersson -allOf: - - $ref: /schemas/i2c/i2c-controller.yaml# - properties: compatible: - const: qcom,geni-i2c + enum: + - qcom,geni-i2c + - qcom,geni-i2c-master-hub clocks: - maxItems: 1 + minItems: 1 + maxItems: 2 clock-names: - const: se + minItems: 1 + maxItems: 2 clock-frequency: default: 100000 @@ -35,13 +36,12 @@ properties: - const: rx interconnects: + minItems: 2 maxItems: 3 interconnect-names: - items: - - const: qup-core - - const: qup-config - - const: qup-memory + minItems: 2 + maxItems: 3 interrupts: maxItems: 1 @@ -71,6 +71,50 @@ required: - clock-names - reg +allOf: + - $ref: /schemas/i2c/i2c-controller.yaml# + - if: + properties: + compatible: + contains: + const: qcom,geni-i2c-master-hub + then: + properties: + clocks: + minItems: 2 + + clock-names: + items: + - const: se + - const: core + + dmas: false + dma-names: false + + interconnects: + maxItems: 2 + + interconnect-names: + items: + - const: qup-core + - const: qup-config + else: + properties: + clocks: + maxItems: 1 + + clock-names: + const: se + + interconnects: + minItems: 3 + + interconnect-names: + items: + - const: qup-core + - const: qup-config + - const: qup-memory + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml index d3c0d5c427acbcbf39fd6ad07fb525980dac3bd8..2291a7cd619be4eedef4be803663fadc60f946a8 100644 --- a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml +++ b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml @@ -19,7 +19,7 @@ properties: - enum: - renesas,riic-r7s72100 # RZ/A1H - renesas,riic-r7s9210 # RZ/A2M - - renesas,riic-r9a07g043 # RZ/G2UL + - renesas,riic-r9a07g043 # RZ/G2UL and 
RZ/Five - renesas,riic-r9a07g044 # RZ/G2{L,LC} - renesas,riic-r9a07g054 # RZ/V2L - const: renesas,riic-rz # RZ/A or RZ/G2L diff --git a/Documentation/devicetree/bindings/i2c/st,nomadik-i2c.yaml b/Documentation/devicetree/bindings/i2c/st,nomadik-i2c.yaml index 42c5974ec7b042be791110fc0e23b28f49e87cf5..16024415a4a74085c568c0565c7ac4ef7149a5c2 100644 --- a/Documentation/devicetree/bindings/i2c/st,nomadik-i2c.yaml +++ b/Documentation/devicetree/bindings/i2c/st,nomadik-i2c.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/i2c/st,nomadik-i2c.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ST Microelectronics Nomadik I2C Bindings +title: ST Microelectronics Nomadik I2C description: The Nomadik I2C host controller began its life in the ST Microelectronics STn8800 SoC, and was then inherited into STn8810 and diff --git a/Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml b/Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml index db0843be91c56cc6694eee83ff31725d5ea13998..781108ae1ce3b48f7a21ce4db4ac2cc5d4b06bcb 100644 --- a/Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml +++ b/Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/i2c/ti,omap4-i2c.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for I2C controllers on TI's OMAP and K3 SoCs +title: I2C controllers on TI's OMAP and K3 SoCs maintainers: - Vignesh Raghavendra diff --git a/Documentation/devicetree/bindings/i3c/i3c.yaml b/Documentation/devicetree/bindings/i3c/i3c.yaml index 1f82fc92379967c21815edd9e8c595062b74ff79..fdb4212149e79a86adfa22a366ea297126a3895c 100644 --- a/Documentation/devicetree/bindings/i3c/i3c.yaml +++ b/Documentation/devicetree/bindings/i3c/i3c.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/i3c/i3c.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: I3C bus binding +title: I3C bus maintainers: - Alexandre Belloni diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adxl355.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adxl355.yaml index 14b487088ab49e902dcb36b6902300eb906f2956..6b03c4efbb083057922be42a7ca2eb69b62729b1 100644 --- a/Documentation/devicetree/bindings/iio/accel/adi,adxl355.yaml +++ b/Documentation/devicetree/bindings/iio/accel/adi,adxl355.yaml @@ -4,20 +4,22 @@ $id: http://devicetree.org/schemas/iio/accel/adi,adxl355.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Analog Devices ADXL355 3-Axis, Low noise MEMS Accelerometer +title: Analog Devices ADXL355 and ADXL359 3-Axis, Low noise MEMS Accelerometers maintainers: - Puranjay Mohan description: | - Analog Devices ADXL355 3-Axis, Low noise MEMS Accelerometer that supports - both I2C & SPI interfaces + Analog Devices ADXL355 and ADXL359 3-Axis, Low noise MEMS Accelerometers that + support both I2C & SPI interfaces https://www.analog.com/en/products/adxl355.html + https://www.analog.com/en/products/adxl359.html properties: compatible: enum: - adi,adxl355 + - adi,adxl359 reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/iio/accel/kionix,kx022a.yaml b/Documentation/devicetree/bindings/iio/accel/kionix,kx022a.yaml new file mode 100644 index 0000000000000000000000000000000000000000..986df1a6ff0ab7efecef17123db44ead46bdb034 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/accel/kionix,kx022a.yaml @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/accel/kionix,kx022a.yaml# 
+$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ROHM/Kionix KX022A Accelerometer + +maintainers: + - Matti Vaittinen + +description: | + KX022A is a 3-axis accelerometer supporting +/- 2G, 4G, 8G and 16G ranges, + output data rates from 0.78Hz to 1600Hz, and hardware FIFO buffering. + KX022A can be accessed either via I2C or SPI. + +properties: + compatible: + const: kionix,kx022a + + reg: + maxItems: 1 + + interrupts: + minItems: 1 + maxItems: 2 + + interrupt-names: + minItems: 1 + items: + - enum: [INT1, INT2] + - const: INT2 + + vdd-supply: true + io-vdd-supply: true + + mount-matrix: + description: | + An optional 3x3 mounting rotation matrix. + +required: + - compatible + - reg + - interrupts + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/irq.h> + i2c { + #address-cells = <1>; + #size-cells = <0>; + accel@1f { + compatible = "kionix,kx022a"; + reg = <0x1f>; + + interrupt-parent = <&gpio1>; + interrupts = <29 IRQ_TYPE_LEVEL_LOW>; + interrupt-names = "INT1"; + + io-vdd-supply = <&iovdd>; + vdd-supply = <&vdd>; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/adc/adc.yaml b/Documentation/devicetree/bindings/iio/adc/adc.yaml index db348fcbb52cb0e5874802001af734f7d42392ee..261601729745693385dfebd2fbbab15049cc6cd8 100644 --- a/Documentation/devicetree/bindings/iio/adc/adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/adc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/iio/adc/adc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Generic IIO bindings for ADC channels +title: IIO Common Properties for ADC Channels maintainers: - Jonathan Cameron diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad4130.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad4130.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d00690a8d3fbd92ad1c46353b75a26fe88ef2402 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad4130.yaml @@ -0,0 +1,262 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +# Copyright 2022 Analog Devices Inc. +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/adc/adi,ad4130.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analog Devices AD4130 ADC device driver + +maintainers: + - Cosmin Tanislav + +description: | + Bindings for the Analog Devices AD4130 ADC. Datasheet can be found here: + https://www.analog.com/media/en/technical-documentation/data-sheets/AD4130-8.pdf + +properties: + compatible: + enum: + - adi,ad4130 + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + description: phandle to the master clock (mclk) + + clock-names: + items: + - const: mclk + + interrupts: + maxItems: 1 + + interrupt-names: + description: | + Specify which interrupt pin should be configured as Data Ready / FIFO + interrupt. + Default if not supplied is int. + enum: + - int + - clk + - p2 + - dout + + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + + '#clock-cells': + const: 0 + + clock-output-names: + maxItems: 1 + + refin1-supply: + description: refin1 supply. Can be used as reference for conversion. + + refin2-supply: + description: refin2 supply. Can be used as reference for conversion. + + avdd-supply: + description: AVDD voltage supply. Can be used as reference for conversion. + + iovdd-supply: + description: IOVDD voltage supply. Used for the chip interface. + + spi-max-frequency: + maximum: 5000000 + + adi,ext-clk-freq-hz: + description: Specify the frequency of the external clock.
+ enum: [76800, 153600] + default: 76800 + + adi,bipolar: + description: Specify if the device should be used in bipolar mode. + type: boolean + + adi,vbias-pins: + description: Analog inputs to apply a voltage bias of (AVDD − AVSS) / 2 to. + $ref: /schemas/types.yaml#/definitions/uint32-array + minItems: 1 + maxItems: 16 + items: + minimum: 0 + maximum: 15 + +required: + - compatible + - reg + - interrupts + +patternProperties: + "^channel@([0-9a-f])$": + type: object + $ref: adc.yaml + unevaluatedProperties: false + + properties: + reg: + description: The channel number. + minimum: 0 + maximum: 15 + + diff-channels: + description: | + Besides the analog inputs available, internal inputs can be used. + 16: Internal temperature sensor. + 17: AVSS + 18: Internal reference + 19: DGND + 20: (AVDD − AVSS)/6+ + 21: (AVDD − AVSS)/6- + 22: (IOVDD − DGND)/6+ + 23: (IOVDD − DGND)/6- + 24: (ALDO − AVSS)/6+ + 25: (ALDO − AVSS)/6- + 26: (DLDO − DGND)/6+ + 27: (DLDO − DGND)/6- + 28: V_MV_P + 29: V_MV_M + items: + minimum: 0 + maximum: 29 + + adi,reference-select: + description: | + Select the reference source to use when converting on the + specific channel. Valid values are: + 0: REFIN1(+)/REFIN1(−) + 1: REFIN2(+)/REFIN2(−) + 2: REFOUT/AVSS (Internal reference) + 3: AVDD/AVSS + If not specified, REFIN1 is used. + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [0, 1, 2, 3] + default: 0 + + adi,excitation-pin-0: + description: | + Analog input to apply excitation current to while the channel + is active. + $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 0 + maximum: 15 + default: 0 + + adi,excitation-pin-1: + description: | + Analog input to apply excitation current to while this channel + is active. + $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 0 + maximum: 15 + default: 0 + + adi,excitation-current-0-nanoamp: + description: | + Excitation current in nanoamps to be applied to pin specified in + adi,excitation-pin-0 while this channel is active. + enum: [0, 100, 10000, 20000, 50000, 100000, 150000, 200000] + default: 0 + + adi,excitation-current-1-nanoamp: + description: | + Excitation current in nanoamps to be applied to pin specified in + adi,excitation-pin-1 while this channel is active. + enum: [0, 100, 10000, 20000, 50000, 100000, 150000, 200000] + default: 0 + + adi,burnout-current-nanoamp: + description: | + Burnout current in nanoamps to be applied for this channel. + enum: [0, 500, 2000, 4000] + default: 0 + + adi,buffered-positive: + description: Enable buffered mode for positive input. + type: boolean + + adi,buffered-negative: + description: Enable buffered mode for negative input. 
+ type: boolean + + required: + - reg + - diff-channels + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false + +examples: + - | + #include + + spi { + #address-cells = <1>; + #size-cells = <0>; + + adc@0 { + compatible = "adi,ad4130"; + reg = <0>; + + #address-cells = <1>; + #size-cells = <0>; + + spi-max-frequency = <5000000>; + interrupts = <27 IRQ_TYPE_EDGE_FALLING>; + interrupt-parent = <&gpio>; + + channel@0 { + reg = <0>; + + adi,reference-select = <2>; + + /* AIN8, AIN9 */ + diff-channels = <8 9>; + }; + + channel@1 { + reg = <1>; + + adi,reference-select = <2>; + + /* AIN10, AIN11 */ + diff-channels = <10 11>; + }; + + channel@2 { + reg = <2>; + + adi,reference-select = <2>; + + /* Temperature Sensor, DGND */ + diff-channels = <16 19>; + }; + + channel@3 { + reg = <3>; + + adi,reference-select = <2>; + + /* Internal reference, DGND */ + diff-channels = <18 19>; + }; + + channel@4 { + reg = <4>; + + adi,reference-select = <2>; + + /* DGND, DGND */ + diff-channels = <19 19>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7923.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7923.yaml index 07f9d1c09c7da98cc3bf3f88b447c910c52cf85b..85148338c597c83cb2ccda4b6ddd2152c22c9b6c 100644 --- a/Documentation/devicetree/bindings/iio/adc/adi,ad7923.yaml +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7923.yaml @@ -11,7 +11,7 @@ maintainers: description: | Analog Devices AD7904, AD7914, AD7923, AD7924 4 Channel ADCs, and AD7908, - AD7918, AD7928 8 Channels ADCs. + AD7918, AD7927, AD7928 8 Channels ADCs. Specifications about the part can be found at: https://www.analog.com/media/en/technical-documentation/data-sheets/AD7923.pdf @@ -20,14 +20,22 @@ description: | properties: compatible: - enum: - - adi,ad7904 - - adi,ad7914 - - adi,ad7923 - - adi,ad7924 - - adi,ad7908 - - adi,ad7918 - - adi,ad7928 + oneOf: + - enum: + - adi,ad7904 + - adi,ad7908 + - adi,ad7914 + - adi,ad7918 + - adi,ad7923 + - adi,ad7928 + - const: adi,ad7924 + deprecated: true + - items: + - const: adi,ad7924 + - const: adi,ad7923 + - items: + - const: adi,ad7927 + - const: adi,ad7928 reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/iio/adc/adi,max11410.yaml b/Documentation/devicetree/bindings/iio/adc/adi,max11410.yaml new file mode 100644 index 0000000000000000000000000000000000000000..53f9feff137ba601cc5f3515e397ec17f2df1438 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/adi,max11410.yaml @@ -0,0 +1,177 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +# Copyright 2022 Analog Devices Inc. +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/adc/adi,max11410.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analog Devices MAX11410 ADC device driver + +maintainers: + - Ibrahim Tilki + +description: | + Bindings for the Analog Devices MAX11410 ADC device. Datasheet can be + found here: + https://datasheets.maximintegrated.com/en/ds/MAX11410.pdf + +properties: + compatible: + enum: + - adi,max11410 + + reg: + maxItems: 1 + + interrupts: + minItems: 1 + maxItems: 2 + + interrupt-names: + description: Name of the gpio pin of max11410 used for IRQ + minItems: 1 + items: + - enum: [gpio0, gpio1] + - const: gpio1 + + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + + avdd-supply: + description: Optional avdd supply. Used as reference when no explicit reference supplied. + + vref0p-supply: + description: vref0p supply can be used as reference for conversion. 
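With the adi,ad7923 compatible rework above, a bare "adi,ad7924" string is still accepted but deprecated; new devicetrees are expected to pair the more specific part number with its fallback. A minimal, hypothetical node sketch (the SPI parent and reg value are placeholders):

    adc@0 {
        compatible = "adi,ad7927", "adi,ad7928";
        reg = <0>;
    };

The "adi,ad7924"/"adi,ad7923" pair follows the same pattern.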
+ + vref1p-supply: + description: vref1p supply can be used as reference for conversion. + + vref2p-supply: + description: vref2p supply can be used as reference for conversion. + + vref0n-supply: + description: vref0n supply can be used as reference for conversion. + + vref1n-supply: + description: vref1n supply can be used as reference for conversion. + + vref2n-supply: + description: vref2n supply can be used as reference for conversion. + + spi-max-frequency: + maximum: 8000000 + +patternProperties: + "^channel(@[0-9])?$": + $ref: adc.yaml + type: object + description: Represents the external channels which are connected to the ADC. + + properties: + reg: + description: The channel number in single-ended mode. + minimum: 0 + maximum: 9 + + adi,reference: + description: | + Select the reference source to use when converting on + the specific channel. Valid values are: + 0: VREF0P/VREF0N + 1: VREF1P/VREF1N + 2: VREF2P/VREF2N + 3: AVDD/AGND + 4: VREF0P/AGND + 5: VREF1P/AGND + 6: VREF2P/AGND + If this field is left empty, AVDD/AGND is selected. + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [0, 1, 2, 3, 4, 5, 6] + default: 3 + + adi,input-mode: + description: | + Select signal path of input channels. Valid values are: + 0: Buffered, low-power, unity-gain path (default) + 1: Bypass path + 2: PGA path + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [0, 1, 2] + default: 0 + + diff-channels: true + + bipolar: true + + settling-time-us: true + + adi,buffered-vrefp: + description: Enable buffered mode for positive reference. + type: boolean + + adi,buffered-vrefn: + description: Enable buffered mode for negative reference. + type: boolean + + required: + - reg + + additionalProperties: false + +required: + - compatible + - reg + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/irq.h> + spi { + #address-cells = <1>; + #size-cells = <0>; + + adc@0 { + reg = <0>; + compatible = "adi,max11410"; + spi-max-frequency = <8000000>; + + interrupt-parent = <&gpio>; + interrupts = <25 IRQ_TYPE_EDGE_FALLING>; + interrupt-names = "gpio1"; + + avdd-supply = <&adc_avdd>; + + vref1p-supply = <&adc_vref1p>; + vref1n-supply = <&adc_vref1n>; + + #address-cells = <1>; + #size-cells = <0>; + + channel@0 { + reg = <0>; + }; + + channel@1 { + reg = <1>; + diff-channels = <2 3>; + adi,reference = <1>; + bipolar; + settling-time-us = <100000>; + }; + + channel@2 { + reg = <2>; + diff-channels = <7 9>; + adi,reference = <5>; + adi,input-mode = <2>; + settling-time-us = <50000>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/adc/allwinner,sun8i-a33-ths.yaml b/Documentation/devicetree/bindings/iio/adc/allwinner,sun8i-a33-ths.yaml index 15c514b83583b81cce8ac27bda28d5be6b65bab9..a73a355fc6652e6d5b79f44637928ccace7221c2 100644 --- a/Documentation/devicetree/bindings/iio/adc/allwinner,sun8i-a33-ths.yaml +++ b/Documentation/devicetree/bindings/iio/adc/allwinner,sun8i-a33-ths.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/iio/adc/allwinner,sun8i-a33-ths.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Allwinner A33 Thermal Sensor Device Tree Bindings +title: Allwinner A33 Thermal Sensor maintainers: - Chen-Yu Tsai diff --git a/Documentation/devicetree/bindings/iio/adc/ingenic,adc.yaml b/Documentation/devicetree/bindings/iio/adc/ingenic,adc.yaml index 698beb896f76a11e01b4988cf7a893e3baf345e2..517e8b1fcb739a0b74fc90eda5da5263e75a62df 100644 --- a/Documentation/devicetree/bindings/iio/adc/ingenic,adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/ingenic,adc.yaml @@ -5,7 +5,7 @@ $id: "http://devicetree.org/schemas/iio/adc/ingenic,adc.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Ingenic JZ47xx ADC controller IIO bindings +title: Ingenic JZ47xx ADC controller IIO maintainers: - Artur Rojek diff --git a/Documentation/devicetree/bindings/iio/adc/motorola,cpcap-adc.yaml b/Documentation/devicetree/bindings/iio/adc/motorola,cpcap-adc.yaml index a6cb857a232daf1e5fdca3f0e3e762534c8069ea..9ceb6f18c854f4d64d7cd90a411a7ca231b37537 100644 --- a/Documentation/devicetree/bindings/iio/adc/motorola,cpcap-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/motorola,cpcap-adc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/iio/adc/motorola,cpcap-adc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Motorola CPCAP PMIC ADC binding +title: Motorola CPCAP PMIC ADC maintainers: - Tony Lindgren diff --git a/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml b/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml index 9c59a20a60327e6e77e7ea26b07270097dc810bd..63369ba388e4738cd4934884a37fbc444749458b 100644 --- a/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/iio/adc/nxp,imx8qxp-adc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NXP IMX8QXP ADC bindings +title: NXP IMX8QXP ADC maintainers: - Cai Huoqing diff --git a/Documentation/devicetree/bindings/iio/adc/nxp,lpc1850-adc.yaml b/Documentation/devicetree/bindings/iio/adc/nxp,lpc1850-adc.yaml index 43abb300fa3d323ac26e33c1c25e011325cdb758..70b38038a0805f171073a276844b868a1b412315 100644 --- a/Documentation/devicetree/bindings/iio/adc/nxp,lpc1850-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/nxp,lpc1850-adc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/iio/adc/nxp,lpc1850-adc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NXP LPC1850 ADC bindings +title: NXP LPC1850 ADC maintainers: - Jonathan Cameron diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.yaml b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.yaml index 2a94db6888308379f0d0b61305a9e6a1ad210585..fa855baa368c38d0920d9143fd3e804437ff1a3d 100644 --- a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.yaml @@ -18,7 +18,10 @@ description: | properties: compatible: - const: qcom,spmi-iadc + items: + - enum: + - qcom,pm8941-iadc + - const: qcom,spmi-iadc reg: description: IADC base address and length in the SPMI PMIC register map @@ -50,7 +53,7 @@ examples: #address-cells = <1>; #size-cells = <0>; pmic_iadc: adc@3600 { - compatible = "qcom,spmi-iadc"; + compatible = "qcom,pm8941-iadc", "qcom,spmi-iadc"; reg = <0x3600>; interrupts = <0x0 0x36 0x0 IRQ_TYPE_EDGE_RISING>; qcom,external-resistor-micro-ohms = <10000>; diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml index e94beb2255ba1f5fb222be93d6b797bb96f0690b..bd6e0d6f6e0ce8a3114a4d75d363c691c1484a42 100644 --- a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml @@ -22,13 +22,11 @@ properties: - items: - const: qcom,pms405-adc - const: qcom,spmi-adc-rev2 - - - items: - - enum: - - qcom,spmi-vadc - - qcom,spmi-adc5 - 
- qcom,spmi-adc-rev2 - - qcom,spmi-adc7 + - enum: + - qcom,spmi-vadc + - qcom,spmi-adc5 + - qcom,spmi-adc-rev2 + - qcom,spmi-adc7 reg: description: VADC base address in the SPMI PMIC register map diff --git a/Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml b/Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml index 61c6157cf5a963103d42f2ac9bf832883e51488a..8b743742a5f9f1947d7913235a3550cdc444bf83 100644 --- a/Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml @@ -19,7 +19,7 @@ properties: compatible: items: - enum: - - renesas,r9a07g043-adc # RZ/G2UL + - renesas,r9a07g043-adc # RZ/G2UL and RZ/Five - renesas,r9a07g044-adc # RZ/G2L - renesas,r9a07g054-adc # RZ/V2L - const: renesas,rzg2l-adc diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml index e512a14e41b439951390840bbeaeb08089968685..da50b529c1576bfcf7f83cbc21048fa9498731da 100644 --- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml @@ -22,6 +22,7 @@ properties: - rockchip,rk3328-saradc - rockchip,rk3568-saradc - rockchip,rv1108-saradc + - rockchip,rv1126-saradc - const: rockchip,rk3399-saradc reg: diff --git a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml index 2287697f1f61cb37639de2482413ed290ecd4d3e..cab0d425eaa4269bb6aed1accf4d536c9dc6373e 100644 --- a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml +++ b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/iio/adc/sigma-delta-modulator.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Device-Tree bindings for sigma delta modulator +title: Sigma delta modulator maintainers: - Arnaud Pouliquen diff --git a/Documentation/devicetree/bindings/iio/adc/sprd,sc2720-adc.yaml b/Documentation/devicetree/bindings/iio/adc/sprd,sc2720-adc.yaml index 44aa28b59197af06b3f7f2788898e45b45db03a2..8181cf9a8e07d41d9ed28c39ac75794221656752 100644 --- a/Documentation/devicetree/bindings/iio/adc/sprd,sc2720-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/sprd,sc2720-adc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/iio/adc/sprd,sc2720-adc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Spreadtrum SC27XX series PMICs ADC binding +title: Spreadtrum SC27XX series PMICs ADC maintainers: - Baolin Wang diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml index fa8da42cb1e6baad8dbf2c9efd0843cb876f15de..1c340c95df160b2c6cebaea391cc45a1ff684d3f 100644 --- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/iio/adc/st,stm32-adc.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: STMicroelectronics STM32 ADC bindings +title: STMicroelectronics STM32 ADC description: | STM32 ADC is a successive approximation analog-to-digital converter. @@ -27,6 +27,7 @@ properties: - st,stm32f4-adc-core - st,stm32h7-adc-core - st,stm32mp1-adc-core + - st,stm32mp13-adc-core reg: maxItems: 1 @@ -37,6 +38,7 @@ properties: - stm32f4 and stm32h7 share a common ADC interrupt line. 
- stm32mp1 has two separate interrupt lines, one for each ADC within ADC block. + - stm32mp13 has an interrupt line per ADC block. minItems: 1 maxItems: 2 @@ -180,6 +182,33 @@ allOf: maximum: 36000000 default: 36000000 + - if: + properties: + compatible: + contains: + const: st,stm32mp13-adc-core + + then: + properties: + clocks: + minItems: 1 + maxItems: 2 + + clock-names: + items: + - const: bus + - const: adc + minItems: 1 + + interrupts: + items: + - description: ADC interrupt line + + st,max-clk-rate-hz: + minimum: 150000 + maximum: 75000000 + default: 75000000 + additionalProperties: false required: @@ -208,6 +237,7 @@ patternProperties: - st,stm32f4-adc - st,stm32h7-adc - st,stm32mp1-adc + - st,stm32mp13-adc reg: description: | @@ -229,7 +259,7 @@ patternProperties: interrupts: description: | IRQ Line for the ADC instance. Valid values are: - - 0 for adc@0 + - 0 for adc@0 (single adc for stm32mp13) - 1 for adc@100 - 2 for adc@200 (stm32f4 only) maxItems: 1 @@ -250,13 +280,14 @@ patternProperties: assigned-resolution-bits: description: | Resolution (bits) to use for conversions: - - can be 6, 8, 10 or 12 on stm32f4 + - can be 6, 8, 10 or 12 on stm32f4 and stm32mp13 - can be 8, 10, 12, 14 or 16 on stm32h7 and stm32mp1 st,adc-channels: description: | List of single-ended channels muxed for this ADC. It can have up to: - 16 channels, numbered from 0 to 15 (for in0..in15) on stm32f4 + - 19 channels, numbered from 0 to 18 (for in0..in18) on stm32mp13. - 20 channels, numbered from 0 to 19 (for in0..in19) on stm32h7 and stm32mp1. $ref: /schemas/types.yaml#/definitions/uint32-array @@ -322,7 +353,7 @@ patternProperties: label: description: | Unique name to identify which channel this is. - Reserved label names "vddcore", "vrefint" and "vbat" + Reserved label names "vddcore", "vddcpu", "vddq_ddr", "vrefint" and "vbat" are used to identify internal channels with matching names. 
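As an illustration of the channel label property just described, an internal channel can be tied to one of the reserved names in a child node like the following sketch (the channel index is a placeholder; the correct internal channel number is SoC-specific):

    channel@13 {
        reg = <13>; /* placeholder: internal channel index */
        label = "vrefint";
    };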
diff-channels: @@ -419,6 +450,37 @@ patternProperties: items: minimum: 40 + + - if: + properties: + compatible: + contains: + const: st,stm32mp13-adc + + then: + properties: + reg: + const: 0x0 + + interrupts: + const: 0 + + assigned-resolution-bits: + enum: [6, 8, 10, 12] + default: 12 + + st,adc-channels: + minItems: 1 + maxItems: 19 + items: + minimum: 0 + maximum: 18 + + st,min-sample-time-nsecs: + minItems: 1 + maxItems: 19 + items: + minimum: 40 additionalProperties: false required: diff --git a/Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml b/Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml index 57a31356082ef780edce1f47bc7eee821d224cc8..720c16a108d4e2034e4e8a4ec182e4a1ab3f60b0 100644 --- a/Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/iio/adc/ti,palmas-gpadc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Palmas general purpose ADC IP block devicetree bindings +title: Palmas general purpose ADC IP block maintainers: - Tony Lindgren diff --git a/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml b/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml index d6d3d85901715e69d09327763ea9fca6c72d78f7..d40689f233f220ea17add4c61c7679928fba6b5e 100644 --- a/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/iio/adc/x-powers,axp209-adc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: X-Powers AXP ADC bindings +title: X-Powers AXP ADC maintainers: - Chen-Yu Tsai diff --git a/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml b/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml new file mode 100644 index 0000000000000000000000000000000000000000..72d2e910f2065309f89eb8916da8755ca6815934 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml @@ -0,0 +1,373 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/addac/adi,ad74115.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analog Devices AD74115H device + +maintainers: + - Cosmin Tanislav + +description: | + The AD74115H is a single-channel software configurable input/output + device for industrial control applications. It contains functionality for + analog output, analog input, digital output, digital input, resistance + temperature detector, and thermocouple measurements integrated into a single + chip solution with an SPI interface. The device features a 16-bit ADC and a + 14-bit DAC. + + https://www.analog.com/en/products/ad74115h.html + +properties: + compatible: + enum: + - adi,ad74115h + + reg: + maxItems: 1 + + spi-max-frequency: + maximum: 24000000 + + spi-cpol: true + + reset-gpios: true + + interrupts: + minItems: 1 + maxItems: 2 + + interrupt-names: + minItems: 1 + maxItems: 2 + items: + enum: + - adc_rdy + - alert + + avdd-supply: true + avcc-supply: true + dvcc-supply: true + dovdd-supply: true + refin-supply: true + + adi,ch-func: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + Channel function. 
+ 0 - High impedance + 1 - Voltage output + 2 - Current output + 3 - Voltage input + 4 - Current input, externally-powered + 5 - Current input, loop-powered + 6 - Resistance input + 7 - RTD measure + 8 - Digital input logic + 9 - Digital input, loop-powered + 10 - Current output with HART + 11 - Current input, externally-powered, with HART + 12 - Current input, loop-powered, with HART + minimum: 0 + maximum: 12 + default: 0 + + adi,conv2-mux: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + Input node for ADC conversion 2. + 0 - SENSE_EXT1 to AGND_SENSE + 1 - SENSE_EXT2 to AGND_SENSE + 2 - SENSE_EXT2 to SENSE_EXT1 + 3 - AGND to AGND + minimum: 0 + maximum: 3 + default: 0 + + adi,conv2-range-microvolt: + description: Conversion range for ADC conversion 2. + oneOf: + - items: + - enum: [-2500000, 0] + - const: 2500000 + - items: + - enum: [-12000000, 0] + - const: 12000000 + - items: + - const: -2500000 + - const: 0 + - items: + - const: -104000 + - const: 104000 + - items: + - const: 0 + - const: 625000 + + adi,sense-agnd-buffer-low-power: + type: boolean + description: + Whether to enable low-power buffered mode for the AGND sense pin. + + adi,lf-buffer-low-power: + type: boolean + description: + Whether to enable low-power buffered mode for the low-side filtered + sense pin. + + adi,hf-buffer-low-power: + type: boolean + description: + Whether to enable low-power buffered mode for the high-side filtered + sense pin. + + adi,ext2-buffer-low-power: + type: boolean + description: Whether to enable low-power buffered mode for the EXT2 pin. + + adi,ext1-buffer-low-power: + type: boolean + description: Whether to enable low-power buffered mode for the EXT1 pin. + + adi,comparator-invert: + type: boolean + description: Whether to invert the comparator output. + + adi,digital-input-sink-range-high: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + When not present, the digital input range is from 0 to 3700uA in steps + of 120uA, with a ~2k series resistance. + When present, the digital input range is from 0 to 7400uA in steps + of 240uA, with a ~1k series resistance. + + adi,digital-input-sink-microamp: + description: Sink current in digital input mode. + minimum: 0 + maximum: 3700 + default: 0 + + adi,digital-input-debounce-mode-counter-reset: + type: boolean + description: | + When not present, a counter increments when the signal is asserted + and decrements when the signal is de-asserted. + When present, a counter increments while the signal is asserted and + resets when the signal de-asserts + + adi,digital-input-unbuffered: + type: boolean + description: Whether to buffer digital input signals. + + adi,digital-input-short-circuit-detection: + type: boolean + description: Whether to detect digital input short circuits. + + adi,digital-input-open-circuit-detection: + type: boolean + description: Whether to detect digital input open circuits. + + adi,digital-input-threshold-mode-fixed: + type: boolean + description: | + When not present, the digital input threshold range is -0.96 * AVDD + to AVDD. + When present, the threshold range is fixed from -19V to 30V. + + adi,dac-bipolar: + type: boolean + description: | + When not present, the DAC operates in the 0V to 12V range. + When present, the DAC operates in the -12V to 12V range. + + adi,charge-pump: + type: boolean + description: Whether to enable the internal charge pump. + + adi,dac-hart-slew: + type: boolean + description: Whether to use a HART-compatible slew rate. 
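The two digital input sink properties above interact: selecting adi,digital-input-sink-range-high doubles both the step size (120uA to 240uA) and the permitted ceiling of adi,digital-input-sink-microamp (3700 to 7400), which is what the allOf clause at the end of this schema enforces. A hypothetical fragment of a channel configured as a digital input:

    adi,ch-func = <8>; /* digital input logic */
    adi,digital-input-sink-range-high;
    adi,digital-input-sink-microamp = <4800>; /* 20 steps of 240uA; above the 3700 cap of the default range */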
+ + adi,dac-current-limit-low: + type: boolean + description: | + When not present, the DAC short-circuit current limit is 32mA in + either source or sink for VOUT and 4mA sink for IOUT. + When present, the limit is 16mA in either source or sink for VOUT, + 1mA sink for IOUT. + + adi,4-wire-rtd: + type: boolean + description: | + When not present, the ADC should be used for measuring 3-wire RTDs. + When present, the ADC should be used for measuring 4-wire RTDs. + + adi,3-wire-rtd-excitation-swap: + type: boolean + description: Whether to swap the excitation for 3-wire RTD. + + adi,rtd-excitation-current-microamp: + description: Excitation current to apply to RTD. + enum: [250, 500, 750, 1000] + default: 250 + + adi,ext1-burnout: + type: boolean + description: Whether to enable burnout current for EXT1. + + adi,ext1-burnout-current-nanoamp: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + Burnout current in nanoamps to be applied to EXT1. + enum: [0, 50, 500, 1000, 10000] + default: 0 + + adi,ext1-burnout-current-polarity-sourcing: + type: boolean + description: | + When not present, the burnout current polarity for EXT1 is sinking. + When present, the burnout current polarity for EXT1 is sourcing. + + adi,ext2-burnout: + type: boolean + description: Whether to enable burnout current for EXT2. + + adi,ext2-burnout-current-nanoamp: + $ref: /schemas/types.yaml#/definitions/uint32 + description: Burnout current in nanoamps to be applied to EXT2. + enum: [0, 50, 500, 1000, 10000] + default: 0 + + adi,ext2-burnout-current-polarity-sourcing: + type: boolean + description: | + When not present, the burnout current polarity for EXT2 is sinking. + When present, the burnout current polarity for EXT2 is sourcing. + + adi,viout-burnout: + type: boolean + description: Whether to enable burnout current for VIOUT. + + adi,viout-burnout-current-nanoamp: + $ref: /schemas/types.yaml#/definitions/uint32 + description: Burnout current in nanoamps to be applied to VIOUT. + enum: [0, 1000, 10000] + default: 0 + + adi,viout-burnout-current-polarity-sourcing: + type: boolean + description: | + When not present, the burnout current polarity for VIOUT is sinking. + When present, the burnout current polarity for VIOUT is sourcing. + + adi,gpio0-mode: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + GPIO functions. + 0 - Disabled + 1 - Logic I/O + 2 - Comparator output + 3 - Control HART CD + 4 - Monitor HART CD + 5 - Monitor HART EOM status + minimum: 0 + maximum: 5 + default: 0 + + adi,gpio1-mode: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + GPIO functions. + 0 - Disabled + 1 - Logic I/O + 2 - Drive external digital output FET + 3 - Control HART RXD + 4 - Monitor HART RXD + 5 - Monitor HART SOM status + minimum: 0 + maximum: 5 + default: 0 + + adi,gpio2-mode: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + GPIO functions. + 0 - Disabled + 1 - Logic I/O + 2 - Drive internal digital output FET + 3 - Control HART TXD + 4 - Monitor HART TXD + 5 - Monitor HART TX complete status + minimum: 0 + maximum: 5 + default: 0 + + adi,gpio3-mode: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + GPIO functions. 
+      0 - Disabled
+      1 - Logic I/O
+      2 - High impedance
+      3 - Control HART RTS
+      4 - Monitor HART RTS
+      5 - Monitor HART CD complete status
+    minimum: 0
+    maximum: 5
+    default: 0
+
+required:
+  - compatible
+  - reg
+  - spi-cpol
+  - avdd-supply
+
+allOf:
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
+  - if:
+      required:
+        - adi,digital-input-sink-range-high
+    then:
+      properties:
+        adi,digital-input-sink-microamp:
+          maximum: 7400
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    spi {
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      addac@0 {
+        compatible = "adi,ad74115h";
+        reg = <0>;
+
+        spi-max-frequency = <12000000>;
+        spi-cpol;
+
+        reset-gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+
+        interrupt-parent = <&gpio>;
+        interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
+        interrupt-names = "adc_rdy";
+
+        avdd-supply = <&ad74115_avdd>;
+
+        adi,ch-func = <1>;
+        adi,conv2-mux = <2>;
+        adi,conv2-range-microvolt = <(-12000000) 12000000>;
+
+        adi,gpio0-mode = <1>;
+        adi,gpio1-mode = <1>;
+        adi,gpio2-mode = <1>;
+        adi,gpio3-mode = <1>;
+
+        adi,dac-bipolar;
+      };
+    };
+...
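As a complement to the schema's own voltage-output example above, a minimal editorial sketch of a digital-input configuration using the properties this file defines; the phandles and values are hypothetical, and the fragment assumes it sits inside an SPI controller node:

    addac@0 {
        compatible = "adi,ad74115h";
        reg = <0>;
        spi-cpol;
        avdd-supply = <&ad74115_avdd>;

        /* channel function 8: digital input logic */
        adi,ch-func = <8>;
        /* sink up to 1800uA (low range: 0 to 3700uA in 120uA steps) */
        adi,digital-input-sink-microamp = <1800>;
        /* use the fixed -19V to 30V threshold range */
        adi,digital-input-threshold-mode-fixed;
    };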
diff --git a/Documentation/devicetree/bindings/iio/addac/adi,ad74413r.yaml b/Documentation/devicetree/bindings/iio/addac/adi,ad74413r.yaml
index d2a9f92c0a6dd8a5a435c61f8245e8eb7b90f207..9eb3ecc8bbc8737ccbcc5c35e8880cfecb395437 100644
--- a/Documentation/devicetree/bindings/iio/addac/adi,ad74413r.yaml
+++ b/Documentation/devicetree/bindings/iio/addac/adi,ad74413r.yaml
@@ -51,6 +51,9 @@ properties:
       Shunt (sense) resistor value in micro-Ohms.
     default: 100000000
 
+  reset-gpios:
+    maxItems: 1
+
 required:
   - compatible
   - reg
@@ -58,8 +61,6 @@ required:
   - spi-cpol
   - refin-supply
 
-additionalProperties: false
-
 patternProperties:
   "^channel@[0-3]$":
     type: object
@@ -103,6 +104,11 @@ patternProperties:
     required:
       - reg
 
+allOf:
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
 examples:
   - |
     #include <dt-bindings/gpio/gpio.h>
@@ -113,9 +119,7 @@ examples:
       #address-cells = <1>;
       #size-cells = <0>;
 
-      cs-gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
-
-      ad74413r@0 {
+      addac@0 {
         compatible = "adi,ad74413r";
         reg = <0>;
         spi-max-frequency = <1000000>;
@@ -128,6 +132,7 @@ examples:
         interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
 
         refin-supply = <&ad74413r_refin>;
+        reset-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
 
         channel@0 {
           reg = <0>;
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml
index e49e7556175df26469304c1e6baac3bada49175a..4e508bfcc9d87e46e5c5fb3c561304fba2acba41 100644
--- a/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml
@@ -102,8 +102,7 @@ allOf:
   - if:
       properties:
         adi,dc-dc-mode:
-          contains:
-            enum: [1, 3]
+          enum: [1, 3]
     then:
       properties:
         adi,range-microvolt: false
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml
index 29bd16dab5464ade3ba7f3efda912be6ac5fa1da..3c8784a54d2cefa042fcca80ef81375fef4e27b8 100644
--- a/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Analog Devices AD5766 DAC device driver
 
 maintainers:
-  - Cristian Pop
+  - Nuno Sá
 
 description: |
   Bindings for the Analog Devices AD5766 current DAC device.
   Datasheet can be
diff --git a/Documentation/devicetree/bindings/iio/dac/nxp,lpc1850-dac.yaml b/Documentation/devicetree/bindings/iio/dac/nxp,lpc1850-dac.yaml
index 595f481c548e00e27a862abf314fb17c4656a327..9c8afe3f1b690bafb628d4cfc9576a1c1647f0f3 100644
--- a/Documentation/devicetree/bindings/iio/dac/nxp,lpc1850-dac.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/nxp,lpc1850-dac.yaml
@@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/iio/dac/nxp,lpc1850-dac.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title: NXP LPC1850 DAC bindings
+title: NXP LPC1850 DAC
 
 maintainers:
   - Jonathan Cameron
diff --git a/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.yaml b/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.yaml
index 6adeda4087fc26608ecbed4a765fd02bb70a1af9..0f1bf1110122a2961d7434f1337676a31205ab4e 100644
--- a/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.yaml
@@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/iio/dac/st,stm32-dac.yaml#"
 $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 
-title: STMicroelectronics STM32 DAC bindings
+title: STMicroelectronics STM32 DAC
 
 description: |
   The STM32 DAC is a 12-bit voltage output digital-to-analog converter. The DAC
diff --git a/Documentation/devicetree/bindings/iio/frequency/adi,adf4350.yaml b/Documentation/devicetree/bindings/iio/frequency/adi,adf4350.yaml
index d7f20b8518e06ea37c6fd50989dc82bc5b8dcd5c..43cbf27114c7b955b5fc03f83c0e239c64cafbe5 100644
--- a/Documentation/devicetree/bindings/iio/frequency/adi,adf4350.yaml
+++ b/Documentation/devicetree/bindings/iio/frequency/adi,adf4350.yaml
@@ -160,13 +160,16 @@ properties:
         2: +2dBm
         3: +5dBm
 
-additionalProperties: false
-
 required:
   - compatible
   - reg
   - clocks
 
+allOf:
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
 examples:
   - |
     spi {
diff --git a/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml b/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aa6a3193b4e035d1a988325bb0d2599970a0d2d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/frequency/adi,adf4377.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ADF4377 Microwave Wideband Synthesizer with Integrated VCO
+
+maintainers:
+  - Antoniu Miclaus
+  - Dragos Bogdan
+
+description: |
+  The ADF4377 is a high performance, ultralow jitter, dual output integer-N
+  phase-locked loop (PLL) with integrated voltage controlled oscillator (VCO)
+  ideally suited for data converter and mixed signal front end (MxFE) clock
+  applications.
+
+  https://www.analog.com/en/products/adf4377.html
+
+properties:
+  compatible:
+    enum:
+      - adi,adf4377
+      - adi,adf4378
+
+  reg:
+    maxItems: 1
+
+  spi-max-frequency:
+    maximum: 10000000
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    description:
+      External clock that provides reference input frequency.
+    items:
+      - const: ref_in
+
+  chip-enable-gpios:
+    description:
+      GPIO that controls the Chip Enable Pin.
+    maxItems: 1
+
+  clk1-enable-gpios:
+    description:
+      GPIO that controls the Enable Clock 1 Output Buffer Pin.
+    maxItems: 1
+
+  clk2-enable-gpios:
+    description:
+      GPIO that controls the Enable Clock 2 Output Buffer Pin.
+ maxItems: 1 + + adi,muxout-select: + description: + On chip multiplexer output selection. + high_z - MUXOUT Pin set to high-Z. + lock_detect - MUXOUT Pin set to lock detector output. + muxout_low - MUXOUT Pin set to low. + f_div_rclk_2 - MUXOUT Pin set to fDIV_RCLK/2. + f_div_nclk_2 - MUXOUT Pin set to fDIV_NCLK/2. + muxout_high - MUXOUT Pin set to high. + enum: [high_z, lock_detect, muxout_low, f_div_rclk_2, f_div_nclk_2, muxout_high] + +required: + - compatible + - reg + - clocks + - clock-names + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false + +examples: + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + frequency@0 { + compatible = "adi,adf4377"; + reg = <0>; + spi-max-frequency = <10000000>; + clocks = <&adf4377_ref_in>; + clock-names = "ref_in"; + }; + }; +... diff --git a/Documentation/devicetree/bindings/iio/frequency/adi,admv1013.yaml b/Documentation/devicetree/bindings/iio/frequency/adi,admv1013.yaml index 23f1f3b55abb30a123c90c7b34e807671f7629ba..fc813bcb65320637f0e42081bad301bc8c5cecfd 100644 --- a/Documentation/devicetree/bindings/iio/frequency/adi,admv1013.yaml +++ b/Documentation/devicetree/bindings/iio/frequency/adi,admv1013.yaml @@ -70,7 +70,10 @@ required: - clock-names - vcm-supply -additionalProperties: false +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/iio/frequency/adi,admv1014.yaml b/Documentation/devicetree/bindings/iio/frequency/adi,admv1014.yaml index 2716c1e8fe3163d73d3ac1196845c828b6242f4e..ab86daa2c56ec4e5eb57b8308974b90280cad308 100644 --- a/Documentation/devicetree/bindings/iio/frequency/adi,admv1014.yaml +++ b/Documentation/devicetree/bindings/iio/frequency/adi,admv1014.yaml @@ -104,7 +104,10 @@ required: - clock-names - vcm-supply -additionalProperties: false +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/iio/frequency/adi,admv4420.yaml b/Documentation/devicetree/bindings/iio/frequency/adi,admv4420.yaml index da7fe85ec92ef3ffbffe8dc6380cb095e6edadfa..64f2352aac3d512825037402a064956271a83878 100644 --- a/Documentation/devicetree/bindings/iio/frequency/adi,admv4420.yaml +++ b/Documentation/devicetree/bindings/iio/frequency/adi,admv4420.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: ADMV4420 K Band Downconverter maintainers: - - Cristian Pop + - Nuno Sá description: The ADMV4420 is a highly integrated, double balanced, active @@ -37,7 +37,11 @@ required: - compatible - reg -additionalProperties: false + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/iio/frequency/adi,adrf6780.yaml b/Documentation/devicetree/bindings/iio/frequency/adi,adrf6780.yaml index 3a8ea93f4e0c1e112dbcf6a04118094e9d317b42..f11391ab4b622d6ff380860ad0ff239e82771248 100644 --- a/Documentation/devicetree/bindings/iio/frequency/adi,adrf6780.yaml +++ b/Documentation/devicetree/bindings/iio/frequency/adi,adrf6780.yaml @@ -113,7 +113,10 @@ required: - clocks - clock-names -additionalProperties: false +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/iio/gyroscope/adi,adxrs290.yaml b/Documentation/devicetree/bindings/iio/gyroscope/adi,adxrs290.yaml index 
662ec59ca0af52dad2b4e301b8151c94c03eb105..0ae2464b9bc42a46018183f3466567e5b4374539 100644 --- a/Documentation/devicetree/bindings/iio/gyroscope/adi,adxrs290.yaml +++ b/Documentation/devicetree/bindings/iio/gyroscope/adi,adxrs290.yaml @@ -38,7 +38,10 @@ required: - spi-cpol - spi-cpha -additionalProperties: false +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/iio/gyroscope/nxp,fxas21002c.yaml b/Documentation/devicetree/bindings/iio/gyroscope/nxp,fxas21002c.yaml index 3f57a1b813e69a00aae5436824262f6e2b5a2b8c..2c900e9dddc624a235cb4d7c31c17a354f379842 100644 --- a/Documentation/devicetree/bindings/iio/gyroscope/nxp,fxas21002c.yaml +++ b/Documentation/devicetree/bindings/iio/gyroscope/nxp,fxas21002c.yaml @@ -56,7 +56,10 @@ required: - compatible - reg -additionalProperties: false +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml b/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml index a7574210175a9fbfea974213574e9b434e617f46..5dbfae80bb281542bc20d0c551b087e01b57c5f5 100644 --- a/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml +++ b/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml @@ -79,6 +79,7 @@ required: - spi-cpol allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# - if: properties: compatible: @@ -107,7 +108,7 @@ allOf: dependencies: adi,sync-mode: [ clocks ] -additionalProperties: false +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/iio/imu/invensense,icm42600.yaml b/Documentation/devicetree/bindings/iio/imu/invensense,icm42600.yaml index 488349755c99d8084e5309a742ee3d966af166a0..13c9abdd31314256d3e986f59345438f7816d81b 100644 --- a/Documentation/devicetree/bindings/iio/imu/invensense,icm42600.yaml +++ b/Documentation/devicetree/bindings/iio/imu/invensense,icm42600.yaml @@ -31,6 +31,7 @@ properties: - invensense,icm42602 - invensense,icm42605 - invensense,icm42622 + - invensense,icm42631 reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml b/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml index fe1e02e5d7b39d6fa1b57f0bf0654b1b1d595148..68b481c633188378031594101e3b5d531787af01 100644 --- a/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml +++ b/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml @@ -32,12 +32,20 @@ properties: - st,lsm6dsrx - st,lsm6dst - st,lsm6dsop + - st,lsm6dsv + - st,lsm6dso16is - items: - const: st,asm330lhhx - const: st,lsm6dsr - items: - const: st,lsm6dstx - const: st,lsm6dst + - items: + - const: st,lsm6dsv16x + - const: st,lsm6dsv + - items: + - const: st,ism330is + - const: st,lsm6dso16is reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.yaml b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.yaml index 611ad4444cf0cb9c4083e7b2014ee1fae80442d9..c55831b60ee67ff969e068780aa85bfa94ddf517 100644 --- a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.yaml +++ b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/iio/multiplexer/io-channel-mux.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: I/O channel multiplexer bindings +title: I/O channel multiplexer maintainers: - Peter Rosin diff --git 
a/Documentation/devicetree/bindings/iio/pressure/meas,ms5611.yaml b/Documentation/devicetree/bindings/iio/pressure/meas,ms5611.yaml index 4f06707450bf5cbd41c1deb7d3e3bd94337eb1c5..21e6ddb7f41e3a30883c0924d4d6ea91456b6d10 100644 --- a/Documentation/devicetree/bindings/iio/pressure/meas,ms5611.yaml +++ b/Documentation/devicetree/bindings/iio/pressure/meas,ms5611.yaml @@ -30,7 +30,10 @@ required: - compatible - reg -additionalProperties: false +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false examples: - | @@ -52,6 +55,7 @@ examples: compatible = "meas,ms5611"; reg = <0>; vdd-supply = <&ldo_3v3_gnss>; + spi-max-frequency = <20000000>; }; }; ... diff --git a/Documentation/devicetree/bindings/iio/pressure/murata,zpa2326.yaml b/Documentation/devicetree/bindings/iio/pressure/murata,zpa2326.yaml index d6103be03460bb96e06ac7200e13108cf71c6645..c33640ddde5865140f9c583b477e6a17f3778a02 100644 --- a/Documentation/devicetree/bindings/iio/pressure/murata,zpa2326.yaml +++ b/Documentation/devicetree/bindings/iio/pressure/murata,zpa2326.yaml @@ -33,7 +33,10 @@ required: - compatible - reg -additionalProperties: false +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/iio/proximity/ams,as3935.yaml b/Documentation/devicetree/bindings/iio/proximity/ams,as3935.yaml index 7fcba5d6d508ecd4c579b35aebf4ddc94ec6dc65..710d3b9a86d90c13c224d24c3a8364208878c90e 100644 --- a/Documentation/devicetree/bindings/iio/proximity/ams,as3935.yaml +++ b/Documentation/devicetree/bindings/iio/proximity/ams,as3935.yaml @@ -49,7 +49,10 @@ required: - spi-cpha - interrupts -additionalProperties: false +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/iio/resolver/adi,ad2s90.yaml b/Documentation/devicetree/bindings/iio/resolver/adi,ad2s90.yaml index 81e4bdfc17c4366b0ccc9b2a62fac773292376cb..b24e5a202a48a549d05ff72e80cfe5822765065b 100644 --- a/Documentation/devicetree/bindings/iio/resolver/adi,ad2s90.yaml +++ b/Documentation/devicetree/bindings/iio/resolver/adi,ad2s90.yaml @@ -33,8 +33,6 @@ properties: spi-cpha: true -additionalProperties: false - required: - compatible - reg @@ -43,6 +41,11 @@ dependencies: spi-cpol: [ spi-cpha ] spi-cpha: [ spi-cpol ] +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false + examples: - | spi { diff --git a/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml b/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml index 722781aa4697e46d5f4fa1ca03e48ddf7639ce39..b69813f281dadca1015b2c2a09be9d7c5524760f 100644 --- a/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml +++ b/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml @@ -4,19 +4,30 @@ $id: http://devicetree.org/schemas/iio/temperature/adi,ltc2983.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Analog Devices LTC2983 Multi-sensor Temperature system +title: Analog Devices LTC2983, LTC2986, LTM2985 Multi-sensor Temperature system maintainers: - Nuno Sá description: | - Analog Devices LTC2983 Multi-Sensor Digital Temperature Measurement System + Analog Devices LTC2983, LTC2984, LTC2986, LTM2985 Multi-Sensor Digital + Temperature Measurement Systems + https://www.analog.com/media/en/technical-documentation/data-sheets/2983fc.pdf + 
https://www.analog.com/media/en/technical-documentation/data-sheets/2984fb.pdf + https://www.analog.com/media/en/technical-documentation/data-sheets/29861fa.pdf + https://www.analog.com/media/en/technical-documentation/data-sheets/ltm2985.pdf properties: compatible: - enum: - - adi,ltc2983 + oneOf: + - enum: + - adi,ltc2983 + - adi,ltc2986 + - adi,ltm2985 + - items: + - const: adi,ltc2984 + - const: adi,ltc2983 reg: maxItems: 1 @@ -25,26 +36,26 @@ properties: maxItems: 1 adi,mux-delay-config-us: - description: - The LTC2983 performs 2 or 3 internal conversion cycles per temperature - result. Each conversion cycle is performed with different excitation and - input multiplexer configurations. Prior to each conversion, these - excitation circuits and input switch configurations are changed and an - internal 1ms delay ensures settling prior to the conversion cycle in most - cases. An extra delay can be configured using this property. The value is - rounded to nearest 100us. + description: | + Extra delay prior to each conversion, in addition to the internal 1ms + delay, for the multiplexer to switch input configurations and + excitation values. + + This property is supposed to be in microseconds, but to maintain + compatibility, this value will be multiplied by 100 before usage. maximum: 255 + default: 0 adi,filter-notch-freq: description: - Set's the default setting of the digital filter. The default is - simultaneous 50/60Hz rejection. + Notch frequency of the digital filter. 0 - 50/60Hz rejection 1 - 60Hz rejection 2 - 50Hz rejection $ref: /schemas/types.yaml#/definitions/uint32 minimum: 0 maximum: 2 + default: 0 '#address-cells': const: 1 @@ -53,19 +64,20 @@ properties: const: 0 patternProperties: - "@([1-9]|1[0-9]|20)$": + "@([0-9a-f]+)$": type: object + description: Sensor. properties: reg: description: - The channel number. It can be connected to one of the 20 channels of - the device. + Channel number. Connects the sensor to the channel with this number + of the device. minimum: 1 maximum: 20 adi,sensor-type: - description: Identifies the type of sensor connected to the device. + description: Type of sensor connected to the device. $ref: /schemas/types.yaml#/definitions/uint32 required: @@ -74,9 +86,7 @@ patternProperties: "^thermocouple@": type: object - description: - Represents a thermocouple sensor which is connected to one of the device - channels. + description: Thermocouple sensor. properties: adi,sensor-type: @@ -95,86 +105,87 @@ patternProperties: maximum: 9 adi,single-ended: - description: - Boolean property which set's the thermocouple as single-ended. + description: Whether the sensor is single-ended. type: boolean adi,sensor-oc-current-microamp: - description: - This property set's the pulsed current value applied during - open-circuit detect. + description: Pulsed current value applied during open-circuit detect. enum: [10, 100, 500, 1000] + default: 10 adi,cold-junction-handle: description: - Phandle which points to a sensor object responsible for measuring - the thermocouple cold junction temperature. - $ref: "/schemas/types.yaml#/definitions/phandle" + Sensor responsible for measuring the thermocouple cold junction + temperature. + $ref: /schemas/types.yaml#/definitions/phandle adi,custom-thermocouple: description: - This is a table, where each entry should be a pair of - voltage(mv)-temperature(K). The entries must be given in nv and uK - so that, the original values must be multiplied by 1000000. For - more details look at table 69 and 70. 
-          Note should be signed, but dtc doesn't currently maintain the
-          sign.
+          Used for digitizing custom thermocouples.
+          See Page 59 of the datasheet.
         $ref: /schemas/types.yaml#/definitions/uint64-matrix
         minItems: 3
         maxItems: 64
         items:
-          minItems: 2
-          maxItems: 2
+          items:
+            - description: Voltage point in nV, signed.
+            - description: Temperature point in uK.
+
+    allOf:
+      - if:
+          properties:
+            adi,sensor-type:
+              const: 9
+        then:
+          required:
+            - adi,custom-thermocouple
 
   "^diode@":
     type: object
-    description:
-      Represents a diode sensor which is connected to one of the device
-      channels.
+    description: Diode sensor.
 
     properties:
       adi,sensor-type:
-        description: Identifies the sensor as a diode.
+        description: Sensor type for diodes.
         $ref: /schemas/types.yaml#/definitions/uint32
         const: 28
 
       adi,single-ended:
-        description: Boolean property which set's the diode as single-ended.
+        description: Whether the sensor is single-ended.
         type: boolean
 
       adi,three-conversion-cycles:
         description:
-          Boolean property which set's three conversion cycles removing
-          parasitic resistance effects between the LTC2983 and the diode.
+          Whether to use three conversion cycles to remove parasitic
+          resistance between the device and the diode.
         type: boolean
 
       adi,average-on:
         description:
-          Boolean property which enables a running average of the diode
-          temperature reading. This reduces the noise when the diode is used
-          as a cold junction temperature element on an isothermal block
-          where temperatures change slowly.
+          Whether to use a running average of the diode temperature
+          reading to reduce the noise when the diode is used as a cold
+          junction temperature element on an isothermal block where
+          temperatures change slowly.
         type: boolean
 
       adi,excitation-current-microamp:
         description:
-          This property controls the magnitude of the excitation current
-          applied to the diode. Depending on the number of conversions
-          cycles, this property will assume different predefined values on
-          each cycle. Just set the value of the first cycle (1l).
+          Magnitude of the 1l excitation current applied to the diode.
+          4l excitation current will be 4 times this value, and 8l
+          excitation current will be 8 times this value.
         enum: [10, 20, 40, 80]
+        default: 10
 
       adi,ideal-factor-value:
         description:
-          This property sets the diode ideality factor. The real value must
-          be multiplied by 1000000 to remove the fractional part. For more
-          information look at table 20 of the datasheet.
+          Diode ideality factor.
+          Set this property to 1000000 times the real value.
         $ref: /schemas/types.yaml#/definitions/uint32
+        default: 0
 
   "^rtd@":
     type: object
-    description:
-      Represents a rtd sensor which is connected to one of the device channels.
+    description: RTD sensor.
 
     properties:
       reg:
@@ -197,68 +208,82 @@ patternProperties:
         maximum: 18
 
       adi,rsense-handle:
-        description:
-          Phandle pointing to a rsense object associated with this RTD.
-        $ref: "/schemas/types.yaml#/definitions/phandle"
+        description: Associated sense resistor sensor.
+        $ref: /schemas/types.yaml#/definitions/phandle
 
       adi,number-of-wires:
         description:
-          Identifies the number of wires used by the RTD. Setting this
-          property to 5 means 4 wires with Kelvin Rsense.
+          Number of wires used by the RTD.
+          5 means 4 wires with Kelvin sense resistor.
         $ref: /schemas/types.yaml#/definitions/uint32
         enum: [2, 3, 4, 5]
+        default: 2
 
       adi,rsense-share:
         description:
-          Boolean property which enables Rsense sharing, where one sense
-          resistor is used for multiple 2-, 3-, and/or 4-wire RTDs.
- type: boolean - - adi,current-rotate: - description: - Boolean property which enables excitation current rotation to - automatically remove parasitic thermocouple effects. Note that - this property is not allowed for 2- and 3-wire RTDs. + Whether to enable sense resistor sharing, where one sense + resistor is used by multiple sensors. type: boolean adi,excitation-current-microamp: - description: - This property controls the magnitude of the excitation current - applied to the RTD. + description: Excitation current applied to the RTD. enum: [5, 10, 25, 50, 100, 250, 500, 1000] + default: 5 adi,rtd-curve: - description: - This property set the RTD curve used and the corresponding - Callendar-VanDusen constants. Look at table 30 of the datasheet. + description: | + RTD curve and the corresponding Callendar-VanDusen constants. + 0 - European + 1 - American + 2 - Japanese + 3 - ITS-90 $ref: /schemas/types.yaml#/definitions/uint32 minimum: 0 maximum: 3 + default: 0 adi,custom-rtd: description: - This is a table, where each entry should be a pair of - resistance(ohm)-temperature(K). The entries added here are in uohm - and uK. For more details values look at table 74 and 75. + Used for digitizing custom RTDs. + See Page 62 of the datasheet. $ref: /schemas/types.yaml#/definitions/uint64-matrix + minItems: 3 + maxItems: 64 items: - minItems: 3 - maxItems: 64 items: - minItems: 2 - maxItems: 2 + - description: Resistance point in uOhms. + - description: Temperature point in uK. required: - adi,rsense-handle - dependencies: - adi,current-rotate: [ "adi,rsense-share" ] + allOf: + - if: + properties: + adi,number-of-wires: + const: 4 + then: + properties: + adi,current-rotate: + description: + Whether to enable excitation current rotation to automatically + remove parasitic thermocouple effects. + type: boolean + + dependencies: + adi,current-rotate: [ "adi,rsense-share" ] + + - if: + properties: + adi,sensor-type: + const: 18 + then: + required: + - adi,custom-rtd "^thermistor@": type: object - description: - Represents a thermistor sensor which is connected to one of the device - channels. + description: Thermistor sensor. properties: adi,sensor-type: @@ -277,61 +302,53 @@ patternProperties: maximum: 27 adi,rsense-handle: - description: - Phandle pointing to a rsense object associated with this - thermistor. - $ref: "/schemas/types.yaml#/definitions/phandle" + description: Associated sense resistor sensor. + $ref: /schemas/types.yaml#/definitions/phandle adi,single-ended: - description: - Boolean property which set's the thermistor as single-ended. + description: Whether the sensor is single-ended. type: boolean adi,rsense-share: description: - Boolean property which enables Rsense sharing, where one sense - resistor is used for multiple thermistors. Note that this property - is ignored if adi,single-ended is set. + Whether to enable sense resistor sharing, where one sense + resistor is used by multiple sensors. type: boolean adi,current-rotate: description: - Boolean property which enables excitation current rotation to - automatically remove parasitic thermocouple effects. + Whether to enable excitation current rotation to automatically + remove parasitic thermocouple effects. type: boolean adi,excitation-current-nanoamp: description: - This property controls the magnitude of the excitation current - applied to the thermistor. Value 0 set's the sensor in auto-range - mode. - $ref: /schemas/types.yaml#/definitions/uint32 + Excitation current applied to the thermistor. 
+          0 sets the sensor in auto-range mode.
         enum: [0, 250, 500, 1000, 5000, 10000, 25000, 50000, 100000,
                250000, 500000, 1000000]
+        default: 0
 
       adi,custom-thermistor:
         description:
-          This is a table, where each entry should be a pair of
-          resistance(ohm)-temperature(K). The entries added here are in uohm
-          and uK only for custom thermistors. For more details look at table
-          78 and 79.
+          Used for digitizing custom thermistors.
+          See Page 65 of the datasheet.
         $ref: /schemas/types.yaml#/definitions/uint64-matrix
         minItems: 3
         maxItems: 64
         items:
-          minItems: 2
-          maxItems: 2
+          items:
+            - description: Resistance point in uOhms.
+            - description: Temperature point in uK.
 
       adi,custom-steinhart:
         description:
-          Steinhart-Hart coefficients are also supported and can
-          be programmed into the device memory using this property. For
-          Steinhart sensors the coefficients are given in the raw
-          format. Look at table 82 for more information.
+          Steinhart-Hart coefficients in raw format, used for digitizing
+          custom thermistors.
+          See Page 68 of the datasheet.
         $ref: /schemas/types.yaml#/definitions/uint32-array
-        items:
-          minItems: 6
-          maxItems: 6
+        minItems: 6
+        maxItems: 6
 
     required:
       - adi,rsense-handle
@@ -339,25 +356,78 @@ patternProperties:
     dependencies:
       adi,current-rotate: [ "adi,rsense-share" ]
 
+    allOf:
+      - if:
+          properties:
+            adi,sensor-type:
+              const: 26
+        then:
+          properties:
+            adi,excitation-current-nanoamp:
+              enum: [250, 500, 1000, 5000, 10000, 25000, 50000, 100000,
+                     250000, 500000, 1000000]
+              default: 1000
+          required:
+            - adi,custom-steinhart
+      - if:
+          properties:
+            adi,sensor-type:
+              const: 27
+        then:
+          properties:
+            adi,excitation-current-nanoamp:
+              enum: [250, 500, 1000, 5000, 10000, 25000, 50000, 100000,
+                     250000, 500000, 1000000]
+              default: 1000
+          required:
+            - adi,custom-thermistor
+
   "^adc@":
     type: object
-    description: Represents a channel which is being used as a direct adc.
+    description: Direct ADC sensor.
 
     properties:
       adi,sensor-type:
-        description: Identifies the sensor as a direct adc.
+        description: Sensor type for direct ADC sensors.
         $ref: /schemas/types.yaml#/definitions/uint32
         const: 30
 
       adi,single-ended:
-        description: Boolean property which set's the adc as single-ended.
+        description: Whether the sensor is single-ended.
+        type: boolean
+
+  "^temp@":
+    type: object
+    description: Active analog temperature sensor.
+
+    properties:
+      adi,sensor-type:
+        description: Sensor type for active analog temperature sensors.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        const: 31
+
+      adi,single-ended:
+        description: Whether the sensor is single-ended.
         type: boolean
 
+      adi,custom-temp:
+        description:
+          Used for digitizing active analog temperature sensors.
+          See Page 67 of the LTM2985 datasheet.
+        $ref: /schemas/types.yaml#/definitions/uint64-matrix
+        minItems: 3
+        maxItems: 64
+        items:
+          items:
+            - description: Voltage point in nV, signed.
+            - description: Temperature point in uK.
+
+    required:
+      - adi,custom-temp
+
   "^rsense@":
     type: object
-    description:
-      Represents a rsense which is connected to one of the device channels.
-      Rsense are used by thermistors and RTD's.
+    description: Sense resistor sensor.
 
     properties:
      reg:
@@ -365,14 +435,12 @@ patternProperties:
         maximum: 20
 
       adi,sensor-type:
-        description: Identifies the sensor as a rsense.
+        description: Sensor type for sense resistor sensors.
         $ref: /schemas/types.yaml#/definitions/uint32
         const: 29
 
       adi,rsense-val-milli-ohms:
-        description:
-          Sets the value of the sense resistor. Look at table 20 of the
-          datasheet for information.
+ description: Value of the sense resistor. required: - adi,rsense-val-milli-ohms @@ -384,6 +452,18 @@ required: additionalProperties: false +allOf: + - if: + properties: + compatible: + contains: + enum: + - adi,ltc2983 + - adi,ltc2984 + then: + patternProperties: + "^temp@": false + examples: - | #include @@ -391,7 +471,7 @@ examples: #address-cells = <1>; #size-cells = <0>; - sensor_ltc2983: ltc2983@0 { + temperature-sensor@0 { compatible = "adi,ltc2983"; reg = <0>; diff --git a/Documentation/devicetree/bindings/input/fsl,scu-key.yaml b/Documentation/devicetree/bindings/input/fsl,scu-key.yaml index e6266d1882665ce0e863bbbf9676f79e5cbc9eeb..e5a3c355ee1f05a62faa181b45f94146d0d26c98 100644 --- a/Documentation/devicetree/bindings/input/fsl,scu-key.yaml +++ b/Documentation/devicetree/bindings/input/fsl,scu-key.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/fsl,scu-key.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: i.MX SCU Client Device Node - SCU key bindings based on SCU Message Protocol +title: i.MX SCU Client Device Node - SCU Key Based on SCU Message Protocol maintainers: - Dong Aisheng diff --git a/Documentation/devicetree/bindings/input/gpio-keys.yaml b/Documentation/devicetree/bindings/input/gpio-keys.yaml index 17ac9dff7972089621b14fb3f537ff94ad376e9b..159cd9d9fe573c7315cc5066409dae6c8c478e94 100644 --- a/Documentation/devicetree/bindings/input/gpio-keys.yaml +++ b/Documentation/devicetree/bindings/input/gpio-keys.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/gpio-keys.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Device-Tree bindings for GPIO attached keys +title: GPIO attached keys maintainers: - Rob Herring diff --git a/Documentation/devicetree/bindings/input/input.yaml b/Documentation/devicetree/bindings/input/input.yaml index 17512f4347fdd2cefac70ea1c0db01f1a81a9370..94f7942189e8c469e71f8b4522435d4880190454 100644 --- a/Documentation/devicetree/bindings/input/input.yaml +++ b/Documentation/devicetree/bindings/input/input.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/input.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Common input schema binding +title: Input Devices Common Properties maintainers: - Dmitry Torokhov diff --git a/Documentation/devicetree/bindings/input/matrix-keymap.yaml b/Documentation/devicetree/bindings/input/matrix-keymap.yaml index 6699d5e32dcafe111af27dc0a4899b5d0d12c066..4d6dbe91646dc8f29a9c9504da8246ebf03fc84c 100644 --- a/Documentation/devicetree/bindings/input/matrix-keymap.yaml +++ b/Documentation/devicetree/bindings/input/matrix-keymap.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/matrix-keymap.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Common key matrices binding for matrix-connected key boards +title: Common Key Matrices on Matrix-connected Key Boards maintainers: - Olof Johansson diff --git a/Documentation/devicetree/bindings/input/microchip,cap11xx.yaml b/Documentation/devicetree/bindings/input/microchip,cap11xx.yaml index 96358b12f9b203641c8772394af78901320241e7..67d4d8f86a2d8b2770b3d3787f043457245e057d 100644 --- a/Documentation/devicetree/bindings/input/microchip,cap11xx.yaml +++ b/Documentation/devicetree/bindings/input/microchip,cap11xx.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/input/microchip,cap11xx.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Device tree bindings for Microchip CAP11xx based capacitive touch sensors +title: Microchip CAP11xx 
based capacitive touch sensors description: | The Microchip CAP1xxx Family of RightTouchTM multiple-channel capacitive diff --git a/Documentation/devicetree/bindings/input/pine64,pinephone-keyboard.yaml b/Documentation/devicetree/bindings/input/pine64,pinephone-keyboard.yaml index e4a0ac0fff9a7c35a796e2be1e04805384745599..490f6c3d9e4be6cf624b9dca29617375333c08fb 100644 --- a/Documentation/devicetree/bindings/input/pine64,pinephone-keyboard.yaml +++ b/Documentation/devicetree/bindings/input/pine64,pinephone-keyboard.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/pine64,pinephone-keyboard.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Pine64 PinePhone keyboard device tree bindings +title: Pine64 PinePhone keyboard maintainers: - Samuel Holland diff --git a/Documentation/devicetree/bindings/input/touchscreen/chipone,icn8318.yaml b/Documentation/devicetree/bindings/input/touchscreen/chipone,icn8318.yaml index 9df685bdc5db175f29b657e3270ca86dbda7f48d..74a8a01e074504bdd22d087f301b573605e8e745 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/chipone,icn8318.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/chipone,icn8318.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/chipone,icn8318.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ChipOne ICN8318 Touchscreen Controller Device Tree Bindings +title: ChipOne ICN8318 Touchscreen Controller maintainers: - Dmitry Torokhov diff --git a/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml b/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml index 3225c8d1fdaf002c35ea43bd3b6a0d68967672bc..86a6d18f952a0c12e810d9e80c94e78df9a47d6e 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/cypress,cy8ctma140.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Cypress CY8CTMA140 series touchscreen controller bindings +title: Cypress CY8CTMA140 series touchscreen controller maintainers: - Linus Walleij diff --git a/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma340.yaml b/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma340.yaml index 762e56ee90cd2a5537ccc113adce9eed978e1dde..4dfbb93678b564377e79f271a47817f435c3f558 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma340.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma340.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/cypress,cy8ctma340.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Cypress CY8CTMA340 series touchscreen controller bindings +title: Cypress CY8CTMA340 series touchscreen controller description: The Cypress CY8CTMA340 series (also known as "CYTTSP" after the marketing name Cypress TrueTouch Standard Product) touchscreens can diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml index 46bc8c028fe69e03380f87a008e6de060616133b..ef4c841387bdd480c856eb729812b7b0372df27b 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/edt-ft5x06.yaml# 
$schema: http://devicetree.org/meta-schemas/core.yaml# -title: FocalTech EDT-FT5x06 Polytouch Bindings +title: FocalTech EDT-FT5x06 Polytouch description: | There are 5 variants of the chip for various touch panel sizes diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml index 19ac9da421df3356ef19ffaa7bbf53f5d4126e63..3d016b87c8df8684aaee0779e11f8aa930a158a9 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/goodix.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Goodix GT9xx series touchscreen controller Bindings +title: Goodix GT9xx series touchscreen controller maintainers: - Dmitry Torokhov diff --git a/Documentation/devicetree/bindings/input/touchscreen/himax,hx83112b.yaml b/Documentation/devicetree/bindings/input/touchscreen/himax,hx83112b.yaml index be2ba185c086bd3b0a792f3ca8ba6435dc33e06a..f42b23d532eb00824da1ca395d85d8ba14f08a4f 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/himax,hx83112b.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/himax,hx83112b.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/himax,hx83112b.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Himax hx83112b touchscreen controller bindings +title: Himax hx83112b touchscreen controller maintainers: - Job Noorman diff --git a/Documentation/devicetree/bindings/input/touchscreen/hycon,hy46xx.yaml b/Documentation/devicetree/bindings/input/touchscreen/hycon,hy46xx.yaml index 942562f1e45b2498cfa63adb3e3483e7b3f1b4b2..874c0781c47693a036ef7ee97219112b21ac4b86 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/hycon,hy46xx.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/hycon,hy46xx.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/hycon,hy46xx.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Hycon HY46XX series touchscreen controller bindings +title: Hycon HY46XX series touchscreen controller description: | There are 6 variants of the chip for various touch panel sizes and cover lens material diff --git a/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml b/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml index e3a2b871e50c6b2b3df53f546b9c09d9ffd35899..0d6b033fd5fbc2508206ac7dd0bcf0201704c9c2 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/imagis,ist3038c.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Imagis IST30XXC family touchscreen controller bindings +title: Imagis IST30XXC family touchscreen controller maintainers: - Markuss Broks diff --git a/Documentation/devicetree/bindings/input/touchscreen/melfas,mms114.yaml b/Documentation/devicetree/bindings/input/touchscreen/melfas,mms114.yaml index 62366886fb3e17cd6e186aea2dcee49b072bda9a..fdd02898e2492543bcf3233c31f5feba558843a5 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/melfas,mms114.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/melfas,mms114.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/melfas,mms114.yaml# $schema: 
http://devicetree.org/meta-schemas/core.yaml# -title: Melfas MMS114 family touchscreen controller bindings +title: Melfas MMS114 family touchscreen controller maintainers: - Linus Walleij diff --git a/Documentation/devicetree/bindings/input/touchscreen/mstar,msg2638.yaml b/Documentation/devicetree/bindings/input/touchscreen/mstar,msg2638.yaml index af4f954de958d16bf93fa1bfe2a522d5504b2038..ddbbc820c7e556eb8f74f6ba9c3fa4502706978c 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/mstar,msg2638.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/mstar,msg2638.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/mstar,msg2638.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: MStar msg2638 touchscreen controller Bindings +title: MStar msg2638 touchscreen controller maintainers: - Vincent Knecht diff --git a/Documentation/devicetree/bindings/input/touchscreen/pixcir,pixcir_ts.yaml b/Documentation/devicetree/bindings/input/touchscreen/pixcir,pixcir_ts.yaml index f9998edbff7027444b144a9847d39531de0e6198..3305eda5ed88daa326565601ee38a1fc9b3dce7a 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/pixcir,pixcir_ts.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/pixcir,pixcir_ts.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/pixcir,pixcir_ts.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Pixcir Touchscreen Controller Device Tree Bindings +title: Pixcir Touchscreen Controller maintainers: - Dmitry Torokhov diff --git a/Documentation/devicetree/bindings/input/touchscreen/silead,gsl1680.yaml b/Documentation/devicetree/bindings/input/touchscreen/silead,gsl1680.yaml index eec6f7f6f0a3aadecc193983be6add8408e2f827..95b554be25b4076ad4615f919e5b76b55c62fbde 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/silead,gsl1680.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/silead,gsl1680.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/silead,gsl1680.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Silead GSL1680 Touchscreen Controller Device Tree Bindings +title: Silead GSL1680 Touchscreen Controller maintainers: - Dmitry Torokhov diff --git a/Documentation/devicetree/bindings/input/touchscreen/ti,tsc2005.yaml b/Documentation/devicetree/bindings/input/touchscreen/ti,tsc2005.yaml index 938aab016cc269159b75bcf8c45a4c679acd4875..7187c390b2f574012c09a3ef16b05886110b4cdd 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/ti,tsc2005.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/ti,tsc2005.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/ti,tsc2005.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Texas Instruments TSC2004 and TSC2005 touchscreen controller bindings +title: Texas Instruments TSC2004 and TSC2005 touchscreen controller maintainers: - Marek Vasut diff --git a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml index 4b5b212c772c42162119e0fad7d439cbbe5adae2..895592da962634437a8f22519f15827e7f328923 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/touchscreen.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Common touchscreen Bindings 
+title: Common touchscreen maintainers: - Dmitry Torokhov diff --git a/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml b/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml index b4e5ba7c0b49ec8ec5dcee994c7285432ff72842..b1507463a03e99574fd8a8304108d63fde0f0714 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/input/touchscreen/zinitix,bt400.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Zinitix BT4xx and BT5xx series touchscreen controller bindings +title: Zinitix BT4xx and BT5xx series touchscreen controller description: The Zinitix BT4xx and BT5xx series of touchscreen controllers are Korea-produced touchscreens with embedded microcontrollers. The diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml index be29e0b80995e0a6ea7d0cbea61055d6a028b4d1..0c720dbde36e1aef78dd92a62382f869b5e4dc79 100644 --- a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml +++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml @@ -25,9 +25,14 @@ properties: - items: - enum: - qcom,sc7280-cpu-bwmon + - qcom,sc8280xp-cpu-bwmon - qcom,sdm845-bwmon - const: qcom,msm8998-bwmon - const: qcom,msm8998-bwmon # BWMON v4 + - items: + - enum: + - qcom,sc8280xp-llcc-bwmon + - const: qcom,sc7280-llcc-bwmon - const: qcom,sc7280-llcc-bwmon # BWMON v5 - const: qcom,sdm845-llcc-bwmon # BWMON v5 diff --git a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml index bf538c0c5a811386cc33aac7093a5f07c2f7106c..aadae4424ba9537e96b0cf4750d8e07dee1838dd 100644 --- a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml +++ b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml @@ -16,13 +16,21 @@ description: properties: compatible: - enum: - - qcom,sc7180-osm-l3 - - qcom,sc7280-epss-l3 - - qcom,sc8180x-osm-l3 - - qcom,sdm845-osm-l3 - - qcom,sm8150-osm-l3 - - qcom,sm8250-epss-l3 + oneOf: + - items: + - enum: + - qcom,sc7180-osm-l3 + - qcom,sc8180x-osm-l3 + - qcom,sdm845-osm-l3 + - qcom,sm8150-osm-l3 + - const: qcom,osm-l3 + - items: + - enum: + - qcom,sc7280-epss-l3 + - qcom,sc8280xp-epss-l3 + - qcom,sm8250-epss-l3 + - qcom,sm8350-epss-l3 + - const: qcom,epss-l3 reg: maxItems: 1 @@ -56,7 +64,7 @@ examples: #define RPMH_CXO_CLK 0 osm_l3: interconnect@17d41000 { - compatible = "qcom,sdm845-osm-l3"; + compatible = "qcom,sdm845-osm-l3", "qcom,osm-l3"; reg = <0x17d41000 0x1400>; clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>; diff --git a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.yaml index 0358a7739c8e273b6ab8e95bb9b410df6aff2380..609308a5f91d84a643f93d4d8941346665ebb33f 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/interrupt-controller/ingenic,intc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs interrupt controller devicetree bindings +title: Ingenic SoCs interrupt controller maintainers: - Paul Cercueil diff --git 
a/Documentation/devicetree/bindings/interrupt-controller/mrvl,intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/mrvl,intc.yaml index 5a583bf3dbc105a010a3ba1f166c961a536919b0..9acc21028413eb5d4766bce0b1ff74d5dd3f194d 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/mrvl,intc.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/mrvl,intc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/interrupt-controller/mrvl,intc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Marvell MMP/Orion Interrupt controller bindings +title: Marvell MMP/Orion Interrupt controller maintainers: - Andrew Lunn diff --git a/Documentation/devicetree/bindings/interrupt-controller/nuvoton,wpcm450-aic.yaml b/Documentation/devicetree/bindings/interrupt-controller/nuvoton,wpcm450-aic.yaml index 9ce6804bdb9996f915fd914f804a798a197e0c7f..2d6307a383ad4556ec679a029acf1db16d9004d0 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/nuvoton,wpcm450-aic.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/nuvoton,wpcm450-aic.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/interrupt-controller/nuvoton,wpcm450-aic.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Nuvoton WPCM450 Advanced Interrupt Controller bindings +title: Nuvoton WPCM450 Advanced Interrupt Controller maintainers: - Jonathan Neuschäfer diff --git a/Documentation/devicetree/bindings/interrupt-controller/realtek,rtl-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/realtek,rtl-intc.yaml index 13a893b18fb64aa9d15458ac93b01809b9aa07a8..fb5593724059db224232a534843845a95ecf7671 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/realtek,rtl-intc.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/realtek,rtl-intc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/interrupt-controller/realtek,rtl-intc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Realtek RTL SoC interrupt controller devicetree bindings +title: Realtek RTL SoC interrupt controller description: Interrupt controller and router for Realtek MIPS SoCs, allowing each SoC diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml index 62fd47c88275d58722df4f093f0479059ae22063..95033cb514fbd8f63ead74c0db7a5eada84cd056 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/interrupt-controller/renesas,irqc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: DT bindings for the R-Mobile/R-Car/RZ/G interrupt controller +title: R-Mobile/R-Car/RZ/G interrupt controller maintainers: - Geert Uytterhoeven diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml index 9066e6df1ba103d4cd5f166aa1e09b24db18e75e..b28c5c2b0ff23dc34dda84ef3281c8207f06c4db 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml +++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml @@ -28,19 +28,50 @@ properties: - enum: - qcom,msm8996-smmu-v2 - qcom,msm8998-smmu-v2 + - qcom,sdm630-smmu-v2 - const: qcom,smmu-v2 - - description: Qcom SoCs implementing "arm,mmu-500" + - description: Qcom SoCs implementing "qcom,smmu-500" and "arm,mmu-500" items: - enum: - qcom,qcm2290-smmu-500 + - 
qcom,qdu1000-smmu-500 - qcom,sc7180-smmu-500 - qcom,sc7280-smmu-500 - qcom,sc8180x-smmu-500 - qcom,sc8280xp-smmu-500 + - qcom,sdm670-smmu-500 - qcom,sdm845-smmu-500 + - qcom,sm6115-smmu-500 + - qcom,sm6350-smmu-500 + - qcom,sm6375-smmu-500 + - qcom,sm8150-smmu-500 + - qcom,sm8250-smmu-500 + - qcom,sm8350-smmu-500 + - qcom,sm8450-smmu-500 + - const: qcom,smmu-500 + - const: arm,mmu-500 + + - description: Qcom SoCs implementing "arm,mmu-500" (non-qcom implementation) + deprecated: true + items: + - enum: - qcom,sdx55-smmu-500 - qcom,sdx65-smmu-500 + - const: arm,mmu-500 + + - description: Qcom SoCs implementing "arm,mmu-500" (legacy binding) + deprecated: true + items: + # Do not add additional SoC to this list. Instead use two previous lists. + - enum: + - qcom,qcm2290-smmu-500 + - qcom,sc7180-smmu-500 + - qcom,sc7280-smmu-500 + - qcom,sc8180x-smmu-500 + - qcom,sc8280xp-smmu-500 + - qcom,sdm845-smmu-500 + - qcom,sm6115-smmu-500 - qcom,sm6350-smmu-500 - qcom,sm6375-smmu-500 - qcom,sm8150-smmu-500 @@ -48,13 +79,28 @@ properties: - qcom,sm8350-smmu-500 - qcom,sm8450-smmu-500 - const: arm,mmu-500 + + - description: Qcom Adreno GPUs implementing "arm,smmu-500" + items: + - enum: + - qcom,sc7280-smmu-500 + - qcom,sm8250-smmu-500 + - const: qcom,adreno-smmu + - const: arm,mmu-500 - description: Qcom Adreno GPUs implementing "arm,smmu-v2" items: - enum: + - qcom,msm8996-smmu-v2 - qcom,sc7180-smmu-v2 + - qcom,sdm630-smmu-v2 - qcom,sdm845-smmu-v2 + - qcom,sm6350-smmu-v2 - const: qcom,adreno-smmu - const: qcom,smmu-v2 + - description: Qcom Adreno GPUs on Google Cheza platform + items: + - const: qcom,sdm845-smmu-v2 + - const: qcom,smmu-v2 - description: Marvell SoCs implementing "arm,mmu-500" items: - const: marvell,ap806-smmu-500 @@ -147,16 +193,12 @@ properties: present in such cases. clock-names: - items: - - const: bus - - const: iface + minItems: 1 + maxItems: 7 clocks: - items: - - description: bus clock required for downstream bus access and for the - smmu ptw - - description: interface clock required to access smmu's registers - through the TCU's programming interface. + minItems: 1 + maxItems: 7 power-domains: maxItems: 1 @@ -206,6 +248,124 @@ allOf: reg: maxItems: 1 + - if: + properties: + compatible: + contains: + enum: + - qcom,msm8998-smmu-v2 + - qcom,sdm630-smmu-v2 + then: + anyOf: + - properties: + clock-names: + items: + - const: bus + clocks: + items: + - description: bus clock required for downstream bus access and for + the smmu ptw + - properties: + clock-names: + items: + - const: iface + - const: mem + - const: mem_iface + clocks: + items: + - description: interface clock required to access smmu's registers + through the TCU's programming interface. + - description: bus clock required for memory access + - description: bus clock required for GPU memory access + - properties: + clock-names: + items: + - const: iface-mm + - const: iface-smmu + - const: bus-mm + - const: bus-smmu + clocks: + items: + - description: interface clock required to access mnoc's registers + through the TCU's programming interface. + - description: interface clock required to access smmu's registers + through the TCU's programming interface. 
+ - description: bus clock required for downstream bus access + - description: bus clock required for the smmu ptw + + - if: + properties: + compatible: + contains: + enum: + - qcom,msm8996-smmu-v2 + - qcom,sc7180-smmu-v2 + - qcom,sdm845-smmu-v2 + then: + properties: + clock-names: + items: + - const: bus + - const: iface + + clocks: + items: + - description: bus clock required for downstream bus access and for + the smmu ptw + - description: interface clock required to access smmu's registers + through the TCU's programming interface. + + - if: + properties: + compatible: + contains: + const: qcom,sc7280-smmu-500 + then: + properties: + clock-names: + items: + - const: gcc_gpu_memnoc_gfx_clk + - const: gcc_gpu_snoc_dvm_gfx_clk + - const: gpu_cc_ahb_clk + - const: gpu_cc_hlos1_vote_gpu_smmu_clk + - const: gpu_cc_cx_gmu_clk + - const: gpu_cc_hub_cx_int_clk + - const: gpu_cc_hub_aon_clk + + clocks: + items: + - description: GPU memnoc_gfx clock + - description: GPU snoc_dvm_gfx clock + - description: GPU ahb clock + - description: GPU hlos1_vote_GPU smmu clock + - description: GPU cx_gmu clock + - description: GPU hub_cx_int clock + - description: GPU hub_aon clock + + - if: + properties: + compatible: + contains: + enum: + - qcom,sm6350-smmu-v2 + - qcom,sm8150-smmu-500 + - qcom,sm8250-smmu-500 + then: + properties: + clock-names: + items: + - const: ahb + - const: bus + - const: iface + + clocks: + items: + - description: bus clock required for AHB bus access + - description: bus clock required for downstream bus access and for + the smmu ptw + - description: interface clock required to access smmu's registers + through the TCU's programming interface. + examples: - |+ /* SMMU with stream matching or stream indexing */ diff --git a/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml b/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml index 839e3be0bf3ca7f7f302cff51c144dd3c9b56b18..5b6395bc10e0cc1ecd13d05ffbf5a67551dbff9b 100644 --- a/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml +++ b/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml @@ -82,6 +82,7 @@ properties: - mediatek,mt8195-iommu-vdo # generation two - mediatek,mt8195-iommu-vpp # generation two - mediatek,mt8195-iommu-infra # generation two + - mediatek,mt8365-m4u # generation two - description: mt7623 generation one items: @@ -132,6 +133,7 @@ properties: dt-binding/memory/mt8186-memory-port.h for mt8186, dt-binding/memory/mt8192-larb-port.h for mt8192. dt-binding/memory/mt8195-memory-port.h for mt8195. + dt-binding/memory/mediatek,mt8365-larb-port.h for mt8365. 
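For orientation, the larb-port headers referenced above supply the port IDs that IOMMU consumers put in their iommus specifier. A hedged sketch of a hypothetical mt8365 consumer node follows; the node name, address, and M4U_PORT_DISP_OVL0 macro are illustrative, and the real macro must come from the SoC header named above:

    #include <dt-bindings/memory/mediatek,mt8365-larb-port.h>

    display@14008000 {
        /* larb/port ID macro from the mt8365 larb-port header */
        iommus = <&iommu M4U_PORT_DISP_OVL0>;
    };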
power-domains: maxItems: 1 diff --git a/Documentation/devicetree/bindings/ipmi/ipmi-ipmb.yaml b/Documentation/devicetree/bindings/ipmi/ipmi-ipmb.yaml index 71bc031c4fde6e10e04ca3a617e8545d6ea9a4dc..3f25cdb4e99bd0014b132f256114a8fe0ae03423 100644 --- a/Documentation/devicetree/bindings/ipmi/ipmi-ipmb.yaml +++ b/Documentation/devicetree/bindings/ipmi/ipmi-ipmb.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/ipmi/ipmi-ipmb.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: IPMI IPMB device bindings +title: IPMI IPMB device description: IPMI IPMB device bindings diff --git a/Documentation/devicetree/bindings/ipmi/ipmi-smic.yaml b/Documentation/devicetree/bindings/ipmi/ipmi-smic.yaml index 898e3267893acbb46ef80635f880d50e11063151..c1b4bf95ef99fe948fcbbcdd4292e2922be90f5b 100644 --- a/Documentation/devicetree/bindings/ipmi/ipmi-smic.yaml +++ b/Documentation/devicetree/bindings/ipmi/ipmi-smic.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/ipmi/ipmi-smic.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: IPMI device bindings +title: IPMI device description: IPMI device bindings diff --git a/Documentation/devicetree/bindings/leds/backlight/gpio-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/gpio-backlight.yaml index 3300451fcfd5e5235467f38230222c5b91eff732..584030b6b0b95edf8400eb1e2d8c110bcdfa8e9f 100644 --- a/Documentation/devicetree/bindings/leds/backlight/gpio-backlight.yaml +++ b/Documentation/devicetree/bindings/leds/backlight/gpio-backlight.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/leds/backlight/gpio-backlight.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: gpio-backlight bindings +title: gpio-backlight maintainers: - Lee Jones diff --git a/Documentation/devicetree/bindings/leds/backlight/led-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/led-backlight.yaml index 0793d0adc4ec93d891786d57597ab58ef1beb359..d7b78198abc22c6813cd03b78c7c0855bcd10142 100644 --- a/Documentation/devicetree/bindings/leds/backlight/led-backlight.yaml +++ b/Documentation/devicetree/bindings/leds/backlight/led-backlight.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/leds/backlight/led-backlight.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: led-backlight bindings +title: led-backlight maintainers: - Lee Jones diff --git a/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.yaml index 78fbe20a175805f89e91cfe4cedb73d0f6bd4077..5ec47a8c6568b60e617bd9deacbb149ccd3447de 100644 --- a/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.yaml +++ b/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/leds/backlight/pwm-backlight.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: pwm-backlight bindings +title: pwm-backlight maintainers: - Lee Jones diff --git a/Documentation/devicetree/bindings/leds/backlight/qcom-wled.yaml b/Documentation/devicetree/bindings/leds/backlight/qcom-wled.yaml index 4c15693f7a013e648fecb834d475811d93de8781..9acdb789551474ff9ba02b05eaa79ceb14905724 100644 --- a/Documentation/devicetree/bindings/leds/backlight/qcom-wled.yaml +++ b/Documentation/devicetree/bindings/leds/backlight/qcom-wled.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/leds/backlight/qcom-wled.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for Qualcomm Technologies, Inc. 
WLED driver +title: Qualcomm Technologies, Inc. WLED driver maintainers: - Bjorn Andersson diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml index 3c14a98430e190afa765419a27032652f7cc01bf..f5c57a580078ea23552d65921b472f5ca2ed474b 100644 --- a/Documentation/devicetree/bindings/leds/common.yaml +++ b/Documentation/devicetree/bindings/leds/common.yaml @@ -100,6 +100,7 @@ properties: - pattern # LED is triggered by SD/MMC activity - pattern: "^mmc[0-9]+$" + - pattern: "^cpu[0-9]*$" led-pattern: description: | diff --git a/Documentation/devicetree/bindings/leds/issi,is31fl319x.yaml b/Documentation/devicetree/bindings/leds/issi,is31fl319x.yaml index 2929382625b696b4ef490ee033b8af2901e7eb2b..d1b01bae9f63e336d7253c52823a95a8a53265d7 100644 --- a/Documentation/devicetree/bindings/leds/issi,is31fl319x.yaml +++ b/Documentation/devicetree/bindings/leds/issi,is31fl319x.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/leds/issi,is31fl319x.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ISSI LED controllers bindings for IS31FL319{0,1,3,6,9} +title: ISSI LED Controllers for IS31FL319{0,1,3,6,9} maintainers: - Vincent Knecht diff --git a/Documentation/devicetree/bindings/leds/leds-pm8058.txt b/Documentation/devicetree/bindings/leds/leds-pm8058.txt deleted file mode 100644 index 89584c49aab2b37227bca98c8765d77c247d0630..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/leds/leds-pm8058.txt +++ /dev/null @@ -1,67 +0,0 @@ -Qualcomm PM8058 LED driver - -The Qualcomm PM8058 is a multi-functional device which contains -an LED driver block for up to six LEDs: three normal LEDs, two -"flash" LEDs and one "keypad backlight" LED. The names are -quoted because sometimes these LED drivers are used for wildly -different things than flash or keypad backlight: their names -are more of a suggestion than a hard-wired usecase. - -Hardware-wise the different LEDs support slightly different -output currents. The "flash" LEDs do not need to charge nor -do they support external triggers. They are just powerful LED -drivers. - -The LEDs appear as children to the PM8058 device, with the -proper compatible string. For the PM8058 bindings see: -mfd/qcom-pm8xxx.txt. - -Each LED is represented as a sub-node of the syscon device. Each -node's name represents the name of the corresponding LED. 
- -LED sub-node properties: - -Required properties: -- compatible: one of - "qcom,pm8058-led" (for the normal LEDs at 0x131, 0x132 and 0x133) - "qcom,pm8058-keypad-led" (for the "keypad" LED at 0x48) - "qcom,pm8058-flash-led" (for the "flash" LEDs at 0x49 and 0xFB) - -Optional properties: -- label: see Documentation/devicetree/bindings/leds/common.txt -- default-state: see Documentation/devicetree/bindings/leds/common.txt -- linux,default-trigger: see Documentation/devicetree/bindings/leds/common.txt - -Example: - -qcom,ssbi@500000 { - pmicintc: pmic@0 { - compatible = "qcom,pm8058"; - led@48 { - compatible = "qcom,pm8058-keypad-led"; - reg = <0x48>; - label = "pm8050:white:keypad"; - default-state = "off"; - }; - led@131 { - compatible = "qcom,pm8058-led"; - reg = <0x131>; - label = "pm8058:red"; - default-state = "off"; - }; - led@132 { - compatible = "qcom,pm8058-led"; - reg = <0x132>; - label = "pm8058:yellow"; - default-state = "off"; - linux,default-trigger = "mmc0"; - }; - led@133 { - compatible = "qcom,pm8058-led"; - reg = <0x133>; - label = "pm8058:green"; - default-state = "on"; - linux,default-trigger = "heartbeat"; - }; - }; -}; diff --git a/Documentation/devicetree/bindings/leds/qcom,pm8058-led.yaml b/Documentation/devicetree/bindings/leds/qcom,pm8058-led.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fa03e73622d4874e48a583bd7e6d941ed3187bda --- /dev/null +++ b/Documentation/devicetree/bindings/leds/qcom,pm8058-led.yaml @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/leds/qcom,pm8058-led.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm PM8058 PMIC LED + +maintainers: + - Krzysztof Kozlowski + +description: | + The Qualcomm PM8058 contains an LED block for up to six LEDs: three normal + LEDs, two "flash" LEDs and one "keypad backlight" LED. The names are quoted + because sometimes these LED drivers are used for wildly different things than + flash or keypad backlight: their names are more of a suggestion than a + hard-wired use case. + + Hardware-wise the different LEDs support slightly different output currents. + The "flash" LEDs do not need to charge nor do they support external triggers. + They are just powerful LED drivers.
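The schema's own example (below) covers a normal LED; for the "flash" variant, a hedged sketch follows, reusing the 0x49 address documented in the deleted text binding above. The label and default-state are illustrative only:

/* Hypothetical "flash" LED child under the new YAML binding.
 * Only the compatible string and the 0x49 address come from the old
 * text binding's register map; the rest is made up for illustration.
 */
led@49 {
        compatible = "qcom,pm8058-flash-led";
        reg = <0x49>;
        label = "pm8058:white:flash";
        default-state = "off";
};

Per the same register map, led@48 would take qcom,pm8058-keypad-led, and led@fb would be the second flash channel.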
+ +allOf: + - $ref: common.yaml# + +properties: + compatible: + enum: + - qcom,pm8058-led + - qcom,pm8058-keypad-led + - qcom,pm8058-flash-led + + reg: + maxItems: 1 + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/leds/common.h> + + pmic { + #address-cells = <1>; + #size-cells = <0>; + + led@131 { + compatible = "qcom,pm8058-led"; + reg = <0x131>; + label = "pm8058:red"; + color = <LED_COLOR_ID_RED>; + default-state = "off"; + }; + }; diff --git a/Documentation/devicetree/bindings/leds/register-bit-led.yaml b/Documentation/devicetree/bindings/leds/register-bit-led.yaml index 79b8fc0f9d235cc9f544457e752b56a09e609914..ed26ec19ecbd8e1c8346d224eb96dbe74a9bdd0d 100644 --- a/Documentation/devicetree/bindings/leds/register-bit-led.yaml +++ b/Documentation/devicetree/bindings/leds/register-bit-led.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/leds/register-bit-led.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Device Tree Bindings for Register Bit LEDs +title: Register Bit LEDs maintainers: - Linus Walleij diff --git a/Documentation/devicetree/bindings/leds/regulator-led.yaml b/Documentation/devicetree/bindings/leds/regulator-led.yaml index 3e020d700c00ff22a9949caa4f669a6e6b0e2488..4ef7b96e9a086b815ee23edecf91aa9fb7b6a3bc 100644 --- a/Documentation/devicetree/bindings/leds/regulator-led.yaml +++ b/Documentation/devicetree/bindings/leds/regulator-led.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/leds/regulator-led.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Device Tree Bindings for Regulator LEDs +title: Regulator LEDs maintainers: - Linus Walleij diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml index f24fd84b4b05c21510d66f4a9cf256a6e0b7da3b..c71b7c065b825334d438e28e3c9ea8c7c3c57135 100644 --- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml +++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/mailbox/qcom,apcs-kpss-global.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Qualcomm APCS global block bindings +title: Qualcomm APCS global block description: This binding describes the APCS "global" block found in various Qualcomm diff --git a/Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml b/Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml index 80feba82cbd611c329893f6d7e66afdfac920eca..bdfb4a8220c546441918e40eb7b9c09c433f058b 100644 --- a/Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml +++ b/Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/mailbox/sprd-mailbox.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Spreadtrum mailbox controller bindings +title: Spreadtrum mailbox controller maintainers: - Orson Zhai diff --git a/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml index 2c8b47285aa3489488e848c3f5bf4fc288d30a19..0dfe05a04dd00fb573164d17998e461d4ccf77ae 100644 --- a/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml +++ b/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/mailbox/st,stm32-ipcc.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: STMicroelectronics STM32 IPC controller bindings +title:
STMicroelectronics STM32 IPC controller description: The IPCC block provides a non blocking signaling mechanism to post and diff --git a/Documentation/devicetree/bindings/media/allwinner,sun6i-a31-isp.yaml b/Documentation/devicetree/bindings/media/allwinner,sun6i-a31-isp.yaml index 6bda4f2b94c2e18188ea21d8a51e51704024efd6..a61a76bb611cd96695e5e39418574e69fd838b67 100644 --- a/Documentation/devicetree/bindings/media/allwinner,sun6i-a31-isp.yaml +++ b/Documentation/devicetree/bindings/media/allwinner,sun6i-a31-isp.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/media/allwinner,sun6i-a31-isp.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Allwinner A31 Image Signal Processor Driver (ISP) Device Tree Bindings +title: Allwinner A31 Image Signal Processor Driver (ISP) maintainers: - Paul Kocialkowski diff --git a/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9768.yaml b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9768.yaml index 21864ab86ec4647ce0134d9aee17b90b781cd2fe..82d3d18c16a18c10c8e1ea535b10f053c2050f80 100644 --- a/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9768.yaml +++ b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9768.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/media/i2c/dongwoon,dw9768.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Dongwoon Anatech DW9768 Voice Coil Motor (VCM) Lens Device Tree Bindings +title: Dongwoon Anatech DW9768 Voice Coil Motor (VCM) Lens maintainers: - Dongchun Zhu diff --git a/Documentation/devicetree/bindings/media/i2c/ov8856.yaml b/Documentation/devicetree/bindings/media/i2c/ov8856.yaml index baf92aaaf04984659f5eaf9c55c3df06ff055f31..e17288d579812018a26b1fb79698158b68ebf316 100644 --- a/Documentation/devicetree/bindings/media/i2c/ov8856.yaml +++ b/Documentation/devicetree/bindings/media/i2c/ov8856.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/media/i2c/ov8856.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Omnivision OV8856 CMOS Sensor Device Tree Bindings +title: Omnivision OV8856 CMOS Sensor maintainers: - Dongchun Zhu diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml index 63a040944f3dc8eae2bc07be25fd5b74452e7b61..54df9d73dc86f39682bce33351e07e5e659749c3 100644 --- a/Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml +++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/media/i2c/ovti,ov02a10.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Omnivision OV02A10 CMOS Sensor Device Tree Bindings +title: Omnivision OV02A10 CMOS Sensor maintainers: - Dongchun Zhu diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov5640.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov5640.yaml index 540fd69ac39f137d92cc903416c6e325c9958d04..a621032f9bd078cc06dad6014f10fa96da0f9f1b 100644 --- a/Documentation/devicetree/bindings/media/i2c/ovti,ov5640.yaml +++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov5640.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/media/i2c/ovti,ov5640.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: OmniVision OV5640 Image Sensor Device Tree Bindings +title: OmniVision OV5640 Image Sensor maintainers: - Steve Longerbeam diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov5645.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov5645.yaml index 
52c6281a66847334daeff1ffb61ba4ee2a6462c2..bc9b27afe3ea3319fbabbda0de318c5ed3adc2bd 100644 --- a/Documentation/devicetree/bindings/media/i2c/ovti,ov5645.yaml +++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov5645.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/media/i2c/ovti,ov5645.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: OmniVision OV5645 Image Sensor Device Tree Bindings +title: OmniVision OV5645 Image Sensor maintainers: - Lad Prabhakar diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov5648.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov5648.yaml index 246dc5fec716c897e1ac587985b791dfa1d2ef34..61e4e9cf87832d07eab18865fc7f54fa1893ecb2 100644 --- a/Documentation/devicetree/bindings/media/i2c/ovti,ov5648.yaml +++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov5648.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/media/i2c/ovti,ov5648.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: OmniVision OV5648 Image Sensor Device Tree Bindings +title: OmniVision OV5648 Image Sensor maintainers: - Paul Kocialkowski diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov8865.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov8865.yaml index b962863e4f65708dc6228e8c37196689fb3723d7..6bac326dceafd78c50eca5723dd982018579fe4c 100644 --- a/Documentation/devicetree/bindings/media/i2c/ovti,ov8865.yaml +++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov8865.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/media/i2c/ovti,ov8865.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: OmniVision OV8865 Image Sensor Device Tree Bindings +title: OmniVision OV8865 Image Sensor maintainers: - Paul Kocialkowski diff --git a/Documentation/devicetree/bindings/media/i2c/st,st-vgxy61.yaml b/Documentation/devicetree/bindings/media/i2c/st,st-vgxy61.yaml index 6597e1d0e65fd20cd53c6b0866e1467de310b0f9..8c28848b226a03074a8353b0382d30fbeafbb739 100644 --- a/Documentation/devicetree/bindings/media/i2c/st,st-vgxy61.yaml +++ b/Documentation/devicetree/bindings/media/i2c/st,st-vgxy61.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/media/i2c/st,st-vgxy61.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics VGxy61 HDR Global Shutter Sensor Family Device Tree Bindings +title: STMicroelectronics VGxy61 HDR Global Shutter Sensor Family maintainers: - Benjamin Mugnier diff --git a/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml b/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml index 0e3478551e137df4056de0097449075971c1546d..de3e483f146a044eb27f63020bfb48f279725c7f 100644 --- a/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml +++ b/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/media/marvell,mmp2-ccic.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Marvell MMP2 camera host interface bindings +title: Marvell MMP2 camera host interface maintainers: - Lubomir Rintel diff --git a/Documentation/devicetree/bindings/media/renesas,ceu.yaml b/Documentation/devicetree/bindings/media/renesas,ceu.yaml index 50e0740af15a7a602033188f23817fa95237f47d..d527fc42c3fd10220ed01b694c78e86a2a191a40 100644 --- a/Documentation/devicetree/bindings/media/renesas,ceu.yaml +++ b/Documentation/devicetree/bindings/media/renesas,ceu.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/media/renesas,ceu.yaml# $schema: 
http://devicetree.org/meta-schemas/core.yaml# -title: Renesas Capture Engine Unit (CEU) Bindings +title: Renesas Capture Engine Unit (CEU) maintainers: - Jacopo Mondi diff --git a/Documentation/devicetree/bindings/media/st,stm32-cec.yaml b/Documentation/devicetree/bindings/media/st,stm32-cec.yaml index 77144cc6f7db9b04a1602989b74ccdd4a6eb64d0..7f545a587a3923028052a6a6de999e80b53cf8a5 100644 --- a/Documentation/devicetree/bindings/media/st,stm32-cec.yaml +++ b/Documentation/devicetree/bindings/media/st,stm32-cec.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/media/st,stm32-cec.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 CEC bindings +title: STMicroelectronics STM32 CEC maintainers: - Yannick Fertre diff --git a/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml b/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml index e80fcdf280f0bb20425712f3d8e7ae56bb28e54b..6b3e413cedb259ef71f7726ad1bb84c3068cbdca 100644 --- a/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml +++ b/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/media/st,stm32-dcmi.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 Digital Camera Memory Interface (DCMI) binding +title: STMicroelectronics STM32 Digital Camera Memory Interface (DCMI) maintainers: - Hugues Fruchet diff --git a/Documentation/devicetree/bindings/media/st,stm32-dma2d.yaml b/Documentation/devicetree/bindings/media/st,stm32-dma2d.yaml index f97b4a24660568f6071558cfe9f910e849d59064..4afa4a24b868552a8eb50c1c9cbca974ece10dd2 100644 --- a/Documentation/devicetree/bindings/media/st,stm32-dma2d.yaml +++ b/Documentation/devicetree/bindings/media/st,stm32-dma2d.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/media/st,stm32-dma2d.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 Chrom-Art Accelerator DMA2D binding +title: STMicroelectronics STM32 Chrom-Art Accelerator DMA2D description: Chrom-ART Accelerator(DMA2D), graphical hardware accelerator diff --git a/Documentation/devicetree/bindings/media/video-interface-devices.yaml b/Documentation/devicetree/bindings/media/video-interface-devices.yaml index 4527f56a5a6eb550a681bdf57794c3b34d47a309..cf7712ad297c01c946fa4dfdaf9a21646e125099 100644 --- a/Documentation/devicetree/bindings/media/video-interface-devices.yaml +++ b/Documentation/devicetree/bindings/media/video-interface-devices.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/media/video-interface-devices.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Common bindings for video receiver and transmitter devices +title: Common Properties for Video Receiver and Transmitter Devices maintainers: - Jacopo Mondi diff --git a/Documentation/devicetree/bindings/media/video-interfaces.yaml b/Documentation/devicetree/bindings/media/video-interfaces.yaml index 34bdad02818080d4bcfa8560bdf99872cb3016e1..a211d49dc2ac348f59eefca821838cd5489b00e4 100644 --- a/Documentation/devicetree/bindings/media/video-interfaces.yaml +++ b/Documentation/devicetree/bindings/media/video-interfaces.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/media/video-interfaces.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Common bindings for video receiver and transmitter interface endpoints +title: Common Properties for Video Receiver and Transmitter Interface Endpoints maintainers: - Sakari Ailus diff --git 
a/Documentation/devicetree/bindings/memory-controllers/calxeda-ddr-ctrlr.yaml b/Documentation/devicetree/bindings/memory-controllers/calxeda-ddr-ctrlr.yaml index 96d563fd61f58b93fc1542edf1cd314193a0fefa..e42aa488704d42768a20c0c8d39d937fb86c4d51 100644 --- a/Documentation/devicetree/bindings/memory-controllers/calxeda-ddr-ctrlr.yaml +++ b/Documentation/devicetree/bindings/memory-controllers/calxeda-ddr-ctrlr.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/memory-controllers/calxeda-ddr-ctrlr.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Calxeda DDR memory controller binding +title: Calxeda DDR memory controller description: | The Calxeda DDR memory controller is initialised and programmed by the diff --git a/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc-peripherals.yaml b/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc-peripherals.yaml index b8ed52a44d5710bc1af34948c6059c1230e74136..89ebe3979012c800492a67c5284ce1bb712727ee 100644 --- a/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc-peripherals.yaml +++ b/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc-peripherals.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/memory-controllers/ingenic,nemc-peripherals.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs NAND / External Memory Controller (NEMC) devicetree bindings +title: Ingenic SoCs NAND / External Memory Controller (NEMC) maintainers: - Paul Cercueil diff --git a/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml b/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml index dd13a5106d6c52c51442b0b2fd895de4749eac24..a02724221ff3ca02e6ae1f698b7e1f358deec7f5 100644 --- a/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml +++ b/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/memory-controllers/ingenic,nemc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs NAND / External Memory Controller (NEMC) devicetree bindings +title: Ingenic SoCs NAND / External Memory Controller (NEMC) maintainers: - Paul Cercueil diff --git a/Documentation/devicetree/bindings/memory-controllers/st,stm32-fmc2-ebi.yaml b/Documentation/devicetree/bindings/memory-controllers/st,stm32-fmc2-ebi.yaml index d71af02b7f16c0aa220ef732db9dfaf3bdede58f..e76ba767dfd2200accb6a43e5c7529d7aafc9ccc 100644 --- a/Documentation/devicetree/bindings/memory-controllers/st,stm32-fmc2-ebi.yaml +++ b/Documentation/devicetree/bindings/memory-controllers/st,stm32-fmc2-ebi.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/memory-controllers/st,stm32-fmc2-ebi.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics Flexible Memory Controller 2 (FMC2) Bindings +title: STMicroelectronics Flexible Memory Controller 2 (FMC2) description: | The FMC2 functional block makes the interface with: synchronous and diff --git a/Documentation/devicetree/bindings/memory-controllers/ti,gpmc-child.yaml b/Documentation/devicetree/bindings/memory-controllers/ti,gpmc-child.yaml index 4a257fac577ef9dd887527276f2dff29fe6a05b9..383d19e0ba26cf8f547aae58a3184e947cdf7d81 100644 --- a/Documentation/devicetree/bindings/memory-controllers/ti,gpmc-child.yaml +++ b/Documentation/devicetree/bindings/memory-controllers/ti,gpmc-child.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/memory-controllers/ti,gpmc-child.yaml# $schema: 
http://devicetree.org/meta-schemas/core.yaml# -title: device tree bindings for children of the Texas Instruments GPMC +title: Texas Instruments GPMC Bus Child Nodes maintainers: - Tony Lindgren diff --git a/Documentation/devicetree/bindings/memory-controllers/ti,gpmc.yaml b/Documentation/devicetree/bindings/memory-controllers/ti,gpmc.yaml index e188a4bf755c00d42f83d1e065ef3f5970364a0b..4f30173ad7479bcde9dcb886aecfa6b98fbccd78 100644 --- a/Documentation/devicetree/bindings/memory-controllers/ti,gpmc.yaml +++ b/Documentation/devicetree/bindings/memory-controllers/ti,gpmc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/memory-controllers/ti,gpmc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Texas Instruments GPMC Memory Controller device-tree bindings +title: Texas Instruments GPMC Memory Controller maintainers: - Tony Lindgren diff --git a/Documentation/devicetree/bindings/mfd/actions,atc260x.yaml b/Documentation/devicetree/bindings/mfd/actions,atc260x.yaml index dd43a0c766f34ee41316597420a5edfe9fae2304..c3a368a0fe93566c46f5320a101045a352ae0c25 100644 --- a/Documentation/devicetree/bindings/mfd/actions,atc260x.yaml +++ b/Documentation/devicetree/bindings/mfd/actions,atc260x.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mfd/actions,atc260x.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Actions Semi ATC260x Power Management IC bindings +title: Actions Semi ATC260x Power Management IC maintainers: - Manivannan Sadhasivam diff --git a/Documentation/devicetree/bindings/mfd/ene-kb3930.yaml b/Documentation/devicetree/bindings/mfd/ene-kb3930.yaml index 08af356f5d275d590a57f232e88f075c8f93554c..9b11b6e2bbf746f46406db268bf49b6775939fb3 100644 --- a/Documentation/devicetree/bindings/mfd/ene-kb3930.yaml +++ b/Documentation/devicetree/bindings/mfd/ene-kb3930.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mfd/ene-kb3930.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ENE KB3930 Embedded Controller bindings +title: ENE KB3930 Embedded Controller description: | This binding describes the ENE KB3930 Embedded Controller attached to an diff --git a/Documentation/devicetree/bindings/mfd/ene-kb930.yaml b/Documentation/devicetree/bindings/mfd/ene-kb930.yaml index 06ed9ec8f4bbce472cc0a7d52253f9e371ca031f..02c111def5de5fa480c59e70518e1de89b2aff08 100644 --- a/Documentation/devicetree/bindings/mfd/ene-kb930.yaml +++ b/Documentation/devicetree/bindings/mfd/ene-kb930.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mfd/ene-kb930.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ENE KB930 Embedded Controller bindings +title: ENE KB930 Embedded Controller description: | This binding describes the ENE KB930 Embedded Controller attached to an @@ -13,6 +13,8 @@ description: | maintainers: - Dmitry Osipenko +$ref: /schemas/power/supply/power-supply.yaml + properties: compatible: items: @@ -22,15 +24,13 @@ properties: reg: maxItems: 1 - monitored-battery: true - power-supplies: true system-power-controller: true required: - compatible - reg -additionalProperties: false +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/mfd/fsl,imx8qxp-csr.yaml b/Documentation/devicetree/bindings/mfd/fsl,imx8qxp-csr.yaml index f09577105b507dd730c31e2937b93fe5cd1b7c96..20067002cc4a399fbc1d3053c90a23e9bb91f03f 100644 --- a/Documentation/devicetree/bindings/mfd/fsl,imx8qxp-csr.yaml +++ b/Documentation/devicetree/bindings/mfd/fsl,imx8qxp-csr.yaml @@ -4,7 +4,7 @@ $id: 
http://devicetree.org/schemas/mfd/fsl,imx8qxp-csr.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Freescale i.MX8qm/qxp Control and Status Registers Module Bindings +title: Freescale i.MX8qm/qxp Control and Status Registers Module maintainers: - Liu Ying diff --git a/Documentation/devicetree/bindings/mfd/qcom,pm8008.yaml b/Documentation/devicetree/bindings/mfd/qcom,pm8008.yaml index ec3138c1bbfc14d265901f90d5a5e84c172ad68f..e6a2387d8650e9c42f2d31dd0481e7c9772b41fd 100644 --- a/Documentation/devicetree/bindings/mfd/qcom,pm8008.yaml +++ b/Documentation/devicetree/bindings/mfd/qcom,pm8008.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mfd/qcom,pm8008.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Technologies, Inc. PM8008 PMIC bindings +title: Qualcomm Technologies, Inc. PM8008 PMIC maintainers: - Guru Das Srinagesh diff --git a/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.yaml b/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.yaml index 61bd0b3ce02f8ac5b3c122c0a2b31aec0a1025a5..bd6e4aecfe2b14d494335b53d6ce7ed75fc8652e 100644 --- a/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.yaml +++ b/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.yaml @@ -39,6 +39,10 @@ properties: interrupt-controller: true patternProperties: + "led@[0-9a-f]+$": + type: object + $ref: /schemas/leds/qcom,pm8058-led.yaml# + "rtc@[0-9a-f]+$": type: object $ref: "../rtc/qcom-pm8xxx-rtc.yaml" diff --git a/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml b/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml index 935e17099213febbe712abb1bc480a3ce493dc27..269fb85b20278c9a4507df8ee9752e235600f2ca 100644 --- a/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml +++ b/Documentation/devicetree/bindings/mfd/rockchip,rk817.yaml @@ -124,6 +124,8 @@ properties: The child node for the charger to hold additional properties. If a battery is not in use, this node can be omitted. 
type: object + $ref: /schemas/power/supply/power-supply.yaml + properties: monitored-battery: description: | diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml index fbface720678c54896b6e1796d6f73d4bb289c37..5fbb94d2e4fae00fab14227f90402cc0ad1ebcc5 100644 --- a/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml +++ b/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mfd/rohm,bd71815-pmic.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ROHM BD71815 Power Management Integrated Circuit bindings +title: ROHM BD71815 Power Management Integrated Circuit maintainers: - Matti Vaittinen diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml index 8380166d176cda73a05258d1b6f8a1f153d6de59..d15ea8e9acad76f2ca793afc1f52d5e78ab95191 100644 --- a/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml +++ b/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mfd/rohm,bd71828-pmic.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ROHM BD71828 Power Management Integrated Circuit bindings +title: ROHM BD71828 Power Management Integrated Circuit maintainers: - Matti Vaittinen diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.yaml index 3bfdd33702ad1386e81448ccca3ef622713c5d13..4aca765caee248f771595956fbb59a91df22de79 100644 --- a/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.yaml +++ b/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mfd/rohm,bd71837-pmic.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ROHM BD71837 Power Management Integrated Circuit bindings +title: ROHM BD71837 Power Management Integrated Circuit maintainers: - Matti Vaittinen diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71847-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd71847-pmic.yaml index 5d531051a15373086405e8656011720f54b62b55..e6491729715f90f83c060f1f8c5855b9f1bb23c5 100644 --- a/Documentation/devicetree/bindings/mfd/rohm,bd71847-pmic.yaml +++ b/Documentation/devicetree/bindings/mfd/rohm,bd71847-pmic.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mfd/rohm,bd71847-pmic.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ROHM BD71847 and BD71850 Power Management Integrated Circuit bindings +title: ROHM BD71847 and BD71850 Power Management Integrated Circuit maintainers: - Matti Vaittinen diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd9576-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd9576-pmic.yaml index 6483860da955473ffd5efda46cbc32f136a6c3ff..34ff0a322f3a1239fdb35d0ca09f6a1ddbad8231 100644 --- a/Documentation/devicetree/bindings/mfd/rohm,bd9576-pmic.yaml +++ b/Documentation/devicetree/bindings/mfd/rohm,bd9576-pmic.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mfd/rohm,bd9576-pmic.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ROHM BD9576MUF and BD9573MUF Power Management Integrated Circuit bindings +title: ROHM BD9576MUF and BD9573MUF Power Management Integrated Circuit maintainers: - Matti Vaittinen diff --git a/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml 
b/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml index d950dd5d48bd632129991be2134cd328b92ecca8..27329c5dc38e6e1bb87ea50798eda2afb3cdb4a3 100644 --- a/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml +++ b/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mfd/st,stm32-lptimer.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 Low-Power Timers bindings +title: STMicroelectronics STM32 Low-Power Timers description: | The STM32 Low-Power Timer (LPTIM) is a 16-bit timer that provides several diff --git a/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml b/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml index e2c3c3b44abba45bd1d8843e70499aa6ac3a6a50..f84e09a5743b77c5be31ac4f6f3a979c0122d44d 100644 --- a/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml +++ b/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mfd/st,stm32-timers.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 Timers bindings +title: STMicroelectronics STM32 Timers description: | This hardware block provides 3 types of timer along with PWM functionality: diff --git a/Documentation/devicetree/bindings/mfd/st,stmfx.yaml b/Documentation/devicetree/bindings/mfd/st,stmfx.yaml index b4d54302582fbe17eff79930f0cb57ff9c4000b3..76551c90b128e86781e51b8c201298671dd8cb8a 100644 --- a/Documentation/devicetree/bindings/mfd/st,stmfx.yaml +++ b/Documentation/devicetree/bindings/mfd/st,stmfx.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mfd/st,stmfx.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectonics Multi-Function eXpander (STMFX) bindings +title: STMicroelectronics Multi-Function eXpander (STMFX) description: ST Multi-Function eXpander (STMFX) is a slave controller using I2C for communication with the main MCU.
Its main features are GPIO expansion, diff --git a/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml b/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml index 426658ad81d4679ed0d19997c8e3b1e21f6dbd42..9573e4af949ee5d2ad3b7d6f5e056c4a3c587a27 100644 --- a/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml +++ b/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mfd/st,stpmic1.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectonics STPMIC1 Power Management IC bindings +title: STMicroelectronics STPMIC1 Power Management IC description: STMicroelectronics STPMIC1 Power Management IC diff --git a/Documentation/devicetree/bindings/mips/cpus.yaml b/Documentation/devicetree/bindings/mips/cpus.yaml index e991f4c6668dadd478a4712c0bc281be2a98ad45..cf382dea3922c915df71d5b8fb8cfa2bcdc78d91 100644 --- a/Documentation/devicetree/bindings/mips/cpus.yaml +++ b/Documentation/devicetree/bindings/mips/cpus.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mips/cpus.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: MIPS CPUs bindings +title: MIPS CPUs maintainers: - Thomas Bogendoerfer diff --git a/Documentation/devicetree/bindings/mips/ingenic/devices.yaml b/Documentation/devicetree/bindings/mips/ingenic/devices.yaml index ee00d414df100142d52d5a826917f35f80c79203..f2e822afe7fbc2a70c4292b5c40f4a1b55861d41 100644 --- a/Documentation/devicetree/bindings/mips/ingenic/devices.yaml +++ b/Documentation/devicetree/bindings/mips/ingenic/devices.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mips/ingenic/devices.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic XBurst based Platforms Device Tree Bindings +title: Ingenic XBurst based Platforms maintainers: - 周琰杰 (Zhou Yanjie) diff --git a/Documentation/devicetree/bindings/mips/lantiq/lantiq,dma-xway.yaml b/Documentation/devicetree/bindings/mips/lantiq/lantiq,dma-xway.yaml index 40130fefa2b413e67c5cda5fd60b02837c159daf..15d41bdbdc2639f3f4e69ec58ce6f6db48ea75dc 100644 --- a/Documentation/devicetree/bindings/mips/lantiq/lantiq,dma-xway.yaml +++ b/Documentation/devicetree/bindings/mips/lantiq/lantiq,dma-xway.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mips/lantiq/lantiq,dma-xway.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Lantiq Xway SoCs DMA Controller DT bindings +title: Lantiq Xway SoCs DMA Controller maintainers: - John Crispin diff --git a/Documentation/devicetree/bindings/mips/loongson/devices.yaml b/Documentation/devicetree/bindings/mips/loongson/devices.yaml index 9fee6708e6f5937698dfa8f06ade8b7d863ca0fd..f13ce386f42cc84c3198eb80d9710a5f169ac7e7 100644 --- a/Documentation/devicetree/bindings/mips/loongson/devices.yaml +++ b/Documentation/devicetree/bindings/mips/loongson/devices.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mips/loongson/devices.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Loongson based Platforms Device Tree Bindings +title: Loongson based Platforms maintainers: - Jiaxun Yang diff --git a/Documentation/devicetree/bindings/misc/olpc,xo1.75-ec.yaml b/Documentation/devicetree/bindings/misc/olpc,xo1.75-ec.yaml index b3c45c046ba5e37700275b26824af51d94c297e4..e99342f268a64bc97eb000c72a5345ffd437fbab 100644 --- a/Documentation/devicetree/bindings/misc/olpc,xo1.75-ec.yaml +++ b/Documentation/devicetree/bindings/misc/olpc,xo1.75-ec.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/misc/olpc,xo1.75-ec.yaml# $schema:
http://devicetree.org/meta-schemas/core.yaml# -title: OLPC XO-1.75 Embedded Controller bindings +title: OLPC XO-1.75 Embedded Controller description: | This binding describes the Embedded Controller acting as a SPI bus master diff --git a/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml b/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml index d7576f8ac94b436df0670097a467d0c7c5a7bc0b..1ab9588cdd896786c74077063a2fa6af7be014d8 100644 --- a/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml +++ b/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml @@ -79,7 +79,7 @@ patternProperties: iommus: minItems: 1 - maxItems: 2 + maxItems: 3 qcom,nsessions: $ref: /schemas/types.yaml#/definitions/uint32 diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml b/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml index 83be9e93d221bc180503a74cd817cbc63cbf116a..4053de758db607a54f802046709b8d3f69c465ad 100644 --- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml +++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/mmc/arasan,sdhci.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Device Tree Bindings for the Arasan SDHCI Controller +title: Arasan SDHCI Controller maintainers: - Adrian Hunter diff --git a/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml b/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml index dead421e17d6233fae63d888a8b5f3d76c3bbd64..c028039bc477ce3efa6133a60e0ff3f03b50c7db 100644 --- a/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml +++ b/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mmc/brcm,sdhci-brcmstb.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom BRCMSTB/BMIPS SDHCI Controller binding +title: Broadcom BRCMSTB/BMIPS SDHCI Controller maintainers: - Al Cooper diff --git a/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml b/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml index 2d10aedf2e008f243d3eefc09e799f533bfaad61..bb4e0be0c8935df7b884107f062564288d76df53 100644 --- a/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml +++ b/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mmc/ingenic,mmc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs MMC Controller DT bindings +title: Ingenic SoCs MMC Controller maintainers: - Paul Cercueil diff --git a/Documentation/devicetree/bindings/mmc/microchip,dw-sparx5-sdhci.yaml b/Documentation/devicetree/bindings/mmc/microchip,dw-sparx5-sdhci.yaml index 69ff065c9a39ad68f5409210cc49b925cbb02935..fa6cfe092fc9b4364b182fff3e33538677344111 100644 --- a/Documentation/devicetree/bindings/mmc/microchip,dw-sparx5-sdhci.yaml +++ b/Documentation/devicetree/bindings/mmc/microchip,dw-sparx5-sdhci.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mmc/microchip,dw-sparx5-sdhci.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Microchip Sparx5 Mobile Storage Host Controller Binding +title: Microchip Sparx5 Mobile Storage Host Controller allOf: - $ref: "mmc-controller.yaml" diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml index e82c0036808865877440872ac3876e8c853b0c7d..86c73fd825fdc53af79c8a38ff847c09e9055a45 100644 --- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml +++ 
b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mmc/mmc-controller.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: MMC Controller Generic Binding +title: MMC Controller Common Properties maintainers: - Ulf Hansson diff --git a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-emmc.yaml b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-emmc.yaml index 1fc7e620f328021d6ee7b2f73c0aae9e5dcc0c3b..911a5996e099123205c5d895dbb5152b7bb19e27 100644 --- a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-emmc.yaml +++ b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-emmc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mmc/mmc-pwrseq-emmc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Simple eMMC hardware reset provider binding +title: Simple eMMC hardware reset provider maintainers: - Ulf Hansson diff --git a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-sd8787.yaml b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-sd8787.yaml index 9e23967510304928b89fa46d1ac7e58c06716b5c..3397dbff88c2e3891e1cf91ef2a71a8b35279434 100644 --- a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-sd8787.yaml +++ b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-sd8787.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mmc/mmc-pwrseq-sd8787.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Marvell SD8787 power sequence provider binding +title: Marvell SD8787 power sequence provider maintainers: - Ulf Hansson diff --git a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml index 226fb191913d2a5dd7b757509a2e10b3edb7a9a9..64e3644eefeb0aad8560f620bf794687c999f4a0 100644 --- a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml +++ b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mmc/mmc-pwrseq-simple.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Simple MMC power sequence provider binding +title: Simple MMC power sequence provider maintainers: - Ulf Hansson diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.yaml b/Documentation/devicetree/bindings/mmc/mtk-sd.yaml index 8ed94a12a03b7e044504b00c1f4048ce1c4620f0..7a649ebc688c8278210c048da2fe64a441d141f9 100644 --- a/Documentation/devicetree/bindings/mmc/mtk-sd.yaml +++ b/Documentation/devicetree/bindings/mmc/mtk-sd.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mmc/mtk-sd.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: MTK MSDC Storage Host Controller Binding +title: MTK MSDC Storage Host Controller maintainers: - Chaotian Jing diff --git a/Documentation/devicetree/bindings/mmc/sdhci-pxa.yaml b/Documentation/devicetree/bindings/mmc/sdhci-pxa.yaml index 1c87f4218e18cf55fcaa54eb299e30b66e473cbf..3d46c252578771f7cb6ee95ebc1dab8e0e57af76 100644 --- a/Documentation/devicetree/bindings/mmc/sdhci-pxa.yaml +++ b/Documentation/devicetree/bindings/mmc/sdhci-pxa.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mmc/sdhci-pxa.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Marvell PXA SDHCI v2/v3 bindings +title: Marvell PXA SDHCI v2/v3 maintainers: - Ulf Hansson diff --git a/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml b/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml index 51ba44cad8426733f9b47cc59b6455473550b6dc..a43eb837f8dae06f5422b8bdabf567f30fd8f9af 100644 --- 
a/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml +++ b/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mmc/snps,dwcmshc-sdhci.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Synopsys Designware Mobile Storage Host Controller Binding +title: Synopsys Designware Mobile Storage Host Controller maintainers: - Ulf Hansson diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.yaml b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.yaml index e1f5f26f3f1c6311e2881ad7935dbc721ba2c108..b13b5166d20a86f138b7f01b6e6fd6e8ba293320 100644 --- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.yaml +++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mmc/synopsys-dw-mshc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Synopsys Designware Mobile Storage Host Controller Binding +title: Synopsys Designware Mobile Storage Host Controller maintainers: - Ulf Hansson diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml b/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml index 849aeae319a9213319fbb00da94e90cd9d2250d3..8487089b6e169a2ad3c483af33abed66d3428efa 100644 --- a/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml +++ b/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mtd/gpmi-nand.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Freescale General-Purpose Media Interface (GPMI) binding +title: Freescale General-Purpose Media Interface (GPMI) maintainers: - Han Xu diff --git a/Documentation/devicetree/bindings/mtd/ingenic,nand.yaml b/Documentation/devicetree/bindings/mtd/ingenic,nand.yaml index a811a512ecc5002a6c4e2ad2fe483241f246cc83..a7bdb5d3675cfaa73bdad8809bdff5a6ba0130a5 100644 --- a/Documentation/devicetree/bindings/mtd/ingenic,nand.yaml +++ b/Documentation/devicetree/bindings/mtd/ingenic,nand.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mtd/ingenic,nand.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs NAND controller devicetree bindings +title: Ingenic SoCs NAND controller maintainers: - Paul Cercueil diff --git a/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml b/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml index ea9450fe7c9fd1408d66ef07791d508029ed1b76..00882892f47eb8750472bdb42133d38c2c904f2b 100644 --- a/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml +++ b/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/mtd/microchip,mchp48l640.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Microchip 48l640 (and similar) serial EERAM bindings +title: Microchip 48l640 (and similar) serial EERAM maintainers: - Heiko Schocher diff --git a/Documentation/devicetree/bindings/mtd/mxc-nand.yaml b/Documentation/devicetree/bindings/mtd/mxc-nand.yaml index 66da1b476ab7e53b67bd95eeb6c7912804695392..7f6f7c9596c49bbfa4e8c5b63861a16947226ca6 100644 --- a/Documentation/devicetree/bindings/mtd/mxc-nand.yaml +++ b/Documentation/devicetree/bindings/mtd/mxc-nand.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mtd/mxc-nand.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Freescale's mxc_nand binding +title: Freescale's mxc_nand maintainers: - Uwe Kleine-König diff --git 
a/Documentation/devicetree/bindings/mtd/nand-chip.yaml b/Documentation/devicetree/bindings/mtd/nand-chip.yaml index 6e2dc025d694da430517bafc1fcef2c903dac7a4..33d079f76c05d4b73d4aed37e5944488a32e2629 100644 --- a/Documentation/devicetree/bindings/mtd/nand-chip.yaml +++ b/Documentation/devicetree/bindings/mtd/nand-chip.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mtd/nand-chip.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NAND Chip and NAND Controller Generic Binding +title: NAND Chip Common Properties maintainers: - Miquel Raynal diff --git a/Documentation/devicetree/bindings/mtd/nand-controller.yaml b/Documentation/devicetree/bindings/mtd/nand-controller.yaml index 220aa2c8c0b50ef8fc2848e609eb0576718e1021..efcd415f8641622489e73897365de8590b3456e8 100644 --- a/Documentation/devicetree/bindings/mtd/nand-controller.yaml +++ b/Documentation/devicetree/bindings/mtd/nand-controller.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mtd/nand-controller.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NAND Chip and NAND Controller Generic Binding +title: NAND Controller Common Properties maintainers: - Miquel Raynal diff --git a/Documentation/devicetree/bindings/mtd/partitions/qcom,smem-part.yaml b/Documentation/devicetree/bindings/mtd/partitions/qcom,smem-part.yaml index 61d12bda356e62837a64683451ae440f24931600..1c2b4e780ca9ac5b4e155906ee94a33bb62d5b46 100644 --- a/Documentation/devicetree/bindings/mtd/partitions/qcom,smem-part.yaml +++ b/Documentation/devicetree/bindings/mtd/partitions/qcom,smem-part.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mtd/partitions/qcom,smem-part.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm SMEM NAND flash partition parser binding +title: Qualcomm SMEM NAND flash partition parser maintainers: - Manivannan Sadhasivam diff --git a/Documentation/devicetree/bindings/mtd/st,stm32-fmc2-nand.yaml b/Documentation/devicetree/bindings/mtd/st,stm32-fmc2-nand.yaml index 8cbfa1504a0f116dcab66f4f63e6dd5d88f6cbe2..19cf1f18b61c31b8676288be88e9fb9369e5af39 100644 --- a/Documentation/devicetree/bindings/mtd/st,stm32-fmc2-nand.yaml +++ b/Documentation/devicetree/bindings/mtd/st,stm32-fmc2-nand.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mtd/st,stm32-fmc2-nand.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics Flexible Memory Controller 2 (FMC2) Bindings +title: STMicroelectronics Flexible Memory Controller 2 (FMC2) maintainers: - Christophe Kerello diff --git a/Documentation/devicetree/bindings/mux/gpio-mux.yaml b/Documentation/devicetree/bindings/mux/gpio-mux.yaml index ee4de9fbaf4d6201c4e5cb44bb239fdbed36fe77..b597c1f2c57723a3c106d3f3066a9f7ee40e16e9 100644 --- a/Documentation/devicetree/bindings/mux/gpio-mux.yaml +++ b/Documentation/devicetree/bindings/mux/gpio-mux.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mux/gpio-mux.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: GPIO-based multiplexer controller bindings +title: GPIO-based multiplexer controller maintainers: - Peter Rosin diff --git a/Documentation/devicetree/bindings/mux/mux-consumer.yaml b/Documentation/devicetree/bindings/mux/mux-consumer.yaml index d3d854967359dc9b386200bb42a0529b6a72a635..9e2d78a78e4097e6f529196a07ffce184e34ef13 100644 --- a/Documentation/devicetree/bindings/mux/mux-consumer.yaml +++ b/Documentation/devicetree/bindings/mux/mux-consumer.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mux/mux-consumer.yaml# $schema: 
http://devicetree.org/meta-schemas/core.yaml# -title: Common multiplexer controller consumer bindings +title: Common multiplexer controller consumer maintainers: - Peter Rosin diff --git a/Documentation/devicetree/bindings/mux/mux-controller.yaml b/Documentation/devicetree/bindings/mux/mux-controller.yaml index c855fbad388440eeb0ac779b48acc0f23b326574..8b943082a2416e6b168534cff7f42b18740f8288 100644 --- a/Documentation/devicetree/bindings/mux/mux-controller.yaml +++ b/Documentation/devicetree/bindings/mux/mux-controller.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mux/mux-controller.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Common multiplexer controller provider bindings +title: Common multiplexer controller provider maintainers: - Peter Rosin diff --git a/Documentation/devicetree/bindings/mux/reg-mux.yaml b/Documentation/devicetree/bindings/mux/reg-mux.yaml index dfd9ea582bb7acfc94fd5cccacf6d01b5490f4ee..dc4be092fc2fcf44f60fdd46134c9d3c67c620e2 100644 --- a/Documentation/devicetree/bindings/mux/reg-mux.yaml +++ b/Documentation/devicetree/bindings/mux/reg-mux.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/mux/reg-mux.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Generic register bitfield-based multiplexer controller bindings +title: Generic register bitfield-based multiplexer controller maintainers: - Peter Rosin diff --git a/Documentation/devicetree/bindings/net/asix,ax88178.yaml b/Documentation/devicetree/bindings/net/asix,ax88178.yaml index a81dbc4792f68083005e7bb64f00bf07fae2a6da..768504ccbf74179c6be86a4d5c2f84650afaefdd 100644 --- a/Documentation/devicetree/bindings/net/asix,ax88178.yaml +++ b/Documentation/devicetree/bindings/net/asix,ax88178.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/asix,ax88178.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: The device tree bindings for the USB Ethernet controllers +title: ASIX AX88172/AX88772 USB Ethernet Controllers maintainers: - Oleksij Rempel diff --git a/Documentation/devicetree/bindings/net/bluetooth/bluetooth-controller.yaml b/Documentation/devicetree/bindings/net/bluetooth/bluetooth-controller.yaml index 9309dc40f54f08cc4de17bdeeaa085080729884d..59bb0d7e8ab33eb273a70c369dd1ec52b78a9a80 100644 --- a/Documentation/devicetree/bindings/net/bluetooth/bluetooth-controller.yaml +++ b/Documentation/devicetree/bindings/net/bluetooth/bluetooth-controller.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/bluetooth/bluetooth-controller.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bluetooth Controller Generic Binding +title: Bluetooth Controller Common Properties maintainers: - Marcel Holtmann diff --git a/Documentation/devicetree/bindings/net/brcm,bcmgenet.yaml b/Documentation/devicetree/bindings/net/brcm,bcmgenet.yaml index e5af53508e254b961010c096c8d2ea3209fd75a1..c99034f053e8248f0049e4aab1cf238e4ca5af3f 100644 --- a/Documentation/devicetree/bindings/net/brcm,bcmgenet.yaml +++ b/Documentation/devicetree/bindings/net/brcm,bcmgenet.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/brcm,bcmgenet.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom BCM7xxx Ethernet Controller (GENET) binding +title: Broadcom BCM7xxx Ethernet Controller (GENET) maintainers: - Doug Berger diff --git a/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml b/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml index 
3c51b2d02957d6ba372b43ec15ccffeb8b6f0764..9c494957a07aa4b12cfce1f94884615398337c26 100644 --- a/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml +++ b/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/can/allwinner,sun4i-a10-can.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Allwinner A10 CAN Controller Device Tree Bindings +title: Allwinner A10 CAN Controller maintainers: - Chen-Yu Tsai diff --git a/Documentation/devicetree/bindings/net/can/bosch,c_can.yaml b/Documentation/devicetree/bindings/net/can/bosch,c_can.yaml index 51aa89ac7e8507aa6aa6fa68d84fc12365cc3c46..4d7d67ee175a3f0a2b4b12aca8fd13020a1c593e 100644 --- a/Documentation/devicetree/bindings/net/can/bosch,c_can.yaml +++ b/Documentation/devicetree/bindings/net/can/bosch,c_can.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/can/bosch,c_can.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bosch C_CAN/D_CAN controller Device Tree Bindings +title: Bosch C_CAN/D_CAN controller description: Bosch C_CAN/D_CAN controller for CAN bus diff --git a/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml b/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml index 26aa0830eea1a4256128718d09bb3867618ba493..67879aab623b555740535139df416af36df1d1c8 100644 --- a/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml +++ b/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/can/bosch,m_can.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bosch MCAN controller Bindings +title: Bosch MCAN controller description: Bosch MCAN controller for CAN bus diff --git a/Documentation/devicetree/bindings/net/can/can-controller.yaml b/Documentation/devicetree/bindings/net/can/can-controller.yaml index 1f0e980510749e7da1f0a282dfbeed01c7e43cea..217be90960e8fa2f33485aef2d810550b1498d1b 100644 --- a/Documentation/devicetree/bindings/net/can/can-controller.yaml +++ b/Documentation/devicetree/bindings/net/can/can-controller.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/can/can-controller.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: CAN Controller Generic Binding +title: CAN Controller Common Properties maintainers: - Marc Kleine-Budde diff --git a/Documentation/devicetree/bindings/net/can/can-transceiver.yaml b/Documentation/devicetree/bindings/net/can/can-transceiver.yaml index d1ef1fe6ab2902d4cdc6df4a294fa2f3c5926a7f..d422b3921ffa5a75751423d4622516f3555ad254 100644 --- a/Documentation/devicetree/bindings/net/can/can-transceiver.yaml +++ b/Documentation/devicetree/bindings/net/can/can-transceiver.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/can/can-transceiver.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: CAN transceiver Bindings +title: CAN transceiver description: CAN transceiver generic properties bindings diff --git a/Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml b/Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml index 4635cb96fc64c687103e7aa3721200d89936225e..a009a44029385ebd90df88e729834f72e035dff8 100644 --- a/Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml +++ b/Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/can/ctu,ctucanfd.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: CTU CAN FD Open-source IP Core Device Tree 
Bindings +title: CTU CAN FD Open-source IP Core description: | Open-source CAN FD IP core developed at the Czech Technical University in Prague diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml b/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml index 7a73057707b44b040e858d20b80788bf32cce01a..fce84aecae776f97372052f945683be674486a4f 100644 --- a/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml +++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml @@ -4,9 +4,7 @@ $id: http://devicetree.org/schemas/net/can/microchip,mcp251xfd.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: - Microchip MCP2517FD, MCP2518FD and MCP251863 stand-alone CAN - controller device tree bindings +title: Microchip MCP2517FD, MCP2518FD and MCP251863 stand-alone CAN controller maintainers: - Marc Kleine-Budde diff --git a/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml b/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml index 259a0c6547f3073fa5eae83d3282a4717d132eb1..2a6d126606ca968c7eef08bd604aa01187863337 100644 --- a/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml +++ b/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/dsa/arrow,xrs700x.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Arrow SpeedChips XRS7000 Series Switch Device Tree Bindings +title: Arrow SpeedChips XRS7000 Series Switch allOf: - $ref: dsa.yaml# diff --git a/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml b/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml index 9abb8eba5fadf07c8f23fdb7a69afad97ae1c1e8..b173fceb89983b5ba5735bad602d0ee6c4ded3ce 100644 --- a/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml +++ b/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/dsa/dsa-port.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ethernet Switch port Device Tree Bindings +title: Ethernet Switch port maintainers: - Andrew Lunn diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.yaml b/Documentation/devicetree/bindings/net/dsa/dsa.yaml index b9d48e357e772be31f02dfcd4a322ec3bb650edd..5469ae8a43893cf050100dca38c89c573a4772d6 100644 --- a/Documentation/devicetree/bindings/net/dsa/dsa.yaml +++ b/Documentation/devicetree/bindings/net/dsa/dsa.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/dsa/dsa.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ethernet Switch Device Tree Bindings +title: Ethernet Switch maintainers: - Andrew Lunn diff --git a/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml b/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml index 1d7dab31457d5ed5c375c74ba48cd79986957529..447589b01e8e93511f19d0b492147a3203b9a56b 100644 --- a/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml +++ b/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/dsa/hirschmann,hellcreek.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Hirschmann Hellcreek TSN Switch Device Tree Bindings +title: Hirschmann Hellcreek TSN Switch allOf: - $ref: dsa.yaml# diff --git a/Documentation/devicetree/bindings/net/dsa/microchip,lan937x.yaml b/Documentation/devicetree/bindings/net/dsa/microchip,lan937x.yaml index 
630bf0f8294b2da07ccc9cbd3ac354ba997a15d8..b34de303966ba4ecdc46cf764f2ae63e5c5af888 100644 --- a/Documentation/devicetree/bindings/net/dsa/microchip,lan937x.yaml +++ b/Documentation/devicetree/bindings/net/dsa/microchip,lan937x.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/dsa/microchip,lan937x.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: LAN937x Ethernet Switch Series Tree Bindings +title: LAN937x Ethernet Switch Series maintainers: - UNGLinuxDriver@microchip.com diff --git a/Documentation/devicetree/bindings/net/dsa/mscc,ocelot.yaml b/Documentation/devicetree/bindings/net/dsa/mscc,ocelot.yaml index 8d93ed9c172cb7a81b26afa069be080ec13845c6..347a0e1b3d3ff363e5bca7ae64f3e7bbf6fcbd01 100644 --- a/Documentation/devicetree/bindings/net/dsa/mscc,ocelot.yaml +++ b/Documentation/devicetree/bindings/net/dsa/mscc,ocelot.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/dsa/mscc,ocelot.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Microchip Ocelot Switch Family Device Tree Bindings +title: Microchip Ocelot Switch Family maintainers: - Vladimir Oltean diff --git a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml index 1e26d876d1463035d03a6e49fc578c6a68513d52..df98a16e4e75322d9cfb615b091350645662f7fa 100644 --- a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml +++ b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/dsa/nxp,sja1105.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NXP SJA1105 Automotive Ethernet Switch Family Device Tree Bindings +title: NXP SJA1105 Automotive Ethernet Switch Family description: The SJA1105 SPI interface requires a CS-to-CLK time (t2 in UM10944.pdf) of at diff --git a/Documentation/devicetree/bindings/net/engleder,tsnep.yaml b/Documentation/devicetree/bindings/net/engleder,tsnep.yaml index a6921e805e37e6200aaaaa5645b8ba637877c6d9..4116667133ce94b49c4c56d7d1ddd01c82d097d6 100644 --- a/Documentation/devicetree/bindings/net/engleder,tsnep.yaml +++ b/Documentation/devicetree/bindings/net/engleder,tsnep.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/engleder,tsnep.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: TSN endpoint Ethernet MAC binding +title: TSN endpoint Ethernet MAC maintainers: - Gerhard Engleder diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml index 3aef506fa158e947be0268cda0a16401f05e288b..00be387984acb83001278d50b51d40cd86dd042e 100644 --- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/ethernet-controller.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ethernet Controller Generic Binding +title: Ethernet Controller Common Properties maintainers: - David S. 
Miller diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml index ad808e9ce5b9ed5cecc9f8fc3c673cf9c9886542..1327b81f15a21151e9439d3f705f5a9f912928cb 100644 --- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/ethernet-phy.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ethernet PHY Generic Binding +title: Ethernet PHY Common Properties maintainers: - Andrew Lunn diff --git a/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml b/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml index 600240281e8c54480a24c2fa0b663bc46e9bc899..6e0763898d3a848c857367a74259855799f5c2e7 100644 --- a/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml +++ b/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/fsl,qoriq-mc-dpmac.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: DPAA2 MAC bindings +title: DPAA2 MAC maintainers: - Ioana Ciornei diff --git a/Documentation/devicetree/bindings/net/ingenic,mac.yaml b/Documentation/devicetree/bindings/net/ingenic,mac.yaml index 93b3e991d209ade19086caaf2c5abe9f8cba4e7d..bdea101c2f75554580e3136d30379f6ee9160b56 100644 --- a/Documentation/devicetree/bindings/net/ingenic,mac.yaml +++ b/Documentation/devicetree/bindings/net/ingenic,mac.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/ingenic,mac.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for MAC in Ingenic SoCs +title: MAC in Ingenic SoCs maintainers: - 周琰杰 (Zhou Yanjie) diff --git a/Documentation/devicetree/bindings/net/mctp-i2c-controller.yaml b/Documentation/devicetree/bindings/net/mctp-i2c-controller.yaml index afd11c9422fa2a40cfbce35359fd9aeb95436e0e..8438af53c5c3d2147adf110ef0d697315ab5feb7 100644 --- a/Documentation/devicetree/bindings/net/mctp-i2c-controller.yaml +++ b/Documentation/devicetree/bindings/net/mctp-i2c-controller.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/mctp-i2c-controller.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: MCTP I2C transport binding +title: MCTP I2C transport maintainers: - Matt Johnston diff --git a/Documentation/devicetree/bindings/net/mdio.yaml b/Documentation/devicetree/bindings/net/mdio.yaml index b5706d4e7e38f5509b55df3be5c4edcb04bd2bfd..a266ade918ca7cd72a4380c2f53ba9d65d97c92f 100644 --- a/Documentation/devicetree/bindings/net/mdio.yaml +++ b/Documentation/devicetree/bindings/net/mdio.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/mdio.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: MDIO Bus Generic Binding +title: MDIO Bus Common Properties maintainers: - Andrew Lunn diff --git a/Documentation/devicetree/bindings/net/microchip,lan95xx.yaml b/Documentation/devicetree/bindings/net/microchip,lan95xx.yaml index 3715c5f8f0e0c378901246fac5fb9751115ec987..0b97e14d947f2991f8d1a40068267c1c2b7f8328 100644 --- a/Documentation/devicetree/bindings/net/microchip,lan95xx.yaml +++ b/Documentation/devicetree/bindings/net/microchip,lan95xx.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/microchip,lan95xx.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: The device tree bindings for the USB Ethernet controllers +title: Microchip SMSC9500/LAN9530/LAN9730 USB Ethernet Controllers maintainers: - Oleksij Rempel diff --git 
a/Documentation/devicetree/bindings/net/wireless/esp,esp8089.yaml b/Documentation/devicetree/bindings/net/wireless/esp,esp8089.yaml index 284ef45add9989cf0f6332198474f6835f5953b5..5557676e9d4bc38c6c5d1d19e688b356c7967ed4 100644 --- a/Documentation/devicetree/bindings/net/wireless/esp,esp8089.yaml +++ b/Documentation/devicetree/bindings/net/wireless/esp,esp8089.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/wireless/esp,esp8089.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Espressif ESP8089 Device Tree Bindings +title: Espressif ESP8089 maintainers: - Hans de Goede diff --git a/Documentation/devicetree/bindings/net/wireless/ieee80211.yaml b/Documentation/devicetree/bindings/net/wireless/ieee80211.yaml index d58e1571df9b42dd32c10db6ecddd98bd915e8b4..e68ed9423150e5422f430ee8753344ee9ed9fa92 100644 --- a/Documentation/devicetree/bindings/net/wireless/ieee80211.yaml +++ b/Documentation/devicetree/bindings/net/wireless/ieee80211.yaml @@ -6,7 +6,7 @@ $id: http://devicetree.org/schemas/net/wireless/ieee80211.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Common IEEE 802.11 Binding +title: Common IEEE 802.11 maintainers: - Lorenzo Bianconi diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml index 70e328589cfb8e0ac605c1e398ede17b14255ece..f0c78f9944913b34f1a00460c29be2b65c953886 100644 --- a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml +++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml @@ -6,7 +6,7 @@ $id: http://devicetree.org/schemas/net/wireless/mediatek,mt76.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: MediaTek mt76 wireless devices Generic Binding +title: MediaTek mt76 wireless devices maintainers: - Felix Fietkau diff --git a/Documentation/devicetree/bindings/net/wireless/microchip,wilc1000.yaml b/Documentation/devicetree/bindings/net/wireless/microchip,wilc1000.yaml index b3405f284580abe61fe7eaa0c1d0ae3015ed4d69..2460ccc082371b08999c47f940cc71805568103d 100644 --- a/Documentation/devicetree/bindings/net/wireless/microchip,wilc1000.yaml +++ b/Documentation/devicetree/bindings/net/wireless/microchip,wilc1000.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/wireless/microchip,wilc1000.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Microchip WILC wireless devicetree bindings +title: Microchip WILC wireless maintainers: - Adham Abozaeid diff --git a/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml index 7029cb1f38ffb8a79f9dece86cf35b97bc2687b3..0e5412cff2bc6714693b92c5fb69dba2410cdf0f 100644 --- a/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml +++ b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/net/wireless/qca,ath9k.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Atheros ath9k wireless devices Generic Binding +title: Qualcomm Atheros ath9k wireless devices maintainers: - Toke Høiland-Jørgensen diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml index f7cf135aa37fff9b2370edf1b03af3d16602dfcf..556eb523606a30209c21097619286ba698a43e8a 100644 --- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml +++ 
b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml @@ -6,7 +6,7 @@ $id: http://devicetree.org/schemas/net/wireless/qcom,ath11k.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Technologies ath11k wireless devices Generic Binding +title: Qualcomm Technologies ath11k wireless devices maintainers: - Kalle Valo diff --git a/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml b/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml index b35d2f3ad1adcc1a703aaa9c69d66670269206bc..583db5d42226dfab44e5868729f07bca1198ce18 100644 --- a/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml +++ b/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml @@ -6,7 +6,7 @@ $id: http://devicetree.org/schemas/net/wireless/silabs,wfx.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Silicon Labs WFxxx devicetree bindings +title: Silicon Labs WFxxx maintainers: - Jérôme Pouiller diff --git a/Documentation/devicetree/bindings/nvmem/fsl,scu-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/fsl,scu-ocotp.yaml index 682688299b26d00484627a002f8443ae8ef11fa2..f0a49283649d9844fa81703a7a3c83e348bbb01e 100644 --- a/Documentation/devicetree/bindings/nvmem/fsl,scu-ocotp.yaml +++ b/Documentation/devicetree/bindings/nvmem/fsl,scu-ocotp.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/nvmem/fsl,scu-ocotp.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: i.MX SCU Client Device Node - OCOTP bindings based on SCU Message Protocol +title: i.MX SCU Client Device Node - OCOTP Based on SCU Message Protocol maintainers: - Dong Aisheng diff --git a/Documentation/devicetree/bindings/nvmem/ingenic,jz4780-efuse.yaml b/Documentation/devicetree/bindings/nvmem/ingenic,jz4780-efuse.yaml index bf84768228f566c392370ca6fc7f570a200f76d5..fe2cd7f1afba923534d1f470a3ac02d8a7baa558 100644 --- a/Documentation/devicetree/bindings/nvmem/ingenic,jz4780-efuse.yaml +++ b/Documentation/devicetree/bindings/nvmem/ingenic,jz4780-efuse.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/nvmem/ingenic,jz4780-efuse.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic JZ EFUSE driver bindings +title: Ingenic JZ EFUSE driver maintainers: - PrasannaKumar Muralidharan diff --git a/Documentation/devicetree/bindings/nvmem/layouts/kontron,sl28-vpd.yaml b/Documentation/devicetree/bindings/nvmem/layouts/kontron,sl28-vpd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c713e23819f118ae99c61a48911f094796f7b69a --- /dev/null +++ b/Documentation/devicetree/bindings/nvmem/layouts/kontron,sl28-vpd.yaml @@ -0,0 +1,64 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/nvmem/layouts/kontron,sl28-vpd.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NVMEM layout of the Kontron SMARC-sAL28 vital product data + +maintainers: + - Michael Walle + +description: + The vital product data (VPD) of the sl28 boards contains a serial + number and a base MAC address. The actual MAC addresses for the + on-board ethernet devices are derived from this base MAC address by + adding an offset. + +select: false + +properties: + compatible: + const: kontron,sl28-vpd + + serial-number: + type: object + description: The board's serial number + + additionalProperties: false + + base-mac-address: + type: object + description: + Base MAC address for all on-module network interfaces. The first + argument of the phandle will be treated as an offset. 
+ + properties: + "#nvmem-cell-cells": + const: 1 + + additionalProperties: false + +required: + - compatible + +additionalProperties: false + +examples: + - | + otp-1 { + compatible = "user-otp"; + + nvmem-layout { + compatible = "kontron,sl28-vpd"; + + serial_number: serial-number { + }; + + base_mac_address: base-mac-address { + #nvmem-cell-cells = <1>; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml b/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8512ee538c4c1602f9ff1e094b97ea9b08c817b8 --- /dev/null +++ b/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/nvmem/layouts/nvmem-layout.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NVMEM (Non Volatile Memory) layouts + +maintainers: + - Srinivas Kandagatla + - Michael Walle + - Miquel Raynal + +description: | + Most NVMEM layouts are static and thus do not require additional description + besides the bytes/bits offset and length. Other layouts can be less statically + defined and might require dynamic reading of the NVMEM device in order to + perform their parsing. The nvmem-layout container is here to describe these. + +oneOf: + - $ref: kontron,sl28-vpd.yaml + - $ref: onie,tlv-layout.yaml + +properties: + compatible: true + + '#address-cells': false + + '#size-cells': false + +required: + - compatible + +unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/nvmem/layouts/onie,tlv-layout.yaml b/Documentation/devicetree/bindings/nvmem/layouts/onie,tlv-layout.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5a0e7671aa3f57530139b2e7c70c701e9a635dee --- /dev/null +++ b/Documentation/devicetree/bindings/nvmem/layouts/onie,tlv-layout.yaml @@ -0,0 +1,147 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/nvmem/layouts/onie,tlv-layout.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NVMEM layout of the ONIE TLV table + +maintainers: + - Miquel Raynal + +description: + Modern networking hardware implementing the Open Compute Project ONIE + infrastructure shall provide a non-volatile memory with a table whose + content is well specified and gives much information about the manufacturer + (name, country of manufacture, etc) as well as device characteristics (serial + number, hardware version, mac addresses, etc). The underlying device type + (flash, EEPROM,...) is not specified. The exact location of each value is also + dynamic and should be discovered at run time because it depends on the + parameters the manufacturer decided to embed.
+ + properties: + "#nvmem-cell-cells": + const: 1 + + additionalProperties: false + + manufacture-date: + type: object + additionalProperties: false + + device-version: + type: object + additionalProperties: false + + label-revision: + type: object + additionalProperties: false + + platform-name: + type: object + additionalProperties: false + + onie-version: + type: object + additionalProperties: false + + num-macs: + type: object + additionalProperties: false + + manufacturer: + type: object + additionalProperties: false + + country-code: + type: object + additionalProperties: false + + vendor: + type: object + additionalProperties: false + + diag-version: + type: object + additionalProperties: false + + service-tag: + type: object + additionalProperties: false + + vendor-extension: + type: object + additionalProperties: false + +required: + - compatible + +additionalProperties: false + +examples: + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + + eeprom@56 { + compatible = "atmel,24c64"; + read-only; + reg = <0x56>; + + nvmem-layout { + compatible = "onie,tlv-layout"; + + serial-number { + }; + }; + }; + }; + + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + + flash@0 { + compatible = "m25p80", "jedec,spi-nor"; + reg = <0>; + + otp { + compatible = "user-otp"; + + nvmem-layout { + compatible = "onie,tlv-layout"; + + mac-address { + #nvmem-cell-cells = <1>; + }; + }; + }; + }; + }; +... diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.yaml b/Documentation/devicetree/bindings/nvmem/nvmem.yaml index 1eb22dba364c75f82c4368e0727c23a3f208489f..75bb93dda9df1ddf81a2e1dd45cec57440e3fd53 100644 --- a/Documentation/devicetree/bindings/nvmem/nvmem.yaml +++ b/Documentation/devicetree/bindings/nvmem/nvmem.yaml @@ -39,6 +39,13 @@ properties: when it's driven low (logical '0') to allow writing. maxItems: 1 + nvmem-layout: + $ref: /schemas/nvmem/layouts/nvmem-layout.yaml + description: + Alternative to the statically defined nvmem cells, this + container may reference more advanced (dynamic) layout + parsers. + patternProperties: "@[0-9a-f]+(,[0-7])?$": type: object @@ -67,6 +74,7 @@ examples: #include qfprom: eeprom@700000 { + compatible = "qcom,msm8974-qfprom", "qcom,qfprom"; #address-cells = <1>; #size-cells = <1>; reg = <0x00700000 0x100000>; diff --git a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml index 2eab2f46cb65ad397aa7cb7133b347bc56bcbd47..8e89b15b535fad69bd460e221d8307d8b783bc3d 100644 --- a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml +++ b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/nvmem/qcom,qfprom.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Technologies Inc, QFPROM Efuse bindings +title: Qualcomm Technologies Inc, QFPROM Efuse maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml b/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml index ee79e13b5fe0d1efb4e2af4b20a4dd5f29d27828..e08504ef3b6e174927c92a9dffd4fbdbb9c25340 100644 --- a/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml +++ b/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/nvmem/qcom,spmi-sdam.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Technologies, Inc. SPMI SDAM DT bindings +title: Qualcomm Technologies, Inc.
SPMI SDAM maintainers: - Shyam Kumar Thella diff --git a/Documentation/devicetree/bindings/nvmem/socionext,uniphier-efuse.yaml b/Documentation/devicetree/bindings/nvmem/socionext,uniphier-efuse.yaml index 2578e39deda9039e2c7c1523910385958559d630..73a0c658dbfd02f6ef5511a0491473c71dbb3549 100644 --- a/Documentation/devicetree/bindings/nvmem/socionext,uniphier-efuse.yaml +++ b/Documentation/devicetree/bindings/nvmem/socionext,uniphier-efuse.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/nvmem/socionext,uniphier-efuse.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Socionext UniPhier eFuse bindings +title: Socionext UniPhier eFuse maintainers: - Keiji Hayashibara diff --git a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml index 448a2678dc624fb669b8f0fd8381382b3311de51..172597cc5c63b50a5a6ceea11b3ced2b0eaaca6a 100644 --- a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml +++ b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/nvmem/st,stm32-romem.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 Factory-programmed data bindings +title: STMicroelectronics STM32 Factory-programmed data description: | This represents STM32 Factory-programmed read only non-volatile area: locked @@ -22,6 +22,7 @@ properties: compatible: enum: - st,stm32f4-otp + - st,stm32mp13-bsec - st,stm32mp15-bsec reg: diff --git a/Documentation/devicetree/bindings/opp/opp-v1.yaml b/Documentation/devicetree/bindings/opp/opp-v1.yaml index d585d536a3fb46ee2ee4302baaf30d5cf3f8d0bf..07e26c26781535f9212de068b2c7526147b89b14 100644 --- a/Documentation/devicetree/bindings/opp/opp-v1.yaml +++ b/Documentation/devicetree/bindings/opp/opp-v1.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/opp/opp-v1.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Generic OPP (Operating Performance Points) v1 Bindings +title: Generic OPP (Operating Performance Points) v1 maintainers: - Viresh Kumar diff --git a/Documentation/devicetree/bindings/opp/opp-v2-base.yaml b/Documentation/devicetree/bindings/opp/opp-v2-base.yaml index cf9c2f7bddc29ee92c1e2404c36245ae29fc064c..47e6f36b76370b13908e15f54ac3c6be7d278b5e 100644 --- a/Documentation/devicetree/bindings/opp/opp-v2-base.yaml +++ b/Documentation/devicetree/bindings/opp/opp-v2-base.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/opp/opp-v2-base.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Generic OPP (Operating Performance Points) Common Binding +title: Generic OPP (Operating Performance Points) Common Properties maintainers: - Viresh Kumar diff --git a/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml b/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml index a202b6c6561d8a59c47daeeacec0d5814177fdff..60cf3cbde4c5b5bc5fff2887577340bf606d470e 100644 --- a/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml +++ b/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/opp/opp-v2-kryo-cpu.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Technologies, Inc. NVMEM OPP bindings +title: Qualcomm Technologies, Inc. 
NVMEM OPP maintainers: - Ilia Lin diff --git a/Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml b/Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml index df8442fb11f0c992d1beb61cb482db5409c35edb..b9ce2e099ce9a953622358d96f8ca97042d44d35 100644 --- a/Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml +++ b/Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/opp/opp-v2-qcom-level.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm OPP bindings to describe OPP nodes. +title: Qualcomm OPP maintainers: - Niklas Cassel diff --git a/Documentation/devicetree/bindings/opp/opp-v2.yaml b/Documentation/devicetree/bindings/opp/opp-v2.yaml index 2f05920fce79a247432da306ab7c5a3a3cd62430..6972d76233aa457c7e6f6af4f6e2124219ca2b7f 100644 --- a/Documentation/devicetree/bindings/opp/opp-v2.yaml +++ b/Documentation/devicetree/bindings/opp/opp-v2.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/opp/opp-v2.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Generic OPP (Operating Performance Points) Bindings +title: Generic OPP (Operating Performance Points) maintainers: - Viresh Kumar diff --git a/Documentation/devicetree/bindings/pci/baikal,bt1-pcie.yaml b/Documentation/devicetree/bindings/pci/baikal,bt1-pcie.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8eaa07ae9774350fac77fa8e8e939f5abe31b5bb --- /dev/null +++ b/Documentation/devicetree/bindings/pci/baikal,bt1-pcie.yaml @@ -0,0 +1,168 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pci/baikal,bt1-pcie.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Baikal-T1 PCIe Root Port Controller + +maintainers: + - Serge Semin + +description: + Root Complex controller embedded into the Baikal-T1 SoC, with a single port + activated. It's based on the DWC RC PCIe v4.60a IP-core, which is configured + to have just a single Root Port function and is capable of establishing the + link up to Gen.3 speed on x4 lanes. It doesn't have an embedded clock and reset + control module, so the proper interface initialization is supposed to be + performed by software. There are four inbound and four outbound iATU regions + which can be used to emit all required TLP types on the PCIe bus. + +allOf: + - $ref: /schemas/pci/snps,dw-pcie.yaml# + +properties: + compatible: + const: baikal,bt1-pcie + + reg: + description: + DBI, DBI2 and at least 4KB outbound iATU-capable region for the + peripheral devices CFG-space access. + maxItems: 3 + + reg-names: + items: + - const: dbi + - const: dbi2 + - const: config + + interrupts: + description: + MSI, AER, PME, Hot-plug, Link Bandwidth Management, Link Equalization + request and eight Read/Write eDMA IRQ lines are available. + maxItems: 14 + + interrupt-names: + items: + - const: dma0 + - const: dma1 + - const: dma2 + - const: dma3 + - const: dma4 + - const: dma5 + - const: dma6 + - const: dma7 + - const: msi + - const: aer + - const: pme + - const: hp + - const: bw_mg + - const: l_eq + + clocks: + description: + DBI (attached to the APB bus), AXI-bus master and slave interfaces + are fed by dedicated application clocks. A common reference + clock signal is supposed to be attached to the corresponding Ref-pad + of the SoC. It will be redistributed amongst the controller core + sub-modules (pipe, core, aux, etc).
+ maxItems: 4 + + clock-names: + items: + - const: dbi + - const: mstr + - const: slv + - const: ref + + resets: + description: + A comprehensive controller reset logic is supposed to be implemented + by software, so almost all the possible application and core reset + signals are exposed via the system CCU module. + maxItems: 9 + + reset-names: + items: + - const: mstr + - const: slv + - const: pwr + - const: hot + - const: phy + - const: core + - const: pipe + - const: sticky + - const: non-sticky + + baikal,bt1-syscon: + $ref: /schemas/types.yaml#/definitions/phandle + description: + Phandle to the Baikal-T1 System Controller DT node. It's required to + access some additional PM, Reset-related and LTSSM signals. + + num-lanes: + maximum: 4 + + max-link-speed: + maximum: 3 + +required: + - compatible + - reg + - reg-names + - interrupts + - interrupt-names + +unevaluatedProperties: false + +examples: + - | + #include + #include + + pcie@1f052000 { + compatible = "baikal,bt1-pcie"; + device_type = "pci"; + reg = <0x1f052000 0x1000>, <0x1f053000 0x1000>, <0x1bdbf000 0x1000>; + reg-names = "dbi", "dbi2", "config"; + #address-cells = <3>; + #size-cells = <2>; + ranges = <0x81000000 0 0x00000000 0x1bdb0000 0 0x00008000>, + <0x82000000 0 0x20000000 0x08000000 0 0x13db0000>; + bus-range = <0x0 0xff>; + + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + ; + interrupt-names = "dma0", "dma1", "dma2", "dma3", + "dma4", "dma5", "dma6", "dma7", + "msi", "aer", "pme", "hp", "bw_mg", + "l_eq"; + + clocks = <&ccu_sys 1>, <&ccu_axi 6>, <&ccu_axi 7>, <&clk_pcie>; + clock-names = "dbi", "mstr", "slv", "ref"; + + resets = <&ccu_axi 6>, <&ccu_axi 7>, <&ccu_sys 7>, <&ccu_sys 10>, + <&ccu_sys 4>, <&ccu_sys 6>, <&ccu_sys 5>, <&ccu_sys 8>, + <&ccu_sys 9>; + reset-names = "mstr", "slv", "pwr", "hot", "phy", "core", "pipe", + "sticky", "non-sticky"; + + reset-gpios = <&port0 0 GPIO_ACTIVE_LOW>; + + num-lanes = <4>; + max-link-speed = <3>; + }; +... diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml index 376e739bcad40a89083067f18d7837a5b64af43e..bad980902f66e26e88efdd45138e3759b3e2f8cd 100644 --- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml @@ -14,9 +14,6 @@ description: |+ This PCIe host controller is based on the Synopsys DesignWare PCIe IP and thus inherits all the common properties defined in snps,dw-pcie.yaml. -allOf: - - $ref: /schemas/pci/snps,dw-pcie.yaml# - properties: compatible: enum: @@ -60,8 +57,8 @@ properties: items: - const: pcie - const: pcie_bus - - const: pcie_phy - - const: pcie_inbound_axi for imx6sx-pcie, pcie_aux for imx8mq-pcie + - enum: [ pcie_phy, pcie_aux ] + - enum: [ pcie_inbound_axi, pcie_aux ] num-lanes: const: 1 @@ -72,6 +69,7 @@ properties: required properties for imx7d-pcie and imx8mq-pcie. power-domains: + minItems: 1 items: - description: The phandle pointing to the DISPLAY domain for imx6sx-pcie, to PCIE_PHY power domain for imx7d-pcie and @@ -80,20 +78,20 @@ properties: for imx6sx-pcie. power-domain-names: + minItems: 1 items: - const: pcie - const: pcie_phy resets: + minItems: 2 maxItems: 3 description: Phandles to PCIe-related reset lines exposed by SRC IP block. Additionally required by imx7d-pcie and imx8mq-pcie. reset-names: - items: - - const: pciephy - - const: apps - - const: turnoff + minItems: 2 + maxItems: 3 fsl,tx-deemph-gen1: description: Gen1 De-emphasis value (optional).
@@ -175,6 +173,136 @@ required: - clocks - clock-names +allOf: + - $ref: /schemas/pci/snps,dw-pcie.yaml# + - if: + properties: + compatible: + contains: + const: fsl,imx6sx-pcie + then: + properties: + clock-names: + items: + - {} + - {} + - const: pcie_phy + - const: pcie_inbound_axi + power-domains: + minItems: 2 + power-domain-names: + minItems: 2 + - if: + properties: + compatible: + contains: + const: fsl,imx8mq-pcie + then: + properties: + clock-names: + items: + - {} + - {} + - const: pcie_phy + - const: pcie_aux + - if: + properties: + compatible: + not: + contains: + enum: + - fsl,imx6sx-pcie + - fsl,imx8mq-pcie + then: + properties: + clocks: + maxItems: 3 + clock-names: + maxItems: 3 + + - if: + properties: + compatible: + contains: + enum: + - fsl,imx6q-pcie + - fsl,imx6qp-pcie + - fsl,imx7d-pcie + then: + properties: + clock-names: + maxItems: 3 + contains: + const: pcie_phy + + - if: + properties: + compatible: + contains: + enum: + - fsl,imx8mm-pcie + - fsl,imx8mp-pcie + then: + properties: + clock-names: + maxItems: 3 + contains: + const: pcie_aux + - if: + properties: + compatible: + contains: + enum: + - fsl,imx6q-pcie + - fsl,imx6qp-pcie + then: + properties: + power-domains: false + power-domain-names: false + + - if: + not: + properties: + compatible: + contains: + enum: + - fsl,imx6sx-pcie + - fsl,imx6q-pcie + - fsl,imx6qp-pcie + then: + properties: + power-domains: + maxItems: 1 + power-domain-names: false + + - if: + properties: + compatible: + contains: + enum: + - fsl,imx6q-pcie + - fsl,imx6sx-pcie + - fsl,imx6qp-pcie + - fsl,imx7d-pcie + - fsl,imx8mq-pcie + then: + properties: + resets: + minItems: 3 + reset-names: + items: + - const: pciephy + - const: apps + - const: turnoff + else: + properties: + resets: + maxItems: 2 + reset-names: + items: + - const: apps + - const: turnoff + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml index c00be39af64e53e801a5d4d551235b5e16f504fe..7e8c7a2a5f9b6c387b1ec2fd7b5a47dcee5867e1 100644 --- a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml +++ b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml @@ -43,14 +43,12 @@ description: |+ each set has its own address for MSI message, and supports 32 MSI vectors to generate interrupt. 
-allOf: - - $ref: /schemas/pci/pci-bus.yaml# - properties: compatible: oneOf: - items: - enum: + - mediatek,mt7986-pcie - mediatek,mt8188-pcie - mediatek,mt8195-pcie - const: mediatek,mt8192-pcie @@ -70,29 +68,29 @@ properties: minItems: 1 maxItems: 8 + iommu-map: + maxItems: 1 + + iommu-map-mask: + const: 0 + resets: minItems: 1 maxItems: 2 reset-names: minItems: 1 + maxItems: 2 items: - - const: phy - - const: mac + enum: [ phy, mac ] clocks: + minItems: 4 maxItems: 6 clock-names: - items: - - const: pl_250m - - const: tl_26m - - const: tl_96m - - const: tl_32k - - const: peri_26m - - enum: - - top_133m # for MT8192 - - peri_mem # for MT8188/MT8195 + minItems: 4 + maxItems: 6 assigned-clocks: maxItems: 1 @@ -107,6 +105,9 @@ properties: items: - const: pcie-phy + power-domains: + maxItems: 1 + '#interrupt-cells': const: 1 @@ -138,6 +139,54 @@ required: - '#interrupt-cells' - interrupt-controller +allOf: + - $ref: /schemas/pci/pci-bus.yaml# + - if: + properties: + compatible: + const: mediatek,mt8192-pcie + then: + properties: + clock-names: + items: + - const: pl_250m + - const: tl_26m + - const: tl_96m + - const: tl_32k + - const: peri_26m + - const: top_133m + - if: + properties: + compatible: + contains: + enum: + - mediatek,mt8188-pcie + - mediatek,mt8195-pcie + then: + properties: + clock-names: + items: + - const: pl_250m + - const: tl_26m + - const: tl_96m + - const: tl_32k + - const: peri_26m + - const: peri_mem + - if: + properties: + compatible: + contains: + enum: + - mediatek,mt7986-pcie + then: + properties: + clock-names: + items: + - const: pl_250m + - const: tl_26m + - const: peri_26m + - const: top_133m + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/pci/pci-ep.yaml b/Documentation/devicetree/bindings/pci/pci-ep.yaml index ccec51ab524708c3261746b1a9af20c4169d699f..d1eef48252072a1b077903b19138eb7acf2081b7 100644 --- a/Documentation/devicetree/bindings/pci/pci-ep.yaml +++ b/Documentation/devicetree/bindings/pci/pci-ep.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/pci/pci-ep.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: PCI Endpoint Controller Schema +title: PCI Endpoint Controller description: | Common properties for PCI Endpoint Controller Nodes. 
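For context on the common endpoint-controller properties referenced above, a minimal consumer node could look as follows. This is only a sketch: the compatible string, unit address and register layout are hypothetical placeholders, not values defined by this patch.

    pcie-ep@40000000 {
        /* "vendor,example-pcie-ep" is a hypothetical compatible */
        compatible = "vendor,example-pcie-ep";
        reg = <0x40000000 0x1000>,      /* controller CSR space */
              <0x40100000 0x100000>;    /* outbound window */
        reg-names = "dbi", "addr_space";
        max-functions = /bits/ 8 <1>;   /* one physical function */
        max-link-speed = <2>;           /* up to Gen2 */
        num-lanes = <2>;
    };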
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml index 977c976ea799467810843d2eef76aa72dcf1758f..8d7eb51edcb45db1cb700556b348cc62f7eac0ea 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/pci/qcom,pcie-ep.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm PCIe Endpoint Controller binding +title: Qualcomm PCIe Endpoint Controller maintainers: - Manivannan Sadhasivam diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml index 54f07852d279e7bcdf28660935c6baada64cd4ed..a5859bb3dc28cd6661c1a014ff7272e8b51f83e5 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml @@ -62,6 +62,16 @@ properties: minItems: 3 maxItems: 13 + dma-coherent: true + + interconnects: + maxItems: 2 + + interconnect-names: + items: + - const: pcie-mem + - const: cpu-pcie + resets: minItems: 1 maxItems: 12 @@ -631,6 +641,18 @@ allOf: items: - const: pci # PCIe core reset + - if: + properties: + compatible: + contains: + enum: + - qcom,pcie-sa8540p + - qcom,pcie-sc8280xp + then: + required: + - interconnects + - interconnect-names + - if: not: properties: diff --git a/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml b/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml index bc0a9d1db750bebbdfcbc7c6bf2c3b26f300fa0d..2be72ae1169f99423a67ee81d22ec89224515338 100644 --- a/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml @@ -14,10 +14,10 @@ maintainers: description: |+ RK3568 SoC PCIe host controller is based on the Synopsys DesignWare PCIe IP and thus inherits all the common properties defined in - designware-pcie.txt. + snps,dw-pcie.yaml. allOf: - - $ref: /schemas/pci/pci-bus.yaml# + - $ref: /schemas/pci/snps,dw-pcie.yaml# properties: compatible: diff --git a/Documentation/devicetree/bindings/pci/snps,dw-pcie-common.yaml b/Documentation/devicetree/bindings/pci/snps,dw-pcie-common.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d87e13496834a48098768a56b4c90a742a70b486 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/snps,dw-pcie-common.yaml @@ -0,0 +1,266 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pci/snps,dw-pcie-common.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Synopsys DWC PCIe RP/EP controller + +maintainers: + - Jingoo Han + - Gustavo Pimentel + +description: + Generic Synopsys DesignWare PCIe Root Port and Endpoint controller + properties. + +select: false + +properties: + reg: + description: + DWC PCIe CSR space is normally accessed over the dedicated Data Bus + Interface - DBI. In accordance with the reference manual the register + configuration space belongs to the Configuration-Dependent Module (CDM) + and is split up into several sub-parts Standard PCIe configuration + space, Port Logic Registers (PL), Shadow Config-space Registers, + iATU/eDMA registers. The particular sub-space is selected by the + CDM/ELBI (dbi_cs) and CS2 (dbi_cs2) signals (selector bits). 
Such + configuration provides a flexible interface for the system engineers to + either map the particular space at a desired MMIO address or just leave + them in a contiguous memory space if pure Native or AXI Bridge DBI access + is selected. Note the PCIe CFG-space, PL and Shadow registers are + specific for each activated function, while the rest of the sub-spaces + are common for all of them (if there are more than one). + minItems: 2 + maxItems: 6 + + reg-names: + minItems: 2 + maxItems: 6 + + interrupts: + description: + There are two main sub-blocks which are normally capable of + generating interrupts. These are the System Information Interface + and the MSI interface. While the former has some IRQ signals common + to the Host and Endpoint controllers, the latter interface is + Root Complex specific since it's responsible for the incoming MSI + message signalling. The System Information IRQ signals are mainly + responsible for reporting the generic PCIe hierarchy and Root + Complex events like VPD IO request, general AER, PME, Hot-plug, link + bandwidth change, link equalization request, INTx asserted/deasserted + Message detection, embedded DMA Tx/Rx/Error. + minItems: 1 + maxItems: 26 + + interrupt-names: + minItems: 1 + maxItems: 26 + + clocks: + description: + DWC PCIe reference manual explicitly defines a set of the clocks required + to get the controller working correctly. In general all of them can + be divided into two groups':' application and core clocks. Note the + platforms may have some of the clock sources unspecified in case the + corresponding domains are fed from a common clock source. + minItems: 1 + maxItems: 7 + + clock-names: + minItems: 1 + maxItems: 7 + items: + oneOf: + - description: + Data Bus Interface (DBI) clock. Clock signal for the AXI-bus + interface of the Configuration-Dependent Module, which is + basically the set of the controller CSRs. + const: dbi + - description: + Application AXI-bus Master interface clock. Basically this is + a clock for the controller DMA interface (PCI-to-CPU). + const: mstr + - description: + Application AXI-bus Slave interface clock. This is a clock for + the CPU-to-PCI memory IO interface. + const: slv + - description: + Controller Core-PCS PIPE interface clock. It's normally + supplied by an external PCS-PHY. + const: pipe + - description: + Controller Primary clock. It's assumed that all controller input + signals (except resets) are synchronous to this clock. + const: core + - description: + Auxiliary clock for the controller PMC domain. The controller + partitioning implies having some parts to operate with this + clock in some power management states. + const: aux + - description: + Generic reference clock. In case several interfaces are fed + from a common clock source it's advisable to + define it with this name (for instance pipe, core and aux can + be connected to a single source of the periodic signal). + const: ref + - description: + Clock for the PHY registers interface. Originally this is + a PHY-viewport-based interface, but some platforms may have + a specifically designed one. + const: phy_reg + - description: + Vendor-specific clock names. Consider using the generic names + above for new bindings.
+ oneOf: + - description: See native 'dbi' clock for details + enum: [ pcie, pcie_apb_sys, aclk_dbi ] + - description: See native 'mstr/slv' clock for details + enum: [ pcie_bus, pcie_inbound_axi, pcie_aclk, aclk_mst, aclk_slv ] + - description: See native 'pipe' clock for details + enum: [ pcie_phy, pcie_phy_ref, link ] + - description: See native 'aux' clock for details + enum: [ pcie_aux ] + - description: See native 'ref' clock for details. + enum: [ gio ] + - description: See native 'phy_reg' clock for details + enum: [ pcie_apb_phy, pclk ] + + resets: + description: + DWC PCIe reference manual explicitly defines a set of the reset + signals required to be de-asserted to properly activate the controller + sub-parts. All of these signals can be divided into two sub-groups':' + application and core resets with respect to the main sub-domains they + are supposed to reset. Note the platforms may have some of these signals + unspecified in case they are automatically handled or aggregated into + a comprehensive control module. + minItems: 1 + maxItems: 10 + + reset-names: + minItems: 1 + maxItems: 10 + items: + oneOf: + - description: Data Bus Interface (DBI) domain reset + const: dbi + - description: AXI-bus Master interface reset + const: mstr + - description: AXI-bus Slave interface reset + const: slv + - description: Application-dependent interface reset + const: app + - description: Controller Non-sticky CSR flags reset + const: non-sticky + - description: Controller sticky CSR flags reset + const: sticky + - description: PIPE-interface (Core-PCS) logic reset + const: pipe + - description: + Controller primary reset (resets everything except PMC module) + const: core + - description: PCS/PHY block reset + const: phy + - description: PMC hot reset signal + const: hot + - description: Cold reset signal + const: pwr + - description: + Vendor-specific reset names. Consider using the generic names + above for new bindings. + oneOf: + - description: See native 'app' reset for details + enum: [ apps, gio, apb ] + - description: See native 'phy' reset for details + enum: [ pciephy, link ] + - description: See native 'pwr' reset for details + enum: [ turnoff ] + + phys: + description: + Up to as many PHYs as there are lanes can be specified, placed in + the phandle array in lane-based order. Each of the specified PHYs + is supposed to be able to work in the PCIe mode at the speed + implied by the DWC PCIe controller it is attached to. + minItems: 1 + maxItems: 16 + + phy-names: + minItems: 1 + maxItems: 16 + oneOf: + - description: Generic PHY names + items: + pattern: '^pcie[0-9]+$' + - description: + Vendor-specific PHY names. Consider using the generic + names above for new bindings. + items: + oneOf: + - pattern: '^pcie(-?phy[0-9]*)?$' + - pattern: '^p2u-[0-7]$' + + reset-gpio: + deprecated: true + description: + Reference to the GPIO-controlled PERST# signal. It is used to reset all + the peripheral devices available on the PCIe bus. + maxItems: 1 + + reset-gpios: + description: + Reference to the GPIO-controlled PERST# signal. It is used to reset all + the peripheral devices available on the PCIe bus. + maxItems: 1 + + max-link-speed: + maximum: 5 + + num-lanes: + description: + Number of PCIe link lanes to use. Can be omitted if the already brought + up link is supposed to be preserved. + maximum: 16 + + num-ob-windows: + $ref: /schemas/types.yaml#/definitions/uint32 + deprecated: true + description: + Number of outbound address translation windows.
This parameter can be + auto-detected based on the iATU memory writability. So there is no + point in having a dedicated DT-property for it. + maximum: 256 + + num-ib-windows: + $ref: /schemas/types.yaml#/definitions/uint32 + deprecated: true + description: + Number of inbound address translation windows. In the same way as + for the outbound AT windows, this parameter can be auto-detected based + on the iATU memory writability. There is no point having a dedicated + DT-property for it either. + maximum: 256 + + num-viewport: + $ref: /schemas/types.yaml#/definitions/uint32 + deprecated: true + description: + Number of outbound view ports configured in hardware. It's the same as + the number of outbound AT windows. + maximum: 256 + + snps,enable-cdm-check: + $ref: /schemas/types.yaml#/definitions/flag + description: + Enable automatic checking of CDM (Configuration Dependent Module) + registers for data corruption. CDM registers include standard PCIe + configuration space registers, Port Logic registers, DMA and iATU + registers. This feature has been available since DWC PCIe v4.80a. + + dma-coherent: true + +additionalProperties: true + +... diff --git a/Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml index b78535040f04c6d7ce37058649949f818137f7f0..8fc2151691a47b13d96bd52b39af3a7fe3dfdb94 100644 --- a/Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml +++ b/Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml @@ -13,76 +13,182 @@ maintainers: description: | Synopsys DesignWare PCIe host controller endpoint +# Please create a separate DT-schema for your DWC PCIe Endpoint controller +# and make sure it's assigned with the vendor-specific compatible string. +select: + properties: + compatible: + const: snps,dw-pcie-ep + required: + - compatible + allOf: - $ref: /schemas/pci/pci-ep.yaml# + - $ref: /schemas/pci/snps,dw-pcie-common.yaml# properties: - compatible: - anyOf: - - {} - - const: snps,dw-pcie-ep - reg: - description: | - It should contain Data Bus Interface (dbi) and config registers for all - versions. - For designware core version >= 4.80, it may contain ATU address space. + description: + DBI, DBI2 reg-spaces and outbound memory window are required for the + normal controller functioning. iATU memory IO region is also required + if the space is unrolled (IP-core version >= 4.80a). minItems: 2 - maxItems: 4 + maxItems: 5 reg-names: minItems: 2 - maxItems: 4 + maxItems: 5 items: - enum: [dbi, dbi2, config, atu, addr_space, link, atu_dma, appl] - - reset-gpio: - description: GPIO pin number of PERST# signal - maxItems: 1 - deprecated: true - - reset-gpios: - description: GPIO controlled connection to PERST# signal - maxItems: 1 - - snps,enable-cdm-check: - type: boolean - description: | - This is a boolean property and if present enables - automatic checking of CDM (Configuration Dependent Module) registers - for data corruption. CDM registers include standard PCIe configuration - space registers, Port Logic registers, DMA and iATU (internal Address - Translation Unit) registers. 
- - num-ib-windows: - $ref: /schemas/types.yaml#/definitions/uint32 - maximum: 256 - description: number of inbound address translation windows - deprecated: true - - num-ob-windows: - $ref: /schemas/types.yaml#/definitions/uint32 - maximum: 256 - description: number of outbound address translation windows - deprecated: true + oneOf: + - description: + Basic DWC PCIe controller configuration-space accessible over + the DBI interface. This memory space is either activated with + CDM/ELBI = 0 and CS2 = 0 or is a contiguous memory region + with all spaces. Note iATU/eDMA CSRs are indirectly accessible + via the PL viewports on the DWC PCIe controllers older than + v4.80a. + const: dbi + - description: + Shadow DWC PCIe config-space registers. This space is selected + by setting CDM/ELBI = 0 and CS2 = 1. This is an intermix of + the PCI-SIG PCIe CFG-space with the shadow registers for some + PCI Header space, PCI Standard and Extended Structures. It's + mainly relevant for the end-point controller configuration, + but still there are some shadow registers available for the + Root Port mode too. + const: dbi2 + - description: + External Local Bus registers. These are application-dependent + registers normally defined by the platform engineers. The space + can be selected by setting CDM/ELBI = 1 and CS2 = 0 wires or can + be accessed over some platform-specific means (for instance + as a part of a system controller). + enum: [ elbi, app ] + - description: + iATU/eDMA registers common for all device functions. It's an + unrolled memory space with the internal Address Translation + Unit and Enhanced DMA, which is selected by setting CDM/ELBI = 1 + and CS2 = 1. For IP-core releases prior to v4.80a, these registers + have been programmed via an indirect addressing scheme using a + set of viewport CSRs mapped into the PL space. Note iATU is + normally mapped to the 0x0 address of this region, while eDMA + is available at 0x80000 base address. + const: atu + - description: + Platform-specific eDMA registers. Some platforms may have eDMA + CSRs mapped in a non-standard base address. The registers offset + can be changed or the MS/LS-bits of the address can be attached + in an additional RTL block before the MEM-IO transactions reach + the DW PCIe slave interface. + const: dma + - description: + PHY/PCS configuration registers. Some platforms can have the + PCS and PHY CSRs accessible over a dedicated memory mapped + region, but mainly these registers are indirectly accessible + either by means of the embedded PHY viewport schema or by some + platform-specific method. + const: phy + - description: + Outbound iATU-capable memory-region which will be used to + generate various application-specific traffic on the PCIe bus + hierarchy. Its usage scenario depends on the endpoint + functionality, for instance it can be used to create MSI(X) + messages. + const: addr_space + - description: + Vendor-specific CSR names. Consider using the generic names above + for new bindings. + oneOf: + - description: See native 'elbi/app' CSR region for details. + enum: [ link, appl ] + - description: See native 'atu' CSR region for details. + enum: [ atu_dma ] + allOf: + - contains: + const: dbi + - contains: + const: addr_space + + interrupts: + description: + There are no mandatory IRQ signals for normal controller functioning, + but in addition to the native set the platforms may have link- or + PM-related IRQs specified.
+ minItems: 1 + maxItems: 20 + + interrupt-names: + minItems: 1 + maxItems: 20 + items: + oneOf: + - description: + Controller request to read or write virtual product data + from/to the VPD capability registers. + const: vpd + - description: + Link Equalization Request flag is set in the Link Status 2 + register (applicable if the corresponding IRQ is enabled in + the Link Control 3 register). + const: l_eq + - description: + Indicates that the eDMA Tx/Rx transfer is complete or that an + error has occurred on the corresponding channel. eDMA can have + eight Tx (Write) and Rx (Read) eDMA channels thus supporting up + to 16 IRQ signals altogether. Write eDMA channels shall go + first in the ordered row, as per the default edma_int[*] bus setup. + pattern: '^dma([0-9]|1[0-5])?$' + - description: + A PCIe protocol correctable error or a Data Path protection + correctable error is detected by the automotive/safety + feature. + const: sft_ce + - description: + Indicates that the internal safety mechanism has detected an + uncorrectable error. + const: sft_ue + - description: + Application-specific IRQ raised on a vendor-specific + event basis. + const: app + - description: + Vendor-specific IRQ names. Consider using the generic names above + for new bindings. + oneOf: + - description: See native "app" IRQ for details + enum: [ intr ] + + max-functions: + maximum: 32 required: + - compatible - reg - reg-names - - compatible additionalProperties: true examples: - | - bus { - #address-cells = <1>; - #size-cells = <1>; - pcie-ep@dfd00000 { - compatible = "snps,dw-pcie-ep"; - reg = <0xdfc00000 0x0001000>, /* IP registers 1 */ - <0xdfc01000 0x0001000>, /* IP registers 2 */ - <0xd0000000 0x2000000>; /* Configuration space */ - reg-names = "dbi", "dbi2", "addr_space"; - }; + pcie-ep@dfd00000 { + compatible = "snps,dw-pcie-ep"; + reg = <0xdfc00000 0x0001000>, /* IP registers 1 */ + <0xdfc01000 0x0001000>, /* IP registers 2 */ + <0xd0000000 0x2000000>; /* Configuration space */ + reg-names = "dbi", "dbi2", "addr_space"; + + interrupts = <23>, <24>; + interrupt-names = "dma0", "dma1"; + + clocks = <&sys_clk 12>, <&sys_clk 24>; + clock-names = "dbi", "ref"; + + resets = <&sys_rst 12>, <&sys_rst 24>; + reset-names = "dbi", "phy"; + + phys = <&pcie_phy0>, <&pcie_phy1>, <&pcie_phy2>, <&pcie_phy3>; + phy-names = "pcie0", "pcie1", "pcie2", "pcie3"; + + max-link-speed = <3>; + max-functions = /bits/ 8 <4>; }; diff --git a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml index 7e0b015f14146ebda1e6d55f298b74a34bac6e12..1a83f0f65f191edaaa75d7e2331600b1d7066200 100644 --- a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml @@ -13,20 +13,25 @@ maintainers: description: | Synopsys DesignWare PCIe host controller +# Please create a separate DT-schema for your DWC PCIe Root Port controller +# and make sure it's assigned with the vendor-specific compatible string. +select: + properties: + compatible: + const: snps,dw-pcie + required: + - compatible + allOf: - $ref: /schemas/pci/pci-bus.yaml# + - $ref: /schemas/pci/snps,dw-pcie-common.yaml# properties: - compatible: - anyOf: - - {} - - const: snps,dw-pcie - reg: - description: | - It should contain Data Bus Interface (dbi) and config registers for all - versions. - For designware core version >= 4.80, it may contain ATU address space.
+ description: + At least the DBI reg-space and the peripheral device CFG-space outbound + window are required for normal controller operation. iATU memory IO region is + also required if the space is unrolled (IP-core version >= 4.80a). minItems: 2 maxItems: 5 @@ -34,71 +39,194 @@ properties: minItems: 2 maxItems: 5 items: - enum: [ dbi, dbi2, config, atu, atu_dma, app, appl, elbi, mgmt, ctrl, - parf, cfg, link, ulreg, smu, mpu, apb, phy, ecam ] - - num-lanes: - description: | - number of lanes to use (this property should be specified unless - the link is brought already up in firmware) - maximum: 16 - - reset-gpio: - description: GPIO pin number of PERST# signal - maxItems: 1 - deprecated: true - - reset-gpios: - description: GPIO controlled connection to PERST# signal - maxItems: 1 - - interrupts: true - - interrupt-names: true - - clocks: true - - snps,enable-cdm-check: - type: boolean - description: | - This is a boolean property and if present enables - automatic checking of CDM (Configuration Dependent Module) registers - for data corruption. CDM registers include standard PCIe configuration - space registers, Port Logic registers, DMA and iATU (internal Address - Translation Unit) registers. - - num-viewport: - $ref: /schemas/types.yaml#/definitions/uint32 - maximum: 256 - description: | - number of view ports configured in hardware. If a platform - does not specify it, the driver autodetects it. - deprecated: true + oneOf: + - description: + Basic DWC PCIe controller configuration-space accessible over + the DBI interface. This memory space is either activated with + CDM/ELBI = 0 and CS2 = 0 or is a contiguous memory region + with all spaces. Note iATU/eDMA CSRs are indirectly accessible + via the PL viewports on the DWC PCIe controllers older than + v4.80a. + const: dbi + - description: + Shadow DWC PCIe config-space registers. This space is selected + by setting CDM/ELBI = 0 and CS2 = 1. This is an intermix of + the PCI-SIG PCIe CFG-space with the shadow registers for some + PCI Header space, PCI Standard and Extended Structures. It's + mainly relevant for the end-point controller configuration, + but there are still some shadow registers available for + Root Port mode too. + const: dbi2 + - description: + External Local Bus registers. These are application-dependent + registers normally defined by the platform engineers. The space + can be selected by setting CDM/ELBI = 1 and CS2 = 0 wires or can + be accessed over some platform-specific means (for instance + as a part of a system controller). + enum: [ elbi, app ] + - description: + iATU/eDMA registers common for all device functions. It's an + unrolled memory space with the internal Address Translation + Unit and Enhanced DMA, which is selected by setting CDM/ELBI = 1 + and CS2 = 1. For IP-core releases prior to v4.80a, these registers + have been programmed via an indirect addressing scheme using a + set of viewport CSRs mapped into the PL space. Note iATU is + normally mapped to the 0x0 address of this region, while eDMA + is available at the 0x80000 base address. + const: atu + - description: + Platform-specific eDMA registers. Some platforms may have eDMA + CSRs mapped at a non-standard base address. The register offset + can be changed or the MS/LS-bits of the address can be attached + in an additional RTL block before the MEM-IO transactions reach + the DW PCIe slave interface. + const: dma + - description: + PHY/PCS configuration registers.
Some platforms can have the + PCS and PHY CSRs accessible over a dedicated memory mapped + region, but mainly these registers are indirectly accessible + either by means of the embedded PHY viewport schema or by some + platform-specific method. + const: phy + - description: + Outbound iATU-capable memory-region which will be used to access + the peripheral PCIe devices configuration space. + const: config + - description: + Vendor-specific CSR names. Consider using the generic names above + for new bindings. + oneOf: + - description: See native 'elbi/app' CSR region for details. + enum: [ apb, mgmt, link, ulreg, appl ] + - description: See native 'atu' CSR region for details. + enum: [ atu_dma ] + - description: Syscon-related CSR regions. + enum: [ smu, mpu ] + - description: Tegra234 aperture + enum: [ ecam ] + allOf: + - contains: + const: dbi + - contains: + const: config + + interrupts: + description: + DWC PCIe Root Port/Complex specific IRQ signals. At least the MSI interrupt + signal is supposed to be specified for the host controller. + minItems: 1 + maxItems: 26 + + interrupt-names: + minItems: 1 + maxItems: 26 + items: + oneOf: + - description: + Controller request to read or write virtual product data + from/to the VPD capability registers. + const: vpd + - description: + Link Equalization Request flag is set in the Link Status 2 + register (applicable if the corresponding IRQ is enabled in + the Link Control 3 register). + const: l_eq + - description: + Indicates that the eDMA Tx/Rx transfer is complete or that an + error has occurred on the corresponding channel. eDMA can have + eight Tx (Write) and Rx (Read) eDMA channels thus supporting up + to 16 IRQ signals altogether. Write eDMA channels shall go + first in the ordered row, as per the default edma_int[*] bus setup. + pattern: '^dma([0-9]|1[0-5])?$' + - description: + A PCIe protocol correctable error or a Data Path protection + correctable error is detected by the automotive/safety + feature. + const: sft_ce + - description: + Indicates that the internal safety mechanism has detected an + uncorrectable error. + const: sft_ue + - description: + Application-specific IRQ raised on a vendor-specific + event basis. + const: app + - description: + DSP AXI MSI Interrupt detected. It gets de-asserted when there are + no more MSI interrupts pending. The interrupt is relevant to the + iMSI-RX - Integrated MSI Receiver (AXI bridge). + const: msi + - description: + Legacy A/B/C/D interrupt signal. Basically it's triggered by + receiving an Assert_INT{A,B,C,D}/Deassert_INT{A,B,C,D} message + from the downstream device. + pattern: "^int(a|b|c|d)$" + - description: + Error condition detected and a flag is set in the Root Error Status + register of the AER capability. It's asserted when the RC has + internally generated an error or an error message has been received by + the RC. + const: aer + - description: + A PME message is received by the port. That means the PME + status bit has been set in the Root Status register (the event is + supposed to be unmasked in the Root Control register). + const: pme + - description: + A hot-plug event is detected. That is, a bit has been set in the + Slot Status register and the corresponding event is enabled in + the Slot Control register. + const: hp + - description: + Link Autonomous Bandwidth Status flag has been set in the Link + Status register (the event is supposed to be unmasked in the + Link Control register).
+ const: bw_au + - description: + Bandwidth Management Status flag has been set in the Link + Status register (the event is supposed to be unmasked in the + Link Control register). + const: bw_mg + - description: + Vendor-specific IRQ names. Consider using the generic names above + for new bindings. + oneOf: + - description: See native "app" IRQ for details + enum: [ intr ] + allOf: + - contains: + const: msi additionalProperties: true required: + - compatible - reg - reg-names - - compatible examples: - | - bus { - #address-cells = <1>; - #size-cells = <1>; - pcie@dfc00000 { - device_type = "pci"; - compatible = "snps,dw-pcie"; - reg = <0xdfc00000 0x0001000>, /* IP registers */ - <0xd0000000 0x0002000>; /* Configuration space */ - reg-names = "dbi", "config"; - #address-cells = <3>; - #size-cells = <2>; - ranges = <0x81000000 0 0x00000000 0xde000000 0 0x00010000>, - <0x82000000 0 0xd0400000 0xd0400000 0 0x0d000000>; - interrupts = <25>, <24>; - #interrupt-cells = <1>; - num-lanes = <1>; - }; + pcie@dfc00000 { + compatible = "snps,dw-pcie"; + device_type = "pci"; + reg = <0xdfc00000 0x0001000>, /* IP registers */ + <0xd0000000 0x0002000>; /* Configuration space */ + reg-names = "dbi", "config"; + #address-cells = <3>; + #size-cells = <2>; + ranges = <0x81000000 0 0x00000000 0xde000000 0 0x00010000>, + <0x82000000 0 0xd0400000 0xd0400000 0 0x0d000000>; + bus-range = <0x0 0xff>; + + interrupts = <25>, <24>; + interrupt-names = "msi", "hp"; + #interrupt-cells = <1>; + + reset-gpios = <&port0 0 1>; + + phys = <&pcie_phy>; + phy-names = "pcie"; + + num-lanes = <1>; + max-link-speed = <3>; }; diff --git a/Documentation/devicetree/bindings/pci/ti,j721e-pci-ep.yaml b/Documentation/devicetree/bindings/pci/ti,j721e-pci-ep.yaml index aed437dac363a10131835cf1ca7cce5155c53afd..10e6eabdff534ac947b31209848944709091f04a 100644 --- a/Documentation/devicetree/bindings/pci/ti,j721e-pci-ep.yaml +++ b/Documentation/devicetree/bindings/pci/ti,j721e-pci-ep.yaml @@ -58,6 +58,13 @@ properties: dma-coherent: description: Indicates that the PCIe IP block can ensure the coherency + interrupts: + maxItems: 1 + + interrupt-names: + items: + - const: link_state + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml b/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml index 2115d5a3f0e14ba530573e9b6ceb0c2572274066..b0513b197d0871e32f621317a41283df584ef5e1 100644 --- a/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml +++ b/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml @@ -73,9 +73,31 @@ properties: - const: 0xb00f - items: - const: 0xb010 + - items: + - const: 0xb013 msi-map: true + interrupts: + maxItems: 1 + + interrupt-names: + items: + - const: link_state + + interrupt-controller: + type: object + additionalProperties: false + + properties: + interrupt-controller: true + + '#interrupt-cells': + const: 1 + + interrupts: + maxItems: 1 + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml b/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml index 48ed227fc5b9e888781a9df39d52b90b1cf5f6b7..53da2edd7c9abef7d13c588af2fbb63a17d509eb 100644 --- a/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml @@ -36,7 +36,7 @@ properties: - const: mpu interrupts: - maxItems: 1 + maxItems: 2 clocks: items: @@ -94,8 +94,9 @@ examples: #interrupt-cells = <1>; ranges = <0x81000000 0 0x40000000 0 0x40000000 
0 0x00010000>, <0x82000000 0 0x50000000 0 0x50000000 0 0x20000000>; - interrupts = ; - interrupt-names = "intr"; + interrupts = , + ; + interrupt-names = "msi", "intr"; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &gic GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml index dfb6a89935351d14e8dd7e4409e63ef85468859e..fe9702e7bdd8a1f8cb4657773591401e5212c17f 100644 --- a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml +++ b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml @@ -17,13 +17,20 @@ properties: compatible: oneOf: - const: allwinner,sun6i-a31-mipi-dphy + - const: allwinner,sun50i-a100-mipi-dphy - items: - const: allwinner,sun50i-a64-mipi-dphy - const: allwinner,sun6i-a31-mipi-dphy + - items: + - const: allwinner,sun20i-d1-mipi-dphy + - const: allwinner,sun50i-a100-mipi-dphy reg: maxItems: 1 + interrupts: + maxItems: 1 + clocks: items: - description: Bus Clock @@ -53,6 +60,7 @@ required: - "#phy-cells" - compatible - reg + - interrupts - clocks - clock-names - resets @@ -61,9 +69,12 @@ additionalProperties: false examples: - | + #include + dphy0: d-phy@1ca1000 { compatible = "allwinner,sun6i-a31-mipi-dphy"; reg = <0x01ca1000 0x1000>; + interrupts = ; clocks = <&ccu 23>, <&ccu 97>; clock-names = "bus", "mod"; resets = <&ccu 4>; diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml index 77539b4601c24d794930fa465d0c1bbf0c16b178..2df012d13655ee361e19b2b6e53e2ecc625206d7 100644 --- a/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml +++ b/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml @@ -36,18 +36,22 @@ properties: - const: pmu3 clocks: + minItems: 4 items: - description: USB OTG PHY bus clock - description: USB Host 0 PHY bus clock - description: USB Host 1 PHY bus clock - description: USB Host 2 PHY bus clock + - description: PMU clock for host port 2 clock-names: + minItems: 4 items: - const: usb0_phy - const: usb1_phy - const: usb2_phy - const: usb3_phy + - const: pmu2_clk resets: items: @@ -96,6 +100,28 @@ required: - resets - reset-names +allOf: + - if: + properties: + compatible: + contains: + enum: + - allwinner,sun50i-h616-usb-phy + then: + properties: + clocks: + minItems: 5 + + clock-names: + minItems: 5 + else: + properties: + clocks: + maxItems: 4 + + clock-names: + maxItems: 4 + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/phy/brcm,ns2-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,ns2-pcie-phy.yaml index 70eb48b391c9b2109cd0aac34af4e62a5eef9f12..527010702f5e76d2717f99f62b131dfb7558e583 100644 --- a/Documentation/devicetree/bindings/phy/brcm,ns2-pcie-phy.yaml +++ b/Documentation/devicetree/bindings/phy/brcm,ns2-pcie-phy.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/phy/brcm,ns2-pcie-phy.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom NS2 PCIe PHY binding document +title: Broadcom NS2 PCIe PHY maintainers: - Ray Jui diff --git a/Documentation/devicetree/bindings/phy/calxeda-combophy.yaml b/Documentation/devicetree/bindings/phy/calxeda-combophy.yaml index 41ee16e21f8d2d069d98dbaa3a36fde5609c91f2..d05a7c7930353a7aa9969b279c317224ff36871b 100644 --- a/Documentation/devicetree/bindings/phy/calxeda-combophy.yaml +++ 
b/Documentation/devicetree/bindings/phy/calxeda-combophy.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/phy/calxeda-combophy.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Calxeda Highbank Combination PHYs binding for SATA +title: Calxeda Highbank Combination PHYs for SATA description: | The Calxeda Combination PHYs connect the SoC to the internal fabric diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/fsl,imx8-pcie-phy.yaml index 0af765ba2793262a9284cdf52dda61dfc7f97177..182a219387b0e69164148b62a819d4dd5ba462b1 100644 --- a/Documentation/devicetree/bindings/phy/fsl,imx8-pcie-phy.yaml +++ b/Documentation/devicetree/bindings/phy/fsl,imx8-pcie-phy.yaml @@ -16,6 +16,7 @@ properties: compatible: enum: - fsl,imx8mm-pcie-phy + - fsl,imx8mp-pcie-phy reg: maxItems: 1 @@ -28,11 +29,16 @@ properties: - const: ref resets: - maxItems: 1 + minItems: 1 + maxItems: 2 reset-names: - items: - - const: pciephy + oneOf: + - items: # for iMX8MM + - const: pciephy + - items: # for IMX8MP + - const: pciephy + - const: perst fsl,refclk-pad-mode: description: | @@ -60,6 +66,10 @@ properties: description: A boolean property indicating the CLKREQ# signal is not supported in the board design (optional) + power-domains: + description: PCIe PHY power domain (optional). + maxItems: 1 + required: - "#phy-cells" - compatible diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml index 2936f3510a6ada5e1a0ad8805646a39bd85f6cc1..e6f9f5540cc3f9143ee9ce4d4b28ac8f60b32aef 100644 --- a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml +++ b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/phy/fsl,imx8mq-usb-phy.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Freescale i.MX8MQ USB3 PHY binding +title: Freescale i.MX8MQ USB3 PHY maintainers: - Li Jun @@ -28,6 +28,9 @@ properties: items: - const: phy + power-domains: + maxItems: 1 + vbus-supply: description: A phandle to the regulator for USB VBUS. 
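For illustration, a minimal DTS sketch of how the new i.MX8MP support in the fsl,imx8-pcie-phy hunk above (the second "perst" reset entry and the optional power-domains) might be instantiated. The unit address, clock/reset phandles and indices below are hypothetical placeholders, not values from a real board file:

    pcie_phy: pcie-phy@32f00000 {
        compatible = "fsl,imx8mp-pcie-phy";
        reg = <0x32f00000 0x10000>;           /* hypothetical base/size */
        clocks = <&pcie_ref_clk>;             /* hypothetical ref-clock phandle */
        clock-names = "ref";
        resets = <&src 0>, <&src 1>;          /* hypothetical reset specifiers */
        reset-names = "pciephy", "perst";     /* "perst" is the new i.MX8MP entry */
        power-domains = <&hsio_pd>;           /* optional, newly documented */
        #phy-cells = <0>;                     /* assumed value; the property itself is required */
    };

The point of the reset-names oneOf is that the i.MX8MM variant keeps the single "pciephy" entry, while the i.MX8MP variant adds "perst" as a second, ordered entry.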
diff --git a/Documentation/devicetree/bindings/phy/fsl,lynx-28g.yaml b/Documentation/devicetree/bindings/phy/fsl,lynx-28g.yaml index 4d91e2f4f247de3c1a013a24a4b69e1fc3e69934..ff9f9ca0f19ccd55e4b86bfa6c64dd866b194757 100644 --- a/Documentation/devicetree/bindings/phy/fsl,lynx-28g.yaml +++ b/Documentation/devicetree/bindings/phy/fsl,lynx-28g.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/phy/fsl,lynx-28g.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Freescale Lynx 28G SerDes PHY binding +title: Freescale Lynx 28G SerDes PHY maintainers: - Ioana Ciornei diff --git a/Documentation/devicetree/bindings/phy/ingenic,phy-usb.yaml b/Documentation/devicetree/bindings/phy/ingenic,phy-usb.yaml index 5cab2164863211a0d89e74eb559766bda7f8f547..30b42008db063c5345cfe27c86c64c39333fe225 100644 --- a/Documentation/devicetree/bindings/phy/ingenic,phy-usb.yaml +++ b/Documentation/devicetree/bindings/phy/ingenic,phy-usb.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/phy/ingenic,phy-usb.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs USB PHY devicetree bindings +title: Ingenic SoCs USB PHY maintainers: - Paul Cercueil diff --git a/Documentation/devicetree/bindings/phy/intel,keembay-phy-usb.yaml b/Documentation/devicetree/bindings/phy/intel,keembay-phy-usb.yaml index 52815b6c2b88d01953dab8f7b055e7b5b5afe964..5cee4c85ff8bb9d57d1092dac4b3d55d866a0364 100644 --- a/Documentation/devicetree/bindings/phy/intel,keembay-phy-usb.yaml +++ b/Documentation/devicetree/bindings/phy/intel,keembay-phy-usb.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/phy/intel,keembay-phy-usb.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Intel Keem Bay USB PHY bindings +title: Intel Keem Bay USB PHY maintainers: - Wan Ahmad Zainie diff --git a/Documentation/devicetree/bindings/phy/intel,phy-thunderbay-emmc.yaml b/Documentation/devicetree/bindings/phy/intel,phy-thunderbay-emmc.yaml index b09e5ba5e127d66dcdf7e03ed87c9088b82c46d4..361ffc35b16bfff3d0c3194e9e75c0e69a976a09 100644 --- a/Documentation/devicetree/bindings/phy/intel,phy-thunderbay-emmc.yaml +++ b/Documentation/devicetree/bindings/phy/intel,phy-thunderbay-emmc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/phy/intel,phy-thunderbay-emmc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Intel Thunder Bay eMMC PHY bindings +title: Intel Thunder Bay eMMC PHY maintainers: - Srikandan Nandhini diff --git a/Documentation/devicetree/bindings/phy/marvell,mmp3-usb-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,mmp3-usb-phy.yaml index c97043eaa8fbe496a466484a795ea2b8683aa39c..be13113f7b47fc798385796bd366dbe79d1696ce 100644 --- a/Documentation/devicetree/bindings/phy/marvell,mmp3-usb-phy.yaml +++ b/Documentation/devicetree/bindings/phy/marvell,mmp3-usb-phy.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/phy/marvell,mmp3-usb-phy.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Marvell MMP3 USB PHY bindings +title: Marvell MMP3 USB PHY maintainers: - Lubomir Rintel diff --git a/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml b/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml index 9c2a7345955d4d3b8c6ff526aa80e03a219a297c..26f2b887cfc1ec319697781a493268b30dab4310 100644 --- a/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml +++ b/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/phy/mediatek,dsi-phy.yaml# $schema: 
http://devicetree.org/meta-schemas/core.yaml# -title: MediaTek MIPI Display Serial Interface (DSI) PHY binding +title: MediaTek MIPI Display Serial Interface (DSI) PHY maintainers: - Chun-Kuang Hu diff --git a/Documentation/devicetree/bindings/phy/mediatek,hdmi-phy.yaml b/Documentation/devicetree/bindings/phy/mediatek,hdmi-phy.yaml index 0d94950b84ca3fc3b2ed9755d6bbf2e2ff558ca4..6cfdaadec08597b38153a10e7ec84102e34f9bbe 100644 --- a/Documentation/devicetree/bindings/phy/mediatek,hdmi-phy.yaml +++ b/Documentation/devicetree/bindings/phy/mediatek,hdmi-phy.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/phy/mediatek,hdmi-phy.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: MediaTek High Definition Multimedia Interface (HDMI) PHY binding +title: MediaTek High Definition Multimedia Interface (HDMI) PHY maintainers: - Chun-Kuang Hu diff --git a/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml b/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml index 74cc32c1d2e85cac5e18090de2dbb1d2ccdd87be..3e62b5d4da616e4228cc38a9465422c983a33d70 100644 --- a/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml +++ b/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/phy/mediatek,ufs-phy.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: MediaTek Universal Flash Storage (UFS) M-PHY binding +title: MediaTek Universal Flash Storage (UFS) M-PHY maintainers: - Stanley Chu diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml b/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml index a9e227d8b076ac2d293c134ce12d92312a182ed1..6a09472740ed974bcc492ce7af371ee5e4676c58 100644 --- a/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml +++ b/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/phy/phy-cadence-sierra.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Cadence Sierra PHY binding +title: Cadence Sierra PHY description: This binding describes the Cadence Sierra PHY. 
Sierra PHY supports multilink diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml b/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml index 2fec9e54ad0e652d102b05bc67a16a4b650aa5bb..2ad1faadda2a250dfa8817a715b193d8b0fb43d8 100644 --- a/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml +++ b/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/phy/phy-cadence-torrent.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Cadence Torrent SD0801 PHY binding +title: Cadence Torrent SD0801 PHY description: This binding describes the Cadence SD0801 PHY (also known as Torrent PHY) diff --git a/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml b/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml index 801993813b18520989123387d4ecc87de506c223..5b4c915cc9e5620dce0444ccbb1bf2a44f17dc04 100644 --- a/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml +++ b/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/phy/phy-stm32-usbphyc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 USB HS PHY controller binding +title: STMicroelectronics STM32 USB HS PHY controller description: diff --git a/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.yaml b/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.yaml index 4dc5205d893ba63026b1fbf197d29cfd6119264b..445b2467f4f6be049f074fc78f51c528f284da96 100644 --- a/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.yaml +++ b/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/phy/phy-tegra194-p2u.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: NVIDIA Tegra194 & Tegra234 P2U binding +title: NVIDIA Tegra194 & Tegra234 P2U maintainers: - Thierry Reding diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml similarity index 96% rename from Documentation/devicetree/bindings/phy/qcom,qmp-pcie-phy.yaml rename to Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml index 324ad7d03a383a6f63138efc939a0b399113e4de..62045dcfb20c5d121591c59cb032981192f6740e 100644 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-pcie-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml @@ -1,10 +1,10 @@ # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) %YAML 1.2 --- -$id: http://devicetree.org/schemas/phy/qcom,qmp-pcie-phy.yaml# +$id: http://devicetree.org/schemas/phy/qcom,ipq8074-qmp-pcie-phy.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm QMP PHY controller (PCIe) +title: Qualcomm QMP PHY controller (PCIe, IPQ8074) maintainers: - Vinod Koul @@ -13,6 +13,9 @@ description: QMP PHY controller supports physical layer functionality for a number of controllers on Qualcomm chipsets, such as, PCIe, UFS, and USB. + Note that these bindings are for SoCs up to SC8180X. For newer SoCs, see + qcom,sc8280xp-qmp-pcie-phy.yaml. 
+ properties: compatible: enum: diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-ufs-phy.yaml similarity index 93% rename from Documentation/devicetree/bindings/phy/qcom,qmp-ufs-phy.yaml rename to Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-ufs-phy.yaml index 815c375d0f7bce89a62beafd8e2d92ff30dee3c6..be41acbd3b6c30fa165a1f9c3fb3f9db9323fe46 100644 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-ufs-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-ufs-phy.yaml @@ -1,10 +1,10 @@ # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) %YAML 1.2 --- -$id: http://devicetree.org/schemas/phy/qcom,qmp-ufs-phy.yaml# +$id: http://devicetree.org/schemas/phy/qcom,msm8996-qmp-ufs-phy.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm QMP PHY controller (UFS) +title: Qualcomm QMP PHY controller (UFS, MSM8996) maintainers: - Vinod Koul @@ -13,13 +13,15 @@ description: QMP PHY controller supports physical layer functionality for a number of controllers on Qualcomm chipsets, such as, PCIe, UFS, and USB. + Note that these bindings are for SoCs up to SC8180X. For newer SoCs, see + qcom,sc8280xp-qmp-ufs-phy.yaml. + properties: compatible: enum: - qcom,msm8996-qmp-ufs-phy - qcom,msm8998-qmp-ufs-phy - qcom,sc8180x-qmp-ufs-phy - - qcom,sc8280xp-qmp-ufs-phy - qcom,sdm845-qmp-ufs-phy - qcom,sm6115-qmp-ufs-phy - qcom,sm6350-qmp-ufs-phy @@ -119,7 +121,6 @@ allOf: enum: - qcom,msm8998-qmp-ufs-phy - qcom,sc8180x-qmp-ufs-phy - - qcom,sc8280xp-qmp-ufs-phy - qcom,sdm845-qmp-ufs-phy - qcom,sm6115-qmp-ufs-phy - qcom,sm6350-qmp-ufs-phy @@ -156,7 +157,6 @@ allOf: contains: enum: - qcom,msm8998-qmp-ufs-phy - - qcom,sc8280xp-qmp-ufs-phy - qcom,sdm845-qmp-ufs-phy - qcom,sm6350-qmp-ufs-phy - qcom,sm8150-qmp-ufs-phy @@ -211,11 +211,12 @@ allOf: examples: - | - #include + #include #include + phy-wrapper@1d87000 { - compatible = "qcom,sc8280xp-qmp-ufs-phy"; - reg = <0x01d87000 0xe10>; + compatible = "qcom,sm8250-qmp-ufs-phy"; + reg = <0x01d87000 0x1c0>; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x01d87000 0x1000>; diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-usb-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml similarity index 95% rename from Documentation/devicetree/bindings/phy/qcom,qmp-usb-phy.yaml rename to Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml index 7acb4b7de7f940747efabaa05a9fd9bb35045a12..0c6b3ba7346b976dec1d2569e748557a7dcb4517 100644 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-usb-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml @@ -1,10 +1,10 @@ # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) %YAML 1.2 --- -$id: http://devicetree.org/schemas/phy/qcom,qmp-usb-phy.yaml# +$id: http://devicetree.org/schemas/phy/qcom,msm8996-qmp-usb3-phy.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm QMP PHY controller (USB) +title: Qualcomm QMP PHY controller (USB, MSM8996) maintainers: - Vinod Koul @@ -13,6 +13,9 @@ description: QMP PHY controller supports physical layer functionality for a number of controllers on Qualcomm chipsets, such as, PCIe, UFS, and USB. + Note that these bindings are for SoCs up to SC8180X. For newer SoCs, see + qcom,sc8280xp-qmp-usb3-uni-phy.yaml. 
+ properties: compatible: enum: @@ -23,7 +26,6 @@ properties: - qcom,qcm2290-qmp-usb3-phy - qcom,sc7180-qmp-usb3-phy - qcom,sc8180x-qmp-usb3-phy - - qcom,sc8280xp-qmp-usb3-uni-phy - qcom,sdm845-qmp-usb3-phy - qcom,sdm845-qmp-usb3-uni-phy - qcom,sdx55-qmp-usb3-uni-phy @@ -201,7 +203,6 @@ allOf: compatible: contains: enum: - - qcom,sc8280xp-qmp-usb3-uni-phy - qcom,sm8150-qmp-usb3-phy - qcom,sm8150-qmp-usb3-uni-phy - qcom,sm8250-qmp-usb3-uni-phy @@ -268,16 +269,6 @@ allOf: - const: phy_phy - const: phy - - if: - properties: - compatible: - contains: - enum: - - qcom,sc8280xp-qmp-usb3-uni-phy - then: - required: - - power-domains - - if: properties: compatible: @@ -349,7 +340,6 @@ allOf: contains: enum: - qcom,msm8996-qmp-usb3-phy - - qcom,sc8280xp-qmp-usb3-uni-phy - qcom,sm8250-qmp-usb3-uni-phy - qcom,sm8350-qmp-usb3-uni-phy then: diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc7180-qmp-usb3-dp-phy.yaml similarity index 91% rename from Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml rename to Documentation/devicetree/bindings/phy/qcom,sc7180-qmp-usb3-dp-phy.yaml index 97a7ecafbf852e371558b01c8754b07459fe338c..d9d0ab90edb1bc144d2d83fe99196fc6a252a334 100644 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,sc7180-qmp-usb3-dp-phy.yaml @@ -2,10 +2,17 @@ %YAML 1.2 --- -$id: "http://devicetree.org/schemas/phy/qcom,qmp-usb3-dp-phy.yaml#" -$schema: "http://devicetree.org/meta-schemas/core.yaml#" +$id: http://devicetree.org/schemas/phy/qcom,sc7180-qmp-usb3-dp-phy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm QMP USB3 DP PHY controller +title: Qualcomm QMP USB3 DP PHY controller (SC7180) + +description: + The QMP PHY controller supports physical layer functionality for a number of + controllers on Qualcomm chipsets, such as, PCIe, UFS and USB. + + Note that these bindings are for SoCs up to SC8180X. For newer SoCs, see + qcom,sc8280xp-qmp-usb43dp-phy.yaml. maintainers: - Wesley Cheng @@ -16,7 +23,6 @@ properties: - qcom,sc7180-qmp-usb3-dp-phy - qcom,sc7280-qmp-usb3-dp-phy - qcom,sc8180x-qmp-usb3-dp-phy - - qcom,sc8280xp-qmp-usb43dp-phy - qcom,sdm845-qmp-usb3-dp-phy - qcom,sm8250-qmp-usb3-dp-phy reg: @@ -162,17 +168,6 @@ required: additionalProperties: false -allOf: - - if: - properties: - compatible: - contains: - enum: - - qcom,sc8280xp-qmp-usb43dp-phy - then: - required: - - power-domains - examples: - | #include diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..80aa8d2507fb608c6760e7ba3ce24b8e9fd5445d --- /dev/null +++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml @@ -0,0 +1,165 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/qcom,sc8280xp-qmp-pcie-phy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm QMP PHY controller (PCIe, SC8280XP) + +maintainers: + - Vinod Koul + +description: + The QMP PHY controller supports physical layer functionality for a number of + controllers on Qualcomm chipsets, such as, PCIe, UFS, and USB. 
+ +properties: + compatible: + enum: + - qcom,sc8280xp-qmp-gen3x1-pcie-phy + - qcom,sc8280xp-qmp-gen3x2-pcie-phy + - qcom,sc8280xp-qmp-gen3x4-pcie-phy + + reg: + minItems: 1 + maxItems: 2 + + clocks: + maxItems: 6 + + clock-names: + items: + - const: aux + - const: cfg_ahb + - const: ref + - const: rchng + - const: pipe + - const: pipediv2 + + power-domains: + maxItems: 1 + + resets: + maxItems: 1 + + reset-names: + items: + - const: phy + + vdda-phy-supply: true + + vdda-pll-supply: true + + qcom,4ln-config-sel: + description: PCIe 4-lane configuration + $ref: /schemas/types.yaml#/definitions/phandle-array + items: + - items: + - description: phandle of TCSR syscon + - description: offset of PCIe 4-lane configuration register + - description: offset of configuration bit for this PHY + + "#clock-cells": + const: 0 + + clock-output-names: + maxItems: 1 + + "#phy-cells": + const: 0 + +required: + - compatible + - reg + - clocks + - clock-names + - power-domains + - resets + - reset-names + - vdda-phy-supply + - vdda-pll-supply + - "#clock-cells" + - clock-output-names + - "#phy-cells" + +additionalProperties: false + +allOf: + - if: + properties: + compatible: + contains: + enum: + - qcom,sc8280xp-qmp-gen3x4-pcie-phy + then: + properties: + reg: + items: + - description: port a + - description: port b + required: + - qcom,4ln-config-sel + else: + properties: + reg: + maxItems: 1 + +examples: + - | + #include + + pcie2b_phy: phy@1c18000 { + compatible = "qcom,sc8280xp-qmp-gen3x2-pcie-phy"; + reg = <0x01c18000 0x2000>; + + clocks = <&gcc GCC_PCIE_2B_AUX_CLK>, + <&gcc GCC_PCIE_2B_CFG_AHB_CLK>, + <&gcc GCC_PCIE_2A2B_CLKREF_CLK>, + <&gcc GCC_PCIE2B_PHY_RCHNG_CLK>, + <&gcc GCC_PCIE_2B_PIPE_CLK>, + <&gcc GCC_PCIE_2B_PIPEDIV2_CLK>; + clock-names = "aux", "cfg_ahb", "ref", "rchng", + "pipe", "pipediv2"; + + power-domains = <&gcc PCIE_2B_GDSC>; + + resets = <&gcc GCC_PCIE_2B_PHY_BCR>; + reset-names = "phy"; + + vdda-phy-supply = <&vreg_l6d>; + vdda-pll-supply = <&vreg_l4d>; + + #clock-cells = <0>; + clock-output-names = "pcie_2b_pipe_clk"; + + #phy-cells = <0>; + }; + + pcie2a_phy: phy@1c24000 { + compatible = "qcom,sc8280xp-qmp-gen3x4-pcie-phy"; + reg = <0x01c24000 0x2000>, <0x01c26000 0x2000>; + + clocks = <&gcc GCC_PCIE_2A_AUX_CLK>, + <&gcc GCC_PCIE_2A_CFG_AHB_CLK>, + <&gcc GCC_PCIE_2A2B_CLKREF_CLK>, + <&gcc GCC_PCIE2A_PHY_RCHNG_CLK>, + <&gcc GCC_PCIE_2A_PIPE_CLK>, + <&gcc GCC_PCIE_2A_PIPEDIV2_CLK>; + clock-names = "aux", "cfg_ahb", "ref", "rchng", + "pipe", "pipediv2"; + + power-domains = <&gcc PCIE_2A_GDSC>; + + resets = <&gcc GCC_PCIE_2A_PHY_BCR>; + reset-names = "phy"; + + vdda-phy-supply = <&vreg_l6d>; + vdda-pll-supply = <&vreg_l4d>; + + qcom,4ln-config-sel = <&tcsr 0xa044 0>; + + #clock-cells = <0>; + clock-output-names = "pcie_2a_pipe_clk"; + + #phy-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dde86a19f79262bc82d04d4c81d0fd1c96f919cc --- /dev/null +++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml @@ -0,0 +1,83 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/qcom,sc8280xp-qmp-ufs-phy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm QMP PHY controller (UFS, SC8280XP) + +maintainers: + - Vinod Koul + +description: + The QMP PHY controller supports physical layer functionality for a 
number of + controllers on Qualcomm chipsets, such as, PCIe, UFS, and USB. + +properties: + compatible: + enum: + - qcom,sc8280xp-qmp-ufs-phy + + reg: + maxItems: 1 + + clocks: + maxItems: 2 + + clock-names: + items: + - const: ref + - const: ref_aux + + power-domains: + maxItems: 1 + + resets: + maxItems: 1 + + reset-names: + items: + - const: ufsphy + + vdda-phy-supply: true + + vdda-pll-supply: true + + "#phy-cells": + const: 0 + +required: + - compatible + - reg + - clocks + - clock-names + - power-domains + - resets + - reset-names + - vdda-phy-supply + - vdda-pll-supply + - "#phy-cells" + +additionalProperties: false + +examples: + - | + #include + + ufs_mem_phy: phy@1d87000 { + compatible = "qcom,sc8280xp-qmp-ufs-phy"; + reg = <0x01d87000 0x1000>; + + clocks = <&gcc GCC_UFS_REF_CLKREF_CLK>, <&gcc GCC_UFS_PHY_PHY_AUX_CLK>; + clock-names = "ref", "ref_aux"; + + power-domains = <&gcc UFS_PHY_GDSC>; + + resets = <&ufs_mem_hc 0>; + reset-names = "ufsphy"; + + vdda-phy-supply = <&vreg_l6b>; + vdda-pll-supply = <&vreg_l3b>; + + #phy-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..16fce10382852e640f844305f20329b4691046f2 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml @@ -0,0 +1,102 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm QMP PHY controller (USB, SC8280XP) + +maintainers: + - Vinod Koul + +description: + The QMP PHY controller supports physical layer functionality for a number of + controllers on Qualcomm chipsets, such as, PCIe, UFS, and USB. 
+ +properties: + compatible: + enum: + - qcom,sc8280xp-qmp-usb3-uni-phy + + reg: + maxItems: 1 + + clocks: + maxItems: 4 + + clock-names: + items: + - const: aux + - const: ref + - const: com_aux + - const: pipe + + power-domains: + maxItems: 1 + + resets: + maxItems: 2 + + reset-names: + items: + - const: phy + - const: phy_phy + + vdda-phy-supply: true + + vdda-pll-supply: true + + "#clock-cells": + const: 0 + + clock-output-names: + maxItems: 1 + + "#phy-cells": + const: 0 + +required: + - compatible + - reg + - clocks + - clock-names + - power-domains + - resets + - reset-names + - vdda-phy-supply + - vdda-pll-supply + - "#clock-cells" + - clock-output-names + - "#phy-cells" + +additionalProperties: false + +examples: + - | + #include + #include + + phy@88ef000 { + compatible = "qcom,sc8280xp-qmp-usb3-uni-phy"; + reg = <0x088ef000 0x2000>; + + clocks = <&gcc GCC_USB3_MP_PHY_AUX_CLK>, + <&gcc GCC_USB3_MP0_CLKREF_CLK>, + <&gcc GCC_USB3_MP_PHY_COM_AUX_CLK>, + <&gcc GCC_USB3_MP_PHY_PIPE_0_CLK>; + clock-names = "aux", "ref", "com_aux", "pipe"; + + power-domains = <&gcc USB30_MP_GDSC>; + + resets = <&gcc GCC_USB3_UNIPHY_MP0_BCR>, + <&gcc GCC_USB3UNIPHY_PHY_MP0_BCR>; + reset-names = "phy", "phy_phy"; + + vdda-phy-supply = <&vreg_l3a>; + vdda-pll-supply = <&vreg_l5a>; + + #clock-cells = <0>; + clock-output-names = "usb2_phy0_pipe_clk"; + + #phy-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6f31693d986891695892d2fb6c2057ade83dac42 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml @@ -0,0 +1,99 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm QMP USB4-USB3-DP PHY controller (SC8280XP) + +maintainers: + - Vinod Koul + +description: + The QMP PHY controller supports physical layer functionality for a number of + controllers on Qualcomm chipsets, such as, PCIe, UFS and USB. 
+ +properties: + compatible: + enum: + - qcom,sc8280xp-qmp-usb43dp-phy + + reg: + maxItems: 1 + + clocks: + maxItems: 4 + + clock-names: + items: + - const: aux + - const: ref + - const: com_aux + - const: usb3_pipe + + power-domains: + maxItems: 1 + + resets: + maxItems: 2 + + reset-names: + items: + - const: phy + - const: common + + vdda-phy-supply: true + + vdda-pll-supply: true + + "#clock-cells": + const: 1 + description: + See include/dt-bindings/dt-bindings/phy/phy-qcom-qmp.h + + "#phy-cells": + const: 1 + description: + See include/dt-bindings/dt-bindings/phy/phy-qcom-qmp.h + +required: + - compatible + - reg + - clocks + - clock-names + - power-domains + - resets + - reset-names + - vdda-phy-supply + - vdda-pll-supply + - "#clock-cells" + - "#phy-cells" + +additionalProperties: false + +examples: + - | + #include + + phy@88eb000 { + compatible = "qcom,sc8280xp-qmp-usb43dp-phy"; + reg = <0x088eb000 0x4000>; + + clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>, + <&gcc GCC_USB4_EUD_CLKREF_CLK>, + <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>, + <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; + clock-names = "aux", "ref", "com_aux", "usb3_pipe"; + + power-domains = <&gcc USB30_PRIM_GDSC>; + + resets = <&gcc GCC_USB3_PHY_PRIM_BCR>, + <&gcc GCC_USB4_DP_PHY_PRIM_BCR>; + reset-names = "phy", "common"; + + vdda-phy-supply = <&vreg_l9d>; + vdda-pll-supply = <&vreg_l4d>; + + #clock-cells = <1>; + #phy-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml index 0655e485b2604a9bb81f7b5cd228f9caa1ba8a1e..aa97478dd01611ead27be7a6ae5b71ffff02b0bd 100644 --- a/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/phy/qcom,usb-hs-phy.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm's USB HS PHY binding description +title: Qualcomm's USB HS PHY maintainers: - Bjorn Andersson diff --git a/Documentation/devicetree/bindings/phy/renesas,r8a779f0-ether-serdes.yaml b/Documentation/devicetree/bindings/phy/renesas,r8a779f0-ether-serdes.yaml new file mode 100644 index 0000000000000000000000000000000000000000..93ab72874228c4e71149601a79543519b0415bd9 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/renesas,r8a779f0-ether-serdes.yaml @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/renesas,r8a779f0-ether-serdes.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Renesas Ethernet SERDES + +maintainers: + - Yoshihiro Shimoda + +properties: + compatible: + const: renesas,r8a779f0-ether-serdes + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + resets: + maxItems: 1 + + power-domains: + maxItems: 1 + + '#phy-cells': + description: Port number of SERDES. 
+ const: 1 + +required: + - compatible + - reg + - clocks + - resets + - power-domains + - '#phy-cells' + +additionalProperties: false + +examples: + - | + #include + #include + + phy@e6444000 { + compatible = "renesas,r8a779f0-ether-serdes"; + reg = <0xe6444000 0xc00>; + clocks = <&cpg CPG_MOD 1506>; + power-domains = <&sysc R8A779F0_PD_ALWAYS_ON>; + resets = <&cpg 1506>; + #phy-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/phy/ti,phy-am654-serdes.yaml b/Documentation/devicetree/bindings/phy/ti,phy-am654-serdes.yaml index 62dcb84c08aa0a01677b544d27f102e7629c40a3..738c92bb751874f55b31d0bd710411da16adcc31 100644 --- a/Documentation/devicetree/bindings/phy/ti,phy-am654-serdes.yaml +++ b/Documentation/devicetree/bindings/phy/ti,phy-am654-serdes.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/phy/ti,phy-am654-serdes.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: TI AM654 SERDES binding +title: TI AM654 SERDES description: This binding describes the TI AM654 SERDES. AM654 SERDES can be configured diff --git a/Documentation/devicetree/bindings/phy/ti,phy-gmii-sel.yaml b/Documentation/devicetree/bindings/phy/ti,phy-gmii-sel.yaml index da7cac537e15c30a81ac6e30930b84d996803029..6d46f57fa1b4c11342d2307949177801187ece1c 100644 --- a/Documentation/devicetree/bindings/phy/ti,phy-gmii-sel.yaml +++ b/Documentation/devicetree/bindings/phy/ti,phy-gmii-sel.yaml @@ -5,7 +5,7 @@ $id: "http://devicetree.org/schemas/phy/ti,phy-gmii-sel.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: CPSW Port's Interface Mode Selection PHY Tree Bindings +title: CPSW Port's Interface Mode Selection PHY maintainers: - Kishon Vijay Abraham I @@ -54,6 +54,7 @@ properties: - ti,dm814-phy-gmii-sel - ti,am654-phy-gmii-sel - ti,j7200-cpsw5g-phy-gmii-sel + - ti,j721e-cpsw9g-phy-gmii-sel reg: maxItems: 1 @@ -63,14 +64,17 @@ properties: ti,qsgmii-main-ports: $ref: /schemas/types.yaml#/definitions/uint32-array description: | - Required only for QSGMII mode. Array to select the port for - QSGMII main mode. Rest of the ports are selected as QSGMII_SUB - ports automatically. Any one of the 4 CPSW5G ports can act as the - main port with the rest of them being the QSGMII_SUB ports. - maxItems: 1 + Required only for QSGMII mode. Array to select the port/s for QSGMII + main mode. The size of the array corresponds to the number of QSGMII + interfaces and thus, the number of distinct QSGMII main ports, + supported by the device. If the device supports two QSGMII interfaces + but only one QSGMII interface is desired, repeat the QSGMII main port + value corresponding to the QSGMII interface in the array. 
+ minItems: 1 + maxItems: 2 items: minimum: 1 - maximum: 4 + maximum: 8 allOf: - if: @@ -81,12 +85,43 @@ allOf: - ti,dra7xx-phy-gmii-sel - ti,dm814-phy-gmii-sel - ti,am654-phy-gmii-sel + - ti,j7200-cpsw5g-phy-gmii-sel + - ti,j721e-cpsw9g-phy-gmii-sel then: properties: '#phy-cells': const: 1 description: CPSW port number (starting from 1) + - if: + properties: + compatible: + contains: + enum: + - ti,j7200-cpsw5g-phy-gmii-sel + then: + properties: + ti,qsgmii-main-ports: + maxItems: 1 + items: + minimum: 1 + maximum: 4 + + - if: + properties: + compatible: + contains: + enum: + - ti,j721e-cpsw9g-phy-gmii-sel + then: + properties: + ti,qsgmii-main-ports: + minItems: 2 + maxItems: 2 + items: + minimum: 1 + maximum: 8 + - if: not: properties: @@ -94,6 +129,7 @@ allOf: contains: enum: - ti,j7200-cpsw5g-phy-gmii-sel + - ti,j721e-cpsw9g-phy-gmii-sel then: properties: ti,qsgmii-main-ports: false diff --git a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml index 2225925b6dad898b8cb653245a36df910770e29e..c54b36c104abe9bfc413bd32b5a073bf1eb347e3 100644 --- a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml +++ b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml @@ -15,8 +15,10 @@ properties: enum: - ti,j721e-wiz-16g - ti,j721e-wiz-10g + - ti,j721s2-wiz-10g - ti,am64-wiz-10g - ti,j7200-wiz-10g + - ti,j784s4-wiz-10g power-domains: maxItems: 1 diff --git a/Documentation/devicetree/bindings/phy/transmit-amplitude.yaml b/Documentation/devicetree/bindings/phy/transmit-amplitude.yaml index 51492fe738ec93da3c050c4c8ddf91c834c70baa..617f3c0b3dfb678e558ff47529e5c5940c7f2d46 100644 --- a/Documentation/devicetree/bindings/phy/transmit-amplitude.yaml +++ b/Documentation/devicetree/bindings/phy/transmit-amplitude.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/phy/transmit-amplitude.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Common PHY and network PCS transmit amplitude property binding +title: Common PHY and network PCS transmit amplitude property description: Binding describing the peak-to-peak transmit amplitude for common PHYs diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,scu-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,scu-pinctrl.yaml index 45ea565ce2387e76a6232f1e9b406cf254151995..fcd729afeee13185a0bb141860cb9f3f2e2ed2c5 100644 --- a/Documentation/devicetree/bindings/pinctrl/fsl,scu-pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/fsl,scu-pinctrl.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/pinctrl/fsl,scu-pinctrl.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: i.MX SCU Client Device Node - Pinctrl bindings based on SCU Message Protocol +title: i.MX SCU Client Device Node - Pinctrl Based on SCU Message Protocol maintainers: - Dong Aisheng diff --git a/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.yaml index c2c370448b8177455a807686b8a99753d08f92c1..a4397930e0e8009511fa42a6ac2367ed6794baaf 100644 --- a/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/pinctrl/ingenic,pinctrl.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs pin controller devicetree bindings +title: Ingenic SoCs pin controller description: > Please refer to pinctrl-bindings.txt in this directory for 
details of the diff --git a/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml b/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml index b425483501886ae92cc03cae6646c7d58eaac90c..ca0fef6e535e2b3e67ddb5956810068ac336c053 100644 --- a/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml +++ b/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/pinctrl/intel,lgm-io.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Intel Lightning Mountain SoC pinmux & GPIO controller binding +title: Intel Lightning Mountain SoC pinmux & GPIO controller maintainers: - Rahul Tanwar diff --git a/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml index f5a121311f612e0e2498778231f41c2acc5b9182..be81ed22a036a3f3e073725f697e33954160c554 100644 --- a/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml +++ b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/pinctrl/pincfg-node.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Generic pin configuration node schema +title: Generic Pin Configuration Node maintainers: - Linus Walleij diff --git a/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml b/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml index 551df3d9b80900de2d6d24cfb79367c002338f31..008c3ab7f1bb3fa7a0517757faf8b4c32a491389 100644 --- a/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml +++ b/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/pinctrl/pinmux-node.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Generic pin multiplexing node schema +title: Generic Pin Multiplexing Node maintainers: - Linus Walleij diff --git a/Documentation/devicetree/bindings/power/avs/qcom,cpr.yaml b/Documentation/devicetree/bindings/power/avs/qcom,cpr.yaml index 301db7daf8706a21266b1309cd2950fced7fc50a..2fd2178d1fa55923b879501b22f50c39f8c94f6e 100644 --- a/Documentation/devicetree/bindings/power/avs/qcom,cpr.yaml +++ b/Documentation/devicetree/bindings/power/avs/qcom,cpr.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/power/avs/qcom,cpr.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Core Power Reduction (CPR) bindings +title: Qualcomm Core Power Reduction (CPR) maintainers: - Niklas Cassel diff --git a/Documentation/devicetree/bindings/power/domain-idle-state.yaml b/Documentation/devicetree/bindings/power/domain-idle-state.yaml index 4ee920a1de69e8466935bfceece13fb6c772dc47..ec1f6f669e50b2e7b0756cb6a737c685ad81045b 100644 --- a/Documentation/devicetree/bindings/power/domain-idle-state.yaml +++ b/Documentation/devicetree/bindings/power/domain-idle-state.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/power/domain-idle-state.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: PM Domain Idle States binding description +title: PM Domain Idle States maintainers: - Ulf Hansson diff --git a/Documentation/devicetree/bindings/power/fsl,scu-pd.yaml b/Documentation/devicetree/bindings/power/fsl,scu-pd.yaml index 1f72b18ca0fcfba3e80105c25a1b2e219d67749e..407b7cfec783cdc5b7c27d33d027e4bef6db3a44 100644 --- a/Documentation/devicetree/bindings/power/fsl,scu-pd.yaml +++ b/Documentation/devicetree/bindings/power/fsl,scu-pd.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/power/fsl,scu-pd.yaml# $schema: 
http://devicetree.org/meta-schemas/core.yaml# -title: i.MX SCU Client Device Node - Power domain bindings based on SCU Message Protocol +title: i.MX SCU Client Device Node - Power Domain Based on SCU Message Protocol maintainers: - Dong Aisheng diff --git a/Documentation/devicetree/bindings/power/reset/gpio-restart.yaml b/Documentation/devicetree/bindings/power/reset/gpio-restart.yaml index a72d5c7215161af19348854dcc096496934bda47..d3d18e0f5db385240e645963aa6f8127a8e058f1 100644 --- a/Documentation/devicetree/bindings/power/reset/gpio-restart.yaml +++ b/Documentation/devicetree/bindings/power/reset/gpio-restart.yaml @@ -25,6 +25,9 @@ description: > inactive-delay, the GPIO is driven active again. After a delay specified by wait-delay, the restart handler completes allowing other restart handlers to be attempted. +allOf: + - $ref: restart-handler.yaml# + properties: compatible: const: gpio-restart @@ -41,16 +44,6 @@ properties: in its inactive state. priority: - $ref: /schemas/types.yaml#/definitions/uint32 - description: | - A priority ranging from 0 to 255 (default 129) according to the following guidelines: - - 0: Restart handler of last resort, with limited restart capabilities. - 128: Default restart handler; use if no other restart handler is expected to be available, - and/or if restart functionality is sufficient to restart the entire system. - 255: Highest priority restart handler, will preempt all other restart handlers. - minimum: 0 - maximum: 255 default: 129 active-delay: diff --git a/Documentation/devicetree/bindings/power/reset/restart-handler.yaml b/Documentation/devicetree/bindings/power/reset/restart-handler.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1f9a2aac53c08176c91227604af5b35c4f17dc0e --- /dev/null +++ b/Documentation/devicetree/bindings/power/reset/restart-handler.yaml @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/power/reset/restart-handler.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Restart and shutdown handler generic binding + +maintainers: + - Sebastian Reichel + +description: + A restart or shutdown handler device is responsible for powering off the + system, e.g. by cutting off the power. A system might have several restart + handlers, which are usually tried from most precise to last resort. + +properties: + priority: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + A priority ranging from 0 to 255 according to the following guidelines:: + 0:: Restart handler of last resort, with limited restart capabilities. + 128:: Typical, default restart handler; use if no other restart handler + is expected to be available, and/or if restart functionality is + sufficient to restart the entire system. + 255:: Highest priority restart handler, will preempt all other restart handlers.
+ minimum: 0 + maximum: 255 + +additionalProperties: true diff --git a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml index 46de35861738ec76cdf42d34cb3f7a2e46c11b1a..11f1f98c1cdc1bd9c357306aa3f794819be39825 100644 --- a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml +++ b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/power/reset/xlnx,zynqmp-power.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Xilinx Zynq MPSoC Power Management Device Tree Bindings +title: Xilinx Zynq MPSoC Power Management maintainers: - Michal Simek diff --git a/Documentation/devicetree/bindings/power/supply/bq2415x.yaml b/Documentation/devicetree/bindings/power/supply/bq2415x.yaml index a3c00e0789183f45976ff6fc32c1c6366f56ed91..f7287ffd4b12c25132533198eddd0c3875519ed0 100644 --- a/Documentation/devicetree/bindings/power/supply/bq2415x.yaml +++ b/Documentation/devicetree/bindings/power/supply/bq2415x.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/power/supply/bq2415x.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for TI bq2415x Li-Ion Charger +title: TI bq2415x Li-Ion Charger maintainers: - Sebastian Reichel diff --git a/Documentation/devicetree/bindings/power/supply/bq24190.yaml b/Documentation/devicetree/bindings/power/supply/bq24190.yaml index 4884ec90e2b8bdfe0357d90467e97403254f822d..001c0ffb408d9f0af8880f6a5b2a09316cf2a81d 100644 --- a/Documentation/devicetree/bindings/power/supply/bq24190.yaml +++ b/Documentation/devicetree/bindings/power/supply/bq24190.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/power/supply/bq24190.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for TI BQ2419x Li-Ion Battery Charger +title: TI BQ2419x Li-Ion Battery Charger maintainers: - Sebastian Reichel diff --git a/Documentation/devicetree/bindings/power/supply/bq24257.yaml b/Documentation/devicetree/bindings/power/supply/bq24257.yaml index c7406bef0fa8fc23b1fb63129c9622ffdd9cb2ab..cc45939d385bd51dc9c33f4524d47c65b787951d 100644 --- a/Documentation/devicetree/bindings/power/supply/bq24257.yaml +++ b/Documentation/devicetree/bindings/power/supply/bq24257.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/power/supply/bq24257.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for bq24250, bq24251 and bq24257 Li-Ion Charger +title: Bq24250, bq24251 and bq24257 Li-Ion Charger maintainers: - Sebastian Reichel diff --git a/Documentation/devicetree/bindings/power/supply/bq24735.yaml b/Documentation/devicetree/bindings/power/supply/bq24735.yaml index dd9176ce71b347c8f89ce0b7d70d22666c3a0b04..388ee16f8a1e4515a3eacbf96060c33b266e884d 100644 --- a/Documentation/devicetree/bindings/power/supply/bq24735.yaml +++ b/Documentation/devicetree/bindings/power/supply/bq24735.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/power/supply/bq24735.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for TI BQ24735 Li-Ion Battery Charger +title: TI BQ24735 Li-Ion Battery Charger maintainers: - Sebastian Reichel diff --git a/Documentation/devicetree/bindings/power/supply/bq25890.yaml b/Documentation/devicetree/bindings/power/supply/bq25890.yaml index 204c0147188f2b06731c1b584fc5a501105d8875..dae27e93af0921343992bf8a666b1bfc712bb4bf 100644 --- a/Documentation/devicetree/bindings/power/supply/bq25890.yaml +++ 
b/Documentation/devicetree/bindings/power/supply/bq25890.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/power/supply/bq25890.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for bq25890, bq25892, bq25895 and bq25896 Li-Ion Charger +title: Bq25890, bq25892, bq25895 and bq25896 Li-Ion Charger maintainers: - Sebastian Reichel @@ -15,11 +15,15 @@ allOf: properties: compatible: - enum: - - ti,bq25890 - - ti,bq25892 - - ti,bq25895 - - ti,bq25896 + oneOf: + - enum: + - ti,bq25890 + - items: + - enum: + - ti,bq25892 + - ti,bq25895 + - ti,bq25896 + - const: ti,bq25890 reg: maxItems: 1 @@ -93,7 +97,7 @@ required: - ti,boost-voltage - ti,boost-max-current -additionalProperties: false +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/power/supply/bq27xxx.yaml b/Documentation/devicetree/bindings/power/supply/bq27xxx.yaml index 65fc6049efc1409fa8a91bc4a6c0f61174ed6c3e..347d4433adc5f560dd8ec0e77a79c60dc5604bd2 100644 --- a/Documentation/devicetree/bindings/power/supply/bq27xxx.yaml +++ b/Documentation/devicetree/bindings/power/supply/bq27xxx.yaml @@ -60,13 +60,11 @@ properties: monitored-battery: description: | - phandle of battery characteristics node. The fuel gauge uses the following battery properties: - energy-full-design-microwatt-hours - charge-full-design-microamp-hours - voltage-min-design-microvolt Both or neither of the *-full-design-*-hours properties must be set. - See Documentation/devicetree/bindings/power/supply/battery.yaml power-supplies: true diff --git a/Documentation/devicetree/bindings/power/supply/dlg,da9150-charger.yaml b/Documentation/devicetree/bindings/power/supply/dlg,da9150-charger.yaml index b289388952bf298321e392e4578b5ed2ab6731be..85bebebb285bd60f11d1e3a724f0a6b0d75d43b5 100644 --- a/Documentation/devicetree/bindings/power/supply/dlg,da9150-charger.yaml +++ b/Documentation/devicetree/bindings/power/supply/dlg,da9150-charger.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/power/supply/dlg,da9150-charger.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Dialog Semiconductor DA9150 Charger Power Supply bindings +title: Dialog Semiconductor DA9150 Charger Power Supply maintainers: - Sebastian Reichel diff --git a/Documentation/devicetree/bindings/power/supply/dlg,da9150-fuel-gauge.yaml b/Documentation/devicetree/bindings/power/supply/dlg,da9150-fuel-gauge.yaml index d47caf59d20461d40d7abe45687be761d793419a..7cc94b872937903c7238b7a273925c6499c2e119 100644 --- a/Documentation/devicetree/bindings/power/supply/dlg,da9150-fuel-gauge.yaml +++ b/Documentation/devicetree/bindings/power/supply/dlg,da9150-fuel-gauge.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/power/supply/dlg,da9150-fuel-gauge.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Dialog Semiconductor DA9150 Fuel-Gauge Power Supply bindings +title: Dialog Semiconductor DA9150 Fuel-Gauge Power Supply maintainers: - Sebastian Reichel diff --git a/Documentation/devicetree/bindings/power/supply/ingenic,battery.yaml b/Documentation/devicetree/bindings/power/supply/ingenic,battery.yaml index 46527038bf22743c36a414a96ec50dccb263cb33..741022b4449daf87472784ae4ad051d4952484a1 100644 --- a/Documentation/devicetree/bindings/power/supply/ingenic,battery.yaml +++ b/Documentation/devicetree/bindings/power/supply/ingenic,battery.yaml @@ -5,11 +5,13 @@ $id: http://devicetree.org/schemas/power/supply/ingenic,battery.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic JZ47xx battery 
bindings +title: Ingenic JZ47xx battery maintainers: - Artur Rojek +$ref: power-supply.yaml# + properties: compatible: oneOf: @@ -28,8 +30,6 @@ properties: monitored-battery: description: > - phandle to a "simple-battery" compatible node. - This property must be a phandle to a node using the format described in battery.yaml, with the following properties being required: - voltage-min-design-microvolt: drained battery voltage, diff --git a/Documentation/devicetree/bindings/power/supply/isp1704.yaml b/Documentation/devicetree/bindings/power/supply/isp1704.yaml index 7e3449ed70d4d8d904e0b26ea583da1d3f3a9b5a..fb3a812aa5a9ca6d374ae0d4e29ea4f81a92a53c 100644 --- a/Documentation/devicetree/bindings/power/supply/isp1704.yaml +++ b/Documentation/devicetree/bindings/power/supply/isp1704.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/power/supply/isp1704.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for NXP ISP1704 USB Charger Detection +title: NXP ISP1704 USB Charger Detection maintainers: - Sebastian Reichel diff --git a/Documentation/devicetree/bindings/power/supply/lltc,lt3651-charger.yaml b/Documentation/devicetree/bindings/power/supply/lltc,lt3651-charger.yaml index 76cedf95a12c96b0ec27025c282fa6b59a266204..d26ed5eabe28cc976afad55908a5df79ba40a7df 100644 --- a/Documentation/devicetree/bindings/power/supply/lltc,lt3651-charger.yaml +++ b/Documentation/devicetree/bindings/power/supply/lltc,lt3651-charger.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/power/supply/lltc,lt3651-charger.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Analog Devices LT3651 Charger Power Supply bindings +title: Analog Devices LT3651 Charger Power Supply maintainers: - Sebastian Reichel diff --git a/Documentation/devicetree/bindings/power/supply/lltc,ltc294x.yaml b/Documentation/devicetree/bindings/power/supply/lltc,ltc294x.yaml index 109b41a0d56c2d7488edcdd29fc19cbb71b2803d..774582cd3a2c3944762177c23d49098d59220ac3 100644 --- a/Documentation/devicetree/bindings/power/supply/lltc,ltc294x.yaml +++ b/Documentation/devicetree/bindings/power/supply/lltc,ltc294x.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/power/supply/lltc,ltc294x.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for LTC2941, LTC2942, LTC2943 and LTC2944 battery fuel gauges +title: LTC2941, LTC2942, LTC2943 and LTC2944 battery fuel gauges description: | All chips measure battery capacity. 
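As context for the bq25890 compatible rework above: the bq25892, bq25895 and bq25896 variants are now expected to list "ti,bq25890" as a fallback compatible. A minimal sketch of a conforming node follows; the node name, I2C address and property values are illustrative placeholders (interrupt wiring omitted):

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        charger@6b {
            /* variant compatible first, generic fallback last */
            compatible = "ti,bq25892", "ti,bq25890";
            reg = <0x6b>;
            ti,battery-regulation-voltage = <4200000>; /* placeholder uV */
            ti,charge-current = <1000000>;             /* placeholder uA */
            ti,termination-current = <50000>;
            ti,precharge-current = <128000>;
            ti,minimum-sys-voltage = <3500000>;
            ti,boost-voltage = <5000000>;
            ti,boost-max-current = <1000000>;
        };
    };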
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,ds2760.yaml b/Documentation/devicetree/bindings/power/supply/maxim,ds2760.yaml index c838efcf7e160cf630f16bfb69071b498cf955dc..5faa2418fe2ff56ce6b60536b2513dd769adfc2d 100644 --- a/Documentation/devicetree/bindings/power/supply/maxim,ds2760.yaml +++ b/Documentation/devicetree/bindings/power/supply/maxim,ds2760.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/power/supply/maxim,ds2760.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Maxim DS2760 DT bindings +title: Maxim DS2760 maintainers: - Sebastian Reichel diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max14656.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max14656.yaml index 070ef6f96e60d7a4f5ed3639cf87b881059a5438..711066b8cdb9eecbf4c919d7a3708b0ab4dddccb 100644 --- a/Documentation/devicetree/bindings/power/supply/maxim,max14656.yaml +++ b/Documentation/devicetree/bindings/power/supply/maxim,max14656.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/power/supply/maxim,max14656.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Maxim MAX14656 DT bindings +title: Maxim MAX14656 maintainers: - Sebastian Reichel diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml index aff5d0792e0fef3b04ceeb42278bcccae944f742..64a0edb7bc473cdb7c5284356b49165ed77312a9 100644 --- a/Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml +++ b/Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml @@ -59,6 +59,8 @@ properties: Voltage threshold to report battery as over voltage (in mV). Default is not to report over-voltage events. + power-supplies: true + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/power/supply/power-supply.yaml b/Documentation/devicetree/bindings/power/supply/power-supply.yaml index 2f672e6e8d72c08ab7efdab9ceb3fc91e488e26c..4e54c937973ee71be411964360c7ca8e43bd1034 100644 --- a/Documentation/devicetree/bindings/power/supply/power-supply.yaml +++ b/Documentation/devicetree/bindings/power/supply/power-supply.yaml @@ -18,4 +18,10 @@ properties: This property is added to a supply in order to list the devices which supply it power, referenced by their phandles. + monitored-battery: + $ref: /schemas/types.yaml#/definitions/phandle + description: + The battery (with "simple-battery" compatible) being monitored by this + power supply. 
+ additionalProperties: true diff --git a/Documentation/devicetree/bindings/power/supply/richtek,rt9455.yaml b/Documentation/devicetree/bindings/power/supply/richtek,rt9455.yaml index bce15101318e7ba27dbef5fb4f427093912b291b..27bebc1757ba36922162087a11cd439140c2ee07 100644 --- a/Documentation/devicetree/bindings/power/supply/richtek,rt9455.yaml +++ b/Documentation/devicetree/bindings/power/supply/richtek,rt9455.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/power/supply/richtek,rt9455.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for Richtek rt9455 battery charger +title: Richtek rt9455 battery charger maintainers: - Sebastian Reichel diff --git a/Documentation/devicetree/bindings/power/supply/rohm,bd99954.yaml b/Documentation/devicetree/bindings/power/supply/rohm,bd99954.yaml index 24b06957b4ca52058513f357667de1977f81b6ea..14d9b42eda27da20c254ef88db4124915838f478 100644 --- a/Documentation/devicetree/bindings/power/supply/rohm,bd99954.yaml +++ b/Documentation/devicetree/bindings/power/supply/rohm,bd99954.yaml @@ -18,6 +18,7 @@ description: | provides a Dual-source Battery Charger, two port BC1.2 detection and a Battery Monitor. +$ref: power-supply.yaml# properties: compatible: diff --git a/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml b/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml index eeb043f9bb4f2fc264c86a874b3f1ae497295b7b..a846a4d14ca94606687291faee4d88bdcd8f768c 100644 --- a/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml +++ b/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/power/supply/sc2731-charger.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Spreadtrum SC2731 PMICs battery charger binding +title: Spreadtrum SC2731 PMICs battery charger maintainers: - Sebastian Reichel @@ -28,7 +28,6 @@ properties: The charger uses the following battery properties - charge-term-current-microamp: current for charge termination phase. - constant-charge-voltage-max-microvolt: maximum constant input voltage. 
- See Documentation/devicetree/bindings/power/supply/battery.yaml additionalProperties: false diff --git a/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml b/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml index d90a838a17441ce841fc6d26f13a2d184582e7a6..de43e45a43b7c3bcdd1baca7ab588b9605292259 100644 --- a/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml +++ b/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/power/supply/sc27xx-fg.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Spreadtrum SC27XX PMICs Fuel Gauge Unit Power Supply Bindings +title: Spreadtrum SC27XX PMICs Fuel Gauge Unit Power Supply maintainers: - Sebastian Reichel diff --git a/Documentation/devicetree/bindings/power/supply/ti,lp8727.yaml b/Documentation/devicetree/bindings/power/supply/ti,lp8727.yaml index 93654e732cdaad0e197a228e4732f3263685adf9..ce6fbdba8f6b941c0e1392a518c387910ccd327c 100644 --- a/Documentation/devicetree/bindings/power/supply/ti,lp8727.yaml +++ b/Documentation/devicetree/bindings/power/supply/ti,lp8727.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/power/supply/ti,lp8727.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for TI/National Semiconductor LP8727 Charger +title: TI/National Semiconductor LP8727 Charger maintainers: - Sebastian Reichel diff --git a/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml b/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml index cd8e9a8907f84a6b278d0e5e6335a07510858dae..70d563d44c35698ce5f85091f18f630db7767c1d 100644 --- a/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml +++ b/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/pwm/microchip,corepwm.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Microchip IP corePWM controller bindings +title: Microchip IP corePWM controller maintainers: - Conor Dooley diff --git a/Documentation/devicetree/bindings/regulator/pwm-regulator.yaml b/Documentation/devicetree/bindings/regulator/pwm-regulator.yaml index 82b6f2fde422b761eed7caef7e1c2f277257aa92..7e58471097f8c70e2b6e8d806f533e7530cff8e7 100644 --- a/Documentation/devicetree/bindings/regulator/pwm-regulator.yaml +++ b/Documentation/devicetree/bindings/regulator/pwm-regulator.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/regulator/pwm-regulator.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for the Generic PWM Regulator +title: Generic PWM Regulator maintainers: - Brian Norris diff --git a/Documentation/devicetree/bindings/regulator/st,stm32-booster.yaml b/Documentation/devicetree/bindings/regulator/st,stm32-booster.yaml index 38bdaef4fa39782e9f84f52dd3247a151dad9320..c82f6f885d97d88a8a828be01eb3bbf1e68a4946 100644 --- a/Documentation/devicetree/bindings/regulator/st,stm32-booster.yaml +++ b/Documentation/devicetree/bindings/regulator/st,stm32-booster.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/regulator/st,stm32-booster.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 booster for ADC analog input switches bindings +title: STMicroelectronics STM32 booster for ADC analog input switches maintainers: - Fabrice Gasnier diff --git a/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.yaml b/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.yaml index 
a5a27ee0a9e67296e69784630bfb2424c1fde3ea..c1bf1f90490a07152ea60e11c92b4d7d5edcaa55 100644 --- a/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.yaml +++ b/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/regulator/st,stm32-vrefbuf.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 Voltage reference buffer bindings +title: STMicroelectronics STM32 Voltage reference buffer description: | Some STM32 devices embed a voltage reference buffer which can be used as diff --git a/Documentation/devicetree/bindings/remoteproc/amlogic,meson-mx-ao-arc.yaml b/Documentation/devicetree/bindings/remoteproc/amlogic,meson-mx-ao-arc.yaml index d892d29a656b32c0390c9e2ad9f698571f53301a..11cb42a3fdd1a778a50514b3d350e0d94773bae0 100644 --- a/Documentation/devicetree/bindings/remoteproc/amlogic,meson-mx-ao-arc.yaml +++ b/Documentation/devicetree/bindings/remoteproc/amlogic,meson-mx-ao-arc.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/remoteproc/amlogic,meson-mx-ao-arc.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Amlogic Meson AO ARC Remote Processor bindings +title: Amlogic Meson AO ARC Remote Processor description: Amlogic Meson6, Meson8, Meson8b and Meson8m2 SoCs embed an ARC core diff --git a/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml index 3a1f59ad79e23be43b094baf0081ae3e4082cf43..e732255b9019650fbe25386b1aa9eb506d70b195 100644 --- a/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml +++ b/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/remoteproc/fsl,imx-rproc.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: NXP i.MX Co-Processor Bindings +title: NXP i.MX Co-Processor description: This binding provides support for ARM Cortex M4 Co-processor found on some NXP iMX SoCs. 
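To illustrate the monitored-battery and power-supplies properties added to power-supply.yaml above, a hypothetical fuel gauge tied to a simple-battery node could look as follows; the ti,bq27421 compatible, the &usb_charger label and all values are assumptions for illustration only:

    battery: battery {
        compatible = "simple-battery";
        voltage-min-design-microvolt = <3200000>;
        /* per bq27xxx.yaml, set both *-full-design-* properties or neither */
        charge-full-design-microamp-hours = <2900000>;
        energy-full-design-microwatt-hours = <11000000>;
    };

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        fuel-gauge@55 {
            compatible = "ti,bq27421";
            reg = <0x55>;
            monitored-battery = <&battery>;  /* phandle to the simple-battery node */
            power-supplies = <&usb_charger>; /* supplier device defined elsewhere */
        };
    };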
diff --git a/Documentation/devicetree/bindings/remoteproc/ingenic,vpu.yaml b/Documentation/devicetree/bindings/remoteproc/ingenic,vpu.yaml index aaaaabad46ea6e9bf3fb5675402cf02105700b2e..85b1e43cab08d514ea2dbfdd8891d84374e940c0 100644 --- a/Documentation/devicetree/bindings/remoteproc/ingenic,vpu.yaml +++ b/Documentation/devicetree/bindings/remoteproc/ingenic,vpu.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/remoteproc/ingenic,vpu.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Ingenic Video Processing Unit bindings +title: Ingenic Video Processing Unit description: Inside the Video Processing Unit (VPU) of the recent JZ47xx SoCs from diff --git a/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml b/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml index 7e091eaffc18485fcf98168b84c34f0efc952063..895415772d1d6b9872f5bcba62f8376c72953719 100644 --- a/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml +++ b/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/remoteproc/mtk,scp.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Mediatek SCP Bindings +title: Mediatek SCP maintainers: - Tinghan Shen diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml index db9e0f0c2bea02334d2d708928abb3111a67dbe8..c1d9cbc359b4eb7cb2cc8704de16e9722f7721ff 100644 --- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml +++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/remoteproc/qcom,adsp.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm ADSP Peripheral Image Loader binding +title: Qualcomm ADSP Peripheral Image Loader maintainers: - Manivannan Sadhasivam diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,pil-info.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,pil-info.yaml index a7711e3c998cc62a7266d059a2e48e907e69b958..22219d16df20469c9a8271c0353228de53f675b3 100644 --- a/Documentation/devicetree/bindings/remoteproc/qcom,pil-info.yaml +++ b/Documentation/devicetree/bindings/remoteproc/qcom,pil-info.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/remoteproc/qcom,pil-info.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm peripheral image loader relocation info binding +title: Qualcomm peripheral image loader relocation info maintainers: - Bjorn Andersson diff --git a/Documentation/devicetree/bindings/remoteproc/renesas,rcar-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/renesas,rcar-rproc.yaml index a7d25fa920e5604423c15d870f10c4944530581c..7e0275d31a3c2183a50e610bcdd37424a26a6a32 100644 --- a/Documentation/devicetree/bindings/remoteproc/renesas,rcar-rproc.yaml +++ b/Documentation/devicetree/bindings/remoteproc/renesas,rcar-rproc.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/remoteproc/renesas,rcar-rproc.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Renesas R-Car remote processor controller bindings +title: Renesas R-Car remote processor controller maintainers: - Julien Massot diff --git a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml index da50f0e99fe2cbb46dcb2b66973ef09d9eaf1f6e..66b1e3efdaa3f8e59bdcf86613f4a93c4ff737c1 100644 --- a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml +++ 
b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/remoteproc/st,stm32-rproc.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: STMicroelectronics STM32 remote processor controller bindings +title: STMicroelectronics STM32 remote processor controller description: This document defines the binding for the remoteproc component that loads and diff --git a/Documentation/devicetree/bindings/reserved-memory/shared-dma-pool.yaml b/Documentation/devicetree/bindings/reserved-memory/shared-dma-pool.yaml index 618105f079beebc045f380bb575a9cf8504d1b10..47696073b66580292c33d3daf60ae652a9a8c413 100644 --- a/Documentation/devicetree/bindings/reserved-memory/shared-dma-pool.yaml +++ b/Documentation/devicetree/bindings/reserved-memory/shared-dma-pool.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/reserved-memory/shared-dma-pool.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: /reserved-memory DMA pool node bindings +title: /reserved-memory DMA pool maintainers: - devicetree-spec@vger.kernel.org diff --git a/Documentation/devicetree/bindings/reset/ti,sci-reset.yaml b/Documentation/devicetree/bindings/reset/ti,sci-reset.yaml index 4639d2cec5575453cc36f020f8ebe46b8e47035a..dcf9206e12becd23560f79ce72cd083107a78499 100644 --- a/Documentation/devicetree/bindings/reset/ti,sci-reset.yaml +++ b/Documentation/devicetree/bindings/reset/ti,sci-reset.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/reset/ti,sci-reset.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: TI-SCI reset controller node bindings +title: TI-SCI reset controller maintainers: - Nishanth Menon diff --git a/Documentation/devicetree/bindings/reset/ti,tps380x-reset.yaml b/Documentation/devicetree/bindings/reset/ti,tps380x-reset.yaml index afc835eda0efe76b4c9b049fb7d65d18f8fe9611..f436f2cf1df715d993124be3bedca8dd95ea1ffb 100644 --- a/Documentation/devicetree/bindings/reset/ti,tps380x-reset.yaml +++ b/Documentation/devicetree/bindings/reset/ti,tps380x-reset.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/reset/ti,tps380x-reset.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: TI TPS380x reset controller node bindings +title: TI TPS380x reset controller maintainers: - Marco Felsch diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml index 83ad177a9043b016a7fec3c7d7e8050b8f690e01..c6720764e765c70af73cc0dffc6e837888c74a19 100644 --- a/Documentation/devicetree/bindings/riscv/cpus.yaml +++ b/Documentation/devicetree/bindings/riscv/cpus.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/riscv/cpus.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: RISC-V bindings for 'cpus' DT nodes +title: RISC-V CPUs maintainers: - Paul Walmsley diff --git a/Documentation/devicetree/bindings/rng/ingenic,rng.yaml b/Documentation/devicetree/bindings/rng/ingenic,rng.yaml index b2e4a6a7f93a40ddab8d01906fea26962eff83d8..79a023cbfdbab6ba372572d189c9edbba3f56452 100644 --- a/Documentation/devicetree/bindings/rng/ingenic,rng.yaml +++ b/Documentation/devicetree/bindings/rng/ingenic,rng.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/rng/ingenic,rng.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for RNG in Ingenic SoCs +title: RNG in Ingenic SoCs maintainers: - 周琰杰 (Zhou Yanjie) diff --git a/Documentation/devicetree/bindings/rng/ingenic,trng.yaml 
b/Documentation/devicetree/bindings/rng/ingenic,trng.yaml index 044d9a065650c796897a8378033bb167ddbc8938..acaeb63caf24929b6b01ba6e51fd3343ed22d80e 100644 --- a/Documentation/devicetree/bindings/rng/ingenic,trng.yaml +++ b/Documentation/devicetree/bindings/rng/ingenic,trng.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/rng/ingenic,trng.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for DTRNG in Ingenic SoCs +title: DTRNG in Ingenic SoCs maintainers: - 周琰杰 (Zhou Yanjie) diff --git a/Documentation/devicetree/bindings/rng/intel,ixp46x-rng.yaml b/Documentation/devicetree/bindings/rng/intel,ixp46x-rng.yaml index 067e71e8ebe8cda70940db071e69e192e0791fe3..9f7590ce6b3d6e41769c89c4fa7692c2a6bbaf19 100644 --- a/Documentation/devicetree/bindings/rng/intel,ixp46x-rng.yaml +++ b/Documentation/devicetree/bindings/rng/intel,ixp46x-rng.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/rng/intel,ixp46x-rng.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Intel IXP46x RNG bindings +title: Intel IXP46x RNG description: | The Intel IXP46x has a random number generator at a fixed physical diff --git a/Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.yaml b/Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.yaml index abd134c9d4009c9721a50f4bcc2c7342d8e2f842..e8e4ab1e5b95db1213de533f91e69bfbfe60237b 100644 --- a/Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.yaml +++ b/Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.yaml @@ -16,7 +16,9 @@ maintainers: properties: compatible: - const: nuvoton,npcm750-rng + enum: + - nuvoton,npcm750-rng + - nuvoton,npcm845-rng reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml b/Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml index 48ab82abf50ecfadfd589bd5f05afbba4d0254eb..4673d6160ad9c861d684513c9fdfcfd8e04119b4 100644 --- a/Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml +++ b/Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/rng/silex-insight,ba431-rng.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Silex Insight BA431 RNG bindings +title: Silex Insight BA431 RNG description: | The BA431 hardware random number generator is an IP that is FIPS-140-2/3 diff --git a/Documentation/devicetree/bindings/rng/st,stm32-rng.yaml b/Documentation/devicetree/bindings/rng/st,stm32-rng.yaml index fcd86f822a9ce4675ec409b411bbc0a1f93abe2d..187b172d0cca5ccb47ffced6d88d10f90a13b02e 100644 --- a/Documentation/devicetree/bindings/rng/st,stm32-rng.yaml +++ b/Documentation/devicetree/bindings/rng/st,stm32-rng.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/rng/st,stm32-rng.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 RNG bindings +title: STMicroelectronics STM32 RNG description: | The STM32 hardware random number generator is a simple fixed purpose diff --git a/Documentation/devicetree/bindings/rng/xiphera,xip8001b-trng.yaml b/Documentation/devicetree/bindings/rng/xiphera,xip8001b-trng.yaml index 1e17e55762f174caf198e5434b7c4fadf7796a3b..d831322911705e1a8fc10c9929265f5a9bfb0489 100644 --- a/Documentation/devicetree/bindings/rng/xiphera,xip8001b-trng.yaml +++ b/Documentation/devicetree/bindings/rng/xiphera,xip8001b-trng.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/rng/xiphera,xip8001b-trng.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Xiphera 
XIP8001B-trng bindings +title: Xiphera XIP8001B-trng maintainers: - Atte Tommiska diff --git a/Documentation/devicetree/bindings/rtc/amlogic,meson6-rtc.yaml b/Documentation/devicetree/bindings/rtc/amlogic,meson6-rtc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8bf7d3a9be98c599ba5f87eae33ee870ca2c827b --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/amlogic,meson6-rtc.yaml @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rtc/amlogic,meson6-rtc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Amlogic Meson6, Meson8, Meson8b and Meson8m2 RTC + +maintainers: + - Neil Armstrong + - Martin Blumenstingl + +allOf: + - $ref: rtc.yaml# + - $ref: /schemas/nvmem/nvmem.yaml# + +properties: + compatible: + enum: + - amlogic,meson6-rtc + - amlogic,meson8-rtc + - amlogic,meson8b-rtc + - amlogic,meson8m2-rtc + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + interrupts: + maxItems: 1 + + resets: + maxItems: 1 + + vdd-supply: true + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + #include + #include + rtc: rtc@740 { + compatible = "amlogic,meson6-rtc"; + reg = <0x740 0x14>; + interrupts = ; + clocks = <&rtc32k_xtal>; + vdd-supply = <&rtc_vdd>; + resets = <&reset_rtc>; + #address-cells = <1>; + #size-cells = <1>; + + mac@0 { + reg = <0 6>; + }; + }; diff --git a/Documentation/devicetree/bindings/rtc/epson,rx8900.yaml b/Documentation/devicetree/bindings/rtc/epson,rx8900.yaml index d12855e7ffd7611192d01de366aa557475f34702..1df7c45d95c18ef90c8e996be5b83bc243099155 100644 --- a/Documentation/devicetree/bindings/rtc/epson,rx8900.yaml +++ b/Documentation/devicetree/bindings/rtc/epson,rx8900.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/rtc/epson,rx8900.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: EPSON RX8900 / Microcrystal RV8803 Real-Time Clock DT bindings +title: EPSON RX8900 / Microcrystal RV8803 Real-Time Clock maintainers: - Marek Vasut diff --git a/Documentation/devicetree/bindings/rtc/fsl,scu-rtc.yaml b/Documentation/devicetree/bindings/rtc/fsl,scu-rtc.yaml index 8c102b70d735a2a5e5f4996cc09b74ac0531d86a..dd1b1abf1e1bc81405230d1df1eaf6e52c822ba0 100644 --- a/Documentation/devicetree/bindings/rtc/fsl,scu-rtc.yaml +++ b/Documentation/devicetree/bindings/rtc/fsl,scu-rtc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/rtc/fsl,scu-rtc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: i.MX SCU Client Device Node - RTC bindings based on SCU Message Protocol +title: i.MX SCU Client Device Node - RTC Based on SCU Message Protocol maintainers: - Dong Aisheng diff --git a/Documentation/devicetree/bindings/rtc/haoyu,hym8563.txt b/Documentation/devicetree/bindings/rtc/haoyu,hym8563.txt deleted file mode 100644 index a8934fe2ab4c19c8186e0bad5dbca2a1b29f1579..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/rtc/haoyu,hym8563.txt +++ /dev/null @@ -1,30 +0,0 @@ -Haoyu Microelectronics HYM8563 Real Time Clock - -The HYM8563 provides basic rtc and alarm functionality -as well as a clock output of up to 32kHz. 
- -Required properties: -- compatible: should be: "haoyu,hym8563" -- reg: i2c address -- #clock-cells: the value should be 0 - -Optional properties: -- clock-output-names: From common clock binding -- interrupts: rtc alarm/event interrupt - -Example: - -hym8563: hym8563@51 { - compatible = "haoyu,hym8563"; - reg = <0x51>; - - interrupts = <13 IRQ_TYPE_EDGE_FALLING>; - - #clock-cells = <0>; -}; - -device { -... - clocks = <&hym8563>; -... -}; diff --git a/Documentation/devicetree/bindings/rtc/haoyu,hym8563.yaml b/Documentation/devicetree/bindings/rtc/haoyu,hym8563.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0b9f39ef0edc3956f2a0d0c63507151fbd1d7717 --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/haoyu,hym8563.yaml @@ -0,0 +1,56 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rtc/haoyu,hym8563.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Haoyu Microelectronics HYM8563 RTC + +maintainers: + - Alexandre Belloni + +properties: + compatible: + const: haoyu,hym8563 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + "#clock-cells": + const: 0 + + clock-output-names: + description: From common clock binding to override the default output clock name. + maxItems: 1 + + wakeup-source: + description: Enables wake up of host system on alarm. + +allOf: + - $ref: rtc.yaml + +unevaluatedProperties: false + +required: + - compatible + - reg + - "#clock-cells" + +examples: + - | + #include + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + rtc@51 { + compatible = "haoyu,hym8563"; + reg = <0x51>; + interrupts = <13 IRQ_TYPE_EDGE_FALLING>; + #clock-cells = <0>; + }; + }; diff --git a/Documentation/devicetree/bindings/rtc/ingenic,rtc.yaml b/Documentation/devicetree/bindings/rtc/ingenic,rtc.yaml index b235b2441997f4d661a803ff6e3d160453162f17..af78b67b3da4d859cfc4e3ccf09c2e5e4e6cf5c4 100644 --- a/Documentation/devicetree/bindings/rtc/ingenic,rtc.yaml +++ b/Documentation/devicetree/bindings/rtc/ingenic,rtc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/rtc/ingenic,rtc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs Real-Time Clock DT bindings +title: Ingenic SoCs Real-Time Clock maintainers: - Paul Cercueil diff --git a/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml b/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml index 23ab5bb4f395644f6b814705f6eb1e968b4e89e0..0a7aa29563c1c86d90aa31425beb4ba27963fc23 100644 --- a/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml +++ b/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml @@ -11,12 +11,16 @@ maintainers: properties: compatible: - enum: - - qcom,pm8058-rtc - - qcom,pm8921-rtc - - qcom,pm8941-rtc - - qcom,pm8018-rtc - - qcom,pmk8350-rtc + oneOf: + - enum: + - qcom,pm8058-rtc + - qcom,pm8921-rtc + - qcom,pm8941-rtc + - qcom,pmk8350-rtc + - items: + - enum: + - qcom,pm8018-rtc + - const: qcom,pm8921-rtc reg: minItems: 1 diff --git a/Documentation/devicetree/bindings/rtc/renesas,rzn1-rtc.yaml b/Documentation/devicetree/bindings/rtc/renesas,rzn1-rtc.yaml index 2d4741f51663959b0b330539fd8c86051ce74782..f6e0c613af678955589f133068fbbff778519b46 100644 --- a/Documentation/devicetree/bindings/rtc/renesas,rzn1-rtc.yaml +++ b/Documentation/devicetree/bindings/rtc/renesas,rzn1-rtc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/rtc/renesas,rzn1-rtc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Renesas RZ/N1 SoCs Real-Time 
Clock DT bindings +title: Renesas RZ/N1 SoCs Real-Time Clock maintainers: - Miquel Raynal diff --git a/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt b/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt deleted file mode 100644 index cdd196b1e9bdbe5151902f1ed80ea1a06b43a008..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt +++ /dev/null @@ -1,39 +0,0 @@ -ST M41T80 family of RTC and compatible - -Required properties: -- compatible: should be one of: - "st,m41t62", - "st,m41t65", - "st,m41t80", - "st,m41t81", - "st,m41t81s", - "st,m41t82", - "st,m41t83", - "st,m41t84", - "st,m41t85", - "st,m41t87", - "microcrystal,rv4162", -- reg: I2C bus address of the device - -Optional properties: -- interrupts: rtc alarm interrupt. -- clock-output-names: From common clock binding to override the default output - clock name -- wakeup-source: Enables wake up of host system on alarm - -Optional child node: -- clock: Provide this if the square wave pin is used as boot-enabled fixed clock. - -Example: - rtc@68 { - compatible = "st,m41t80"; - reg = <0x68>; - interrupt-parent = <&UIC0>; - interrupts = <0x9 0x8>; - - clock { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <32768>; - }; - }; diff --git a/Documentation/devicetree/bindings/rtc/rtc-meson.txt b/Documentation/devicetree/bindings/rtc/rtc-meson.txt deleted file mode 100644 index e921fe66a3622581b5a5b907f4b82f78210edcb9..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/rtc/rtc-meson.txt +++ /dev/null @@ -1,35 +0,0 @@ -* Amlogic Meson6, Meson8, Meson8b and Meson8m2 RTC - -Required properties: -- compatible: should be one of the following describing the hardware: - * "amlogic,meson6-rtc" - * "amlogic,meson8-rtc" - * "amlogic,meson8b-rtc" - * "amlogic,meson8m2-rtc" - -- reg: physical register space for the controller's memory mapped registers. -- interrupts: the interrupt line of the RTC block. -- clocks: reference to the external 32.768kHz crystal oscillator. -- vdd-supply: reference to the power supply of the RTC block. 
-- resets: reset controller reference to allow reset of the controller - -Optional properties for the battery-backed non-volatile memory: -- #address-cells: should be 1 to address the battery-backed non-volatile memory -- #size-cells: should be 1 to reference the battery-backed non-volatile memory - -Optional child nodes: -- see ../nvmem/nvmem.txt - -Example: - - rtc: rtc@740 { - compatible = "amlogic,meson6-rtc"; - reg = <0x740 0x14>; - interrupts = ; - clocks = <&rtc32k_xtal>; - vdd-supply = <&rtc_vdd>; - resets = <&reset RESET_RTC>; - - #address-cells = <1>; - #size-cells = <1>; - }; diff --git a/Documentation/devicetree/bindings/rtc/rtc.yaml b/Documentation/devicetree/bindings/rtc/rtc.yaml index 0ec3551f12dd306fd06c6caa288eb78ca479ed6f..c6fff5486fe67b398d928b8c67b81a80295d4117 100644 --- a/Documentation/devicetree/bindings/rtc/rtc.yaml +++ b/Documentation/devicetree/bindings/rtc/rtc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/rtc/rtc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: RTC Generic Binding +title: Real Time Clock Common Properties maintainers: - Alexandre Belloni diff --git a/Documentation/devicetree/bindings/rtc/sa1100-rtc.yaml b/Documentation/devicetree/bindings/rtc/sa1100-rtc.yaml index 482e5af215b3db7d977555e8fa0a1d99bc62bc51..b04b87ef6f33418b1dc9f185a1fa50265db967e2 100644 --- a/Documentation/devicetree/bindings/rtc/sa1100-rtc.yaml +++ b/Documentation/devicetree/bindings/rtc/sa1100-rtc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/rtc/sa1100-rtc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Marvell Real Time Clock controller bindings +title: Marvell Real Time Clock controller allOf: - $ref: rtc.yaml# diff --git a/Documentation/devicetree/bindings/rtc/st,m41t80.yaml b/Documentation/devicetree/bindings/rtc/st,m41t80.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fc9c6da6483f5ba4c94d2192c804940868d51479 --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/st,m41t80.yaml @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rtc/st,m41t80.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ST M41T80 family of RTC and compatible + +maintainers: + - Alexandre Belloni + +properties: + compatible: + enum: + - st,m41t62 + - st,m41t65 + - st,m41t80 + - st,m41t81 + - st,m41t81s + - st,m41t82 + - st,m41t83 + - st,m41t84 + - st,m41t85 + - st,m41t87 + - microcrystal,rv4162 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + "#clock-cells": + const: 1 + + clock-output-names: + maxItems: 1 + description: From common clock binding to override the default output clock name. 
+ + clock: + type: object + $ref: /schemas/clock/fixed-clock.yaml# + properties: + clock-frequency: + const: 32768 + +allOf: + - $ref: rtc.yaml + +unevaluatedProperties: false + +required: + - compatible + - reg + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + rtc@68 { + compatible = "st,m41t80"; + reg = <0x68>; + interrupt-parent = <&UIC0>; + interrupts = <0x9 0x8>; + + clock { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <32768>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml index 764717ce18733c5f6e2cee028df99a7e544327b7..9e66ed33cda481aa8d5b6e60b629f09f27433f0e 100644 --- a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml +++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/rtc/st,stm32-rtc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 Real Time Clock Bindings +title: STMicroelectronics STM32 Real Time Clock maintainers: - Gabriel Fernandez diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml index 6258f5f59b195f3e329672adbfd8df25080cce4d..34b8e59aa9d4d7e4e817a0460a73efc23f702755 100644 --- a/Documentation/devicetree/bindings/serial/8250.yaml +++ b/Documentation/devicetree/bindings/serial/8250.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/serial/8250.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: UART (Universal Asynchronous Receiver/Transmitter) bindings +title: UART (Universal Asynchronous Receiver/Transmitter) maintainers: - devicetree@vger.kernel.org diff --git a/Documentation/devicetree/bindings/serial/8250_omap.yaml b/Documentation/devicetree/bindings/serial/8250_omap.yaml index 7b34ec8fa90ecfea0bde3c9a4cdfbb60084685b4..53dc1212ad2e3f02095bc92f1c7f5270c30791aa 100644 --- a/Documentation/devicetree/bindings/serial/8250_omap.yaml +++ b/Documentation/devicetree/bindings/serial/8250_omap.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/serial/8250_omap.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for 8250 compliant UARTs on TI's OMAP2+ and K3 SoCs +title: 8250 compliant UARTs on TI's OMAP2+ and K3 SoCs maintainers: - Vignesh Raghavendra diff --git a/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml b/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml index 6d176588df47d84571e49a3ce9f9bf2e05c50a8c..89c462653e2d332c118526a51ee447e4c6eed526 100644 --- a/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml +++ b/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/serial/brcm,bcm7271-uart.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Broadcom 8250 based serial port devicetree bindings +title: Broadcom 8250 based serial port maintainers: - Al Cooper diff --git a/Documentation/devicetree/bindings/serial/ingenic,uart.yaml b/Documentation/devicetree/bindings/serial/ingenic,uart.yaml index 9ca7a18ecd8b189abb18384ddd3a1597d550ce72..d5f153bdeb0d27db7c5fc09b4783ed2ae63556aa 100644 --- a/Documentation/devicetree/bindings/serial/ingenic,uart.yaml +++ b/Documentation/devicetree/bindings/serial/ingenic,uart.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/serial/ingenic,uart.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs UART controller devicetree 
bindings +title: Ingenic SoCs UART controller maintainers: - Paul Cercueil @@ -20,6 +20,7 @@ properties: oneOf: - enum: - ingenic,jz4740-uart + - ingenic,jz4750-uart - ingenic,jz4760-uart - ingenic,jz4780-uart - ingenic,x1000-uart @@ -31,6 +32,9 @@ properties: - items: - const: ingenic,jz4725b-uart - const: ingenic,jz4740-uart + - items: + - const: ingenic,jz4755-uart + - const: ingenic,jz4750-uart reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml index f930e7f1349fcea8e906dccd1cad0af7c883ba00..f81f2d67a1ed453c45183c84ca2acb4d2fef1bc3 100644 --- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml +++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml @@ -67,6 +67,7 @@ properties: - enum: - renesas,scif-r8a779a0 # R-Car V3U - renesas,scif-r8a779f0 # R-Car S4-8 + - renesas,scif-r8a779g0 # R-Car V4H - const: renesas,rcar-gen4-scif # R-Car Gen4 - const: renesas,scif # generic SCIF compatible UART diff --git a/Documentation/devicetree/bindings/serial/rs485.yaml b/Documentation/devicetree/bindings/serial/rs485.yaml index 90a1bab40f050743e72fa5468dc244cfdbd75c50..789763cf427aceacc9ff584a42389c56e24e34a2 100644 --- a/Documentation/devicetree/bindings/serial/rs485.yaml +++ b/Documentation/devicetree/bindings/serial/rs485.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/serial/rs485.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: RS485 serial communications Bindings +title: RS485 serial communications description: The RTS signal is capable of automatically controlling line direction for the built-in half-duplex mode. The properties described diff --git a/Documentation/devicetree/bindings/serial/serial.yaml b/Documentation/devicetree/bindings/serial/serial.yaml index c75ba3fb64650050715a074f884cf3471e74a86a..11e822bf09e2c708801d0497fa414f21067ed79a 100644 --- a/Documentation/devicetree/bindings/serial/serial.yaml +++ b/Documentation/devicetree/bindings/serial/serial.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/serial/serial.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Serial Interface Generic DT Bindings +title: Serial Interface Generic maintainers: - Rob Herring diff --git a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml index 333dc42722d259d872a5cc9bb2c025686b3afa97..85876c668f6d0f6aeb4414122246b57ed38af80d 100644 --- a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml +++ b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# maintainers: - Erwan Le Ray -title: STMicroelectronics STM32 USART bindings +title: STMicroelectronics STM32 USART properties: compatible: diff --git a/Documentation/devicetree/bindings/serial/xlnx,opb-uartlite.yaml b/Documentation/devicetree/bindings/serial/xlnx,opb-uartlite.yaml index f7617b88c7c36189b9bac7dd09e6f410242f282c..2f4390e8d4e88ba79c4dcb47da0e2a19ef674660 100644 --- a/Documentation/devicetree/bindings/serial/xlnx,opb-uartlite.yaml +++ b/Documentation/devicetree/bindings/serial/xlnx,opb-uartlite.yaml @@ -67,8 +67,7 @@ allOf: - if: properties: xlnx,use-parity: - contains: - const: 1 + const: 1 then: required: - xlnx,odd-parity diff --git a/Documentation/devicetree/bindings/serio/ps2-gpio.yaml b/Documentation/devicetree/bindings/serio/ps2-gpio.yaml index 
a63d9172346f1acd41952653665784abb729cd53..99848bc34f6e6fbb947c50ff45d05fbb611922f6 100644 --- a/Documentation/devicetree/bindings/serio/ps2-gpio.yaml +++ b/Documentation/devicetree/bindings/serio/ps2-gpio.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/serio/ps2-gpio.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for GPIO based PS/2 +title: GPIO based PS/2 maintainers: - Danilo Krummrich diff --git a/Documentation/devicetree/bindings/slimbus/bus.txt b/Documentation/devicetree/bindings/slimbus/bus.txt deleted file mode 100644 index bbe871f82a8b97fcf1547bac924a1a15fdd342ac..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/slimbus/bus.txt +++ /dev/null @@ -1,60 +0,0 @@ -SLIM(Serial Low Power Interchip Media Bus) bus - -SLIMbus is a 2-wire bus, and is used to communicate with peripheral -components like audio-codec. - -Required property for SLIMbus controller node: -- compatible - name of SLIMbus controller - -Child nodes: -Every SLIMbus controller node can contain zero or more child nodes -representing slave devices on the bus. Every SLIMbus slave device is -uniquely determined by the enumeration address containing 4 fields: -Manufacturer ID, Product code, Device index, and Instance value for -the device. -If child node is not present and it is instantiated after device -discovery (slave device reporting itself present). - -In some cases it may be necessary to describe non-probeable device -details such as non-standard ways of powering up a device. In -such cases, child nodes for those devices will be present as -slaves of the SLIMbus controller, as detailed below. - -Required property for SLIMbus child node if it is present: -- reg - Should be ('Device index', 'Instance ID') from SLIMbus - Enumeration Address. - Device Index Uniquely identifies multiple Devices within - a single Component. - Instance ID Is for the cases where multiple Devices of the - same type or Class are attached to the bus. - -- compatible -"slimMID,PID". The textual representation of Manufacturer ID, - Product Code, shall be in lower case hexadecimal with leading - zeroes suppressed - -Optional property for SLIMbus child node if it is present: -- slim-ifc-dev - Should be phandle to SLIMBus Interface device. - Required for devices which deal with streams. 
- -SLIMbus example for Qualcomm's slimbus manager component: - - slim@28080000 { - compatible = "qcom,apq8064-slim", "qcom,slim"; - reg = <0x28080000 0x2000>, - interrupts = <0 33 0>; - clocks = <&lcc SLIMBUS_SRC>, <&lcc AUDIO_SLIMBUS_CLK>; - clock-names = "iface", "core"; - #address-cells = <2>; - #size-cell = <0>; - - codec_ifd: ifd@0,0{ - compatible = "slim217,60"; - reg = <0 0>; - }; - - codec: wcd9310@1,0{ - compatible = "slim217,60"; - reg = <1 0>; - slim-ifc-dev = <&codec_ifd>; - }; - }; diff --git a/Documentation/devicetree/bindings/slimbus/qcom,slim-ngd.yaml b/Documentation/devicetree/bindings/slimbus/qcom,slim-ngd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..abf61c15246eb62262bec1e05ade868df69d2f6b --- /dev/null +++ b/Documentation/devicetree/bindings/slimbus/qcom,slim-ngd.yaml @@ -0,0 +1,120 @@ +# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/slimbus/qcom,slim-ngd.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm SoC SLIMBus Non Generic Device (NGD) Controller + +maintainers: + - Krzysztof Kozlowski + - Srinivas Kandagatla + +description: + SLIMBus NGD controller is a light-weight driver responsible for communicating + with SLIMBus slaves directly over the bus using messaging interface and + communicating with master component residing on ADSP for bandwidth and + data-channel management + +properties: + compatible: + enum: + - qcom,slim-ngd-v1.5.0 # for MSM8996 + - qcom,slim-ngd-v2.1.0 # for SDM845 + + reg: + maxItems: 1 + + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + dmas: + maxItems: 2 + + dma-names: + items: + - const: rx + - const: tx + + interrupts: + maxItems: 1 + + iommus: + maxItems: 1 + +patternProperties: + "^slim@[0-9a-f]+$": + type: object + $ref: slimbus.yaml# + description: + Each subnode represents an instance of NGD + + properties: + reg: + maxItems: 1 + + unevaluatedProperties: false + +required: + - compatible + - reg + - "#address-cells" + - "#size-cells" + - dmas + - dma-names + - interrupts + +additionalProperties: false + +examples: + - | + #include + #include + + slim-ngd@171c0000 { + compatible = "qcom,slim-ngd-v2.1.0"; + reg = <0x171c0000 0x2c000>; + interrupts = ; + + dmas = <&slimbam 3>, <&slimbam 4>; + dma-names = "rx", "tx"; + iommus = <&apps_smmu 0x1806 0x0>; + #address-cells = <1>; + #size-cells = <0>; + + slim@1 { + reg = <1>; + #address-cells = <2>; + #size-cells = <0>; + + codec@1,0 { + compatible = "slim217,250"; + reg = <1 0>; + slim-ifc-dev = <&wcd9340_ifd>; + + #sound-dai-cells = <1>; + + interrupts-extended = <&tlmm 54 IRQ_TYPE_LEVEL_HIGH>; + interrupt-controller; + #interrupt-cells = <1>; + + #clock-cells = <0>; + clock-frequency = <9600000>; + clock-output-names = "mclk"; + qcom,micbias1-microvolt = <1800000>; + qcom,micbias2-microvolt = <1800000>; + qcom,micbias3-microvolt = <1800000>; + qcom,micbias4-microvolt = <1800000>; + + #address-cells = <1>; + #size-cells = <1>; + + reset-gpios = <&tlmm 64 GPIO_ACTIVE_HIGH>; + + /* Rest of the WCD9340 codec */ + }; + }; + }; diff --git a/Documentation/devicetree/bindings/slimbus/qcom,slim.yaml b/Documentation/devicetree/bindings/slimbus/qcom,slim.yaml new file mode 100644 index 0000000000000000000000000000000000000000..883bda58ca977c282d69520e237c93e027c2c545 --- /dev/null +++ b/Documentation/devicetree/bindings/slimbus/qcom,slim.yaml @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +%YAML 1.2 +--- +$id: 
http://devicetree.org/schemas/slimbus/qcom,slim.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm SoC SLIMbus controller + +maintainers: + - Krzysztof Kozlowski + - Srinivas Kandagatla + +description: + SLIMbus controller used when applications processor controls SLIMbus master + component. + +allOf: + - $ref: slimbus.yaml# + +properties: + compatible: + items: + - enum: + - qcom,apq8064-slim + - const: qcom,slim + + reg: + items: + - description: Physical address of controller register blocks + - description: SLEW RATE register + + reg-names: + items: + - const: ctrl + - const: slew + + clocks: + items: + - description: Interface clock for this controller + - description: Clock for controller core's BAM + + clock-names: + items: + - const: iface + - const: core + + interrupts: + maxItems: 1 + +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + - interrupts + +unevaluatedProperties: false + +examples: + - | + #include + #include + #include + + soc { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + slim@28080000 { + compatible = "qcom,apq8064-slim", "qcom,slim"; + reg = <0x28080000 0x2000>, <0x80207c 4>; + reg-names = "ctrl", "slew"; + interrupts = ; + clocks = <&lcc SLIMBUS_SRC>, <&lcc AUDIO_SLIMBUS_CLK>; + clock-names = "iface", "core"; + #address-cells = <2>; + #size-cells = <0>; + + audio-codec@1,0 { + compatible = "slim217,60"; + reg = <1 0>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/slimbus/slim-ngd-qcom-ctrl.txt b/Documentation/devicetree/bindings/slimbus/slim-ngd-qcom-ctrl.txt deleted file mode 100644 index e94a2ad3a710d3b990336be89215c22625fe7497..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/slimbus/slim-ngd-qcom-ctrl.txt +++ /dev/null @@ -1,84 +0,0 @@ -Qualcomm SLIMBus Non Generic Device (NGD) Controller binding - -SLIMBus NGD controller is a light-weight driver responsible for communicating -with SLIMBus slaves directly over the bus using messaging interface and -communicating with master component residing on ADSP for bandwidth and -data-channel management - -Please refer to slimbus/bus.txt for details of the common SLIMBus bindings. - -- compatible: - Usage: required - Value type: - Definition: must be "qcom,slim-ngd-v.." - must be one of the following. - "qcom,slim-ngd-v1.5.0" for MSM8996 - "qcom,slim-ngd-v2.1.0" for SDM845 - -- reg: - Usage: required - Value type: - Definition: must specify the base address and size of the controller - register space. -- dmas - Usage: required - Value type: - Definition: List of rx and tx dma channels - -- dma-names - Usage: required - Value type: - Definition: must be "rx" and "tx". - -- interrupts: - Usage: required - Value type: - Definition: must list controller IRQ. - -#address-cells - Usage: required - Value type: - Definition: Should be 1, reflecting the instance id of ngd. - -#size-cells - Usage: required - Value type: - Definition: Should be 0 - -= NGD Devices -Each subnode represents an instance of NGD, must contain the following -properties: - -- reg: - Usage: required - Value type: - Definition: Should be instance id of ngd. - -#address-cells - Usage: required - Refer to slimbus/bus.txt for details of the common SLIMBus bindings. - -#size-cells - Usage: required - Refer to slimbus/bus.txt for details of the common SLIMBus bindings.
- -= EXAMPLE - -slim@91c0000 { - compatible = "qcom,slim-ngd-v1.5.0"; - reg = <0x91c0000 0x2c000>; - interrupts = <0 163 0>; - dmas = <&slimbam 3>, <&slimbam 4>; - dma-names = "rx", "tx"; - #address-cells = <1>; - #size-cells = <0>; - ngd@1 { - reg = <1>; - #address-cells = <1>; - #size-cells = <1>; - codec@1 { - compatible = "slim217,1a0"; - reg = <1 0>; - }; - }; -}; diff --git a/Documentation/devicetree/bindings/slimbus/slim-qcom-ctrl.txt b/Documentation/devicetree/bindings/slimbus/slim-qcom-ctrl.txt deleted file mode 100644 index 922dcb8ff24ab23df7a177391e478b692fc0f708..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/slimbus/slim-qcom-ctrl.txt +++ /dev/null @@ -1,39 +0,0 @@ -Qualcomm SLIMbus controller -This controller is used if applications processor driver controls SLIMbus -master component. - -Required properties: - - - #address-cells - refer to Documentation/devicetree/bindings/slimbus/bus.txt - - #size-cells - refer to Documentation/devicetree/bindings/slimbus/bus.txt - - - reg : Offset and length of the register region(s) for the device - - reg-names : Register region name(s) referenced in reg above - Required register resource entries are: - "ctrl": Physical address of controller register blocks - "slew": required for "qcom,apq8064-slim" SOC. - - compatible : should be "qcom,-slim" for SOC specific compatible - followed by "qcom,slim" for fallback. - - interrupts : Interrupt number used by this controller - - clocks : Interface and core clocks used by this SLIMbus controller - - clock-names : Required clock-name entries are: - "iface" : Interface clock for this controller - "core" : Interrupt for controller core's BAM - -Example: - - slim@28080000 { - compatible = "qcom,apq8064-slim", "qcom,slim"; - reg = <0x28080000 0x2000>, <0x80207C 4>; - reg-names = "ctrl", "slew"; - interrupts = <0 33 0>; - clocks = <&lcc SLIMBUS_SRC>, <&lcc AUDIO_SLIMBUS_CLK>; - clock-names = "iface", "core"; - #address-cells = <2>; - #size-cell = <0>; - - wcd9310: audio-codec@1,0{ - compatible = "slim217,60"; - reg = <1 0>; - }; - }; diff --git a/Documentation/devicetree/bindings/slimbus/slimbus.yaml b/Documentation/devicetree/bindings/slimbus/slimbus.yaml new file mode 100644 index 0000000000000000000000000000000000000000..22513fb7c59a8be950816c900dba0450fa47f2ab --- /dev/null +++ b/Documentation/devicetree/bindings/slimbus/slimbus.yaml @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/slimbus/slimbus.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: SLIM (Serial Low Power Interchip Media) bus + +maintainers: + - Srinivas Kandagatla + +description: + SLIMbus is a 2-wire bus, and is used to communicate with peripheral + components such as audio codecs. + +properties: + $nodename: + pattern: "^slim(@.*|-[0-9a-f])*$" + + "#address-cells": + const: 2 + + "#size-cells": + const: 0 + +patternProperties: + "^.*@[0-9a-f]+,[0-9a-f]+$": + type: object + description: | + Every SLIMbus controller node can contain zero or more child nodes + representing slave devices on the bus. Every SLIMbus slave device is + uniquely determined by the enumeration address containing 4 fields: + Manufacturer ID, Product code, Device index, and Instance value for the + device. + + A child node may be omitted if the device is instantiated after device + discovery (the slave device reporting itself present).
+ + In some cases it may be necessary to describe non-probeable device + details such as non-standard ways of powering up a device. In such cases, + child nodes for those devices will be present as slaves of the SLIMbus + controller. + + properties: + compatible: + pattern: "^slim[0-9a-f]+,[0-9a-f]+$" + + reg: + maxItems: 1 + description: | + Pair of (device index, instance ID), where: + - Device index, which uniquely identifies multiple devices within a + single component. + - Instance ID, which can be used where multiple devices of + the same type or class are attached to the bus. + + required: + - compatible + - reg + + additionalProperties: true + +required: + - "#address-cells" + - "#size-cells" + +additionalProperties: true + +examples: + - | + #include + #include + #include + + soc { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + slim@28080000 { + compatible = "qcom,apq8064-slim", "qcom,slim"; + reg = <0x28080000 0x2000>, <0x80207c 4>; + reg-names = "ctrl", "slew"; + interrupts = ; + clocks = <&lcc SLIMBUS_SRC>, <&lcc AUDIO_SLIMBUS_CLK>; + clock-names = "iface", "core"; + #address-cells = <2>; + #size-cells = <0>; + + audio-codec@1,0 { + compatible = "slim217,60"; + reg = <1 0>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/soc/mediatek/mtk-svs.yaml b/Documentation/devicetree/bindings/soc/mediatek/mtk-svs.yaml index d911fa2d40ef9f9e7abd71f3fbecfa42e8d8558f..f21eb907ee902e6dfbf5f2e66bcad424a736db89 100644 --- a/Documentation/devicetree/bindings/soc/mediatek/mtk-svs.yaml +++ b/Documentation/devicetree/bindings/soc/mediatek/mtk-svs.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/soc/mediatek/mtk-svs.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: MediaTek Smart Voltage Scaling (SVS) Device Tree Bindings +title: MediaTek Smart Voltage Scaling (SVS) maintainers: - Roger Lu diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml index 98d087cf4fc03d50a2dda481152a1ed0f8d464f8..ab607efbb64c1218dd75cfd5edc0fc5217dcc7de 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/soc/qcom/qcom,aoss-qmp.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Always-On Subsystem side channel binding +title: Qualcomm Always-On Subsystem side channel maintainers: - Bjorn Andersson diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.yaml index a6bc3197d5ddc0d7679f61e7e7feff2f47d7b31e..6026c21736d880e4685ff0b227fd66c8b2c64cb7 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.yaml +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/soc/qcom/qcom,apr.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Qualcomm APR/GPR (Asynchronous/Generic Packet Router) binding +title: Qualcomm APR/GPR (Asynchronous/Generic Packet Router) maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml index 2bf5293fc9952e30ba9fea3bef7198eeb2cdd4a9..ab4df020528534c0349161c5aa95124adaf24ca7 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml @@ -21,20 +21,19
@@ properties: compatible: enum: - qcom,geni-se-qup + - qcom,geni-se-i2c-master-hub reg: description: QUP wrapper common register address and length. maxItems: 1 clock-names: - items: - - const: m-ahb - - const: s-ahb + minItems: 1 + maxItems: 2 clocks: - items: - - description: Master AHB Clock - - description: Slave AHB Clock + minItems: 1 + maxItems: 2 "#address-cells": const: 2 @@ -81,6 +80,39 @@ patternProperties: description: GENI Serial Engine based UART Controller. $ref: /schemas/serial/qcom,serial-geni-qcom.yaml# +allOf: + - if: + properties: + compatible: + contains: + const: qcom,geni-se-i2c-master-hub + then: + properties: + clock-names: + items: + - const: s-ahb + + clocks: + items: + - description: Slave AHB Clock + + iommus: false + + patternProperties: + "spi@[0-9a-f]+$": false + "serial@[0-9a-f]+$": false + else: + properties: + clock-names: + items: + - const: m-ahb + - const: s-ahb + + clocks: + items: + - description: Master AHB Clock + - description: Slave AHB Clock + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smem.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smem.yaml index 4149cf2b66be3915756aff10caaea32af14de608..497614ddf0058368f357af8ec9a49800a080a01d 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,smem.yaml +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smem.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/soc/qcom/qcom,smem.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Qualcomm Shared Memory Manager binding +title: Qualcomm Shared Memory Manager maintainers: - Andy Gross diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,spm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,spm.yaml index 38818c37c3ea8fe29bfbbbc0582578a424d0767e..aca3d40bcccbf2deb8e669a3a3fd8e1c7ecda799 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,spm.yaml +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,spm.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/soc/qcom/qcom,spm.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Qualcomm Subsystem Power Manager binding +title: Qualcomm Subsystem Power Manager maintainers: - Andy Gross diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom-stats.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom-stats.yaml index 48eda4d0d391d73de70e862cc4b07fa1108fad39..7ab8cfff18c196ed4ced3604464d506ed86062f1 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom-stats.yaml +++ b/Documentation/devicetree/bindings/soc/qcom/qcom-stats.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/soc/qcom/qcom-stats.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Technologies, Inc. (QTI) Stats bindings +title: Qualcomm Technologies, Inc. 
(QTI) Stats maintainers: - Maulik Shah diff --git a/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml b/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml index 60b49562ff69496fe61766d7d5dc0739ea2460b7..a6836904a4f83592ed8ce144ea612441b8795400 100644 --- a/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml +++ b/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/soc/samsung/exynos-usi.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Samsung's Exynos USI (Universal Serial Interface) binding +title: Samsung's Exynos USI (Universal Serial Interface) maintainers: - Sam Protsenko diff --git a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml index 9e6cb4ee9755d7d2988cf509858eca21842ac2b5..5df7688a1e1ce0e32f9598903f376e6b8eda180a 100644 --- a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml +++ b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/soc/ti/sci-pm-domain.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: TI-SCI generic power domain node bindings +title: TI-SCI generic power domain maintainers: - Nishanth Menon diff --git a/Documentation/devicetree/bindings/sound/audio-graph-port.yaml b/Documentation/devicetree/bindings/sound/audio-graph-port.yaml index 64654ceef20899d66bde9b27f120fd864d4013bc..f5b8b6d13077cc9ee72a0d7a478dd9b10becd52b 100644 --- a/Documentation/devicetree/bindings/sound/audio-graph-port.yaml +++ b/Documentation/devicetree/bindings/sound/audio-graph-port.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/audio-graph-port.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Audio Graph Card 'port' Node Bindings +title: Audio Graph Card 'port' maintainers: - Kuninori Morimoto diff --git a/Documentation/devicetree/bindings/sound/cirrus,cs42l51.yaml b/Documentation/devicetree/bindings/sound/cirrus,cs42l51.yaml index 422cbf38bfdbc87610c35dd3d12af39755abda08..670b67ec0b617938a5e936e4b4f23eb5486b3eeb 100644 --- a/Documentation/devicetree/bindings/sound/cirrus,cs42l51.yaml +++ b/Documentation/devicetree/bindings/sound/cirrus,cs42l51.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/cirrus,cs42l51.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: CS42L51 audio codec DT bindings +title: CS42L51 audio codec maintainers: - Olivier Moysan diff --git a/Documentation/devicetree/bindings/sound/ingenic,aic.yaml b/Documentation/devicetree/bindings/sound/ingenic,aic.yaml index ba44406c9cafc17cbd9f1e68ba1e3fb9ccfcfbd1..c59a7cd9eaa9b222a1f5c7352d13cc75d2217848 100644 --- a/Documentation/devicetree/bindings/sound/ingenic,aic.yaml +++ b/Documentation/devicetree/bindings/sound/ingenic,aic.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/ingenic,aic.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs AC97 / I2S Controller (AIC) DT bindings +title: Ingenic SoCs AC97 / I2S Controller (AIC) maintainers: - Paul Cercueil diff --git a/Documentation/devicetree/bindings/sound/ingenic,codec.yaml b/Documentation/devicetree/bindings/sound/ingenic,codec.yaml index a07d607e9b937f30a64c20a722c5e1cd9ead87c4..b58b90850e35aba5515ee4f91448bea0aa161805 100644 --- a/Documentation/devicetree/bindings/sound/ingenic,codec.yaml +++ b/Documentation/devicetree/bindings/sound/ingenic,codec.yaml @@ -4,7 +4,7 @@ $id: 
http://devicetree.org/schemas/sound/ingenic,codec.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic JZ47xx internal codec DT bindings +title: Ingenic JZ47xx internal codec maintainers: - Paul Cercueil diff --git a/Documentation/devicetree/bindings/sound/marvell,mmp-sspa.yaml b/Documentation/devicetree/bindings/sound/marvell,mmp-sspa.yaml index 92d896e0d3238e6f0f1bf5735d69b54fbc071010..f302fe89a25362eb9faf2a6e3e318666bd1fc04e 100644 --- a/Documentation/devicetree/bindings/sound/marvell,mmp-sspa.yaml +++ b/Documentation/devicetree/bindings/sound/marvell,mmp-sspa.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/marvell,mmp-sspa.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Marvel SSPA Digital Audio Interface Bindings +title: Marvell SSPA Digital Audio Interface maintainers: - Lubomir Rintel diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.yaml b/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.yaml index 5e26b3e9db2cd6f7755d6af1b56b355a23800ce8..bb42220916b350dcef1caf1f51f2866add79046b 100644 --- a/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/qcom,lpass-cpu.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm Technologies Inc. LPASS CPU dai driver bindings +title: Qualcomm Technologies Inc. LPASS CPU dai driver maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml b/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml index 23564fd394a2c8918547562a701d630d05694132..79c6f8da1319c816269b713ad8ce90436cd585e7 100644 --- a/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/qcom,lpass-rx-macro.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: LPASS(Low Power Audio Subsystem) RX Macro audio codec DT bindings +title: LPASS(Low Power Audio Subsystem) RX Macro audio codec maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-tx-macro.yaml b/Documentation/devicetree/bindings/sound/qcom,lpass-tx-macro.yaml index 38708578ee29008e41ca6274c3c5584ed9500b87..66431aade3b70a1794bd317af60a9a2cadd8cefd 100644 --- a/Documentation/devicetree/bindings/sound/qcom,lpass-tx-macro.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,lpass-tx-macro.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/qcom,lpass-tx-macro.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: LPASS(Low Power Audio Subsystem) TX Macro audio codec DT bindings +title: LPASS(Low Power Audio Subsystem) TX Macro audio codec maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-va-macro.yaml b/Documentation/devicetree/bindings/sound/qcom,lpass-va-macro.yaml index 188883a2e67149c6e53923160c7fc387f68a00ac..26f0343b5aac038e440abe87e3cae27a1ff33cd0 100644 --- a/Documentation/devicetree/bindings/sound/qcom,lpass-va-macro.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,lpass-va-macro.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/qcom,lpass-va-macro.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: LPASS(Low Power Audio Subsystem) VA Macro audio codec DT bindings +title: LPASS(Low Power Audio Subsystem) VA Macro audio codec
maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml b/Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml index bebca3e3f86fe22d8485302c5770ed90d16f98e4..2bf8d082f8f1044d64bb9493eea892f14b629378 100644 --- a/Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/qcom,lpass-wsa-macro.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: LPASS(Low Power Audio Subsystem) VA Macro audio codec DT bindings +title: LPASS(Low Power Audio Subsystem) WSA Macro audio codec maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/sound/qcom,q6apm-dai.yaml b/Documentation/devicetree/bindings/sound/qcom,q6apm-dai.yaml index 73a4afad5a743340a3c22782cf88a39d1cdf7f7e..a53c9ef938fa13440f9189d06025d49c8afbefa6 100644 --- a/Documentation/devicetree/bindings/sound/qcom,q6apm-dai.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,q6apm-dai.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/sound/qcom,q6apm-dai.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Qualcomm Audio Process Manager Digital Audio Interfaces binding +title: Qualcomm Audio Process Manager Digital Audio Interfaces maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/sound/qcom,q6dsp-lpass-clocks.yaml b/Documentation/devicetree/bindings/sound/qcom,q6dsp-lpass-clocks.yaml index aa6c0ecba5cfcc48233b274fa8f4653600b1826c..1168410f6fbd7e0ab1a29a1ed026815c0df58f4a 100644 --- a/Documentation/devicetree/bindings/sound/qcom,q6dsp-lpass-clocks.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,q6dsp-lpass-clocks.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/sound/qcom,q6dsp-lpass-clocks.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Qualcomm DSP LPASS Clock Controller binding +title: Qualcomm DSP LPASS Clock Controller maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/sound/qcom,q6dsp-lpass-ports.yaml b/Documentation/devicetree/bindings/sound/qcom,q6dsp-lpass-ports.yaml index d8ebf2e528d27e731c4a77cf55b556b196212b92..d06f188030a3a60659a503b613ea5deb268e4c58 100644 --- a/Documentation/devicetree/bindings/sound/qcom,q6dsp-lpass-ports.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,q6dsp-lpass-ports.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/sound/qcom,q6dsp-lpass-ports.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Qualcomm DSP LPASS(Low Power Audio SubSystem) Audio Ports binding +title: Qualcomm DSP LPASS(Low Power Audio SubSystem) Audio Ports maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml b/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml index 8ca19f2b0b3dec9a69a278f9c7bf9f2a48a4baad..184e8ccbdd137f92b59f762eb7f8e51d1d19d7c3 100644 --- a/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/qcom,wcd934x.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for Qualcomm WCD9340/WCD9341 Audio Codec +title: Qualcomm WCD9340/WCD9341 Audio Codec maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd938x-sdw.yaml
b/Documentation/devicetree/bindings/sound/qcom,wcd938x-sdw.yaml index 49a267b306f63b06580e21f4dc43ef6c15eefb1b..b430dd3e1841ac256c403e13b4135e02de531872 100644 --- a/Documentation/devicetree/bindings/sound/qcom,wcd938x-sdw.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,wcd938x-sdw.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/qcom,wcd938x-sdw.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for Qualcomm SoundWire Slave devices on WCD9380/WCD9385 +title: Qualcomm SoundWire Slave devices on WCD9380/WCD9385 maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml b/Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml index 67d84463eaeb64eb64d3af41ecb97110b242c8fa..018565793a3ec20e1afd0e061d0c98336895ea33 100644 --- a/Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/qcom,wcd938x.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for Qualcomm WCD9380/WCD9385 Audio Codec +title: Qualcomm WCD9380/WCD9385 Audio Codec maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/sound/qcom,wsa881x.yaml b/Documentation/devicetree/bindings/sound/qcom,wsa881x.yaml index ea44d03e58caaa8574e3926dc0d0c890b6967073..d702b489320fa00d917e5f67e02f1aaa6913f4d4 100644 --- a/Documentation/devicetree/bindings/sound/qcom,wsa881x.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,wsa881x.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/qcom,wsa881x.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for Qualcomm WSA8810/WSA8815 Class-D Smart Speaker Amplifier +title: Qualcomm WSA8810/WSA8815 Class-D Smart Speaker Amplifier maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/sound/qcom,wsa883x.yaml b/Documentation/devicetree/bindings/sound/qcom,wsa883x.yaml index 65b0e67f82a3062a7941d93ae44c0144c2ec148b..ba572a7f4f3c0137eb0943448cef3d1af086a5e9 100644 --- a/Documentation/devicetree/bindings/sound/qcom,wsa883x.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,wsa883x.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/qcom,wsa883x.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for The Qualcomm WSA8830/WSA8832/WSA8835 +title: Qualcomm WSA8830/WSA8832/WSA8835 smart speaker amplifier maintainers: diff --git a/Documentation/devicetree/bindings/sound/realtek,rt1015p.yaml b/Documentation/devicetree/bindings/sound/realtek,rt1015p.yaml index ea7d4900ee4a563d00755aff6b587e0dbfd2d21c..7dac9e6f7f08e10061f9d68b227b37e045f28897 100644 --- a/Documentation/devicetree/bindings/sound/realtek,rt1015p.yaml +++ b/Documentation/devicetree/bindings/sound/realtek,rt1015p.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/realtek,rt1015p.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Realtek rt1015p codec devicetree bindings +title: Realtek rt1015p codec maintainers: - Tzung-Bi Shih diff --git a/Documentation/devicetree/bindings/sound/realtek,rt5682s.yaml b/Documentation/devicetree/bindings/sound/realtek,rt5682s.yaml index e631ace7aad1fc66d16d2dc293942749baa450eb..ecfa7a5768663bf029a6c0685beea58f846289e2 100644 --- a/Documentation/devicetree/bindings/sound/realtek,rt5682s.yaml +++ b/Documentation/devicetree/bindings/sound/realtek,rt5682s.yaml @@ -4,7 +4,7 @@ $id: 
http://devicetree.org/schemas/sound/realtek,rt5682s.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Realtek rt5682s codec devicetree bindings +title: Realtek rt5682s codec maintainers: - Derek Fang diff --git a/Documentation/devicetree/bindings/sound/ti,src4xxx.yaml b/Documentation/devicetree/bindings/sound/ti,src4xxx.yaml index 988ce8d8028fba666baf150e4855b58cda1a2a93..27230c682d10fdc00a4488040e8e306f1c7a16b1 100644 --- a/Documentation/devicetree/bindings/sound/ti,src4xxx.yaml +++ b/Documentation/devicetree/bindings/sound/ti,src4xxx.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/sound/ti,src4xxx.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Texas Instruments SRC4392 Device Tree Bindings +title: Texas Instruments SRC4392 description: | The SRC4392 is a digital audio codec that can be connected via diff --git a/Documentation/devicetree/bindings/soundwire/qcom,sdw.txt b/Documentation/devicetree/bindings/soundwire/qcom,sdw.txt deleted file mode 100644 index c85c25779e3fcb6a52af35932537578fc4a3dec4..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/soundwire/qcom,sdw.txt +++ /dev/null @@ -1,214 +0,0 @@ -Qualcomm SoundWire Controller Bindings - - -This binding describes the Qualcomm SoundWire Controller along with its -board specific bus parameters. - -- compatible: - Usage: required - Value type: - Definition: must be "qcom,soundwire-v..", - Example: - "qcom,soundwire-v1.3.0" - "qcom,soundwire-v1.5.0" - "qcom,soundwire-v1.5.1" - "qcom,soundwire-v1.6.0" -- reg: - Usage: required - Value type: - Definition: the base address and size of SoundWire controller - address space. - -- interrupts: - Usage: required - Value type: - Definition: should specify the SoundWire Controller core and optional - wake IRQ - -- interrupt-names: - Usage: Optional - Value type: boolean - Value type: - Definition: should be "core" for core and "wakeup" for wake interrupt. - -- wakeup-source: - Usage: Optional - Value type: boolean - Definition: should specify if SoundWire Controller is wake up capable. - -- clock-names: - Usage: required - Value type: - Definition: should be "iface" for SoundWire Controller interface clock - -- clocks: - Usage: required - Value type: - Definition: should specify the SoundWire Controller interface clock - -- #sound-dai-cells: - Usage: required - Value type: - Definition: must be 1 for digital audio interfaces on the controller. - -- qcom,dout-ports: - Usage: required - Value type: - Definition: must be count of data out ports - -- qcom,din-ports: - Usage: required - Value type: - Definition: must be count of data in ports - -- qcom,ports-offset1: - Usage: required - Value type: - Definition: should specify payload transport window offset1 of each - data port. Out ports followed by In ports. - Value of 0xFF indicates that this option is not implemented - or applicable for the respective data port. - More info in MIPI Alliance SoundWire 1.0 Specifications. - -- qcom,ports-offset2: - Usage: required - Value type: - Definition: should specify payload transport window offset2 of each - data port. Out ports followed by In ports. - Value of 0xFF indicates that this option is not implemented - or applicable for the respective data port. - More info in MIPI Alliance SoundWire 1.0 Specifications. - -- qcom,ports-sinterval-low: - Usage: required - Value type: - Definition: should be sample interval low of each data port. - Out ports followed by In ports. Used for Sample Interval - calculation. 
- Value of 0xFF indicates that this option is not implemented - or applicable for the respective data port. - More info in MIPI Alliance SoundWire 1.0 Specifications. - -- qcom,ports-word-length: - Usage: optional - Value type: - Definition: should be size of payload channel sample. - Value of 0xFF indicates that this option is not implemented - or applicable for the respective data port. - More info in MIPI Alliance SoundWire 1.0 Specifications. - -- qcom,ports-block-pack-mode: - Usage: optional - Value type: - Definition: should be 0 or 1 to indicate the block packing mode. - 0 to indicate Blocks are per Channel - 1 to indicate Blocks are per Port. - Out ports followed by In ports. - Value of 0xFF indicates that this option is not implemented - or applicable for the respective data port. - More info in MIPI Alliance SoundWire 1.0 Specifications. - -- qcom,ports-block-group-count: - Usage: optional - Value type: - Definition: should be in range 1 to 4 to indicate how many sample - intervals are combined into a payload. - Out ports followed by In ports. - Value of 0xFF indicates that this option is not implemented - or applicable for the respective data port. - More info in MIPI Alliance SoundWire 1.0 Specifications. - -- qcom,ports-lane-control: - Usage: optional - Value type: - Definition: should be in range 0 to 7 to identify which data lane - the data port uses. - Out ports followed by In ports. - Value of 0xFF indicates that this option is not implemented - or applicable for the respective data port. - More info in MIPI Alliance SoundWire 1.0 Specifications. - -- qcom,ports-hstart: - Usage: optional - Value type: - Definition: should be number identifying lowerst numbered coloum in - SoundWire Frame, i.e. left edge of the Transport sub-frame - for each port. Values between 0 and 15 are valid. - Out ports followed by In ports. - Value of 0xFF indicates that this option is not implemented - or applicable for the respective data port. - More info in MIPI Alliance SoundWire 1.0 Specifications. - -- qcom,ports-hstop: - Usage: optional - Value type: - Definition: should be number identifying highest numbered coloum in - SoundWire Frame, i.e. the right edge of the Transport - sub-frame for each port. Values between 0 and 15 are valid. - Out ports followed by In ports. - Value of 0xFF indicates that this option is not implemented - or applicable for the respective data port. - More info in MIPI Alliance SoundWire 1.0 Specifications. - -- qcom,dports-type: - Usage: optional - Value type: - Definition: should be one of the following types - 0 for reduced port - 1 for simple ports - 2 for full port - Out ports followed by In ports. - Value of 0xFF indicates that this option is not implemented - or applicable for the respective data port. - More info in MIPI Alliance SoundWire 1.0 Specifications. - -- reset: - Usage: optional - Value type: - Definition: Should specify the SoundWire audio CSR reset controller interface, - which is required for SoundWire version 1.6.0 and above. - -- reset-names: - Usage: optional - Value type: - Definition: should be "swr_audio_cgcr" for SoundWire audio CSR reset - controller interface. - -Note: - More Information on detail of encoding of these fields can be -found in MIPI Alliance SoundWire 1.0 Specifications. - -= SoundWire devices -Each subnode of the bus represents SoundWire device attached to it. -The properties of these nodes are defined by the individual bindings. 
- -= EXAMPLE -The following example represents a SoundWire controller on DB845c board -which has controller integrated inside WCD934x codec on SDM845 SoC. - -soundwire: soundwire@c85 { - compatible = "qcom,soundwire-v1.3.0"; - reg = <0xc85 0x20>; - interrupts = <20 IRQ_TYPE_EDGE_RISING>; - clocks = <&wcc>; - clock-names = "iface"; - resets = <&lpass_audiocc LPASS_AUDIO_SWR_TX_CGCR>; - reset-names = "swr_audio_cgcr"; - #sound-dai-cells = <1>; - qcom,dports-type = <0>; - qcom,dout-ports = <6>; - qcom,din-ports = <2>; - qcom,ports-sinterval-low = /bits/ 8 <0x07 0x1F 0x3F 0x7 0x1F 0x3F 0x0F 0x0F>; - qcom,ports-offset1 = /bits/ 8 <0x01 0x02 0x0C 0x6 0x12 0x0D 0x07 0x0A >; - qcom,ports-offset2 = /bits/ 8 <0x00 0x00 0x1F 0x00 0x00 0x1F 0x00 0x00>; - - /* Left Speaker */ - left{ - .... - }; - - /* Right Speaker */ - right{ - .... - }; -}; diff --git a/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml b/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bcbfa71536cda602d40e1e6a4a0d3b9bfb468a31 --- /dev/null +++ b/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml @@ -0,0 +1,270 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/soundwire/qcom,soundwire.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm SoundWire Controller + +maintainers: + - Srinivas Kandagatla + - Srinivasa Rao Mandadapu + +description: + The Qualcomm SoundWire controller along with its board specific bus parameters. + +properties: + compatible: + enum: + - qcom,soundwire-v1.3.0 + - qcom,soundwire-v1.5.0 + - qcom,soundwire-v1.5.1 + - qcom,soundwire-v1.6.0 + - qcom,soundwire-v1.7.0 + + reg: + maxItems: 1 + + interrupts: + minItems: 1 + items: + - description: specify the SoundWire controller core. + - description: specify the Soundwire controller wake IRQ. + + interrupt-names: + minItems: 1 + items: + - const: core + - const: wakeup + + clocks: + items: + - description: iface clock + + clock-names: + items: + - const: iface + + resets: + items: + - description: SWR_AUDIO_CGCR RESET + + reset-names: + items: + - const: swr_audio_cgcr + + '#sound-dai-cells': + const: 1 + + '#address-cells': + const: 2 + + '#size-cells': + const: 0 + + wakeup-source: true + + qcom,din-ports: + $ref: /schemas/types.yaml#/definitions/uint32 + description: count of data in ports + + qcom,dout-ports: + $ref: /schemas/types.yaml#/definitions/uint32 + description: count of data out ports + + qcom,ports-word-length: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: + Size of payload channel sample. + Value of 0xff indicates that this option is not implemented + or applicable for the respective data port. + More info in MIPI Alliance SoundWire 1.0 Specifications. + minItems: 3 + maxItems: 5 + + qcom,ports-sinterval-low: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: + Sample interval low of each data port. + Out ports followed by In ports. Used for Sample Interval calculation. + Value of 0xff indicates that this option is not implemented + or applicable for the respective data port. + More info in MIPI Alliance SoundWire 1.0 Specifications. + minItems: 3 + maxItems: 8 + + qcom,ports-offset1: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: + Payload transport window offset1 of each data port. + Out ports followed by In ports. 
+ Value of 0xff indicates that this option is not implemented + or applicable for the respective data port. + More info in MIPI Alliance SoundWire 1.0 Specifications. + minItems: 3 + maxItems: 8 + + qcom,ports-offset2: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: + Payload transport window offset2 of each data port. + Out ports followed by In ports. + Value of 0xff indicates that this option is not implemented + or applicable for the respective data port. + More info in MIPI Alliance SoundWire 1.0 Specifications. + minItems: 3 + maxItems: 8 + + qcom,ports-lane-control: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: + Identify which data lane the data port uses. + Out ports followed by In ports. + Value of 0xff indicates that this option is not implemented + or applicable for the respective data port. + More info in MIPI Alliance SoundWire 1.0 Specifications. + minItems: 3 + maxItems: 5 + + qcom,ports-block-pack-mode: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: + Indicate the block packing mode. + 0 to indicate Blocks are per Channel + 1 to indicate Blocks are per Port. + Out ports followed by In ports. + Value of 0xff indicates that this option is not implemented + or applicable for the respective data port. + More info in MIPI Alliance SoundWire 1.0 Specifications. + minItems: 3 + maxItems: 8 + items: + oneOf: + - minimum: 0 + maximum: 1 + - const: 0xff + + qcom,ports-hstart: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: + Identifies the lowest numbered column in the SoundWire frame, + i.e. the left edge of the Transport sub-frame for each port. + Out ports followed by In ports. + Value of 0xff indicates that this option is not implemented + or applicable for the respective data port. + More info in MIPI Alliance SoundWire 1.0 Specifications. + minItems: 3 + maxItems: 5 + items: + oneOf: + - minimum: 0 + maximum: 15 + - const: 0xff + + qcom,ports-hstop: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: + Identifies the highest numbered column in the SoundWire frame, + i.e. the right edge of the Transport + sub-frame for each port. Out ports followed by In ports. + Value of 0xff indicates that this option is not implemented + or applicable for the respective data port. + More info in MIPI Alliance SoundWire 1.0 Specifications. + minItems: 3 + maxItems: 5 + items: + oneOf: + - minimum: 0 + maximum: 15 + - const: 0xff + + qcom,ports-block-group-count: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: + In range 1 to 4 to indicate how many sample intervals are combined + into a payload. Out ports followed by In ports. + Value of 0xff indicates that this option is not implemented + or applicable for the respective data port. + More info in MIPI Alliance SoundWire 1.0 Specifications. + minItems: 3 + maxItems: 5 + items: + oneOf: + - minimum: 0 + maximum: 4 + - const: 0xff + + label: + maxItems: 1 + +patternProperties: + "^.*@[0-9a-f],[0-9a-f]$": + type: object + description: + Child nodes for a standalone audio codec or speaker amplifier IC. + It has RX and TX SoundWire secondary devices.
+ properties: + compatible: + pattern: "^sdw[0-9a-f]{1}[0-9a-f]{4}[0-9a-f]{4}[0-9a-f]{2}$" + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - '#sound-dai-cells' + - '#address-cells' + - '#size-cells' + - qcom,dout-ports + - qcom,din-ports + - qcom,ports-sinterval-low + - qcom,ports-offset1 + - qcom,ports-offset2 + +additionalProperties: false + +examples: + - | + #include + #include + #include + + soundwire@3210000 { + compatible = "qcom,soundwire-v1.6.0"; + reg = <0x03210000 0x2000>; + + interrupts = , + <&pdc 130 IRQ_TYPE_LEVEL_HIGH>; + + interrupt-names = "core", "wakeup"; + + clocks = <&lpass_rx_macro>; + clock-names = "iface"; + + qcom,din-ports = <0>; + qcom,dout-ports = <5>; + + resets = <&lpass_audiocc LPASS_AUDIO_SWR_RX_CGCR>; + reset-names = "swr_audio_cgcr"; + + qcom,ports-word-length = /bits/ 8 <0x01 0x07 0x04 0xff 0xff>; + qcom,ports-sinterval-low = /bits/ 8 <0x03 0x3f 0x1f 0x03 0x03>; + qcom,ports-offset1 = /bits/ 8 <0x00 0x00 0x0b 0x01 0x01>; + qcom,ports-offset2 = /bits/ 8 <0x00 0x00 0x0b 0x00 0x00>; + qcom,ports-lane-control = /bits/ 8 <0x01 0x00 0x00 0x00 0x00>; + qcom,ports-block-pack-mode = /bits/ 8 <0xff 0x00 0x01 0xff 0xff>; + qcom,ports-hstart = /bits/ 8 <0xff 0x03 0xff 0xff 0xff>; + qcom,ports-hstop = /bits/ 8 <0xff 0x06 0xff 0xff 0xff>; + qcom,ports-block-group-count = /bits/ 8 <0xff 0xff 0xff 0xff 0x00>; + + #sound-dai-cells = <1>; + #address-cells = <2>; + #size-cells = <0>; + + codec@0,4 { + compatible = "sdw20217010d00"; + reg = <0 4>; + qcom,rx-port-mapping = <1 2 3 4 5>; + }; + }; diff --git a/Documentation/devicetree/bindings/soundwire/soundwire-controller.yaml b/Documentation/devicetree/bindings/soundwire/soundwire-controller.yaml index 4aad121eff3f22dc6c7360aae8dab750e2e41708..fdeb8af417d7ca5f644918e745e2a5be7371e445 100644 --- a/Documentation/devicetree/bindings/soundwire/soundwire-controller.yaml +++ b/Documentation/devicetree/bindings/soundwire/soundwire-controller.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/soundwire/soundwire-controller.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: SoundWire Controller Generic Binding +title: SoundWire Controller Common Properties maintainers: - Srinivas Kandagatla diff --git a/Documentation/devicetree/bindings/spi/aspeed,ast2600-fmc.yaml b/Documentation/devicetree/bindings/spi/aspeed,ast2600-fmc.yaml index fa8f4ac2098551522d6bb081b11873ebc7385337..e6c817de34497802a11bc5bc7680c8751af82c8f 100644 --- a/Documentation/devicetree/bindings/spi/aspeed,ast2600-fmc.yaml +++ b/Documentation/devicetree/bindings/spi/aspeed,ast2600-fmc.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/spi/aspeed,ast2600-fmc.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Aspeed SMC controllers bindings +title: Aspeed SMC controllers maintainers: - Chin-Ting Kuo diff --git a/Documentation/devicetree/bindings/spi/ingenic,spi.yaml b/Documentation/devicetree/bindings/spi/ingenic,spi.yaml index 360f76c226d92bb2d5df72b8c29a8e9302db3222..c08d55b900bbc02dfb10887c8150dfd406c4b6d9 100644 --- a/Documentation/devicetree/bindings/spi/ingenic,spi.yaml +++ b/Documentation/devicetree/bindings/spi/ingenic,spi.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/spi/ingenic,spi.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs SPI controller devicetree bindings +title: Ingenic SoCs SPI controller maintainers: - Artur Rojek diff --git a/Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml 
b/Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml index 0abcac385e7cb44d5f8cba37bb5e792cdbc9eadf..5f4f6b5615d0650496e5854b7181f6d155ce5f9f 100644 --- a/Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml +++ b/Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/spi/marvell,mmp2-ssp.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: PXA2xx SSP SPI Controller bindings +title: PXA2xx SSP SPI Controller maintainers: - Lubomir Rintel diff --git a/Documentation/devicetree/bindings/spi/omap-spi.yaml b/Documentation/devicetree/bindings/spi/omap-spi.yaml index 9952199cae119d86e664ac57a8ee522598b9f680..352affa4b7f86b1babc305fdb278793f1ca86847 100644 --- a/Documentation/devicetree/bindings/spi/omap-spi.yaml +++ b/Documentation/devicetree/bindings/spi/omap-spi.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/spi/omap-spi.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: SPI controller bindings for OMAP and K3 SoCs +title: SPI Controller on OMAP and K3 SoCs maintainers: - Aswath Govindraju diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml index 01042a7f382e2b6f22d995333e161aa81ff5d6f2..5a7c72cadf76bdf22cf7f63996d79fd514fd78e6 100644 --- a/Documentation/devicetree/bindings/spi/spi-controller.yaml +++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/spi/spi-controller.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: SPI Controller Generic Binding +title: SPI Controller Common Properties maintainers: - Mark Brown diff --git a/Documentation/devicetree/bindings/spi/spi-gpio.yaml b/Documentation/devicetree/bindings/spi/spi-gpio.yaml index 0d0b6d9dad1ca4ae2a12a6cd9fa135f773e8a806..f29b89076c99092f1443237a934d4df61b6e924e 100644 --- a/Documentation/devicetree/bindings/spi/spi-gpio.yaml +++ b/Documentation/devicetree/bindings/spi/spi-gpio.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/spi/spi-gpio.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: SPI-GPIO devicetree bindings +title: SPI-GPIO maintainers: - Rob Herring diff --git a/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml b/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml index 6ec6f556182f42fab0295251789a5e7d3bc8df43..1eb17f7a4d862878d62c2ec5d5430f1c1bd5379b 100644 --- a/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml +++ b/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/spi/st,stm32-qspi.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 Quad Serial Peripheral Interface (QSPI) bindings +title: STMicroelectronics STM32 Quad Serial Peripheral Interface (QSPI) maintainers: - Christophe Kerello diff --git a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml index 3d64bed266ac7462d9cc3b9fb25adb73383381a3..1cda15f91cc3015abc0d0a8135266cd220748343 100644 --- a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml +++ b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/spi/st,stm32-spi.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 SPI Controller bindings +title: STMicroelectronics STM32 SPI Controller description: | The STM32 SPI controller is used to 
communicate with external devices using diff --git a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.yaml b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.yaml index fee4f0eb466520ed06fad07ac16aee8179741ca9..f983b4af6db93f03b39dc65a35f3da2c9489d2f9 100644 --- a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.yaml +++ b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.yaml @@ -85,6 +85,14 @@ properties: description: > which of the PMIC Arb provided channels to use for accesses + qcom,bus-id: + $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 0 + maximum: 1 + description: > + SPMI bus instance. Only applicable to PMIC arbiter version 7 and beyond. + Supported values: 0 = primary bus, 1 = secondary bus + required: - compatible - reg-names @@ -113,5 +121,7 @@ examples: interrupt-controller; #interrupt-cells = <4>; + + qcom,bus-id = <0>; }; diff --git a/Documentation/devicetree/bindings/thermal/fsl,scu-thermal.yaml b/Documentation/devicetree/bindings/thermal/fsl,scu-thermal.yaml index f9e4b3c8d0eebb166d6a07dc46bc549ab9306b6a..3721c8c8ec6401cb5c2ec8eefdd42dd1a69226ff 100644 --- a/Documentation/devicetree/bindings/thermal/fsl,scu-thermal.yaml +++ b/Documentation/devicetree/bindings/thermal/fsl,scu-thermal.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/thermal/fsl,scu-thermal.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: i.MX SCU Client Device Node - Thermal bindings based on SCU Message Protocol +title: i.MX SCU Client Device Node - Thermal Based on SCU Message Protocol maintainers: - Dong Aisheng diff --git a/Documentation/devicetree/bindings/thermal/generic-adc-thermal.yaml b/Documentation/devicetree/bindings/thermal/generic-adc-thermal.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f1fc3b0d8608544b4cae3b637451c4776a11ee10 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/generic-adc-thermal.yaml @@ -0,0 +1,84 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/thermal/generic-adc-thermal.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: General Purpose Analog To Digital Converter (ADC) based thermal sensor + +maintainers: + - Laxman Dewangan + +description: + On some platforms, thermal sensors such as thermistors are connected to + one of the ADC channels, and the sensor resistance is read via the voltage + across the sensor resistor. The voltage read across the sensor is mapped to + temperature using a voltage-temperature lookup table. + +properties: + compatible: + const: generic-adc-thermal + + '#thermal-sensor-cells': + const: 0 + + io-channels: + maxItems: 1 + + io-channel-names: + const: sensor-channel + + temperature-lookup-table: + description: | + Lookup table mapping the relation between ADC value and temperature. + When the ADC is read, the value is looked up in the table to get the + equivalent temperature. + + If not specified, the driver assumes the ADC channel gives milliCelsius + directly.
+ $ref: /schemas/types.yaml#/definitions/int32-matrix + items: + items: + - description: Temperature in milliCelsius + - description: ADC read value + +required: + - compatible + - '#thermal-sensor-cells' + - io-channels + - io-channel-names + +additionalProperties: false + +examples: + - | + #include + + thermal-sensor { + compatible = "generic-adc-thermal"; + #thermal-sensor-cells = <0>; + io-channels = <&ads1015 1>; + io-channel-names = "sensor-channel"; + temperature-lookup-table = < + (-40000) 2578 + (-39000) 2577 + (-38000) 2576 + (-37000) 2575 + (-36000) 2574 + (-35000) 2573 + (-34000) 2572 + (-33000) 2571 + (-32000) 2569 + (-31000) 2568 + (-30000) 2567 + /* skip */ + 118000 254 + 119000 247 + 120000 240 + 121000 233 + 122000 226 + 123000 220 + 124000 214 + 125000 208>; + }; +... diff --git a/Documentation/devicetree/bindings/thermal/imx-thermal.yaml b/Documentation/devicetree/bindings/thermal/imx-thermal.yaml index 16b57f57d10329ce2fdb5a183233a3e44791a359..b22c8b59d5c7c0e3bc6ceebd14e9f9fa855e749e 100644 --- a/Documentation/devicetree/bindings/thermal/imx-thermal.yaml +++ b/Documentation/devicetree/bindings/thermal/imx-thermal.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/thermal/imx-thermal.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NXP i.MX Thermal Binding +title: NXP i.MX Thermal maintainers: - Shawn Guo diff --git a/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml b/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml index 89c54e08ee61b8a3c19502996573203444975e43..d2c1e4573c3278f5e391b3c4af708f214f9b0db8 100644 --- a/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml +++ b/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/thermal/imx8mm-thermal.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NXP i.MX8M Mini Thermal Binding +title: NXP i.MX8M Mini Thermal maintainers: - Anson Huang @@ -32,6 +32,13 @@ properties: clocks: maxItems: 1 + nvmem-cells: + maxItems: 1 + description: Phandle to the calibration data provided by ocotp + + nvmem-cell-names: + const: calib + "#thermal-sensor-cells": description: | Number of cells required to uniquely identify the thermal diff --git a/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt b/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt index 5c7e7bdd029abfab9cb14c3076afa9b647d596c8..38b32bb447e39ab9e24ba1a775290b2b96725787 100644 --- a/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt @@ -13,6 +13,8 @@ Required properties: - "mediatek,mt2701-thermal" : For MT2701 family of SoCs - "mediatek,mt2712-thermal" : For MT2712 family of SoCs - "mediatek,mt7622-thermal" : For MT7622 SoC + - "mediatek,mt7981-thermal", "mediatek,mt7986-thermal" : For MT7981 SoC + - "mediatek,mt7986-thermal" : For MT7986 SoC - "mediatek,mt8183-thermal" : For MT8183 family of SoCs - "mediatek,mt8516-thermal", "mediatek,mt2701-thermal : For MT8516 family of SoCs - reg: Address range of the thermal controller diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml index c41fcf4041176633e012cb96378b9d86f9439c50..0231f187b09775bdd7eba1680130439bc4c745a7 100644 --- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml +++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml @@ -58,8 +58,14 @@ properties: - qcom,sm8150-tsens - 
qcom,sm8250-tsens - qcom,sm8350-tsens + - qcom,sm8450-tsens + - qcom,sm8550-tsens - const: qcom,tsens-v2 + - description: v2 of TSENS with combined interrupt + enum: + - qcom,ipq8074-tsens + reg: items: - description: TM registers @@ -67,15 +73,11 @@ properties: interrupts: minItems: 1 - items: - - description: Combined interrupt if upper or lower threshold crossed - - description: Interrupt if critical threshold crossed + maxItems: 2 interrupt-names: minItems: 1 - items: - - const: uplow - - const: critical + maxItems: 2 nvmem-cells: minItems: 1 @@ -129,22 +131,64 @@ allOf: then: properties: interrupts: - maxItems: 1 + items: + - description: Combined interrupt if upper or lower threshold crossed interrupt-names: - maxItems: 1 + items: + - const: uplow - else: + - if: + properties: + compatible: + contains: + enum: + - qcom,msm8953-tsens + - qcom,msm8996-tsens + - qcom,msm8998-tsens + - qcom,sc7180-tsens + - qcom,sc7280-tsens + - qcom,sc8180x-tsens + - qcom,sc8280xp-tsens + - qcom,sdm630-tsens + - qcom,sdm845-tsens + - qcom,sm6350-tsens + - qcom,sm8150-tsens + - qcom,sm8250-tsens + - qcom,sm8350-tsens + - qcom,sm8450-tsens + - qcom,tsens-v2 + then: properties: interrupts: - minItems: 2 + items: + - description: Combined interrupt if upper or lower threshold crossed + - description: Interrupt if critical threshold crossed interrupt-names: - minItems: 2 + items: + - const: uplow + - const: critical - if: properties: compatible: contains: enum: + - qcom,ipq8074-tsens + then: + properties: + interrupts: + items: + - description: Combined interrupt if upper, lower or critical thresholds crossed + interrupt-names: + items: + - const: combined + + - if: + properties: + compatible: + contains: + enum: + - qcom,ipq8074-tsens - qcom,tsens-v0_1 - qcom,tsens-v1 - qcom,tsens-v2 @@ -227,4 +271,19 @@ examples: #qcom,sensors = <13>; #thermal-sensor-cells = <1>; }; + + - | + #include + // Example 4 (for any IPQ8074 based SoC-s): + tsens4: thermal-sensor@4a9000 { + compatible = "qcom,ipq8074-tsens"; + reg = <0x4a9000 0x1000>, + <0x4a8000 0x1000>; + + interrupts = ; + interrupt-names = "combined"; + + #qcom,sensors = <16>; + #thermal-sensor-cells = <1>; + }; ... 
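The temperature-lookup-table in the generic-adc-thermal binding above is easiest to read as a clamped linear interpolation: a raw ADC reading is bracketed between the two nearest table rows and the temperature is interpolated between them. The following user-space C sketch illustrates that arithmetic, assuming a monotonically decreasing ADC column (an NTC thermistor, as in the binding's example table); the table excerpt and the adc_to_temp() helper are illustrative only, not part of any kernel API.

#include <stdio.h>

/* Hypothetical excerpt of a temperature-lookup-table from the binding
 * above: each row is { temperature in milliCelsius, ADC reading }.
 * Intermediate rows are elided for brevity. */
struct lut_entry { int temp_mc; int adc; };

static const struct lut_entry table[] = {
	{ -40000, 2578 },
	{ -39000, 2577 },
	{ 124000, 214 },
	{ 125000, 208 },
};

/* Map a raw ADC reading to milliCelsius: clamp outside the table,
 * otherwise linearly interpolate between the two nearest rows
 * (the ADC value falls as temperature rises for an NTC thermistor). */
static int adc_to_temp(int adc, const struct lut_entry *t, int n)
{
	if (adc >= t[0].adc)
		return t[0].temp_mc;
	for (int i = 1; i < n; i++) {
		if (adc >= t[i].adc) {
			int dadc = t[i - 1].adc - t[i].adc;
			int dtmp = t[i].temp_mc - t[i - 1].temp_mc;

			return t[i - 1].temp_mc +
			       dtmp * (t[i - 1].adc - adc) / dadc;
		}
	}
	return t[n - 1].temp_mc;
}

int main(void)
{
	int n = sizeof(table) / sizeof(table[0]);

	printf("%d mC\n", adc_to_temp(2577, table, n)); /* -> -39000 */
	printf("%d mC\n", adc_to_temp(210, table, n));  /* between 124 and 125 C */
	return 0;
}

A real consumer of this table would perform an equivalent lookup in kernel space; the sketch only mirrors the mapping the binding describes.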
diff --git a/Documentation/devicetree/bindings/thermal/rzg2l-thermal.yaml b/Documentation/devicetree/bindings/thermal/rzg2l-thermal.yaml index 1d837339784817ea610767a467c73f9e0048f17f..03f4b926e53c96b253c8c75446e8f9f336e6f516 100644 --- a/Documentation/devicetree/bindings/thermal/rzg2l-thermal.yaml +++ b/Documentation/devicetree/bindings/thermal/rzg2l-thermal.yaml @@ -17,7 +17,7 @@ properties: compatible: items: - enum: - - renesas,r9a07g043-tsu # RZ/G2UL + - renesas,r9a07g043-tsu # RZ/G2UL and RZ/Five - renesas,r9a07g044-tsu # RZ/G2{L,LC} - renesas,r9a07g054-tsu # RZ/V2L - const: renesas,rzg2l-tsu diff --git a/Documentation/devicetree/bindings/thermal/sprd-thermal.yaml b/Documentation/devicetree/bindings/thermal/sprd-thermal.yaml index 6d65a3cf2af2586a42f2d16ff9710428b2c67d02..76aaa004c8ac5c7cac106b52d8327fec0e42b91b 100644 --- a/Documentation/devicetree/bindings/thermal/sprd-thermal.yaml +++ b/Documentation/devicetree/bindings/thermal/sprd-thermal.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/thermal/sprd-thermal.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Spreadtrum thermal sensor controller bindings +title: Spreadtrum thermal sensor controller maintainers: - Orson Zhai diff --git a/Documentation/devicetree/bindings/thermal/st,stm32-thermal.yaml b/Documentation/devicetree/bindings/thermal/st,stm32-thermal.yaml index bee41cff51429a578cf60b18f61f1fac5a9386fc..ab043084f6678b2123e144574da81fbfe660eda9 100644 --- a/Documentation/devicetree/bindings/thermal/st,stm32-thermal.yaml +++ b/Documentation/devicetree/bindings/thermal/st,stm32-thermal.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/thermal/st,stm32-thermal.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 digital thermal sensor (DTS) binding +title: STMicroelectronics STM32 digital thermal sensor (DTS) maintainers: - Pascal Paillet diff --git a/Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml b/Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml index 7bb9327caa13cc001df1089c23fcf852bf2f9ccc..b9022f1613d815dccae26bf20091361f262424af 100644 --- a/Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml +++ b/Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/thermal/thermal-cooling-devices.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Thermal cooling device binding +title: Thermal cooling device maintainers: - Amit Kucheria diff --git a/Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt b/Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt deleted file mode 100644 index e136946a2f4fd9f42c2736a6e0926166bdc82e2c..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt +++ /dev/null @@ -1,95 +0,0 @@ -General Purpose Analog To Digital Converter (ADC) based thermal sensor. - -On some of platforms, thermal sensor like thermistors are connected to -one of ADC channel and sensor resistance is read via voltage across the -sensor resistor. The voltage read across the sensor is mapped to -temperature using voltage-temperature lookup table. - -Required properties: -=================== -- compatible: Must be "generic-adc-thermal". -- #thermal-sensor-cells: Should be 1. See Documentation/devicetree/bindings/thermal/thermal-sensor.yaml for a description - of this property. 
-Optional properties: -=================== -- temperature-lookup-table: Two dimensional array of Integer; lookup table - to map the relation between ADC value and - temperature. When ADC is read, the value is - looked up on the table to get the equivalent - temperature. - - The first value of the each row of array is the - temperature in milliCelsius and second value of - the each row of array is the ADC read value. - - If not specified, driver assumes the ADC channel - gives milliCelsius directly. - -Example : -#include - -i2c@7000c400 { - ads1015: ads1015@4a { - reg = <0x4a>; - compatible = "ads1015"; - sampling-frequency = <3300>; - #io-channel-cells = <1>; - }; -}; - -tboard_thermistor: thermal-sensor { - compatible = "generic-adc-thermal"; - #thermal-sensor-cells = <0>; - io-channels = <&ads1015 1>; - io-channel-names = "sensor-channel"; - temperature-lookup-table = < (-40000) 2578 - (-39000) 2577 - (-38000) 2576 - (-37000) 2575 - (-36000) 2574 - (-35000) 2573 - (-34000) 2572 - (-33000) 2571 - (-32000) 2569 - (-31000) 2568 - (-30000) 2567 - :::::::::: - 118000 254 - 119000 247 - 120000 240 - 121000 233 - 122000 226 - 123000 220 - 124000 214 - 125000 208>; -}; - -dummy_cool_dev: dummy-cool-dev { - compatible = "dummy-cooling-dev"; - #cooling-cells = <2>; /* min followed by max */ -}; - -thermal-zones { - Tboard { - polling-delay = <15000>; /* milliseconds */ - polling-delay-passive = <0>; /* milliseconds */ - thermal-sensors = <&tboard_thermistor>; - - trips { - therm_est_trip: therm_est_trip { - temperature = <40000>; - type = "active"; - hysteresis = <1000>; - }; - }; - - cooling-maps { - map0 { - trip = <&therm_est_trip>; - cooling-device = <&dummy_cool_dev THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; - contribution = <100>; - }; - - }; - }; -}; diff --git a/Documentation/devicetree/bindings/thermal/thermal-idle.yaml b/Documentation/devicetree/bindings/thermal/thermal-idle.yaml index 0fd6d9ae61965fe0a1e421bfe9a6bce6fa901cbe..1b77d542a7b8ddc074a7a6e253c56943873ec7d7 100644 --- a/Documentation/devicetree/bindings/thermal/thermal-idle.yaml +++ b/Documentation/devicetree/bindings/thermal/thermal-idle.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/thermal/thermal-idle.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Thermal idle cooling device binding +title: Thermal idle cooling device maintainers: - Daniel Lezcano diff --git a/Documentation/devicetree/bindings/thermal/thermal-sensor.yaml b/Documentation/devicetree/bindings/thermal/thermal-sensor.yaml index 4bd345c71eb83eb88cc603e705bda84bd63d7c7c..57565b3fb07c1f60b72b1d9d920e599c8c1e0339 100644 --- a/Documentation/devicetree/bindings/thermal/thermal-sensor.yaml +++ b/Documentation/devicetree/bindings/thermal/thermal-sensor.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/thermal/thermal-sensor.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Thermal sensor binding +title: Thermal sensor maintainers: - Amit Kucheria diff --git a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml index 8d2c6d74b605a1ebde7d2e3d3b9cb78610d40dec..8581821fa4e1774413771178d3cbe2abf9dce950 100644 --- a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml +++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/thermal/thermal-zones.yaml# $schema: http://devicetree.org/meta-schemas/base.yaml# -title: Thermal zone binding +title: Thermal zone maintainers: - Amit Kucheria diff 
--git a/Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml b/Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml index ea14de80ec759d95666437c650eeddcb5d338e15..7ed0abe9290f66d993a64aacbbc6c032f572205c 100644 --- a/Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml +++ b/Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/thermal/ti,am654-thermal.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Texas Instruments AM654 VTM (DTS) binding +title: Texas Instruments AM654 VTM (DTS) maintainers: - Keerthy diff --git a/Documentation/devicetree/bindings/thermal/ti,j72xx-thermal.yaml b/Documentation/devicetree/bindings/thermal/ti,j72xx-thermal.yaml index c74f124ebfc00120fce35961e994dbee19cfe5db..171b3622ed847533ae7d1d527554be0276def8f5 100644 --- a/Documentation/devicetree/bindings/thermal/ti,j72xx-thermal.yaml +++ b/Documentation/devicetree/bindings/thermal/ti,j72xx-thermal.yaml @@ -4,11 +4,24 @@ $id: http://devicetree.org/schemas/thermal/ti,j72xx-thermal.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Texas Instruments J72XX VTM (DTS) binding +title: Texas Instruments J72XX VTM (DTS) maintainers: - Keerthy +description: | + The TI K3 family of SoCs typically has a Voltage & Thermal + Management (VTM) device to control up to 8 temperature diode + sensors to measure silicon junction temperatures from different + hotspots of the chip as well as provide temperature, interrupt + and alerting information. + + The following polynomial equation can then be used to convert + the value returned by this device into a temperature in Celsius + + Temp(C) = (-9.2627e-12) * x^4 + (6.0373e-08) * x^3 + \ + (-1.7058e-04) * x^2 + (3.2512e-01) * x + (-4.9003e+01) + properties: compatible: enum: @@ -19,7 +32,12 @@ properties: items: - description: VTM cfg1 register space - description: VTM cfg2 register space - - description: VTM efuse register space + - description: | + A software trimming method must be applied to some Jacinto + devices for them to function properly. This eFuse region + provides the information needed for these SoCs to report + temperatures accurately.
+ minItems: 2 power-domains: maxItems: 1 @@ -27,6 +45,21 @@ properties: "#thermal-sensor-cells": const: 1 +allOf: + - if: + properties: + compatible: + contains: + const: ti,j721e-vtm + then: + properties: + reg: + minItems: 3 + else: + properties: + reg: + maxItems: 2 + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/timer/ingenic,sysost.yaml b/Documentation/devicetree/bindings/timer/ingenic,sysost.yaml index 98648bf9e151db50edf574918c89c442eb9abf96..bdc82d8bce0ef7846cb3b89820deac42c858a47e 100644 --- a/Documentation/devicetree/bindings/timer/ingenic,sysost.yaml +++ b/Documentation/devicetree/bindings/timer/ingenic,sysost.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/timer/ingenic,sysost.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Bindings for SYSOST in Ingenic XBurst family SoCs +title: SYSOST in Ingenic XBurst family SoCs maintainers: - 周琰杰 (Zhou Yanjie) diff --git a/Documentation/devicetree/bindings/timer/ingenic,tcu.yaml b/Documentation/devicetree/bindings/timer/ingenic,tcu.yaml index a84fef0fe628f47a3140003e0b319e8dc9051f32..2d14610888a712e2e9b2faa31ba93112902754d5 100644 --- a/Documentation/devicetree/bindings/timer/ingenic,tcu.yaml +++ b/Documentation/devicetree/bindings/timer/ingenic,tcu.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/timer/ingenic,tcu.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic SoCs Timer/Counter Unit (TCU) devicetree bindings +title: Ingenic SoCs Timer/Counter Unit (TCU) description: | For a description of the TCU hardware and drivers, have a look at diff --git a/Documentation/devicetree/bindings/timer/mrvl,mmp-timer.yaml b/Documentation/devicetree/bindings/timer/mrvl,mmp-timer.yaml index 1fbc260a0cbdf8a87f60a8faed2c9a6910e7f585..1ee4aab695d38440c85af71c994f44c0b126a97d 100644 --- a/Documentation/devicetree/bindings/timer/mrvl,mmp-timer.yaml +++ b/Documentation/devicetree/bindings/timer/mrvl,mmp-timer.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/timer/mrvl,mmp-timer.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Marvell MMP Timer bindings +title: Marvell MMP Timer maintainers: - Daniel Lezcano diff --git a/Documentation/devicetree/bindings/timer/st,stm32-timer.yaml b/Documentation/devicetree/bindings/timer/st,stm32-timer.yaml index 937aa8a563660f7935dc38ee2b29a4f15e9f3481..9ec11537620ac0ad2b9f38e0c5f35bfc91929228 100644 --- a/Documentation/devicetree/bindings/timer/st,stm32-timer.yaml +++ b/Documentation/devicetree/bindings/timer/st,stm32-timer.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/timer/st,stm32-timer.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 general-purpose 16 and 32 bits timers bindings +title: STMicroelectronics STM32 general-purpose 16 and 32 bits timers maintainers: - Fabrice Gasnier diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml index 61746755c10777b7ee2871647ff110d5359dd8e7..f5c0a6283e61ffd76ec0239c26fe145b2022e4d8 100644 --- a/Documentation/devicetree/bindings/trivial-devices.yaml +++ b/Documentation/devicetree/bindings/trivial-devices.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/trivial-devices.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Trivial I2C and SPI devices that have simple device tree bindings +title: Trivial I2C and SPI devices maintainers: - Rob Herring diff --git a/Documentation/devicetree/bindings/usb/analogix,anx7411.yaml 
b/Documentation/devicetree/bindings/usb/analogix,anx7411.yaml index 0e72c08e65665cc608e135d428f9cea6cf605651..e4d893369d57bffc913114ce8310e7d249e6f3fd 100644 --- a/Documentation/devicetree/bindings/usb/analogix,anx7411.yaml +++ b/Documentation/devicetree/bindings/usb/analogix,anx7411.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/usb/analogix,anx7411.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Analogix ANX7411 Type-C controller bindings +title: Analogix ANX7411 Type-C controller maintainers: - Xin Ji diff --git a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml index dc9d6ed0781d2373ce31c76c4edf0ba1c16927b0..cae46c4982adf69e3369fa450924df14e03fa804 100644 --- a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml +++ b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/usb/cdns,usb3.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Cadence USBSS-DRD controller bindings +title: Cadence USBSS-DRD controller maintainers: - Pawel Laszczak diff --git a/Documentation/devicetree/bindings/usb/dwc2.yaml b/Documentation/devicetree/bindings/usb/dwc2.yaml index dc4988c0009cf25593d2d0055f13f375e2cee5c9..371ba93f3ce5895c17e440c5d4d27ea2a541f39a 100644 --- a/Documentation/devicetree/bindings/usb/dwc2.yaml +++ b/Documentation/devicetree/bindings/usb/dwc2.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/usb/dwc2.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: DesignWare HS OTG USB 2.0 controller Bindings +title: DesignWare HS OTG USB 2.0 controller maintainers: - Rob Herring @@ -43,7 +43,10 @@ properties: - const: rockchip,rk3066-usb - const: snps,dwc2 - const: lantiq,arx100-usb + - const: lantiq,ase-usb + - const: lantiq,danube-usb - const: lantiq,xrx200-usb + - const: lantiq,xrx300-usb - items: - enum: - amlogic,meson8-usb diff --git a/Documentation/devicetree/bindings/usb/faraday,fotg210.yaml b/Documentation/devicetree/bindings/usb/faraday,fotg210.yaml index c69bbfbcf733d74c15d5c6582f704c56f005d508..84b3b69256b1e8996ff4920221d1b760d69dff24 100644 --- a/Documentation/devicetree/bindings/usb/faraday,fotg210.yaml +++ b/Documentation/devicetree/bindings/usb/faraday,fotg210.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/usb/faraday,fotg210.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Faraday Technology FOTG210 HS OTG USB 2.0 controller Bindings +title: Faraday Technology FOTG210 HS OTG USB 2.0 controller maintainers: - Linus Walleij diff --git a/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml b/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a9f831448ccae2ce8c48475e3b70886446003f39 --- /dev/null +++ b/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml @@ -0,0 +1,48 @@ +# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/usb/genesys,gl850g.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Genesys Logic GL850G USB 2.0 hub controller + +maintainers: + - Icenowy Zheng + +allOf: + - $ref: usb-device.yaml# + +properties: + compatible: + enum: + - usb5e3,608 + + reg: true + + reset-gpios: + description: GPIO controlling the RESET# pin. + + vdd-supply: + description: + the regulator that provides 3.3V core power to the hub. 
+ +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + usb { + dr_mode = "host"; + #address-cells = <1>; + #size-cells = <0>; + + hub: hub@1 { + compatible = "usb5e3,608"; + reg = <1>; + reset-gpios = <&pio 7 2 GPIO_ACTIVE_LOW>; + }; + }; diff --git a/Documentation/devicetree/bindings/usb/ingenic,musb.yaml b/Documentation/devicetree/bindings/usb/ingenic,musb.yaml index 59212358fcce4b5a6a174556c0257efc163dfeeb..4cc1496a913c2e1598d205660d71add021718379 100644 --- a/Documentation/devicetree/bindings/usb/ingenic,musb.yaml +++ b/Documentation/devicetree/bindings/usb/ingenic,musb.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/usb/ingenic,musb.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Ingenic JZ47xx USB IP DT bindings +title: Ingenic JZ47xx USB IP maintainers: - Paul Cercueil diff --git a/Documentation/devicetree/bindings/usb/marvell,pxau2o-ehci.yaml b/Documentation/devicetree/bindings/usb/marvell,pxau2o-ehci.yaml index 3cf93dd45eb706470a526064054bb6b9ba26d181..a0246aa1f2360f8fa61550cb735e48e57db24927 100644 --- a/Documentation/devicetree/bindings/usb/marvell,pxau2o-ehci.yaml +++ b/Documentation/devicetree/bindings/usb/marvell,pxau2o-ehci.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/usb/marvell,pxau2o-ehci.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Marvell PXA/MMP EHCI bindings +title: Marvell PXA/MMP EHCI maintainers: - Lubomir Rintel diff --git a/Documentation/devicetree/bindings/usb/maxim,max33359.yaml b/Documentation/devicetree/bindings/usb/maxim,max33359.yaml index 93a19eda610b975a2be7502f2aafa37ca3755a44..8e513a6af3789bca03c273c4625be85658496fcc 100644 --- a/Documentation/devicetree/bindings/usb/maxim,max33359.yaml +++ b/Documentation/devicetree/bindings/usb/maxim,max33359.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/usb/maxim,max33359.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Maxim TCPCI Type-C PD controller DT bindings +title: Maxim TCPCI Type-C PD controller maintainers: - Badhri Jagan Sridharan diff --git a/Documentation/devicetree/bindings/usb/mediatek,mt6360-tcpc.yaml b/Documentation/devicetree/bindings/usb/mediatek,mt6360-tcpc.yaml index 8db1f8b597c34548f73ebccfd17440e838efae1a..c72257c19220f2e55e3943d38b64a3994a6711b0 100644 --- a/Documentation/devicetree/bindings/usb/mediatek,mt6360-tcpc.yaml +++ b/Documentation/devicetree/bindings/usb/mediatek,mt6360-tcpc.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/usb/mediatek,mt6360-tcpc.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Mediatek MT6360 Type-C Port Switch and Power Delivery controller DT bindings +title: Mediatek MT6360 Type-C Port Switch and Power Delivery controller maintainers: - ChiYuan Huang diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml index 939623867a646d5cff3a622c48e822061b3fe135..a3c37944c6305f071f7c10407c2e4475882718db 100644 --- a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml +++ b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml @@ -28,6 +28,7 @@ properties: - mediatek,mt7622-xhci - mediatek,mt7623-xhci - mediatek,mt7629-xhci + - mediatek,mt7986-xhci - mediatek,mt8173-xhci - mediatek,mt8183-xhci - mediatek,mt8186-xhci diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml b/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml index
80750b0f458a82b48856920c99eb43487be73fa3..7168110e2f9de8be6e780195c28de9a0b8e21a53 100644 --- a/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml +++ b/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml @@ -24,6 +24,7 @@ properties: - mediatek,mt2712-mtu3 - mediatek,mt8173-mtu3 - mediatek,mt8183-mtu3 + - mediatek,mt8186-mtu3 - mediatek,mt8188-mtu3 - mediatek,mt8192-mtu3 - mediatek,mt8195-mtu3 diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml b/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml index fd6e7c81426ea4edd5de2fd9913547d1fa5c9e74..f6cb19efd98b1246c6e4505037a23e00b0352906 100644 --- a/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml +++ b/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/usb/nvidia,tegra-xudc.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Device tree binding for NVIDIA Tegra XUSB device mode controller (XUDC) +title: NVIDIA Tegra XUSB device mode controller (XUDC) description: The Tegra XUDC controller supports both USB 2.0 HighSpeed/FullSpeed and diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.yaml b/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.yaml index 4a6616bf9bab7be80cd6af52d19a3ce7ef4b205b..d6ca8c93073dfdd93849345c9e3327b68426234e 100644 --- a/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.yaml +++ b/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.yaml @@ -186,9 +186,7 @@ examples: nvidia,xusb-padctl = <&padctl>; - phys = <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-1}>, /* mini-PCIe USB */ - <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-2}>, /* USB A */ - <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-0}>; /* USB A */ + phys = <&phy_usb2_1>, <&phy_usb2_2>, <&phy_pcie_0>; phy-names = "usb2-1", "usb2-2", "usb3-0"; avddio-pex-supply = <&vdd_1v05_run>; diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra186-xusb.yaml b/Documentation/devicetree/bindings/usb/nvidia,tegra186-xusb.yaml index 6f62944fc5971c40fb72a39cc60eacd791c6ee39..a04c6ce1e0f686648d971548f99374112a653cdc 100644 --- a/Documentation/devicetree/bindings/usb/nvidia,tegra186-xusb.yaml +++ b/Documentation/devicetree/bindings/usb/nvidia,tegra186-xusb.yaml @@ -166,8 +166,6 @@ examples: #address-cells = <1>; #size-cells = <0>; - phys = <&{/padctl@3520000/pads/usb2/lanes/usb2-0}>, - <&{/padctl@3520000/pads/usb2/lanes/usb2-1}>, - <&{/padctl@3520000/pads/usb3/lanes/usb3-0}>; + phys = <&phy_usb2_0>, <&phy_usb2_1>, <&phy_usb3_0>; phy-names = "usb2-0", "usb2-1", "usb3-0"; }; diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra194-xusb.yaml b/Documentation/devicetree/bindings/usb/nvidia,tegra194-xusb.yaml index 65ae9ae9b0b7a56f678f454d12f19a8bd2c741a4..b356793f73a15f4193432706357ad6d7d19c488c 100644 --- a/Documentation/devicetree/bindings/usb/nvidia,tegra194-xusb.yaml +++ b/Documentation/devicetree/bindings/usb/nvidia,tegra194-xusb.yaml @@ -169,11 +169,7 @@ examples: nvidia,xusb-padctl = <&xusb_padctl>; - phys = <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-0}>, - <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-1}>, - <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-3}>, - <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-0}>, - <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-2}>, - <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-3}>; + phys = <&phy_usb2_0>, <&phy_usb2_1>, <&phy_usb2_3>, <&phy_usb3_0>, + <&phy_usb3_2>, <&phy_usb3_3>; phy-names = "usb2-0", "usb2-1", "usb2-3", "usb3-0", "usb3-2", "usb3-3"; }; 
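For reference, the conversion polynomial added to the ti,j72xx-thermal.yaml description above is easier to check when written out as code. The following is a minimal C sketch of that formula in Horner form; the function name and the use of double precision are illustrative assumptions and are not taken from the binding or from the kernel driver::

    /*
     * Illustrative only: evaluates the Temp(C) polynomial quoted in the
     * ti,j72xx-thermal.yaml description, rewritten in Horner form to
     * limit rounding error.  'x' is the raw value read from the sensor.
     */
    static double j72xx_code_to_celsius(double x)
    {
            return ((((-9.2627e-12 * x) + 6.0373e-08) * x - 1.7058e-04) * x
                    + 3.2512e-01) * x - 4.9003e+01;
    }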
diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra210-xusb.yaml b/Documentation/devicetree/bindings/usb/nvidia,tegra210-xusb.yaml index da1e1ec0e7c862342e39c76e8641a26ed6f1731b..90296613b3a59f9fddc3d95280c8b2be4ca4d549 100644 --- a/Documentation/devicetree/bindings/usb/nvidia,tegra210-xusb.yaml +++ b/Documentation/devicetree/bindings/usb/nvidia,tegra210-xusb.yaml @@ -173,12 +173,8 @@ examples: nvidia,xusb-padctl = <&padctl>; - phys = <&{/padctl@7009f000/pads/usb2/lanes/usb2-0}>, - <&{/padctl@7009f000/pads/usb2/lanes/usb2-1}>, - <&{/padctl@7009f000/pads/usb2/lanes/usb2-2}>, - <&{/padctl@7009f000/pads/usb2/lanes/usb2-3}>, - <&{/padctl@7009f000/pads/pcie/lanes/pcie-6}>, - <&{/padctl@7009f000/pads/pcie/lanes/pcie-5}>; + phys = <&phy_usb2_0>, <&phy_usb2_1>, <&phy_usb2_2>, <&phy_usb2_3>, + <&phy_pcie_6>, <&phy_pcie_5>; phy-names = "usb2-0", "usb2-1", "usb2-2", "usb2-3", "usb3-0", "usb3-1"; dvddio-pex-supply = <&vdd_pex_1v05>; diff --git a/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml b/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml index f238848ad094d88c14d75c834a27798d528a911a..e2743a4b952081a375d4d54501e14198efc5e8e8 100644 --- a/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml +++ b/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/usb/nxp,isp1760.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: NXP ISP1760 family controller bindings +title: NXP ISP1760 family controller maintainers: - Sebastian Siewior diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml index a6e6abb4dfa9b7e5026b43ad59dc86f1701335a6..a3f8a3f4985297ee9abf7b0913d5245be3e61fa7 100644 --- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml +++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml @@ -39,6 +39,7 @@ properties: - qcom,sm8250-dwc3 - qcom,sm8350-dwc3 - qcom,sm8450-dwc3 + - qcom,sm8550-dwc3 - const: qcom,dwc3 reg: @@ -301,6 +302,7 @@ allOf: - qcom,sm8150-dwc3 - qcom,sm8250-dwc3 - qcom,sm8450-dwc3 + - qcom,sm8550-dwc3 then: properties: clocks: @@ -358,6 +360,7 @@ allOf: - qcom,sm8250-dwc3 - qcom,sm8350-dwc3 - qcom,sm8450-dwc3 + - qcom,sm8550-dwc3 then: properties: interrupts: diff --git a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml index 50f2b505bdeb6a43b338bb3b58af2754efcfaa98..623d04a88a81750ddc0e3359d5e8c9b24a875206 100644 --- a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml +++ b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/usb/realtek,rts5411.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for the Realtek RTS5411 USB 3.0 hub controller +title: Realtek RTS5411 USB 3.0 hub controller maintainers: - Matthias Kaehlcke diff --git a/Documentation/devicetree/bindings/usb/richtek,rt1719.yaml b/Documentation/devicetree/bindings/usb/richtek,rt1719.yaml index 65a93f7738d5db5da345f846260592139d0ca6a5..e3e87e4d329259cc19a51e715d41f73d5e00eb2d 100644 --- a/Documentation/devicetree/bindings/usb/richtek,rt1719.yaml +++ b/Documentation/devicetree/bindings/usb/richtek,rt1719.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/usb/richtek,rt1719.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Richtek RT1719 sink-only Type-C PD controller bindings +title: Richtek RT1719 sink-only Type-C PD controller maintainers: - ChiYuan Huang diff --git 
a/Documentation/devicetree/bindings/usb/st,stusb160x.yaml b/Documentation/devicetree/bindings/usb/st,stusb160x.yaml index b8974807b666de2092531883538504e52da2603b..ffcd9897ea38de7327b4b2364b0a70007e895631 100644 --- a/Documentation/devicetree/bindings/usb/st,stusb160x.yaml +++ b/Documentation/devicetree/bindings/usb/st,stusb160x.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/usb/st,stusb160x.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: STMicroelectronics STUSB160x Type-C controller bindings +title: STMicroelectronics STUSB160x Type-C controller maintainers: - Amelie Delaunay diff --git a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml index eedde385d2994982dae00fb2c2fe7cce1249c21f..f81ba3e902973c087680f7fc0b8de5345548915d 100644 --- a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml +++ b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/usb/ti,j721e-usb.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Bindings for the TI wrapper module for the Cadence USBSS-DRD controller +title: TI wrapper module for the Cadence USBSS-DRD controller maintainers: - Roger Quadros diff --git a/Documentation/devicetree/bindings/usb/ti,tps6598x.yaml b/Documentation/devicetree/bindings/usb/ti,tps6598x.yaml index a4c53b1f1af3572fa1122092db1176915084ce38..fef4acdc4773bedab11ba2e366bd0a30c9765a49 100644 --- a/Documentation/devicetree/bindings/usb/ti,tps6598x.yaml +++ b/Documentation/devicetree/bindings/usb/ti,tps6598x.yaml @@ -4,7 +4,7 @@ $id: "http://devicetree.org/schemas/usb/ti,tps6598x.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Texas Instruments 6598x Type-C Port Switch and Power Delivery controller DT bindings +title: Texas Instruments 6598x Type-C Port Switch and Power Delivery controller maintainers: - Bryan O'Donoghue diff --git a/Documentation/devicetree/bindings/usb/ti,usb8041.yaml b/Documentation/devicetree/bindings/usb/ti,usb8041.yaml index e04fbd8ab0b7d1c246cee4af63e89a43b173cf02..88ea6c952c66cdf3b7f5cc52ced7136d5809f530 100644 --- a/Documentation/devicetree/bindings/usb/ti,usb8041.yaml +++ b/Documentation/devicetree/bindings/usb/ti,usb8041.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/usb/ti,usb8041.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Binding for the TI USB8041 USB 3.0 hub controller +title: TI USB8041 USB 3.0 hub controller maintainers: - Alexander Stein diff --git a/Documentation/devicetree/bindings/usb/usb-device.yaml b/Documentation/devicetree/bindings/usb/usb-device.yaml index b77960a7a37ba9cfd2e27627d8ba5d5cea6708d4..7a771125ec76c30afe8ab2248ac0aec2751ab576 100644 --- a/Documentation/devicetree/bindings/usb/usb-device.yaml +++ b/Documentation/devicetree/bindings/usb/usb-device.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/usb/usb-device.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: The device tree bindings for the Generic USB Device +title: Generic USB Device maintainers: - Greg Kroah-Hartman diff --git a/Documentation/devicetree/bindings/usb/usb-drd.yaml b/Documentation/devicetree/bindings/usb/usb-drd.yaml index 1567549b05ceca125c7b0bcbf5dab35bcbf44d9e..114fb5dc0498a63ef13a8546db7d823fea77ac7c 100644 --- a/Documentation/devicetree/bindings/usb/usb-drd.yaml +++ b/Documentation/devicetree/bindings/usb/usb-drd.yaml @@ -27,6 +27,7 @@ properties: should default to OTG. 
$ref: /schemas/types.yaml#/definitions/string enum: [host, peripheral, otg] + default: otg hnp-disable: description: diff --git a/Documentation/devicetree/bindings/usb/usb-nop-xceiv.yaml b/Documentation/devicetree/bindings/usb/usb-nop-xceiv.yaml index 2824c17285eeafc8dd3ceafc46e20d13105f914c..326131dcf14d7abf9766f1a8fc4d0cb17ecd02d3 100644 --- a/Documentation/devicetree/bindings/usb/usb-nop-xceiv.yaml +++ b/Documentation/devicetree/bindings/usb/usb-nop-xceiv.yaml @@ -39,6 +39,11 @@ properties: the VBus line. $ref: /schemas/types.yaml#/definitions/phandle + wakeup-source: + description: + Specify if the USB phy can detect the remote wakeup signal + while the system sleeps. + required: - compatible - '#phy-cells' diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt deleted file mode 100644 index 1a934eab175ee294f03232dda1116d51cb55d367..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/usb/usb251xb.txt +++ /dev/null @@ -1,89 +0,0 @@ -Microchip USB 2.0 Hi-Speed Hub Controller - -The device node for the configuration of a Microchip USB251x/xBi USB 2.0 -Hi-Speed Controller. - -Required properties : - - compatible : Should be "microchip,usb251xb" or one of the specific types: - "microchip,usb2512b", "microchip,usb2512bi", "microchip,usb2513b", - "microchip,usb2513bi", "microchip,usb2514b", "microchip,usb2514bi", - "microchip,usb2517", "microchip,usb2517i", "microchip,usb2422" - - reg : I2C address on the selected bus (default is <0x2C>) - -Optional properties : - - reset-gpios : Should specify the gpio for hub reset - - vdd-supply : Should specify the phandle to the regulator supplying vdd - - skip-config : Skip Hub configuration, but only send the USB-Attach command - - vendor-id : Set USB Vendor ID of the hub (16 bit, default is 0x0424) - - product-id : Set USB Product ID of the hub (16 bit, default depends on type) - - device-id : Set USB Device ID of the hub (16 bit, default is 0x0bb3) - - language-id : Set USB Language ID (16 bit, default is 0x0000) - - manufacturer : Set USB Manufacturer string (max 31 characters long) - - product : Set USB Product string (max 31 characters long) - - serial : Set USB Serial string (max 31 characters long) - - {bus,self}-powered : selects between self- and bus-powered operation - (boolean, default is self-powered) - - disable-hi-speed : disable USB Hi-Speed support (boolean) - - {multi,single}-tt : selects between multi- and single-transaction-translator - (boolean, default is multi-tt) - - disable-eop : disable End of Packet generation in full-speed mode (boolean) - - {ganged,individual}-sensing : select over-current sense type in self-powered - mode (boolean, default is individual) - - {ganged,individual}-port-switching : select port power switching mode - (boolean, default is individual) - - dynamic-power-switching : enable auto-switching from self- to bus-powered - operation if the local power source is removed or unavailable (boolean) - - oc-delay-us : Delay time (in microseconds) for filtering the over-current - sense inputs. Valid values are 100, 4000, 8000 (default) and 16000. If - an invalid value is given, the default is used instead.
- - compound-device : indicate the hub is part of a compound device (boolean) - - port-mapping-mode : enable port mapping mode (boolean) - - led-{usb,speed}-mode : led usb/speed indication mode selection - (boolean, default is speed mode) - - string-support : enable string descriptor support (required for manufacturer, - product and serial string configuration) - - non-removable-ports : Should specify the ports which have a non-removable - device connected. - - sp-disabled-ports : Specifies the ports which will be self-power disabled - - bp-disabled-ports : Specifies the ports which will be bus-power disabled - - sp-max-total-current-microamp: Specifies max current consumed by the hub - from VBUS when operating in self-powered hub. It includes the hub - silicon along with all associated circuitry including a permanently - attached peripheral (range: 0 - 100000 uA, default 1000 uA) - - bp-max-total-current-microamp: Specifies max current consumed by the hub - from VBUS when operating in self-powered hub. It includes the hub - silicon along with all associated circuitry including a permanently - attached peripheral (range: 0 - 510000 uA, default 100000 uA) - - sp-max-removable-current-microamp: Specifies max current consumed by the hub - from VBUS when operating in self-powered hub. It includes the hub - silicon along with all associated circuitry excluding a permanently - attached peripheral (range: 0 - 100000 uA, default 1000 uA) - - bp-max-removable-current-microamp: Specifies max current consumed by the hub - from VBUS when operating in self-powered hub. It includes the hub - silicon along with all associated circuitry excluding a permanently - attached peripheral (range: 0 - 510000 uA, default 100000 uA) - - power-on-time-ms : Specifies the time it takes from the time the host - initiates the power-on sequence to a port until the port has adequate - power. The value is given in ms in a 0 - 510 range (default is 100ms). - - swap-dx-lanes : Specifies the ports which will swap the differential-pair - (D+/D-), default is not-swapped. 
- -Examples: - usb2512b@2c { - compatible = "microchip,usb2512b"; - reg = <0x2c>; - reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; - }; - - usb2514b@2c { - compatible = "microchip,usb2514b"; - reg = <0x2c>; - vendor-id = /bits/ 16 <0x0000>; - product-id = /bits/ 16 <0x0000>; - string-support; - manufacturer = "Foo"; - product = "Foo-Bar"; - serial = "1234567890A"; - /* correct misplaced usb connectors on port 1,2 */ - swap-dx-lanes = <1 2>; - }; diff --git a/Documentation/devicetree/bindings/usb/usb251xb.yaml b/Documentation/devicetree/bindings/usb/usb251xb.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4d153081681784f2a790be00af89d0f2f444eb23 --- /dev/null +++ b/Documentation/devicetree/bindings/usb/usb251xb.yaml @@ -0,0 +1,271 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/usb/usb251xb.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Microchip USB 2.0 Hi-Speed Hub Controller + +maintainers: + - Richard Leitner + +properties: + compatible: + enum: + - microchip,usb2422 + - microchip,usb2512b + - microchip,usb2512bi + - microchip,usb2513b + - microchip,usb2513bi + - microchip,usb2514b + - microchip,usb2514bi + - microchip,usb2517 + - microchip,usb2517i + - microchip,usb251xb + + reg: + maxItems: 1 + + reset-gpios: + description: | + Should specify the gpio for hub reset + + vdd-supply: + description: | + Should specify the phandle to the regulator supplying vdd + + skip-config: + $ref: /schemas/types.yaml#/definitions/flag + description: | + Skip Hub configuration, but only send the USB-Attach command + + vendor-id: + $ref: /schemas/types.yaml#/definitions/uint16 + default: 0x0424 + description: | + Set USB Vendor ID of the hub + + product-id: + $ref: /schemas/types.yaml#/definitions/uint16 + description: | + Set USB Product ID of the hub + + device-id: + $ref: /schemas/types.yaml#/definitions/uint16 + default: 0x0bb3 + description: | + Set USB Device ID of the hub + + language-id: + $ref: /schemas/types.yaml#/definitions/uint16 + default: 0x0000 + description: | + Set USB Language ID + + manufacturer: + $ref: /schemas/types.yaml#/definitions/string + description: | + Set USB Manufacturer string (max 31 characters long) + + product: + $ref: /schemas/types.yaml#/definitions/string + description: | + Set USB Product string (max 31 characters long) + + serial: + $ref: /schemas/types.yaml#/definitions/string + description: | + Set USB Serial string (max 31 characters long) + + bus-powered: + $ref: /schemas/types.yaml#/definitions/flag + description: | + selects between self- and bus-powered operation + (boolean, default is self-powered) + + self-powered: + $ref: /schemas/types.yaml#/definitions/flag + description: | + selects between self- and bus-powered operation + (boolean, default is self-powered) + + disable-hi-speed: + $ref: /schemas/types.yaml#/definitions/flag + description: | + disable USB Hi-Speed support (boolean) + + multi-tt: + $ref: /schemas/types.yaml#/definitions/flag + description: | + selects between multi- and single-transaction-translator + (boolean, default is multi-tt) + + single-tt: + $ref: /schemas/types.yaml#/definitions/flag + description: | + selects between multi- and single-transaction-translator + (boolean, default is multi-tt) + + disable-eop: + $ref: /schemas/types.yaml#/definitions/flag + description: | + disable End of Packet generation in full-speed mode (boolean) + + ganged-sensing: + $ref: /schemas/types.yaml#/definitions/flag + description: | + select over-current 
sense type in self-powered mode + (boolean, default is individual) + + individual-sensing: + $ref: /schemas/types.yaml#/definitions/flag + description: | + select over-current sense type in self-powered mode + (boolean, default is individual) + + ganged-port-switching: + $ref: /schemas/types.yaml#/definitions/flag + description: | + select port power switching mode (boolean, default is individual) + + individual-port-switching: + $ref: /schemas/types.yaml#/definitions/flag + description: | + select port power switching mode (boolean, default is individual) + + dynamic-power-switching: + $ref: /schemas/types.yaml#/definitions/flag + description: | + enable auto-switching from self- to bus-powered operation if the + local power source is removed or unavailable (boolean) + + oc-delay-us: + enum: [100, 4000, 8000, 16000] + default: 8000 + description: | + Delay time (in microseconds) for filtering the over-current sense + inputs. If an invalid value is given, the default is used instead. + + compound-device: + $ref: /schemas/types.yaml#/definitions/flag + description: | + indicate the hub is part of a compound device (boolean) + + port-mapping-mode: + $ref: /schemas/types.yaml#/definitions/flag + description: | + enable port mapping mode (boolean) + + led-usb-mode: + $ref: /schemas/types.yaml#/definitions/flag + description: | + led usb/speed indication mode selection (boolean, default is speed mode) + + led-speed-mode: + $ref: /schemas/types.yaml#/definitions/flag + description: | + led usb/speed indication mode selection (boolean, default is speed mode) + + string-support: + $ref: /schemas/types.yaml#/definitions/flag + description: | + enable string descriptor support (required for manufacturer, product + and serial string configuration) + + non-removable-ports: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: | + Should specify the ports which have a non-removable device connected. + + sp-disabled-ports: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: | + Specifies the ports which will be self-power disabled + + bp-disabled-ports: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: | + Specifies the ports which will be bus-power disabled + + sp-max-total-current-microamp: + maximum: 100000 + default: 1000 + description: | + Specifies max current consumed by the hub from VBUS when + operating in self-powered hub. It includes the hub silicon + along with all associated circuitry including a permanently + attached peripheral. + + bp-max-total-current-microamp: + maximum: 510000 + default: 100000 + description: | + Specifies max current consumed by the hub from VBUS when + operating in self-powered hub. It includes the hub silicon + along with all associated circuitry including a permanently + attached peripheral. + + sp-max-removable-current-microamp: + maximum: 100000 + default: 1000 + description: | + Specifies max current consumed by the hub from VBUS when + operating in self-powered hub. It includes the hub silicon + along with all associated circuitry excluding a permanently + attached peripheral. + + bp-max-removable-current-microamp: + maximum: 510000 + default: 100000 + description: | + Specifies max current consumed by the hub from VBUS when + operating in self-powered hub. It includes the hub silicon + along with all associated circuitry excluding a permanently + attached peripheral. 
+ + power-on-time-ms: + maximum: 510 + default: 100 + description: | + Specifies the time it takes from the time the host initiates the + power-on sequence to a port until the port has adequate power. + + swap-dx-lanes: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: | + Specifies the ports which will swap the differential-pair (D+/D-), + default is not-swapped. + +additionalProperties: false + +required: + - compatible + - reg + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + usb-hub@2c { + compatible = "microchip,usb2512b"; + reg = <0x2c>; + reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; + }; + + usb-hub@2d { + compatible = "microchip,usb2514b"; + reg = <0x2d>; + vendor-id = /bits/ 16 <0x0000>; + product-id = /bits/ 16 <0x0000>; + string-support; + manufacturer = "Foo"; + product = "Foo-Bar"; + serial = "1234567890A"; + /* correct misplaced usb connectors on port 1,2 */ + swap-dx-lanes = <1 2>; + }; + }; diff --git a/Documentation/devicetree/bindings/usb/willsemi,wusb3801.yaml b/Documentation/devicetree/bindings/usb/willsemi,wusb3801.yaml index 5aa4ffd67119136ed5b2509a21f1e8b8a0b273b8..937670de01cc2c77e662d9e1632bbc4dae6cb6e9 100644 --- a/Documentation/devicetree/bindings/usb/willsemi,wusb3801.yaml +++ b/Documentation/devicetree/bindings/usb/willsemi,wusb3801.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/usb/willsemi,wusb3801.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: WUSB3801 Type-C port controller DT bindings +title: WUSB3801 Type-C port controller description: The Will Semiconductor WUSB3801 is a USB Type-C port controller which diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index d5f1c4c1ef45df04b8f1f50213cf8527751362c7..70ffb3780621bdf4a7c6e6e951de4a92cd11165e 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -198,10 +198,10 @@ patternProperties: description: Bosch Sensortec GmbH "^boundary,.*": description: Boundary Devices Inc. - "^broadmobi,.*": - description: Shanghai Broadmobi Communication Technology Co.,Ltd. "^brcm,.*": description: Broadcom Corporation + "^broadmobi,.*": + description: Shanghai Broadmobi Communication Technology Co.,Ltd. "^bsh,.*": description: BSH Hausgeraete GmbH "^bticino,.*": @@ -494,6 +494,8 @@ patternProperties: description: GE Fanuc Intelligent Platforms Embedded Systems, Inc. "^gemei,.*": description: Gemei Digital Technology Co., Ltd. + "^genesys,.*": + description: Genesys Logic, Inc. "^geniatech,.*": description: Geniatech, Inc. "^giantec,.*": @@ -547,6 +549,8 @@ patternProperties: description: Hitex Development Tools "^holt,.*": description: Holt Integrated Circuits, Inc. + "^holtek,.*": + description: Holtek Semiconductor, Inc. "^honestar,.*": description: Honestar Technologies Co., Ltd. "^honeywell,.*": @@ -559,8 +563,6 @@ patternProperties: description: Hewlett Packard Enterprise "^hsg,.*": description: HannStar Display Co. - "^holtek,.*": - description: Holtek Semiconductor, Inc. "^huawei,.*": description: Huawei Technologies Co., Ltd. "^hugsun,.*": @@ -605,12 +607,10 @@ patternProperties: description: Infineon Technologies "^inforce,.*": description: Inforce Computing - "^ingrasys,.*": - description: Ingrasys Technology Inc. - "^ivo,.*": - description: InfoVision Optoelectronics Kunshan Co. Ltd.
"^ingenic,.*": description: Ingenic Semiconductor + "^ingrasys,.*": + description: Ingrasys Technology Inc. "^injoinic,.*": description: Injoinic Technology Corp. "^innocomm,.*": @@ -647,6 +647,8 @@ patternProperties: description: ITEAD Intelligent Systems Co.Ltd "^itian,.*": description: ITian Corporation + "^ivo,.*": + description: InfoVision Optoelectronics Kunshan Co. Ltd. "^iwave,.*": description: iWave Systems Technologies Pvt. Ltd. "^jadard,.*": @@ -895,14 +897,14 @@ patternProperties: description: Shenzhen Netxeon Technology CO., LTD "^neweast,.*": description: Guangdong Neweast Optoelectronics CO., LTD + "^newhaven,.*": + description: Newhaven Display International "^newvision,.*": description: New Vision Display (Shenzhen) Co., Ltd. "^nexbox,.*": description: Nexbox "^nextthing,.*": description: Next Thing Co. - "^newhaven,.*": - description: Newhaven Display International "^ni,.*": description: National Instruments "^nintendo,.*": @@ -941,6 +943,8 @@ patternProperties: description: One Laptop Per Child "^oneplus,.*": description: OnePlus Technology (Shenzhen) Co., Ltd. + "^onie,.*": + description: Open Network Install Environment group "^onion,.*": description: Onion Corporation "^onnn,.*": @@ -1049,10 +1053,10 @@ patternProperties: description: QEMU, a generic and open source machine emulator and virtualizer "^qi,.*": description: Qi Hardware - "^qihua,.*": - description: Chengdu Kaixuan Information Technology Co., Ltd. "^qiaodian,.*": description: QiaoDian XianShi Corporation + "^qihua,.*": + description: Chengdu Kaixuan Information Technology Co., Ltd. "^qishenglong,.*": description: Shenzhen QiShenglong Industrialist Co., Ltd. "^qnap,.*": @@ -1079,22 +1083,22 @@ patternProperties: description: reMarkable AS "^renesas,.*": description: Renesas Electronics Corporation - "^rex,.*": - description: iMX6 Rex Project "^rervision,.*": description: Shenzhen Rervision Technology Co., Ltd. "^revotics,.*": description: Revolution Robotics, Inc. (Revotics) + "^rex,.*": + description: iMX6 Rex Project "^richtek,.*": description: Richtek Technology Corporation "^ricoh,.*": description: Ricoh Co. Ltd. "^rikomagic,.*": description: Rikomagic Tech Corp. Ltd - "^riscv,.*": - description: RISC-V Foundation "^riot,.*": description: Embest RIoT + "^riscv,.*": + description: RISC-V Foundation "^rockchip,.*": description: Fuzhou Rockchip Electronics Co., Ltd "^rocktech,.*": @@ -1157,6 +1161,8 @@ patternProperties: description: Si-En Technology Ltd. "^si-linux,.*": description: Silicon Linux Corporation + "^siemens,.*": + description: Siemens AG "^sifive,.*": description: SiFive, Inc. "^sigma,.*": @@ -1179,8 +1185,8 @@ patternProperties: description: Siliconfile Technologies lnc. "^siliconmitus,.*": description: Silicon Mitus, Inc. - "^siemens,.*": - description: Siemens AG + "^silvaco,.*": + description: Silvaco, Inc. "^simtek,.*": description: Cypress Semiconductor Corporation (Simtek Corporation) "^sinlinx,.*": @@ -1266,8 +1272,6 @@ patternProperties: description: Sun Microsystems, Inc "^supermicro,.*": description: Super Micro Computer, Inc. - "^silvaco,.*": - description: Silvaco, Inc. "^swir,.*": description: Sierra Wireless "^syna,.*": @@ -1289,16 +1293,18 @@ patternProperties: description: Shenzhen City Tang Cheng Technology Co., Ltd. "^tdo,.*": description: Shangai Top Display Optoelectronics Co., Ltd + "^team-source-display,.*": + description: Shenzhen Team Source Display Technology Co., Ltd. 
(TSD) "^technexion,.*": description: TechNexion "^technologic,.*": description: Technologic Systems + "^techstar,.*": + description: Shenzhen Techstar Electronics Co., Ltd. "^teltonika,.*": description: Teltonika Networks "^tempo,.*": description: Tempo Semiconductor - "^techstar,.*": - description: Shenzhen Techstar Electronics Co., Ltd. "^terasic,.*": description: Terasic Inc. "^tesla,.*": @@ -1352,10 +1358,6 @@ patternProperties: description: Tronsmart "^truly,.*": description: Truly Semiconductors Limited - "^visionox,.*": - description: Visionox - "^team-source-display,.*": - description: Shenzhen Team Source Display Technology Co., Ltd. (TSD) "^tsd,.*": description: Theobroma Systems Design und Consulting GmbH "^tyan,.*": @@ -1364,10 +1366,10 @@ patternProperties: description: u-blox "^u-boot,.*": description: U-Boot bootloader - "^ucrobotics,.*": - description: uCRobotics "^ubnt,.*": description: Ubiquiti Networks + "^ucrobotics,.*": + description: uCRobotics "^udoo,.*": description: Udoo "^ugoos,.*": @@ -1406,6 +1408,8 @@ patternProperties: description: Used for virtual device without specific vendor. "^vishay,.*": description: Vishay Intertechnology, Inc + "^visionox,.*": + description: Visionox "^vitesse,.*": description: Vitesse Semiconductor Corporation "^vivante,.*": @@ -1420,6 +1424,8 @@ patternProperties: description: Vision Optical Technology Co., Ltd. "^vxt,.*": description: VXT Ltd + "^wanchanglong,.*": + description: Wanchanglong Electronics Technology(SHENZHEN)Co.,Ltd. "^wand,.*": description: Wandbord (Technexion) "^waveshare,.*": @@ -1460,8 +1466,6 @@ patternProperties: description: Wondermedia Technologies, Inc. "^wobo,.*": description: Wobo - "^wanchanglong,.*": - description: Wanchanglong Electronics Technology(SHENZHEN)Co.,Ltd. "^x-powers,.*": description: X-Powers "^xen,.*": @@ -1504,10 +1508,10 @@ patternProperties: description: Shenzhen Yashi Changhua Intelligent Technology Co., Ltd. "^ysoft,.*": description: Y Soft Corporation a.s. - "^zealz,.*": - description: Zealz "^zarlink,.*": description: Zarlink Semiconductor + "^zealz,.*": + description: Zealz "^zeitec,.*": description: ZEITEC Semiconductor Co., LTD. 
"^zidoo,.*": diff --git a/Documentation/devicetree/bindings/virtio/virtio-device.yaml b/Documentation/devicetree/bindings/virtio/virtio-device.yaml index 1778ea9b5aa500e60906b7e741c538118b75959e..8c6919ba9497e26cccbdf7f377abfe09e6c09cba 100644 --- a/Documentation/devicetree/bindings/virtio/virtio-device.yaml +++ b/Documentation/devicetree/bindings/virtio/virtio-device.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/virtio/virtio-device.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Virtio device bindings +title: Virtio device maintainers: - Viresh Kumar diff --git a/Documentation/devicetree/bindings/watchdog/fsl,scu-wdt.yaml b/Documentation/devicetree/bindings/watchdog/fsl,scu-wdt.yaml index f84c45d687d7bc74c3d2bf7138a31a59be7582da..47701248cd8d4c921aa3fff7d559f8c36d914b50 100644 --- a/Documentation/devicetree/bindings/watchdog/fsl,scu-wdt.yaml +++ b/Documentation/devicetree/bindings/watchdog/fsl,scu-wdt.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/watchdog/fsl,scu-wdt.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: i.MX SCU Client Device Node - Watchdog bindings based on SCU Message Protocol +title: i.MX SCU Client Device Node - Watchdog Based on SCU Message Protocol maintainers: - Dong Aisheng diff --git a/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt b/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt deleted file mode 100644 index 198794963786bac2d46e4e9af0b39b28828669f2..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt +++ /dev/null @@ -1,28 +0,0 @@ -* GPIO-controlled Watchdog - -Required Properties: -- compatible: Should contain "linux,wdt-gpio". -- gpios: From common gpio binding; gpio connection to WDT reset pin. -- hw_algo: The algorithm used by the driver. Should be one of the - following values: - - toggle: Either a high-to-low or a low-to-high transition clears - the WDT counter. The watchdog timer is disabled when GPIO is - left floating or connected to a three-state buffer. - - level: Low or high level starts counting WDT timeout, - the opposite level disables the WDT. Active level is determined - by the GPIO flags. -- hw_margin_ms: Maximum time to reset watchdog circuit (milliseconds). - -Optional Properties: -- always-running: If the watchdog timer cannot be disabled, add this flag to - have the driver keep toggling the signal without a client. It will only cease - to toggle the signal when the device is open and the timeout elapsed. - -Example: - watchdog: watchdog { - /* ADM706 */ - compatible = "linux,wdt-gpio"; - gpios = <&gpio3 9 GPIO_ACTIVE_LOW>; - hw_algo = "toggle"; - hw_margin_ms = <1600>; - }; diff --git a/Documentation/devicetree/bindings/watchdog/linux,wdt-gpio.yaml b/Documentation/devicetree/bindings/watchdog/linux,wdt-gpio.yaml new file mode 100644 index 0000000000000000000000000000000000000000..50af79af6416ebcc24dc321e51c440d1c95c63ca --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/linux,wdt-gpio.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/watchdog/linux,wdt-gpio.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: GPIO-controlled Watchdog + +maintainers: + - Guenter Roeck + +properties: + compatible: + const: linux,wdt-gpio + + gpios: + description: gpio connection to WDT reset pin + maxItems: 1 + + hw_algo: + description: The algorithm used by the driver. 
+ enum: [ level, toggle ] + + hw_margin_ms: + description: Maximum time to reset watchdog circuit (milliseconds). + $ref: /schemas/types.yaml#/definitions/uint32 + + always-running: + type: boolean + description: + If the watchdog timer cannot be disabled, add this flag to have the driver + keep toggling the signal without a client. + It will only cease to toggle the signal when the device is open and the + timeout elapsed. + +required: + - compatible + - gpios + - hw_algo + - hw_margin_ms + +allOf: + - $ref: watchdog.yaml# + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + watchdog { + compatible = "linux,wdt-gpio"; + gpios = <&gpio3 9 GPIO_ACTIVE_LOW>; + hw_algo = "toggle"; + hw_margin_ms = <1600>; + }; diff --git a/Documentation/devicetree/bindings/watchdog/mediatek,mtk-wdt.yaml b/Documentation/devicetree/bindings/watchdog/mediatek,mtk-wdt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b3605608410c63e4ef5eb21ea02b47facab91680 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/mediatek,mtk-wdt.yaml @@ -0,0 +1,80 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/watchdog/mediatek,mtk-wdt.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek SoCs Watchdog timer + +maintainers: + - Matthias Brugger + +description: + The watchdog supports a pre-timeout interrupt that fires + timeout-sec/2 before the expiry. + +allOf: + - $ref: watchdog.yaml# + +properties: + compatible: + oneOf: + - enum: + - mediatek,mt2712-wdt + - mediatek,mt6589-wdt + - mediatek,mt6795-wdt + - mediatek,mt7986-wdt + - mediatek,mt8183-wdt + - mediatek,mt8186-wdt + - mediatek,mt8188-wdt + - mediatek,mt8192-wdt + - mediatek,mt8195-wdt + - items: + - enum: + - mediatek,mt2701-wdt + - mediatek,mt6582-wdt + - mediatek,mt6797-wdt + - mediatek,mt7622-wdt + - mediatek,mt7623-wdt + - mediatek,mt7629-wdt + - mediatek,mt8173-wdt + - mediatek,mt8516-wdt + - const: mediatek,mt6589-wdt + + reg: + maxItems: 1 + + interrupts: + items: + - description: Watchdog pre-timeout (bark) interrupt + + mediatek,disable-extrst: + description: Disable sending output reset signal + type: boolean + + '#reset-cells': + const: 1 + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + + soc { + #address-cells = <2>; + #size-cells = <2>; + + watchdog: watchdog@10007000 { + compatible = "mediatek,mt8183-wdt"; + reg = <0 0x10007000 0 0x100>; + interrupts = ; + mediatek,disable-extrst; + timeout-sec = <10>; + #reset-cells = <1>; + }; + }; diff --git a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt deleted file mode 100644 index 762c62e428ef269279c97c874f921399b218f8d9..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt +++ /dev/null @@ -1,42 +0,0 @@ -Mediatek SoCs Watchdog timer - -The watchdog supports a pre-timeout interrupt that fires timeout-sec/2 -before the expiry.
- -Required properties: - -- compatible should contain: - "mediatek,mt2701-wdt", "mediatek,mt6589-wdt": for MT2701 - "mediatek,mt2712-wdt": for MT2712 - "mediatek,mt6582-wdt", "mediatek,mt6589-wdt": for MT6582 - "mediatek,mt6589-wdt": for MT6589 - "mediatek,mt6797-wdt", "mediatek,mt6589-wdt": for MT6797 - "mediatek,mt7622-wdt", "mediatek,mt6589-wdt": for MT7622 - "mediatek,mt7623-wdt", "mediatek,mt6589-wdt": for MT7623 - "mediatek,mt7629-wdt", "mediatek,mt6589-wdt": for MT7629 - "mediatek,mt7986-wdt", "mediatek,mt6589-wdt": for MT7986 - "mediatek,mt8183-wdt": for MT8183 - "mediatek,mt8186-wdt", "mediatek,mt6589-wdt": for MT8186 - "mediatek,mt8516-wdt", "mediatek,mt6589-wdt": for MT8516 - "mediatek,mt8192-wdt": for MT8192 - "mediatek,mt8195-wdt", "mediatek,mt6589-wdt": for MT8195 - -- reg : Specifies base physical address and size of the registers. - -Optional properties: -- mediatek,disable-extrst: disable send output reset signal -- interrupts: Watchdog pre-timeout (bark) interrupt. -- timeout-sec: contains the watchdog timeout in seconds. -- #reset-cells: Should be 1. - -Example: - -watchdog: watchdog@10007000 { - compatible = "mediatek,mt8183-wdt", - "mediatek,mt6589-wdt"; - mediatek,disable-extrst; - reg = <0 0x10007000 0 0x100>; - interrupts = ; - timeout-sec = <10>; - #reset-cells = <1>; -}; diff --git a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml index 39736449ba6462f23b6f71b06e2355d8e25fa7ce..a8e266f80c20e8a3a3bc38380c0fa0827c68c5d5 100644 --- a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml +++ b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/watchdog/st,stm32-iwdg.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: STMicroelectronics STM32 Independent WatchDoG (IWDG) bindings +title: STMicroelectronics STM32 Independent WatchDoG (IWDG) maintainers: - Yannick Fertre diff --git a/Documentation/devicetree/bindings/watchdog/watchdog.yaml b/Documentation/devicetree/bindings/watchdog/watchdog.yaml index e3dfb02f0ca52467ea3276dee2b2b8ce34bf5779..fccae0d00110342e7c3ca58332f00b2bed9ee9f1 100644 --- a/Documentation/devicetree/bindings/watchdog/watchdog.yaml +++ b/Documentation/devicetree/bindings/watchdog/watchdog.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/watchdog/watchdog.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Watchdog Generic Bindings +title: Watchdog Common Properties maintainers: - Guenter Roeck diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst index 8abf9dda32417677f6271bee08d1c9ec1c206ed5..4249eb4239e07c80bb30574495ad5bcbd0534569 100644 --- a/Documentation/driver-api/driver-model/devres.rst +++ b/Documentation/driver-api/driver-model/devres.rst @@ -365,6 +365,7 @@ MEM devm_kmemdup() devm_krealloc() devm_kstrdup() + devm_kstrdup_const() devm_kvasprintf() devm_kzalloc() @@ -395,6 +396,8 @@ PCI PHY devm_usb_get_phy() + devm_usb_get_phy_by_node() + devm_usb_get_phy_by_phandle() devm_usb_put_phy() PINCTRL @@ -447,6 +450,7 @@ SERDEV SLAVE DMA ENGINE devm_acpi_dma_controller_register() + devm_acpi_dma_controller_free() SPI devm_spi_alloc_master() diff --git a/Documentation/driver-api/gpio/legacy.rst b/Documentation/driver-api/gpio/legacy.rst index 9b12eeb89170f3d9cfa0cb2347a33928d60bec5f..e17910cc3271937c198859a9adcea7c1f3842831 100644 --- a/Documentation/driver-api/gpio/legacy.rst +++ 
b/Documentation/driver-api/gpio/legacy.rst @@ -558,11 +558,6 @@ Platform Support To force-enable this framework, a platform's Kconfig will "select" GPIOLIB, else it is up to the user to configure support for GPIO. -It may also provide a custom value for ARCH_NR_GPIOS, so that it better -reflects the number of GPIOs in actual use on that platform, without -wasting static table space. (It should count both built-in/SoC GPIOs and -also ones on GPIO expanders. - If neither of these options are selected, the platform does not support GPIOs through GPIO-lib and the code cannot be enabled by the user. diff --git a/Documentation/driver-api/phy/phy.rst b/Documentation/driver-api/phy/phy.rst index 8fc1ce0bb905de86dd4aa332e1dfc82cdd030339..8e8b3e8f95238d18dadc97b09bfdb08bc6230b74 100644 --- a/Documentation/driver-api/phy/phy.rst +++ b/Documentation/driver-api/phy/phy.rst @@ -94,7 +94,8 @@ Inorder to dereference the private data (in phy_ops), the phy provider driver can use phy_set_drvdata() after creating the PHY and use phy_get_drvdata() in phy_ops to get back the private data. -4. Getting a reference to the PHY +Getting a reference to the PHY +============================== Before the controller can make use of the PHY, it has to get a reference to it. This framework provides the following APIs to get a reference to the PHY. @@ -130,6 +131,28 @@ the phy_init() and phy_exit() calls, and phy_power_on() and phy_power_off() calls are all NOP when applied to a NULL phy. The NULL phy is useful in devices for handling optional phy devices. +Order of API calls +================== + +The general order of calls should be:: + + [devm_][of_]phy_get() + phy_init() + phy_power_on() + [phy_set_mode[_ext]()] + ... + phy_power_off() + phy_exit() + [[of_]phy_put()] + +Some PHY drivers may not implement :c:func:`phy_init` or :c:func:`phy_power_on`, +but controllers should always call these functions to be compatible with other +PHYs. Some PHYs may require :c:func:`phy_set_mode `, while +others may use a default mode (typically configured via devicetree or other +firmware). For compatibility, you should always call this function if you know +what mode you will be using. Generally, this function should be called after +:c:func:`phy_power_on`, although some PHY drivers may allow it at any time. + Releasing a reference to the PHY ================================ diff --git a/Documentation/driver-api/serial/driver.rst b/Documentation/driver-api/serial/driver.rst index 23c6b956cd90d036d0cb352ec60bc26d245435a0..98d268555dcc93f664f2514a515926672ba3710a 100644 --- a/Documentation/driver-api/serial/driver.rst +++ b/Documentation/driver-api/serial/driver.rst @@ -78,6 +78,9 @@ Other functions uart_get_lsr_info uart_handle_dcd_change uart_handle_cts_change uart_try_toggle_sysrq uart_get_console +.. kernel-doc:: include/linux/serial_core.h + :identifiers: uart_port_tx_limited uart_port_tx + Other notes ----------- diff --git a/Documentation/driver-api/serial/serial-rs485.rst b/Documentation/driver-api/serial/serial-rs485.rst index 6ebad75c74ed95f3a1ff69cc65e7c5a44e3f20a5..dce061ef7647704c3e7e3aece026666dd6ca6fb3 100644 --- a/Documentation/driver-api/serial/serial-rs485.rst +++ b/Documentation/driver-api/serial/serial-rs485.rst @@ -29,23 +29,28 @@ RS485 Serial Communications 3. Data Structures Already Available in the Kernel ================================================== - The Linux kernel provides the serial_rs485 structure (see [1]) to handle - RS485 communications. 
This data structure is used to set and configure RS485
+ parameters in the platform data and in ioctls.
- The device tree can also provide RS485 boot time parameters (see [2]
- for bindings). The driver is in charge of filling this data structure from
- the values given by the device tree.
+ The device tree can also provide RS485 boot time parameters
+ [#DT-bindings]_. The serial core fills the struct serial_rs485 from the
+ values given by the device tree when the driver calls
+ uart_get_rs485_mode().
Any driver for devices capable of working both as RS232 and RS485 should
- implement the rs485_config callback and provide rs485_supported in the
- uart_port structure. The serial core calls rs485_config to do the device
- specific part in response to TIOCSRS485 ioctl (see below). The rs485_config
- callback receives a pointer to a sanitizated serial_rs485 structure. The
- serial_rs485 userspace provides is sanitized before calling rs485_config
- using rs485_supported that indicates what RS485 features the driver supports
- for the uart_port. TIOCGRS485 ioctl can be used to read back the
- serial_rs485 structure matching to the current configuration.
+ implement the ``rs485_config`` callback and provide ``rs485_supported``
+ in the ``struct uart_port``. The serial core calls ``rs485_config`` to do
+ the device specific part in response to TIOCSRS485 ioctl (see below). The
+ ``rs485_config`` callback receives a pointer to a sanitized struct
+ serial_rs485. The struct serial_rs485 userspace provides is sanitized
+ before calling ``rs485_config`` using ``rs485_supported``, which indicates
+ what RS485 features the driver supports for the ``struct uart_port``.
+ TIOCGRS485 ioctl can be used to read back the struct serial_rs485
+ matching the current configuration.
+
+.. kernel-doc:: include/uapi/linux/serial.h
+   :identifiers: serial_rs485 uart_get_rs485_mode
4. Usage from user-level ========================
@@ -103,29 +108,28 @@ RS485 Serial Communications ========================
The Linux kernel provides addressing mode for multipoint RS-485 serial
- communications line. The addressing mode is enabled with SER_RS485_ADDRB
- flag in serial_rs485. Struct serial_rs485 has two additional flags and
- fields for enabling receive and destination addresses.
+ communications line. The addressing mode is enabled with
+ ``SER_RS485_ADDRB`` flag in struct serial_rs485. The struct serial_rs485
+ has two additional flags and fields for enabling receive and destination
+ addresses.
Address mode flags:
- - SER_RS485_ADDRB: Enabled addressing mode (sets also ADDRB in termios).
- - SER_RS485_ADDR_RECV: Receive (filter) address enabled.
- - SER_RS485_ADDR_DEST: Set destination address.
+ - ``SER_RS485_ADDRB``: Enabled addressing mode (sets also ADDRB in termios).
+ - ``SER_RS485_ADDR_RECV``: Receive (filter) address enabled.
+ - ``SER_RS485_ADDR_DEST``: Set destination address.
- Address fields (enabled with corresponding SER_RS485_ADDR_* flag):
- - addr_recv: Receive address.
- - addr_dest: Destination address.
+ Address fields (enabled with corresponding ``SER_RS485_ADDR_*`` flag):
+ - ``addr_recv``: Receive address.
+ - ``addr_dest``: Destination address.
Once a receive address is set, the communication can occur only with the particular device and other peers are filtered out. It is left up to the receiver side to enforce the filtering.
Receive address will be cleared - if SER_RS485_ADDR_RECV is not set. + if ``SER_RS485_ADDR_RECV`` is not set. Note: not all devices supporting RS485 support multipoint addressing. 6. References ============= - [1] include/uapi/linux/serial.h - - [2] Documentation/devicetree/bindings/serial/rs485.txt +.. [#DT-bindings] Documentation/devicetree/bindings/serial/rs485.txt diff --git a/Documentation/features/vm/huge-vmap/arch-support.txt b/Documentation/features/vm/huge-vmap/arch-support.txt index 82b0be425775766c27516db1a7149eb863f02dc6..34647d9bdca46e2ce670dafb3af926b4768b6f7f 100644 --- a/Documentation/features/vm/huge-vmap/arch-support.txt +++ b/Documentation/features/vm/huge-vmap/arch-support.txt @@ -21,7 +21,7 @@ | openrisc: | TODO | | parisc: | TODO | | powerpc: | ok | - | riscv: | TODO | + | riscv: | ok | | s390: | TODO | | sh: | TODO | | sparc: | TODO | diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst index 17df9a02ccff14a17be195730ff5ce71ffeb79ae..220f3e0d3f559f47ab61b46b6e44c7754471b941 100644 --- a/Documentation/filesystems/f2fs.rst +++ b/Documentation/filesystems/f2fs.rst @@ -25,10 +25,14 @@ a consistency checking tool (fsck.f2fs), and a debugging tool (dump.f2fs). - git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs-tools.git -For reporting bugs and sending patches, please use the following mailing list: +For sending patches, please use the following mailing list: - linux-f2fs-devel@lists.sourceforge.net +For reporting bugs, please use the following f2fs bug tracker link: + +- https://bugzilla.kernel.org/enter_bug.cgi?product=File%20System&component=f2fs + Background and Design issues ============================ @@ -154,6 +158,8 @@ nobarrier This option can be used if underlying storage guarantees If this option is set, no cache_flush commands are issued but f2fs still guarantees the write ordering of all the data writes. +barrier If this option is set, cache_flush commands are allowed to be + issued. fastboot This option is used when a system wants to reduce mount time as much as possible, even though normal performance can be sacrificed. @@ -199,6 +205,7 @@ fault_type=%d Support configuring fault injection type, should be FAULT_SLAB_ALLOC 0x000008000 FAULT_DQUOT_INIT 0x000010000 FAULT_LOCK_OP 0x000020000 + FAULT_BLKADDR 0x000040000 =================== =========== mode=%s Control block allocation mode which supports "adaptive" and "lfs". In "lfs" mode, there should be no random @@ -340,6 +347,10 @@ memory=%s Control memory mode. This supports "normal" and "low" modes. Because of the nature of low memory devices, in this mode, f2fs will try to save memory sometimes by sacrificing performance. "normal" mode is the default mode and same as before. +age_extent_cache Enable an age extent cache based on rb-tree. It records + data block update frequency of the extent per inode, in + order to provide better temperature hints for data block + allocation. ======================== ============================================================ Debugfs Entries diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index f4ee84d7b351752b8b67bf3d2b0f325307857824..e224b6d5b642309494bf4ce7340980327e93b169 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -428,14 +428,16 @@ with the memory region, as the case would be with BSS (uninitialized data). The "pathname" shows the name associated file for this mapping. 
If the mapping is not associated with a file:
- =============       ====================================
+ ===================     ===========================================
[heap]                   the heap of the program
[stack]                  the stack of the main process
[vdso]                   the "virtual dynamic shared object", the kernel system call handler
- [anon:<name>]      an anonymous mapping that has been
+ [anon:<name>]            a private anonymous mapping that has been
                          named by userspace
- =============       ====================================
+ [anon_shmem:<name>]      an anonymous shared memory mapping that has
+                          been named by userspace
+ ===================     ===========================================
or if empty, the mapping is anonymous.
diff --git a/Documentation/mm/arch_pgtable_helpers.rst b/Documentation/mm/arch_pgtable_helpers.rst
index cbaee9e592410fe6a5f8bd378d201a124e55b7cc..fd2a19df884ed38c95913cf5e532c15f252ce412 100644
--- a/Documentation/mm/arch_pgtable_helpers.rst
+++ b/Documentation/mm/arch_pgtable_helpers.rst
@@ -94,7 +94,7 @@ PMD Page Table Helpers
+---------------------------+--------------------------------------------------+
| pmd_trans_huge            | Tests a Transparent Huge Page (THP) at PMD       |
+---------------------------+--------------------------------------------------+
-| pmd_present               | Tests a valid mapped PMD                         |
+| pmd_present               | Tests whether pmd_page() points to valid memory  |
+---------------------------+--------------------------------------------------+
| pmd_young                 | Tests a young PMD                                |
+---------------------------+--------------------------------------------------+
diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst
index 216db1d67d04ebd680b521e2de61de13c2046221..ec3dc5b042260be66a96277905ba108ff0e2d252 100644
--- a/Documentation/mm/transhuge.rst
+++ b/Documentation/mm/transhuge.rst
@@ -117,31 +117,15 @@ pages:
- ->_refcount in tail pages is always zero: get_page_unless_zero() never succeeds on tail pages.
- - map/unmap of the pages with PTE entry increment/decrement ->_mapcount
- on relevant sub-page of the compound page.
-
- - map/unmap of the whole compound page is accounted for in compound_mapcount
- (stored in first tail page). For file huge pages, we also increment
- ->_mapcount of all sub-pages in order to have race-free detection of
- last unmap of subpages.
-
-PageDoubleMap() indicates that the page is *possibly* mapped with PTEs.
-
-For anonymous pages, PageDoubleMap() also indicates ->_mapcount in all
-subpages is offset up by one. This additional reference is required to
-get race-free detection of unmap of subpages when we have them mapped with
-both PMDs and PTEs.
-
-This optimization is required to lower the overhead of per-subpage mapcount
-tracking. The alternative is to alter ->_mapcount in all subpages on each
-map/unmap of the whole compound page.
-
-For anonymous pages, we set PG_double_map when a PMD of the page is split
-for the first time, but still have a PMD mapping. The additional references
-go away with the last compound_mapcount.
-
-File pages get PG_double_map set on the first map of the page with PTE and
-goes away when the page gets evicted from the page cache.
+ - map/unmap of PMD entry for the whole compound page increment/decrement
+ ->compound_mapcount, stored in the first tail page of the compound page;
+ and also increment/decrement ->subpages_mapcount (also in the first tail)
+ by COMPOUND_MAPPED when compound_mapcount goes from -1 to 0 or 0 to -1.
+
+ - map/unmap of sub-pages with PTE entry increment/decrement ->_mapcount
+ on relevant sub-page of the compound page, and also increment/decrement
+ ->subpages_mapcount, stored in first tail page of the compound page, when
+ _mapcount goes from -1 to 0 or 0 to -1: counting sub-pages mapped by PTE.
split_huge_page internally has to distribute the refcounts in the head page to the tail pages before clearing all PG_head/tail bits from the page
diff --git a/Documentation/networking/devlink/index.rst b/Documentation/networking/devlink/index.rst
index 4b653d0406276d966b6949aa6050e3318991d363..fee4d3968309bb1f57890c482a1f5799a96069de 100644
--- a/Documentation/networking/devlink/index.rst
+++ b/Documentation/networking/devlink/index.rst
@@ -50,6 +50,7 @@ parameters, info versions, and other features it supports.
   :maxdepth: 1
   bnxt
+  etas_es58x
   hns3
   ionic
   ice
diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst
index 1120d71f28d7bca75e6a540c03c539175c3d6cc2..49db1d11d7c4a8788f5068e4ac97a420202044f7 100644
--- a/Documentation/networking/nf_conntrack-sysctl.rst
+++ b/Documentation/networking/nf_conntrack-sysctl.rst
@@ -163,6 +163,39 @@ nf_conntrack_timestamp - BOOLEAN
Enable connection tracking flow timestamping.
+nf_conntrack_sctp_timeout_closed - INTEGER (seconds)
+ default 10
+
+nf_conntrack_sctp_timeout_cookie_wait - INTEGER (seconds)
+ default 3
+
+nf_conntrack_sctp_timeout_cookie_echoed - INTEGER (seconds)
+ default 3
+
+nf_conntrack_sctp_timeout_established - INTEGER (seconds)
+ default 432000 (5 days)
+
+nf_conntrack_sctp_timeout_shutdown_sent - INTEGER (seconds)
+ default 0.3
+
+nf_conntrack_sctp_timeout_shutdown_recd - INTEGER (seconds)
+ default 0.3
+
+nf_conntrack_sctp_timeout_shutdown_ack_sent - INTEGER (seconds)
+ default 3
+
+nf_conntrack_sctp_timeout_heartbeat_sent - INTEGER (seconds)
+ default 30
+
+ This timeout is used to set up a conntrack entry on secondary paths.
+ Default is set to hb_interval.
+
+nf_conntrack_sctp_timeout_heartbeat_acked - INTEGER (seconds)
+ default 210
+
+ This timeout is used to set up a conntrack entry on secondary paths.
+ Default is set to (hb_interval * path_max_retrans + rto_max) + nf_conntrack_udp_timeout - INTEGER (seconds) default 30 diff --git a/Documentation/powerpc/cpu_families.rst b/Documentation/powerpc/cpu_families.rst index 9b84e045e7134b70ff5183185ae9d850cae1aae2..eb7e60649b4337419b0f6834b0e2f18ef4b3e67d 100644 --- a/Documentation/powerpc/cpu_families.rst +++ b/Documentation/powerpc/cpu_families.rst @@ -10,6 +10,7 @@ Book3S (aka sPAPR) ------------------ - Hash MMU (except 603 and e300) +- Radix MMU (POWER9 and later) - Software loaded TLB (603 and e300) - Selectable Software loaded TLB in addition to hash MMU (755, 7450, e600) - Mix of 32 & 64 bit:: @@ -100,6 +101,18 @@ Book3S (aka sPAPR) v +--------------+ | POWER8 | + +--------------+ + | + | + v + +--------------+ + | POWER9 | + +--------------+ + | + | + v + +--------------+ + | POWER10 | +--------------+ diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index 9844ca3a71a61689f171bc02fd10a12566ee5f0b..ef540865ad22e3b78352007c5b0f1c05f5a3e2b1 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -35,7 +35,7 @@ Rust (optional) 1.62.0 rustc --version bindgen (optional) 0.56.0 bindgen --version GNU make 3.82 make --version bash 4.2 bash --version -binutils 2.23 ld -v +binutils 2.25 ld -v flex 2.5.35 flex --version bison 2.0 bison --version pahole 1.16 pahole --version @@ -119,7 +119,7 @@ Bash 4.2 or newer is needed. Binutils -------- -Binutils 2.23 or newer is needed to build the kernel. +Binutils 2.25 or newer is needed to build the kernel. pkg-config ---------- diff --git a/Documentation/riscv/patch-acceptance.rst b/Documentation/riscv/patch-acceptance.rst index dfe0ac5624fb240ce697a5067666cf46b2dc7209..07d5a5623e2a3e87e5244b94b789c8174a983efe 100644 --- a/Documentation/riscv/patch-acceptance.rst +++ b/Documentation/riscv/patch-acceptance.rst @@ -20,16 +20,22 @@ Submit Checklist Addendum ------------------------- We'll only accept patches for new modules or extensions if the specifications for those modules or extensions are listed as being -"Frozen" or "Ratified" by the RISC-V Foundation. (Developers may, of -course, maintain their own Linux kernel trees that contain code for -any draft extensions that they wish.) +unlikely to be incompatibly changed in the future. For +specifications from the RISC-V foundation this means "Frozen" or +"Ratified", for the UEFI forum specifications this means a published +ECR. (Developers may, of course, maintain their own Linux kernel trees +that contain code for any draft extensions that they wish.) -Additionally, the RISC-V specification allows implementors to create +Additionally, the RISC-V specification allows implementers to create their own custom extensions. These custom extensions aren't required to go through any review or ratification process by the RISC-V Foundation. To avoid the maintenance complexity and potential performance impact of adding kernel code for implementor-specific -RISC-V extensions, we'll only to accept patches for extensions that -have been officially frozen or ratified by the RISC-V Foundation. -(Implementors, may, of course, maintain their own Linux kernel trees -containing code for any custom extensions that they wish.) +RISC-V extensions, we'll only consider patches for extensions that either: + +- Have been officially frozen or ratified by the RISC-V Foundation, or +- Have been implemented in hardware that is widely available, per standard + Linux practice. 
+
+(Implementers may, of course, maintain their own Linux kernel trees containing
+code for any custom extensions that they wish.)
diff --git a/Documentation/scsi/scsi_eh.rst b/Documentation/scsi/scsi_eh.rst
index bad624fab8231866202dd61e79e8b7f6e3fd60e8..104d09e9af099fb8ce0e6e3580fb3b489baf63e9 100644
--- a/Documentation/scsi/scsi_eh.rst
+++ b/Documentation/scsi/scsi_eh.rst
@@ -92,14 +92,17 @@ The timeout handler is scsi_timeout(). When a timeout occurs, this function
1. invokes optional hostt->eh_timed_out() callback. Return value can be one of
- - BLK_EH_RESET_TIMER
+ - SCSI_EH_RESET_TIMER
 This indicates that more time is required to finish the command. Timer is restarted.
- - BLK_EH_DONE
+ - SCSI_EH_NOT_HANDLED
 eh_timed_out() callback did not handle the command. Step #2 is taken.
+ - SCSI_EH_DONE
+ eh_timed_out() completed the command.
+
2. scsi_abort_command() is invoked to schedule an asynchronous abort which may issue a retry scmd->allowed + 1 times. Asynchronous aborts are not invoked for commands for which the SCSI_EH_ABORT_SCHEDULED flag is set (this
diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst
index 87bd772836c0c56ee93e37b5386128c728c3c03f..f95459aa984f385f7a46c990663f58b5d30fb388 100644
--- a/Documentation/trace/histogram.rst
+++ b/Documentation/trace/histogram.rst
@@ -25,7 +25,7 @@ Documentation written by Tom Zanussi
hist:keys=[:values=] [:sort=][:size=#entries][:pause][:continue]
- [:clear][:name=histname1][:.] [if ]
+ [:clear][:name=histname1][:nohitcount][:.] [if ]
When a matching event is hit, an entry is added to a hash table using the key(s) and value(s) named. Keys and values correspond to
@@ -79,6 +79,8 @@ Documentation written by Tom Zanussi
 .log2 display log2 value rather than raw number
 .buckets=size display grouping of values rather than raw number
 .usecs display a common_timestamp in microseconds
+ .percent display the value as a percentage
+ .graph display a bar-graph of a value
 ============= =================================================
Note that in general the semantics of a given field aren't
@@ -137,6 +139,12 @@ Documentation written by Tom Zanussi
 existing trigger, rather than via the '>' operator, which will cause the trigger to be removed through truncation.
+ The 'nohitcount' (or NOHC) parameter will suppress display of
+ raw hitcount in the histogram. This option requires at least one
+ value field which is not a 'raw hitcount'. For example,
+ 'hist:...:vals=hitcount:nohitcount' is rejected, but
+ 'hist:...:vals=hitcount.percent:nohitcount' is OK.
+
- enable_hist/disable_hist
The enable_hist and disable_hist triggers can be used to have one
diff --git a/Documentation/trace/osnoise-tracer.rst b/Documentation/trace/osnoise-tracer.rst
index 963def9f97c6ec4ad99394f570182dbf5d76959f..140ef2533d26a0545ba8381365844ea2d7150563 100644
--- a/Documentation/trace/osnoise-tracer.rst
+++ b/Documentation/trace/osnoise-tracer.rst
@@ -92,8 +92,8 @@ Note that the example above shows a high number of HW noise samples. The reason being is that this sample was taken on a virtual machine, and the host interference is detected as a hardware interference.
-Tracer options
----------------------
+Tracer Configuration
+--------------------
The tracer has a set of options inside the osnoise directory, they are:
@@ -109,6 +109,27 @@ The tracer has a set of options inside the osnoise directory, they are:
- tracing_threshold: the minimum delta between two time() reads to be considered as noise, in us.
When set to 0, the default value will be used, which is currently 5 us.
+ - osnoise/options: a set of on/off options that can be enabled by
+ writing the option name to the file or disabled by writing the option
+ name preceded with the 'NO\_' prefix. For example, writing
+ NO_OSNOISE_WORKLOAD disables the OSNOISE_WORKLOAD option. The
+ special DEFAULTS option resets all options to the default value.
+
+Tracer Options
+--------------
+
+The osnoise/options file exposes a set of on/off configuration options for
+the osnoise tracer. These options are:
+
+ - DEFAULTS: reset the options to the default value.
+ - OSNOISE_WORKLOAD: do not dispatch osnoise workload (see dedicated
+ section below).
+ - PANIC_ON_STOP: call panic() if the tracer stops. This option serves to
+ capture a vmcore.
+ - OSNOISE_PREEMPT_DISABLE: disable preemption while running the osnoise
+ workload, allowing only IRQ and hardware-related noise.
+ - OSNOISE_IRQ_DISABLE: disable IRQs while running the osnoise workload,
+ allowing only NMIs and hardware-related noise, like hwlat tracer.
Additional Tracing ------------------
@@ -150,3 +171,10 @@ tracepoints is smaller than eight us reported in the sample_threshold. The reason roots in the overhead of the entry and exit code that happens before and after any interference execution. This justifies the dual approach: measuring thread and tracing.
+
+Running osnoise tracer without workload
+---------------------------------------
+
+By enabling the osnoise tracer with the NO_OSNOISE_WORKLOAD option set,
+the osnoise: tracepoints serve to measure the execution time of
+any type of Linux task, free from the interference of other tasks.
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index c78da9ce0ec44e5c9feca7ee262e31ed668b9d99..f16337bdb8520fac879bc4aee481bf7cfe8b0ce4 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -25,6 +25,7 @@ place where this information is gathered.
   ebpf/index
   ioctl/index
   iommu
+  iommufd
   media/index
   netlink/index
   sysfs-platform_profile
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 5f81e2a24a5c04221e49af03fcdde934a35cc855..eb045fc495a4e3426604eedde78fd5656d6b6920 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -105,6 +105,7 @@ Code Seq# Include File Comments
'8' all SNP8023 advanced NIC card
';' 64-7F linux/vfio.h
+';' 80-FF linux/iommufd.h
'=' 00-3f uapi/linux/ptp_clock.h
'@' 00-0F linux/radeonfb.h conflict!
'@' 00-0F drivers/video/aty/aty128fb.c conflict!
diff --git a/Documentation/userspace-api/iommufd.rst b/Documentation/userspace-api/iommufd.rst
new file mode 100644
index 0000000000000000000000000000000000000000..79dd9eb515874d6fb6029b6ae848daf05ffeec81
--- /dev/null
+++ b/Documentation/userspace-api/iommufd.rst
@@ -0,0 +1,223 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+=======
+IOMMUFD
+=======
+
+:Author: Jason Gunthorpe
+:Author: Kevin Tian
+
+Overview
+========
+
+IOMMUFD is the user API to control the IOMMU subsystem as it relates to managing
+IO page tables from userspace using file descriptors. It intends to be general
+and consumable by any driver that wants to expose DMA to userspace. These
+drivers are eventually expected to deprecate any internal IOMMU logic
+they may already/historically implement (e.g. vfio_iommu_type1.c).
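As a rough illustration of the flow this overview describes, a minimal
userspace sketch might look as follows. This is a hedged sketch, not an
excerpt from the patch: the ioctl names and structure layouts are assumed
from include/uapi/linux/iommufd.h and should be checked against the header
of the kernel in use::

    /*
     * Sketch: open /dev/iommu, allocate an IOAS, and map an anonymous
     * buffer into it. Struct/ioctl names assumed from linux/iommufd.h.
     */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/iommufd.h>

    int main(void)
    {
            int fd = open("/dev/iommu", O_RDWR);
            if (fd < 0)
                    return 1;

            struct iommu_ioas_alloc alloc = { .size = sizeof(alloc) };
            if (ioctl(fd, IOMMU_IOAS_ALLOC, &alloc))  /* creates an IOMMUFD_OBJ_IOAS */
                    return 1;

            void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED)
                    return 1;

            struct iommu_ioas_map map = {
                    .size = sizeof(map),
                    .flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
                    .ioas_id = alloc.out_ioas_id,
                    .user_va = (uintptr_t)buf,
                    .length = 4096,
            };
            if (ioctl(fd, IOMMU_IOAS_MAP, &map))      /* kernel chooses the IOVA */
                    return 1;

            printf("mapped at IOVA 0x%llx\n", (unsigned long long)map.iova);
            return 0;
    }

Without IOMMU_IOAS_MAP_FIXED_IOVA the kernel selects a suitable IOVA and
returns it in the iova field; a device must still be bound and attached to
the IOAS (via the external driver's uAPI) before DMA can occur.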
+ +At minimum iommufd provides universal support of managing I/O address spaces and +I/O page tables for all IOMMUs, with room in the design to add non-generic +features to cater to specific hardware functionality. + +In this context the capital letter (IOMMUFD) refers to the subsystem while the +small letter (iommufd) refers to the file descriptors created via /dev/iommu for +use by userspace. + +Key Concepts +============ + +User Visible Objects +-------------------- + +Following IOMMUFD objects are exposed to userspace: + +- IOMMUFD_OBJ_IOAS, representing an I/O address space (IOAS), allowing map/unmap + of user space memory into ranges of I/O Virtual Address (IOVA). + + The IOAS is a functional replacement for the VFIO container, and like the VFIO + container it copies an IOVA map to a list of iommu_domains held within it. + +- IOMMUFD_OBJ_DEVICE, representing a device that is bound to iommufd by an + external driver. + +- IOMMUFD_OBJ_HW_PAGETABLE, representing an actual hardware I/O page table + (i.e. a single struct iommu_domain) managed by the iommu driver. + + The IOAS has a list of HW_PAGETABLES that share the same IOVA mapping and + it will synchronize its mapping with each member HW_PAGETABLE. + +All user-visible objects are destroyed via the IOMMU_DESTROY uAPI. + +The diagram below shows relationship between user-visible objects and kernel +datastructures (external to iommufd), with numbers referred to operations +creating the objects and links:: + + _________________________________________________________ + | iommufd | + | [1] | + | _________________ | + | | | | + | | | | + | | | | + | | | | + | | | | + | | | | + | | | [3] [2] | + | | | ____________ __________ | + | | IOAS |<--| |<------| | | + | | | |HW_PAGETABLE| | DEVICE | | + | | | |____________| |__________| | + | | | | | | + | | | | | | + | | | | | | + | | | | | | + | | | | | | + | |_________________| | | | + | | | | | + |_________|___________________|___________________|_______| + | | | + | _____v______ _______v_____ + | PFN storage | | | | + |------------>|iommu_domain| |struct device| + |____________| |_____________| + +1. IOMMUFD_OBJ_IOAS is created via the IOMMU_IOAS_ALLOC uAPI. An iommufd can + hold multiple IOAS objects. IOAS is the most generic object and does not + expose interfaces that are specific to single IOMMU drivers. All operations + on the IOAS must operate equally on each of the iommu_domains inside of it. + +2. IOMMUFD_OBJ_DEVICE is created when an external driver calls the IOMMUFD kAPI + to bind a device to an iommufd. The driver is expected to implement a set of + ioctls to allow userspace to initiate the binding operation. Successful + completion of this operation establishes the desired DMA ownership over the + device. The driver must also set the driver_managed_dma flag and must not + touch the device until this operation succeeds. + +3. IOMMUFD_OBJ_HW_PAGETABLE is created when an external driver calls the IOMMUFD + kAPI to attach a bound device to an IOAS. Similarly the external driver uAPI + allows userspace to initiate the attaching operation. If a compatible + pagetable already exists then it is reused for the attachment. Otherwise a + new pagetable object and iommu_domain is created. Successful completion of + this operation sets up the linkages among IOAS, device and iommu_domain. Once + this completes the device could do DMA. + + Every iommu_domain inside the IOAS is also represented to userspace as a + HW_PAGETABLE object. + + .. 
note::
+
+   Future IOMMUFD updates will provide an API to create and manipulate the
+   HW_PAGETABLE directly.
+
+A device can only bind to an iommufd due to DMA ownership claim and attach to at
+most one IOAS object (no support of PASID yet).
+
+Kernel Datastructure
+--------------------
+
+User visible objects are backed by following datastructures:
+
+- iommufd_ioas for IOMMUFD_OBJ_IOAS.
+- iommufd_device for IOMMUFD_OBJ_DEVICE.
+- iommufd_hw_pagetable for IOMMUFD_OBJ_HW_PAGETABLE.
+
+Several terms are used when looking at these datastructures:
+
+- Automatic domain - refers to an iommu domain created automatically when
+ attaching a device to an IOAS object. This is compatible with the semantics
+ of VFIO type1.
+
+- Manual domain - refers to an iommu domain designated by the user as the
+ target pagetable to be attached to by a device. Though currently there are
+ no uAPIs to directly create such domain, the datastructure and algorithms
+ are ready for handling that use case.
+
+- In-kernel user - refers to something like a VFIO mdev that is using the
+ IOMMUFD access interface to access the IOAS. This starts by creating an
+ iommufd_access object that is similar to the domain binding a physical device
+ would do. The access object will then allow converting IOVA ranges into struct
+ page * lists, or doing direct read/write to an IOVA.
+
+iommufd_ioas serves as the metadata datastructure to manage how IOVA ranges are
+mapped to memory pages, composed of:
+
+- struct io_pagetable holding the IOVA map
+- struct iopt_area's representing populated portions of IOVA
+- struct iopt_pages representing the storage of PFNs
+- struct iommu_domain representing the IO page table in the IOMMU
+- struct iopt_pages_access representing in-kernel users of PFNs
+- struct xarray pinned_pfns holding a list of pages pinned by in-kernel users
+
+Each iopt_pages represents a logical linear array of full PFNs. The PFNs are
+ultimately derived from userspace VAs via an mm_struct. Once they have been
+pinned the PFNs are stored in IOPTEs of an iommu_domain or inside the pinned_pfns
+xarray if they have been pinned through an iommufd_access.
+
+PFNs have to be copied between all combinations of storage locations, depending
+on what domains are present and what kinds of in-kernel "software access" users
+exist. The mechanism ensures that a page is pinned only once.
+
+An io_pagetable is composed of iopt_areas pointing at iopt_pages, along with a
+list of iommu_domains that mirror the IOVA to PFN map.
+
+Multiple io_pagetable-s, through their iopt_area-s, can share a single
+iopt_pages which avoids multi-pinning and double accounting of page
+consumption.
+
+iommufd_ioas is sharable between subsystems, e.g. VFIO and VDPA, as long as
+devices managed by different subsystems are bound to the same iommufd.
+
+IOMMUFD User API
+================
+
+.. kernel-doc:: include/uapi/linux/iommufd.h
+
+IOMMUFD Kernel API
+==================
+
+The IOMMUFD kAPI is device-centric with group-related tricks managed behind the
+scene. This allows the external drivers calling such kAPI to implement a simple
+device-centric uAPI for connecting its device to an iommufd, instead of
+explicitly imposing the group semantics in its uAPI as VFIO does.
+
+.. kernel-doc:: drivers/iommu/iommufd/device.c
+   :export:
+
+.. kernel-doc:: drivers/iommu/iommufd/main.c
+   :export:
+
+VFIO and IOMMUFD
+----------------
+
+Connecting a VFIO device to iommufd can be done in two ways.
+
+First is a VFIO compatible way by directly implementing the /dev/vfio/vfio
+container IOCTLs by mapping them into io_pagetable operations. Doing so allows
+the use of iommufd in legacy VFIO applications by symlinking /dev/vfio/vfio to
+/dev/iommufd or extending VFIO to SET_CONTAINER using an iommufd instead of a
+container fd.
+
+The second approach directly extends VFIO to support a new set of device-centric
+user API based on aforementioned IOMMUFD kernel API. It requires userspace
+changes but better matches the IOMMUFD API semantics and makes it easier to
+support new iommufd features than the first approach.
+
+Currently both approaches are still work-in-progress.
+
+There are still a few gaps to be resolved to catch up with VFIO type1, as
+documented in iommufd_vfio_check_extension().
+
+Future TODOs
+============
+
+Currently IOMMUFD supports only kernel-managed I/O page table, similar to VFIO
+type1. New features on the radar include:
+
+ - Binding iommu_domain's to PASID/SSID
+ - Userspace page tables, for ARM, x86 and S390
+ - Kernel bypass'd invalidation of user page tables
+ - Re-use of the KVM page table in the IOMMU
+ - Dirty page tracking in the IOMMU
+ - Runtime Increase/Decrease of IOPTE size
+ - PRI support with faults resolved in userspace
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 896914e3a8475a12fe3b841c736566726ba47157..0dd5d8733dd583ff77a4ef9072831e12e289a3b1 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -272,18 +272,6 @@ the VCPU file descriptor can be mmap-ed, including:
KVM_CAP_DIRTY_LOG_RING, see section 8.3.
-4.6 KVM_SET_MEMORY_REGION
--------------------------
-
-:Capability: basic
-:Architectures: all
-:Type: vm ioctl
-:Parameters: struct kvm_memory_region (in)
-:Returns: 0 on success, -1 on error
-
-This ioctl is obsolete and has been removed.
-
-
4.7 KVM_CREATE_VCPU
-------------------
@@ -368,17 +356,6 @@ see the description of the capability. Note that the Xen shared info page, if configured, shall always be assumed to be dirty. KVM will not explicitly mark it such.
-4.9 KVM_SET_MEMORY_ALIAS
-------------------------
-
-:Capability: basic
-:Architectures: x86
-:Type: vm ioctl
-:Parameters: struct kvm_memory_alias (in)
-:Returns: 0 (success), -1 (error)
-
-This ioctl is obsolete and has been removed.
-
4.10 KVM_RUN
------------
@@ -1332,7 +1309,7 @@ yet and must be cleared on entry.
 __u64 userspace_addr; /* start of the userspace allocated memory */
 };
- /* for kvm_memory_region::flags */
+ /* for kvm_userspace_memory_region::flags */
 #define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
 #define KVM_MEM_READONLY (1UL << 1)
@@ -1377,10 +1354,6 @@ the memory region are automatically reflected into the guest. For example, an mmap() that affects the region will be made visible immediately. Another example is madvise(MADV_DROP).
-It is recommended to use this API instead of the KVM_SET_MEMORY_REGION ioctl.
-The KVM_SET_MEMORY_REGION does not allow fine grained control over memory
-allocation and is deprecated.
-
4.36 KVM_SET_TSS_ADDR
---------------------
@@ -3293,6 +3266,7 @@ valid entries found.
----------------------
:Capability: KVM_CAP_DEVICE_CTRL
+:Architectures: all
:Type: vm ioctl
:Parameters: struct kvm_create_device (in/out)
:Returns: 0 on success, -1 on error
@@ -3333,6 +3307,7 @@ number.
:Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device, KVM_CAP_VCPU_ATTRIBUTES for vcpu device KVM_CAP_SYS_ATTRIBUTES for system (/dev/kvm) device (no set) +:Architectures: x86, arm64, s390 :Type: device ioctl, vm ioctl, vcpu ioctl :Parameters: struct kvm_device_attr :Returns: 0 on success, -1 on error @@ -4104,80 +4079,71 @@ flags values for ``struct kvm_msr_filter_range``: ``KVM_MSR_FILTER_READ`` Filter read accesses to MSRs using the given bitmap. A 0 in the bitmap - indicates that a read should immediately fail, while a 1 indicates that - a read for a particular MSR should be handled regardless of the default + indicates that read accesses should be denied, while a 1 indicates that + a read for a particular MSR should be allowed regardless of the default filter action. ``KVM_MSR_FILTER_WRITE`` Filter write accesses to MSRs using the given bitmap. A 0 in the bitmap - indicates that a write should immediately fail, while a 1 indicates that - a write for a particular MSR should be handled regardless of the default + indicates that write accesses should be denied, while a 1 indicates that + a write for a particular MSR should be allowed regardless of the default filter action. -``KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE`` - - Filter both read and write accesses to MSRs using the given bitmap. A 0 - in the bitmap indicates that both reads and writes should immediately fail, - while a 1 indicates that reads and writes for a particular MSR are not - filtered by this range. - flags values for ``struct kvm_msr_filter``: ``KVM_MSR_FILTER_DEFAULT_ALLOW`` If no filter range matches an MSR index that is getting accessed, KVM will - fall back to allowing access to the MSR. + allow accesses to all MSRs by default. ``KVM_MSR_FILTER_DEFAULT_DENY`` If no filter range matches an MSR index that is getting accessed, KVM will - fall back to rejecting access to the MSR. In this mode, all MSRs that should - be processed by KVM need to explicitly be marked as allowed in the bitmaps. + deny accesses to all MSRs by default. + +This ioctl allows userspace to define up to 16 bitmaps of MSR ranges to deny +guest MSR accesses that would normally be allowed by KVM. If an MSR is not +covered by a specific range, the "default" filtering behavior applies. Each +bitmap range covers MSRs from [base .. base+nmsrs). -This ioctl allows user space to define up to 16 bitmaps of MSR ranges to -specify whether a certain MSR access should be explicitly filtered for or not. +If an MSR access is denied by userspace, the resulting KVM behavior depends on +whether or not KVM_CAP_X86_USER_SPACE_MSR's KVM_MSR_EXIT_REASON_FILTER is +enabled. If KVM_MSR_EXIT_REASON_FILTER is enabled, KVM will exit to userspace +on denied accesses, i.e. userspace effectively intercepts the MSR access. If +KVM_MSR_EXIT_REASON_FILTER is not enabled, KVM will inject a #GP into the guest +on denied accesses. -If this ioctl has never been invoked, MSR accesses are not guarded and the -default KVM in-kernel emulation behavior is fully preserved. +If an MSR access is allowed by userspace, KVM will emulate and/or virtualize +the access in accordance with the vCPU model. Note, KVM may still ultimately +inject a #GP if an access is allowed by userspace, e.g. if KVM doesn't support +the MSR, or to follow architectural behavior for the MSR. + +By default, KVM operates in KVM_MSR_FILTER_DEFAULT_ALLOW mode with no MSR range +filters. Calling this ioctl with an empty set of ranges (all nmsrs == 0) disables MSR filtering. 
In that mode, ``KVM_MSR_FILTER_DEFAULT_DENY`` is invalid and causes an error.
-As soon as the filtering is in place, every MSR access is processed through
-the filtering except for accesses to the x2APIC MSRs (from 0x800 to 0x8ff);
-x2APIC MSRs are always allowed, independent of the ``default_allow`` setting,
-and their behavior depends on the ``X2APIC_ENABLE`` bit of the APIC base
-register.
-
.. warning::
- MSR accesses coming from nested vmentry/vmexit are not filtered.
+ MSR accesses as part of nested VM-Enter/VM-Exit are not filtered.
 This includes both writes to individual VMCS fields and reads/writes
 through the MSR lists pointed to by the VMCS.
-If a bit is within one of the defined ranges, read and write accesses are
-guarded by the bitmap's value for the MSR index if the kind of access
-is included in the ``struct kvm_msr_filter_range`` flags. If no range
-cover this particular access, the behavior is determined by the flags
-field in the kvm_msr_filter struct: ``KVM_MSR_FILTER_DEFAULT_ALLOW``
-and ``KVM_MSR_FILTER_DEFAULT_DENY``.
-
-Each bitmap range specifies a range of MSRs to potentially allow access on.
-The range goes from MSR index [base .. base+nmsrs]. The flags field
-indicates whether reads, writes or both reads and writes are filtered
-by setting a 1 bit in the bitmap for the corresponding MSR index.
-
-If an MSR access is not permitted through the filtering, it generates a
-#GP inside the guest. When combined with KVM_CAP_X86_USER_SPACE_MSR, that
-allows user space to deflect and potentially handle various MSR accesses
-into user space.
+ x2APIC MSR accesses cannot be filtered (KVM silently ignores filters that
+ cover any x2APIC MSRs).
Note, invoking this ioctl while a vCPU is running is inherently racy. However, KVM does guarantee that vCPUs will see either the previous filter or the new filter, e.g. MSRs with identical settings in both the old and new filter will have deterministic behavior.
+Similarly, if userspace wishes to intercept on denied accesses,
+KVM_MSR_EXIT_REASON_FILTER must be enabled before activating any filters, and
+left enabled until after all filters are deactivated. Failure to do so may
+result in KVM injecting a #GP instead of exiting to userspace.
+
4.98 KVM_CREATE_SPAPR_TCE_64
----------------------------
@@ -5163,10 +5129,13 @@ KVM_PV_ENABLE
===== =============================
KVM_PV_DISABLE
- Deregister the VM from the Ultravisor and reclaim the memory that
- had been donated to the Ultravisor, making it usable by the kernel
- again. All registered VCPUs are converted back to non-protected
- ones.
+ Deregister the VM from the Ultravisor and reclaim the memory that had
+ been donated to the Ultravisor, making it usable by the kernel again.
+ All registered VCPUs are converted back to non-protected ones. If a
+ previous protected VM had been prepared for asynchronous teardown with
+ KVM_PV_ASYNC_CLEANUP_PREPARE and not subsequently torn down with
+ KVM_PV_ASYNC_CLEANUP_PERFORM, it will be torn down in this call
+ together with the current protected VM.
KVM_PV_VM_SET_SEC_PARMS
 Pass the image header from VM memory to the Ultravisor in
@@ -5289,6 +5258,36 @@ KVM_PV_DUMP
 authentication tag all of which are needed to decrypt the dump at a later time.
+KVM_PV_ASYNC_CLEANUP_PREPARE
+ :Capability: KVM_CAP_S390_PROTECTED_ASYNC_DISABLE
+
+ Prepare the current protected VM for asynchronous teardown. Most
+ resources used by the current protected VM will be set aside for a
+ subsequent asynchronous teardown.
The current protected VM will then
+ resume execution immediately as non-protected. There can be at most
+ one protected VM prepared for asynchronous teardown at any time. If
+ a protected VM had already been prepared for teardown without
+ subsequently calling KVM_PV_ASYNC_CLEANUP_PERFORM, this call will
+ fail. In that case, the userspace process should issue a normal
+ KVM_PV_DISABLE. The resources set aside with this call will need to
+ be cleaned up with a subsequent call to KVM_PV_ASYNC_CLEANUP_PERFORM
+ or KVM_PV_DISABLE, otherwise they will be cleaned up when KVM
+ terminates. KVM_PV_ASYNC_CLEANUP_PREPARE can be called again as soon
+ as cleanup starts, i.e. before KVM_PV_ASYNC_CLEANUP_PERFORM finishes.
+
+KVM_PV_ASYNC_CLEANUP_PERFORM
+ :Capability: KVM_CAP_S390_PROTECTED_ASYNC_DISABLE
+
+ Tear down the protected VM previously prepared for teardown with
+ KVM_PV_ASYNC_CLEANUP_PREPARE. The resources that had been set aside
+ will be freed during the execution of this command. This PV command
+ should ideally be issued by userspace from a separate thread. If a
+ fatal signal is received (or the process terminates naturally), the
+ command will terminate immediately without completing, and the normal
+ KVM shutdown procedure will take care of cleaning up all remaining
+ protected VMs, including the ones whose teardown was interrupted by
+ process termination.
+
4.126 KVM_XEN_HVM_SET_ATTR
--------------------------
@@ -5306,6 +5305,7 @@ KVM_PV_DUMP
 union {
 __u8 long_mode;
 __u8 vector;
+ __u8 runstate_update_flag;
 struct {
 __u64 gfn;
 } shared_info;
@@ -5383,6 +5383,14 @@ KVM_XEN_ATTR_TYPE_XEN_VERSION
 event channel delivery, so responding within the kernel without exiting to userspace is beneficial.
+KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG
+ This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
+ support for KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG. It enables the
+ XEN_RUNSTATE_UPDATE flag which allows guest vCPUs to safely read
+ other vCPUs' vcpu_runstate_info. Xen guests enable this feature via
+ the VM_ASST_TYPE_runstate_update_flag of the HYPERVISOR_vm_assist
+ hypercall.
+
4.127 KVM_XEN_HVM_GET_ATTR
--------------------------
@@ -6440,31 +6448,35 @@ if it decides to decode and emulate the instruction.
Used on x86 systems. When the VM capability KVM_CAP_X86_USER_SPACE_MSR is enabled, MSR accesses to registers that would invoke a #GP by KVM kernel code
-will instead trigger a KVM_EXIT_X86_RDMSR exit for reads and KVM_EXIT_X86_WRMSR
+may instead trigger a KVM_EXIT_X86_RDMSR exit for reads and KVM_EXIT_X86_WRMSR
exit for writes.
-The "reason" field specifies why the MSR trap occurred. User space will only
-receive MSR exit traps when a particular reason was requested during through
+The "reason" field specifies why the MSR interception occurred. Userspace will
+only receive MSR exits when a particular reason was requested through
ENABLE_CAP.
Currently valid exit reasons are: - KVM_MSR_EXIT_REASON_UNKNOWN - access to MSR that is unknown to KVM - KVM_MSR_EXIT_REASON_INVAL - access to invalid MSRs or reserved bits - KVM_MSR_EXIT_REASON_FILTER - access blocked by KVM_X86_SET_MSR_FILTER +============================ ======================================== + KVM_MSR_EXIT_REASON_UNKNOWN access to MSR that is unknown to KVM + KVM_MSR_EXIT_REASON_INVAL access to invalid MSRs or reserved bits + KVM_MSR_EXIT_REASON_FILTER access blocked by KVM_X86_SET_MSR_FILTER +============================ ======================================== -For KVM_EXIT_X86_RDMSR, the "index" field tells user space which MSR the guest -wants to read. To respond to this request with a successful read, user space +For KVM_EXIT_X86_RDMSR, the "index" field tells userspace which MSR the guest +wants to read. To respond to this request with a successful read, userspace writes the respective data into the "data" field and must continue guest execution to ensure the read data is transferred into guest register state. -If the RDMSR request was unsuccessful, user space indicates that with a "1" in +If the RDMSR request was unsuccessful, userspace indicates that with a "1" in the "error" field. This will inject a #GP into the guest when the VCPU is executed again. -For KVM_EXIT_X86_WRMSR, the "index" field tells user space which MSR the guest -wants to write. Once finished processing the event, user space must continue -vCPU execution. If the MSR write was unsuccessful, user space also sets the +For KVM_EXIT_X86_WRMSR, the "index" field tells userspace which MSR the guest +wants to write. Once finished processing the event, userspace must continue +vCPU execution. If the MSR write was unsuccessful, userspace also sets the "error" field to "1". +See KVM_X86_SET_MSR_FILTER for details on the interaction with MSR filtering. + :: @@ -7229,19 +7241,29 @@ polling. :Parameters: args[0] contains the mask of KVM_MSR_EXIT_REASON_* events to report :Returns: 0 on success; -1 on error -This capability enables trapping of #GP invoking RDMSR and WRMSR instructions -into user space. +This capability allows userspace to intercept RDMSR and WRMSR instructions if +access to an MSR is denied. By default, KVM injects #GP on denied accesses. When a guest requests to read or write an MSR, KVM may not implement all MSRs that are relevant to a respective system. It also does not differentiate by CPU type. -To allow more fine grained control over MSR handling, user space may enable +To allow more fine grained control over MSR handling, userspace may enable this capability. With it enabled, MSR accesses that match the mask specified in -args[0] and trigger a #GP event inside the guest by KVM will instead trigger -KVM_EXIT_X86_RDMSR and KVM_EXIT_X86_WRMSR exit notifications which user space -can then handle to implement model specific MSR handling and/or user notifications -to inform a user that an MSR was not handled. +args[0] and would trigger a #GP inside the guest will instead trigger +KVM_EXIT_X86_RDMSR and KVM_EXIT_X86_WRMSR exit notifications. Userspace +can then implement model specific MSR handling and/or user notifications +to inform a user that an MSR was not emulated/virtualized by KVM. 
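A minimal run-loop fragment consuming these exits might look as follows.
This is a sketch, assuming the struct kvm_run ``msr`` member layout from
include/uapi/linux/kvm.h; the MSR index tested here is purely hypothetical::

    /*
     * Sketch: handle the MSR exits described above. Assumes run points at
     * the vCPU's mmap'ed struct kvm_run and that KVM_CAP_X86_USER_SPACE_MSR
     * was enabled with the wanted KVM_MSR_EXIT_REASON_* mask.
     */
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static void vcpu_loop(int vcpu_fd, struct kvm_run *run)
    {
            for (;;) {
                    if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                            break;

                    switch (run->exit_reason) {
                    case KVM_EXIT_X86_RDMSR:
                            if (run->msr.index == 0x123) {   /* hypothetical MSR */
                                    run->msr.data = 0;       /* value the guest reads */
                                    run->msr.error = 0;
                            } else {
                                    run->msr.error = 1;      /* #GP into the guest */
                            }
                            break;
                    case KVM_EXIT_X86_WRMSR:
                            /* consume run->msr.data, or set error to reject */
                            run->msr.error = 0;
                            break;
                    default:
                            return;
                    }
            }
    }

After filling in ``data``/``error``, the VMM simply re-enters KVM_RUN so the
result is transferred into guest register state, as described above.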
+ +The valid mask flags are: + +============================ =============================================== + KVM_MSR_EXIT_REASON_UNKNOWN intercept accesses to unknown (to KVM) MSRs + KVM_MSR_EXIT_REASON_INVAL intercept accesses that are architecturally + invalid according to the vCPU model and/or mode + KVM_MSR_EXIT_REASON_FILTER intercept accesses that are denied by userspace + via KVM_X86_SET_MSR_FILTER +============================ =============================================== 7.22 KVM_CAP_X86_BUS_LOCK_EXIT ------------------------------- @@ -7384,8 +7406,9 @@ hibernation of the host; however the VMM needs to manually save/restore the tags as appropriate if the VM is migrated. When this capability is enabled all memory in memslots must be mapped as -not-shareable (no MAP_SHARED), attempts to create a memslot with a -MAP_SHARED mmap will result in an -EINVAL return. +``MAP_ANONYMOUS`` or with a RAM-based file mapping (``tmpfs``, ``memfd``), +attempts to create a memslot with an invalid mmap will result in an +-EINVAL return. When enabled the VMM may make use of the ``KVM_ARM_MTE_COPY_TAGS`` ioctl to perform a bulk copy of tags to/from the guest. @@ -7901,7 +7924,7 @@ KVM_EXIT_X86_WRMSR exit notifications. This capability indicates that KVM supports that accesses to user defined MSRs may be rejected. With this capability exposed, KVM exports new VM ioctl KVM_X86_SET_MSR_FILTER which user space can call to specify bitmaps of MSR -ranges that KVM should reject access to. +ranges that KVM should deny access to. In combination with KVM_CAP_X86_USER_SPACE_MSR, this allows user space to trap and emulate MSRs that are outside of the scope of KVM as well as @@ -7920,7 +7943,7 @@ regardless of what has actually been exposed through the CPUID leaf. 8.29 KVM_CAP_DIRTY_LOG_RING/KVM_CAP_DIRTY_LOG_RING_ACQ_REL ---------------------------------------------------------- -:Architectures: x86 +:Architectures: x86, arm64 :Parameters: args[0] - size of the dirty log ring KVM is capable of tracking dirty memory using ring buffers that are @@ -8002,13 +8025,6 @@ flushing is done by the KVM_GET_DIRTY_LOG ioctl). To achieve that, one needs to kick the vcpu out of KVM_RUN using a signal. The resulting vmexit ensures that all dirty GFNs are flushed to the dirty rings. -NOTE: the capability KVM_CAP_DIRTY_LOG_RING and the corresponding -ioctl KVM_RESET_DIRTY_RINGS are mutual exclusive to the existing ioctls -KVM_GET_DIRTY_LOG and KVM_CLEAR_DIRTY_LOG. After enabling -KVM_CAP_DIRTY_LOG_RING with an acceptable dirty ring size, the virtual -machine will switch to ring-buffer dirty page tracking and further -KVM_GET_DIRTY_LOG or KVM_CLEAR_DIRTY_LOG ioctls will fail. - NOTE: KVM_CAP_DIRTY_LOG_RING_ACQ_REL is the only capability that should be exposed by weakly ordered architecture, in order to indicate the additional memory ordering requirements imposed on userspace when @@ -8017,6 +8033,33 @@ Architecture with TSO-like ordering (such as x86) are allowed to expose both KVM_CAP_DIRTY_LOG_RING and KVM_CAP_DIRTY_LOG_RING_ACQ_REL to userspace. +After enabling the dirty rings, the userspace needs to detect the +capability of KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP to see whether the +ring structures can be backed by per-slot bitmaps. With this capability +advertised, it means the architecture can dirty guest pages without +vcpu/ring context, so that some of the dirty information will still be +maintained in the bitmap structure. 
KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP
+can't be enabled if the capability of KVM_CAP_DIRTY_LOG_RING_ACQ_REL
+hasn't been enabled, or if any memslot has already been created.
+
+Note that the bitmap here is only a backup of the ring structure. The
+use of the ring and bitmap combination is only beneficial if there is
+only a very small amount of memory that is dirtied out of vcpu/ring
+context. Otherwise, the stand-alone per-slot bitmap mechanism needs to
+be considered.
+
+To collect dirty bits in the backup bitmap, userspace can use the same
+KVM_GET_DIRTY_LOG ioctl. KVM_CLEAR_DIRTY_LOG isn't needed as long as all
+the generation of the dirty bits is done in a single pass. Collecting
+the dirty bitmap should be the very last thing that the VMM does before
+considering the state as complete. VMM needs to ensure that the dirty
+state is final and avoid missing dirty pages from another ioctl ordered
+after the bitmap collection.
+
+NOTE: One example of using the backup bitmap is saving arm64 vgic/its
+tables through KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} command on
+KVM device "kvm-arm-vgic-its" when dirty ring is enabled.
+
8.30 KVM_CAP_XEN_HVM
--------------------
@@ -8025,12 +8068,13 @@ to userspace.
This capability indicates the features that Xen supports for hosting Xen PVHVM guests. Valid flags are::
- #define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0)
- #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
- #define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
- #define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
- #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
- #define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
+ #define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0)
+ #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
+ #define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
+ #define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
+ #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
+ #define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
+ #define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6)
The KVM_XEN_HVM_CONFIG_HYPERCALL_MSR flag indicates that the KVM_XEN_HVM_CONFIG ioctl is available, for the guest to set its hypercall page.
@@ -8062,6 +8106,18 @@ KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID/TIMER/UPCALL_VECTOR vCPU attributes.
related to event channel delivery, timers, and the XENVER_version interception.
+The KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG flag indicates that KVM supports
+the KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG attribute in the KVM_XEN_SET_ATTR
+and KVM_XEN_GET_ATTR ioctls. This controls whether KVM will set the
+XEN_RUNSTATE_UPDATE flag in guest memory mapped vcpu_runstate_info during
+updates of the runstate information. Note that versions of KVM which support
+the RUNSTATE feature above, but not the RUNSTATE_UPDATE_FLAG feature, will
+always set the XEN_RUNSTATE_UPDATE flag when updating the guest structure,
+which is perhaps counterintuitive. When this flag is advertised, KVM will
+behave more correctly, not using the XEN_RUNSTATE_UPDATE flag until/unless
+specifically enabled (by the guest making the hypercall, causing the VMM
+to enable the KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG attribute).
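As an illustration of the last point, the VMM side of enabling the attribute
could look like the following sketch, which assumes the kvm_xen_hvm_attr
layout shown in section 4.126 above::

    /*
     * Sketch: enable KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG once the guest
     * has issued the HYPERVISOR_vm_assist hypercall, per the text above.
     */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int enable_runstate_update_flag(int vm_fd)
    {
            struct kvm_xen_hvm_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.type = KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG;
            attr.u.runstate_update_flag = 1;

            return ioctl(vm_fd, KVM_XEN_SET_ATTR, &attr);
    }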
+
+
8.31 KVM_CAP_PPC_MULTITCE
-------------------------
diff --git a/Documentation/virt/kvm/arm/pvtime.rst b/Documentation/virt/kvm/arm/pvtime.rst
index 392521af7c90562fa61d0245df8d94ce002d3c93..e88b34e586be29337f9b89460d3023459235b08a 100644
--- a/Documentation/virt/kvm/arm/pvtime.rst
+++ b/Documentation/virt/kvm/arm/pvtime.rst
@@ -23,21 +23,23 @@ the PV_TIME_FEATURES hypercall should be probed using the SMCCC 1.1 ARCH_FEATURES mechanism before calling it.
PV_TIME_FEATURES
- ============= ======== ==========
+
+ ============= ======== =================================================
 Function ID: (uint32) 0xC5000020
 PV_call_id: (uint32) The function to query for support. Currently only PV_TIME_ST is supported.
 Return value: (int64) NOT_SUPPORTED (-1) or SUCCESS (0) if the relevant PV-time feature is supported by the hypervisor.
- ============= ======== ==========
+ ============= ======== =================================================
PV_TIME_ST
- ============= ======== ==========
+
+ ============= ======== ==============================================
 Function ID: (uint32) 0xC5000021
 Return value: (int64) IPA of the stolen time data structure for this VCPU. On failure: NOT_SUPPORTED (-1)
- ============= ======== ==========
+ ============= ======== ==============================================
The IPA returned by PV_TIME_ST should be mapped by the guest as normal memory with inner and outer write back caching attributes, in the inner shareable
@@ -76,5 +78,5 @@ It is advisable that one or more 64k pages are set aside for the purpose of these structures and not used for other purposes, this enables the guest to map the region using 64k pages and avoids conflicting attributes with other memory.
-For the user space interface see Documentation/virt/kvm/devices/vcpu.rst
-section "3. GROUP: KVM_ARM_VCPU_PVTIME_CTRL".
+For the user space interface see
+:ref:`Documentation/virt/kvm/devices/vcpu.rst <kvm_arm_vcpu_pvtime_ctrl>`.
\ No newline at end of file
diff --git a/Documentation/virt/kvm/devices/arm-vgic-its.rst b/Documentation/virt/kvm/devices/arm-vgic-its.rst
index d257eddbae29688651073903b4b6cbdb83347658..e053124f77c48a7b9757c0d27fa9b02717cc8b33 100644
--- a/Documentation/virt/kvm/devices/arm-vgic-its.rst
+++ b/Documentation/virt/kvm/devices/arm-vgic-its.rst
@@ -52,7 +52,10 @@ KVM_DEV_ARM_VGIC_GRP_CTRL
KVM_DEV_ARM_ITS_SAVE_TABLES
 save the ITS table data into guest RAM, at the location provisioned
- by the guest in corresponding registers/table entries.
+ by the guest in corresponding registers/table entries. Should userspace
+ require a form of dirty tracking to identify which pages are modified
+ by the saving process, it should use a bitmap even if using another
+ mechanism to track the memory dirtied by the vCPUs.
The layout of the tables in guest memory defines an ABI. The entries are laid out in little endian format as described in the last paragraph.
diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst
index 716aa3edae14fde2625025fe86d418020ef63695..31f14ec4a65b6a592f1cc36dfab05a0b14819fdc 100644
--- a/Documentation/virt/kvm/devices/vcpu.rst
+++ b/Documentation/virt/kvm/devices/vcpu.rst
@@ -171,6 +171,8 @@ configured values on other VCPUs. Userspace should configure the interrupt numbers on at least one VCPU after creating all VCPUs and before running any VCPUs.
+.. _kvm_arm_vcpu_pvtime_ctrl:
+
3.
GROUP: KVM_ARM_VCPU_PVTIME_CTRL
==================================
diff --git a/LICENSES/dual/copyleft-next-0.3.1 b/LICENSES/dual/copyleft-next-0.3.1
new file mode 100644
index 0000000000000000000000000000000000000000..c81acf710657273ddc8a29b9c689650ab3844b24
--- /dev/null
+++ b/LICENSES/dual/copyleft-next-0.3.1
@@ -0,0 +1,236 @@
+Valid-License-Identifier: copyleft-next-0.3.1
+SPDX-URL: https://spdx.org/licenses/copyleft-next-0.3.1
+Usage-Guide:
+ copyleft-next-0.3.1 is explicitly compatible with GPLv2 (or later) and
+ can therefore be used for kernel code. Though the best and recommended
+ practice is to express this in the SPDX license identifier by
+ licensing the code under both licenses expressed by the OR operator.
+ To use the copyleft-next-0.3.1 license put the following SPDX tag/value
+ pair into a comment according to the placement guidelines in the
+ licensing rules documentation:
+ SPDX-License-Identifier: GPL-2.0-only OR copyleft-next-0.3.1
+ SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1
+License-Text:
+
+=======================================================================
+
+ copyleft-next 0.3.1 ("this License")
+ Release date: 2016-04-29
+
+1. License Grants; No Trademark License
+
+ Subject to the terms of this License, I grant You:
+
+ a) A non-exclusive, worldwide, perpetual, royalty-free, irrevocable
+ copyright license, to reproduce, Distribute, prepare derivative works
+ of, publicly perform and publicly display My Work.
+
+ b) A non-exclusive, worldwide, perpetual, royalty-free, irrevocable
+ patent license under Licensed Patents to make, have made, use, sell,
+ offer for sale, and import Covered Works.
+
+ This License does not grant any rights in My name, trademarks, service
+ marks, or logos.
+
+2. Distribution: General Conditions
+
+ You may Distribute Covered Works, provided that You (i) inform
+ recipients how they can obtain a copy of this License; (ii) satisfy the
+ applicable conditions of sections 3 through 6; and (iii) preserve all
+ Legal Notices contained in My Work (to the extent they remain
+ pertinent). "Legal Notices" means copyright notices, license notices,
+ license texts, and author attributions, but does not include logos,
+ other graphical images, trademarks or trademark legends.
+
+3. Conditions for Distributing Derived Works; Outbound GPL Compatibility
+
+ If You Distribute a Derived Work, You must license the entire Derived
+ Work as a whole under this License, with prominent notice of such
+ licensing. This condition may not be avoided through such means as
+ separate Distribution of portions of the Derived Work.
+
+ If the Derived Work includes material licensed under the GPL, You may
+ instead license the Derived Work under the GPL.
+
+4. Condition Against Further Restrictions; Inbound License Compatibility
+
+ When Distributing a Covered Work, You may not impose further
+ restrictions on the exercise of rights in the Covered Work granted under
+ this License. This condition is not excused merely because such
+ restrictions result from Your compliance with conditions or obligations
+ extrinsic to this License (such as a court order or an agreement with a
+ third party).
+
+ However, You may Distribute a Covered Work incorporating material
+ governed by a license that is both OSI-Approved and FSF-Free as of the
+ release date of this License, provided that compliance with such
+ other license would not conflict with any conditions stated in other
+ sections of this License.
+
+5.
Conditions for Distributing Object Code + + You may Distribute an Object Code form of a Covered Work, provided that + you accompany the Object Code with a URL through which the Corresponding + Source is made available, at no charge, by some standard or customary + means of providing network access to source code. + + If you Distribute the Object Code in a physical product or tangible + storage medium ("Product"), the Corresponding Source must be available + through such URL for two years from the date of Your most recent + Distribution of the Object Code in the Product. However, if the Product + itself contains or is accompanied by the Corresponding Source (made + available in a customarily accessible manner), You need not also comply + with the first paragraph of this section. + + Each direct and indirect recipient of the Covered Work from You is an + intended third-party beneficiary of this License solely as to this + section 5, with the right to enforce its terms. + +6. Symmetrical Licensing Condition for Upstream Contributions + + If You Distribute a work to Me specifically for inclusion in or + modification of a Covered Work (a "Patch"), and no explicit licensing + terms apply to the Patch, You license the Patch under this License, to + the extent of Your copyright in the Patch. This condition does not + negate the other conditions of this License, if applicable to the Patch. + +7. Nullification of Copyleft/Proprietary Dual Licensing + + If I offer to license, for a fee, a Covered Work under terms other than + a license that is OSI-Approved or FSF-Free as of the release date of this + License or a numbered version of copyleft-next released by the + Copyleft-Next Project, then the license I grant You under section 1 is no + longer subject to the conditions in sections 3 through 5. + +8. Copyleft Sunset + + The conditions in sections 3 through 5 no longer apply once fifteen + years have elapsed from the date of My first Distribution of My Work + under this License. + +9. Pass-Through + + When You Distribute a Covered Work, the recipient automatically receives + a license to My Work from Me, subject to the terms of this License. + +10. Termination + + Your license grants under section 1 are automatically terminated if You + + a) fail to comply with the conditions of this License, unless You cure + such noncompliance within thirty days after becoming aware of it, or + + b) initiate a patent infringement litigation claim (excluding + declaratory judgment actions, counterclaims, and cross-claims) + alleging that any part of My Work directly or indirectly infringes + any patent. + + Termination of Your license grants extends to all copies of Covered + Works You subsequently obtain. Termination does not terminate the + rights of those who have received copies or rights from You subject to + this License. + + To the extent permission to make copies of a Covered Work is necessary + merely for running it, such permission is not terminable. + +11. Later License Versions + + The Copyleft-Next Project may release new versions of copyleft-next, + designated by a distinguishing version number ("Later Versions"). + Unless I explicitly remove the option of Distributing Covered Works + under Later Versions, You may Distribute Covered Works under any Later + Version. + +** 12. No Warranty ** +** ** +** My Work is provided "as-is", without warranty. You bear the risk ** +** of using it. 
To the extent permitted by applicable law, each ** +** Distributor of My Work excludes the implied warranties of title, ** +** merchantability, fitness for a particular purpose and ** +** non-infringement. ** + +** 13. Limitation of Liability ** +** ** +** To the extent permitted by applicable law, in no event will any ** +** Distributor of My Work be liable to You for any damages ** +** whatsoever, whether direct, indirect, special, incidental, or ** +** consequential damages, whether arising under contract, tort ** +** (including negligence), or otherwise, even where the Distributor ** +** knew or should have known about the possibility of such damages. ** + +14. Severability + + The invalidity or unenforceability of any provision of this License + does not affect the validity or enforceability of the remainder of + this License. Such provision is to be reformed to the minimum extent + necessary to make it valid and enforceable. + +15. Definitions + + "Copyleft-Next Project" means the project that maintains the source + code repository at + as of the release date of this License. + + "Corresponding Source" of a Covered Work in Object Code form means (i) + the Source Code form of the Covered Work; (ii) all scripts, + instructions and similar information that are reasonably necessary for + a skilled developer to generate such Object Code from the Source Code + provided under (i); and (iii) a list clearly identifying all Separate + Works (other than those provided in compliance with (ii)) that were + specifically used in building and (if applicable) installing the + Covered Work (for example, a specified proprietary compiler including + its version number). Corresponding Source must be machine-readable. + + "Covered Work" means My Work or a Derived Work. + + "Derived Work" means a work of authorship that copies from, modifies, + adapts, is based on, is a derivative work of, transforms, translates or + contains all or part of My Work, such that copyright permission is + required. The following are not Derived Works: (i) Mere Aggregation; + (ii) a mere reproduction of My Work; and (iii) if My Work fails to + explicitly state an expectation otherwise, a work that merely makes + reference to My Work. + + "Distribute" means to distribute, transfer or make a copy available to + someone else, such that copyright permission is required. + + "Distributor" means Me and anyone else who Distributes a Covered Work. + + "FSF-Free" means classified as 'free' by the Free Software Foundation. + + "GPL" means a version of the GNU General Public License or the GNU + Affero General Public License. + + "I"/"Me"/"My" refers to the individual or legal entity that places My + Work under this License. "You"/"Your" refers to the individual or legal + entity exercising rights in My Work under this License. A legal entity + includes each entity that controls, is controlled by, or is under + common control with such legal entity. "Control" means (a) the power to + direct the actions of such legal entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent of the + outstanding shares or beneficial ownership of such legal entity. + + "Licensed Patents" means all patent claims licensable royalty-free by + Me, now or in the future, that are necessarily infringed by making, + using, or selling My Work, and excludes claims that would be infringed + only as a consequence of further modification of My Work. + + "Mere Aggregation" means an aggregation of a Covered Work with a + Separate Work. 
+ + "My Work" means the particular work of authorship I license to You + under this License. + + "Object Code" means any form of a work that is not Source Code. + + "OSI-Approved" means approved as 'Open Source' by the Open Source + Initiative. + + "Separate Work" means a work that is separate from and independent of a + particular Covered Work and is not by its nature an extension or + enhancement of the Covered Work, and/or a runtime library, standard + library or similar component that is used to generate an Object Code + form of a Covered Work. + + "Source Code" means the preferred form of a work for making + modifications to it. diff --git a/MAINTAINERS b/MAINTAINERS index 5a4526a171d6a8b8697caac89741f2125b05cdc3..7f0b7181e60a5dcccdb6d2acd3704563742ccea3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -312,6 +312,13 @@ L: linux-iio@vger.kernel.org S: Maintained F: drivers/counter/104-quad-8.c +ACCES IDIO-16 GPIO LIBRARY +M: William Breathitt Gray +L: linux-gpio@vger.kernel.org +S: Maintained +F: drivers/gpio/gpio-idio-16.c +F: drivers/gpio/gpio-idio-16.h + ACCES PCI-IDIO-16 GPIO DRIVER M: William Breathitt Gray L: linux-gpio@vger.kernel.org @@ -1145,6 +1152,15 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git F: drivers/net/amt.c +ANALOG DEVICES INC AD4130 DRIVER +M: Cosmin Tanislav +L: linux-iio@vger.kernel.org +S: Supported +W: http://ez.analog.com/community/linux-device-drivers +F: Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130 +F: Documentation/devicetree/bindings/iio/adc/adi,ad4130.yaml +F: drivers/iio/adc/ad4130.c + ANALOG DEVICES INC AD7192 DRIVER M: Alexandru Tachici L: linux-iio@vger.kernel.org @@ -1194,6 +1210,14 @@ W: https://ez.analog.com/linux-software-drivers F: Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml F: drivers/iio/adc/ad7780.c +ANALOG DEVICES INC AD74115 DRIVER +M: Cosmin Tanislav +L: linux-iio@vger.kernel.org +S: Supported +W: http://ez.analog.com/community/linux-device-drivers +F: Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml +F: drivers/iio/addac/ad74115.c + ANALOG DEVICES INC AD74413R DRIVER M: Cosmin Tanislav L: linux-iio@vger.kernel.org @@ -1217,6 +1241,14 @@ W: https://ez.analog.com/linux-software-drivers F: Documentation/devicetree/bindings/iio/amplifiers/adi,ada4250.yaml F: drivers/iio/amplifiers/ada4250.c +ANALOG DEVICES INC ADF4377 DRIVER +M: Antoniu Miclaus +L: linux-iio@vger.kernel.org +S: Supported +W: https://ez.analog.com/linux-software-drivers +F: Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml +F: drivers/iio/frequency/adf4377.c + ANALOG DEVICES INC ADGS1408 DRIVER M: Mircea Caprioru S: Supported @@ -2298,6 +2330,7 @@ F: Documentation/devicetree/bindings/bus/intel,ixp4xx-expansion-bus-controller.y F: Documentation/devicetree/bindings/gpio/intel,ixp4xx-gpio.txt F: Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml F: Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml +F: arch/arm/boot/dts/intel-ixp* F: arch/arm/mach-ixp4xx/ F: drivers/bus/intel-ixp4xx-eb.c F: drivers/clocksource/timer-ixp4xx.c @@ -7889,6 +7922,7 @@ M: Chao Yu L: linux-f2fs-devel@lists.sourceforge.net S: Maintained W: https://f2fs.wiki.kernel.org/ +B: https://bugzilla.kernel.org/enter_bug.cgi?product=File%20System&component=f2fs T: git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git F: Documentation/ABI/testing/sysfs-fs-f2fs F: Documentation/filesystems/f2fs.rst @@ -7927,6 +7961,12 @@ 
F: fs/notify/fanotify/ F: include/linux/fanotify.h F: include/uapi/linux/fanotify.h +FARADAY FOTG210 USB2 DUAL-ROLE CONTROLLER +M: Linus Walleij +L: linux-usb@vger.kernel.org +S: Maintained +F: drivers/usb/fotg210/ + FARSYNC SYNCHRONOUS DRIVER M: Kevin Curtis S: Supported @@ -8105,6 +8145,8 @@ S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening F: include/linux/fortify-string.h F: lib/fortify_kunit.c +F: lib/memcpy_kunit.c +F: lib/strscpy_kunit.c F: lib/test_fortify/* F: scripts/test_fortify.sh K: \b__NO_FORTIFY\b @@ -8518,6 +8560,9 @@ FUNCTION HOOKS (FTRACE) M: Steven Rostedt M: Masami Hiramatsu R: Mark Rutland +L: linux-kernel@vger.kernel.org +L: linux-trace-kernel@vger.kernel.org +Q: https://patchwork.kernel.org/project/linux-trace-kernel/list/ S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace.git F: Documentation/trace/ftrace* @@ -9263,6 +9308,7 @@ HISILICON GPIO DRIVER M: Jay Fang L: linux-gpio@vger.kernel.org S: Maintained +F: Documentation/devicetree/bindings/gpio/hisilicon,ascend910-gpio.yaml F: drivers/gpio/gpio-hisi.c HISILICON HIGH PERFORMANCE RSA ENGINE DRIVER (HPRE) @@ -9279,6 +9325,7 @@ M: Yicong Yang L: linux-i2c@vger.kernel.org S: Maintained W: https://www.hisilicon.com +F: Documentation/devicetree/bindings/i2c/hisilicon,ascend910-i2c.yaml F: drivers/i2c/busses/i2c-hisi.c HISILICON LPC BUS DRIVER @@ -9362,7 +9409,7 @@ F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt F: drivers/infiniband/hw/hns/ HISILICON SAS Controller -M: John Garry +M: Xiang Chen S: Supported W: http://www.hisilicon.com F: Documentation/devicetree/bindings/scsi/hisilicon-sas.txt @@ -9770,8 +9817,7 @@ F: Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml F: drivers/i3c/master/i3c-master-cdns.c I3C DRIVER FOR SYNOPSYS DESIGNWARE -M: Vitor Soares -S: Maintained +S: Orphan F: Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml F: drivers/i3c/master/dw* @@ -10415,11 +10461,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git F: drivers/iommu/intel/ F: include/linux/intel-svm.h -INTEL IOP-ADMA DMA DRIVER -R: Dan Williams -S: Odd fixes -F: drivers/dma/iop-adma.c - INTEL IPU3 CSI-2 CIO2 DRIVER M: Yong Zhi M: Sakari Ailus @@ -10793,6 +10834,18 @@ F: drivers/iommu/dma-iommu.h F: drivers/iommu/iova.c F: include/linux/iova.h +IOMMUFD +M: Jason Gunthorpe +M: Kevin Tian +L: iommu@lists.linux.dev +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd.git +F: Documentation/userspace-api/iommufd.rst +F: drivers/iommu/iommufd/ +F: include/linux/iommufd.h +F: include/uapi/linux/iommufd.h +F: tools/testing/selftests/iommu/ + IOMMU SUBSYSTEM M: Joerg Roedel M: Will Deacon @@ -11197,6 +11250,8 @@ M: Kees Cook L: linux-hardening@vger.kernel.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening +F: Documentation/ABI/testing/sysfs-kernel-oops_count +F: Documentation/ABI/testing/sysfs-kernel-warn_count F: include/linux/overflow.h F: include/linux/randomize_kstack.h F: mm/usercopy.c @@ -11414,6 +11469,16 @@ F: arch/x86/kvm/svm/hyperv.* F: arch/x86/kvm/svm/svm_onhyperv.* F: arch/x86/kvm/vmx/evmcs.* +KVM X86 Xen (KVM/Xen) +M: David Woodhouse +M: Paul Durrant +M: Sean Christopherson +M: Paolo Bonzini +L: kvm@vger.kernel.org +S: Supported +T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git +F: arch/x86/kvm/xen.* + KERNFS M: Greg Kroah-Hartman M: Tejun Heo @@ -11534,6 +11599,12 @@ F: drivers/mfd/khadas-mcu.c F: 
include/linux/mfd/khadas-mcu.h F: drivers/thermal/khadas_mcu_fan.c +KIONIX/ROHM KX022A ACCELEROMETER +M: Matti Vaittinen +L: linux-iio@vger.kernel.org +S: Supported +F: drivers/iio/accel/kionix-kx022a* + KMEMLEAK M: Catalin Marinas S: Maintained @@ -11571,6 +11642,9 @@ M: Naveen N. Rao M: Anil S Keshavamurthy M: "David S. Miller" M: Masami Hiramatsu +L: linux-kernel@vger.kernel.org +L: linux-trace-kernel@vger.kernel.org +Q: https://patchwork.kernel.org/project/linux-trace-kernel/list/ S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace.git F: Documentation/trace/kprobes.rst @@ -11654,11 +11728,13 @@ F: scripts/leaking_addresses.pl LED SUBSYSTEM M: Pavel Machek +M: Lee Jones L: linux-leds@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/pavel/linux-leds.git F: Documentation/devicetree/bindings/leds/ F: drivers/leds/ +F: include/dt-bindings/leds/ F: include/linux/leds.h LEGACY EEPROM DRIVER @@ -12610,6 +12686,12 @@ S: Maintained F: Documentation/devicetree/bindings/regulator/maxim,max20086.yaml F: drivers/regulator/max20086-regulator.c +MAXIM MAX30208 TEMPERATURE SENSOR DRIVER +M: Rajat Khandelwal +L: linux-iio@vger.kernel.org +S: Maintained +F: drivers/iio/temperature/max30208.c + MAXIM MAX77650 PMIC MFD DRIVER M: Bartosz Golaszewski L: linux-kernel@vger.kernel.org @@ -13399,10 +13481,20 @@ F: include/linux/memory_hotplug.h F: include/linux/mm.h F: include/linux/mmzone.h F: include/linux/pagewalk.h -F: include/linux/vmalloc.h F: mm/ F: tools/testing/selftests/vm/ +VMALLOC +M: Andrew Morton +R: Uladzislau Rezki +R: Christoph Hellwig +L: linux-mm@kvack.org +S: Maintained +W: http://www.linux-mm.org +T: git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm +F: include/linux/vmalloc.h +F: mm/vmalloc.c + MEMORY HOT(UN)PLUG M: David Hildenbrand M: Oscar Salvador @@ -13509,7 +13601,6 @@ F: arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts MHI BUS M: Manivannan Sadhasivam -R: Hemant Kumar L: mhi@lists.linux.dev L: linux-arm-msm@vger.kernel.org S: Maintained @@ -13534,7 +13625,6 @@ L: dmaengine@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/dma/atmel-dma.txt F: drivers/dma/at_hdmac.c -F: drivers/dma/at_hdmac_regs.h F: drivers/dma/at_xdmac.c F: include/dt-bindings/dma/at91.h @@ -13744,7 +13834,7 @@ MICROCHIP USB251XB DRIVER M: Richard Leitner L: linux-usb@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/usb/usb251xb.txt +F: Documentation/devicetree/bindings/usb/usb251xb.yaml F: drivers/usb/misc/usb251xb.c MICROCHIP USBA UDC DRIVER @@ -13784,6 +13874,15 @@ F: drivers/scsi/smartpqi/smartpqi*.[ch] F: include/linux/cciss*.h F: include/uapi/linux/cciss*.h +MICROSOFT MANA RDMA DRIVER +M: Long Li +M: Ajay Sharma +L: linux-rdma@vger.kernel.org +S: Supported +F: drivers/infiniband/hw/mana/ +F: include/net/mana +F: include/uapi/rdma/mana-abi.h + MICROSOFT SURFACE AGGREGATOR TABLET-MODE SWITCH M: Maximilian Luz L: platform-driver-x86@vger.kernel.org @@ -14677,10 +14776,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git F: arch/nios2/ NITRO ENCLAVES (NE) -M: Andra Paraschiv -M: Alexandru Vasile M: Alexandru Ciobotaru L: linux-kernel@vger.kernel.org +L: The AWS Nitro Enclaves Team S: Supported W: https://aws.amazon.com/ec2/nitro/nitro-enclaves/ F: Documentation/virt/ne_overview.rst @@ -16330,7 +16428,7 @@ F: tools/lib/perf/ F: tools/perf/ PERFORMANCE EVENTS TOOLING ARM64 -R: John Garry +R: John Garry R: Will Deacon R: James Clark R: Mike Leach @@ -17907,6 +18005,13 @@ F: 
Documentation/ABI/*/sysfs-driver-hid-roccat* F: drivers/hid/hid-roccat* F: include/linux/hid-roccat* +ROCKCHIP CRYPTO DRIVERS +M: Corentin Labbe +L: linux-crypto@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/crypto/rockchip,rk3288-crypto.yaml +F: drivers/crypto/rockchip/ + ROCKCHIP I2S TDM DRIVER M: Nicolas Frattaroli L: linux-rockchip@lists.infradead.org @@ -19020,7 +19125,7 @@ M: Jason A. Donenfeld S: Maintained F: include/linux/siphash.h F: lib/siphash.c -F: lib/test_siphash.c +F: lib/siphash_kunit.c SIS 190 ETHERNET DRIVER M: Francois Romieu @@ -21018,6 +21123,9 @@ F: drivers/hwmon/pmbus/tps546d24.c TRACING M: Steven Rostedt M: Masami Hiramatsu +L: linux-kernel@vger.kernel.org +L: linux-trace-kernel@vger.kernel.org +Q: https://patchwork.kernel.org/project/linux-trace-kernel/list/ S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace.git F: Documentation/trace/* @@ -21721,7 +21829,7 @@ M: Alex Williamson R: Cornelia Huck L: kvm@vger.kernel.org S: Maintained -T: git git://github.com/awilliam/linux-vfio.git +T: git https://github.com/awilliam/linux-vfio.git F: Documentation/ABI/testing/sysfs-devices-vfio-dev F: Documentation/driver-api/vfio.rst F: drivers/vfio/ @@ -22942,8 +23050,7 @@ F: drivers/media/pci/zoran/ ZRAM COMPRESSED RAM BLOCK DEVICE DRVIER M: Minchan Kim -M: Nitin Gupta -R: Sergey Senozhatsky +M: Sergey Senozhatsky L: linux-kernel@vger.kernel.org S: Maintained F: Documentation/admin-guide/blockdev/zram.rst @@ -22956,8 +23063,7 @@ F: drivers/tty/serial/zs.* ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR M: Minchan Kim -M: Nitin Gupta -R: Sergey Senozhatsky +M: Sergey Senozhatsky L: linux-mm@kvack.org S: Maintained F: Documentation/mm/zsmalloc.rst @@ -22968,7 +23074,7 @@ ZSTD M: Nick Terrell S: Maintained B: https://github.com/facebook/zstd/issues -T: git git://github.com/terrelln/linux.git +T: git https://github.com/terrelln/linux.git F: include/linux/zstd* F: lib/zstd/ F: lib/decompress_unzstd.c diff --git a/Makefile b/Makefile index bb60045526c43f8c3c4ef36a493cff5b77f6478a..25247f931872255362e017d267f093d3624dd535 100644 --- a/Makefile +++ b/Makefile @@ -11,6 +11,10 @@ NAME = Hurr durr I'ma ninja sloth # Comments in this file are targeted only to the developer, do not # expect to learn how to build the kernel reading this file. +ifeq ($(filter undefine,$(.FEATURES)),) +$(error GNU Make >= 3.82 is required. Your Make version is $(MAKE_VERSION)) +endif + $(if $(filter __%, $(MAKECMDGOALS)), \ $(error targets prefixed with '__' are only for internal use)) @@ -93,10 +97,17 @@ endif # If the user is running make -s (silent mode), suppress echoing of # commands +# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS. 
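The comment just added is the crux of the hunk that follows: GNU Make 4.x places bundled single-letter options such as -s in the first word of $(MAKEFLAGS) without a leading dash, while long options arrive as separate "--" words. A throwaway probe of that convention, meant to be run from a make recipe (user-space sketch, not kernel code)::

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            /* make exports MAKEFLAGS to recipe commands; with make >= 4.0,
             * "make -rs" yields something like MAKEFLAGS="rs -- ...". */
            const char *flags = getenv("MAKEFLAGS");
            char first[64] = "";

            if (flags)
                    sscanf(flags, "%63s", first);
            /* Mirror the new check: look for 's' in the first word,
             * ignoring words that are really long options. */
            printf("silent build: %s\n",
                   first[0] != '-' && strchr(first, 's') ? "yes" : "no");
            return 0;
    }
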
-ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),) - quiet=silent_ - KBUILD_VERBOSE = 0 +ifeq ($(filter 3.%,$(MAKE_VERSION)),) +silence:=$(findstring s,$(firstword -$(MAKEFLAGS))) +else +silence:=$(findstring s,$(filter-out --%,$(MAKEFLAGS))) +endif + +ifeq ($(silence),s) +quiet=silent_ +KBUILD_VERBOSE = 0 endif export quiet Q KBUILD_VERBOSE @@ -369,7 +380,7 @@ else # !mixed-build include $(srctree)/scripts/Kbuild.include # Read KERNELRELEASE from include/config/kernel.release (if it exists) -KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) +KERNELRELEASE = $(call read-file, include/config/kernel.release) KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION) export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION @@ -859,7 +870,8 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong KBUILD_CFLAGS += $(stackp-flags-y) -KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror +KBUILD_CPPFLAGS-$(CONFIG_WERROR) += -Werror +KBUILD_CPPFLAGS += $(KBUILD_CPPFLAGS-y) KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds KBUILD_RUSTFLAGS-$(CONFIG_WERROR) += -Dwarnings @@ -933,7 +945,9 @@ ifdef CONFIG_FTRACE_MCOUNT_USE_CC endif endif ifdef CONFIG_FTRACE_MCOUNT_USE_OBJTOOL - CC_FLAGS_USING += -DCC_USING_NOP_MCOUNT + ifdef CONFIG_HAVE_OBJTOOL_NOP_MCOUNT + CC_FLAGS_USING += -DCC_USING_NOP_MCOUNT + endif endif ifdef CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT ifdef CONFIG_HAVE_C_RECORDMCOUNT @@ -988,7 +1002,7 @@ KBUILD_LDFLAGS += -mllvm -import-instr-limit=5 # Check for frame size exceeding threshold during prolog/epilog insertion # when using lld < 13.0.0. ifneq ($(CONFIG_FRAME_WARN),0) -ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0) +ifeq ($(call test-lt, $(CONFIG_LLD_VERSION), 130000),y) KBUILD_LDFLAGS += -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN) endif endif @@ -1006,8 +1020,8 @@ KBUILD_CFLAGS += $(CC_FLAGS_CFI) export CC_FLAGS_CFI endif -ifdef CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B -KBUILD_CFLAGS += -falign-functions=64 +ifneq ($(CONFIG_FUNCTION_ALIGNMENT),0) +KBUILD_CFLAGS += -falign-functions=$(CONFIG_FUNCTION_ALIGNMENT) endif # arch Makefile may override CC so keep this after arch Makefile is included @@ -1120,7 +1134,7 @@ endif # We never want expected sections to be placed heuristically by the # linker. All sections should be explicitly named in the linker script. 
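Looping back to the CONFIG_FUNCTION_ALIGNMENT hunk above: the Kconfig integer is handed straight to -falign-functions=N. The effect is easy to observe outside the kernel; a standalone sketch, assuming a gcc or clang build with -falign-functions=64::

    #include <stdint.h>
    #include <stdio.h>

    static void probe(void)
    {
    }

    int main(void)
    {
            /* With -falign-functions=64, probe()'s entry address should
             * land on a 64-byte boundary, which is what the kernel now
             * requests via CONFIG_FUNCTION_ALIGNMENT_64B. */
            printf("probe() at %p, 64-byte aligned: %s\n", (void *)probe,
                   (uintptr_t)probe % 64 == 0 ? "yes" : "no");
            return 0;
    }
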
ifdef CONFIG_LD_ORPHAN_WARN -LDFLAGS_vmlinux += --orphan-handling=warn +LDFLAGS_vmlinux += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL) endif # Align the bit size of userspace programs with the kernel @@ -1558,7 +1572,7 @@ __modinst_pre: rm -f $(MODLIB)/build ; \ ln -s $(CURDIR) $(MODLIB)/build ; \ fi - @sed 's:^:kernel/:' modules.order > $(MODLIB)/modules.order + @sed 's:^\(.*\)\.o$$:kernel/\1.ko:' modules.order > $(MODLIB)/modules.order @cp -f modules.builtin $(MODLIB)/ @cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/ diff --git a/arch/Kconfig b/arch/Kconfig index 2d0e7099eb3ffa211a84f92030f46f13274b2c2d..12e3ddabac9d3be02f6c9f6c5433bd1ac4b65dbe 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -638,7 +638,7 @@ config ARCH_SUPPORTS_SHADOW_CALL_STACK config SHADOW_CALL_STACK bool "Shadow Call Stack" depends on ARCH_SUPPORTS_SHADOW_CALL_STACK - depends on DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER + depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER help This option enables the compiler's Shadow Call Stack, which uses a shadow stack to protect function return addresses from @@ -1438,4 +1438,28 @@ source "kernel/gcov/Kconfig" source "scripts/gcc-plugins/Kconfig" +config FUNCTION_ALIGNMENT_4B + bool + +config FUNCTION_ALIGNMENT_8B + bool + +config FUNCTION_ALIGNMENT_16B + bool + +config FUNCTION_ALIGNMENT_32B + bool + +config FUNCTION_ALIGNMENT_64B + bool + +config FUNCTION_ALIGNMENT + int + default 64 if FUNCTION_ALIGNMENT_64B + default 32 if FUNCTION_ALIGNMENT_32B + default 16 if FUNCTION_ALIGNMENT_16B + default 8 if FUNCTION_ALIGNMENT_8B + default 4 if FUNCTION_ALIGNMENT_4B + default 0 + endmenu diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h index 3ea9661c09ffc10c969ea4059fd8ff4a27dd71fe..9e45f6735d5d2cacf678fb9c83ba8dbf04de20e5 100644 --- a/arch/alpha/include/asm/pgtable.h +++ b/arch/alpha/include/asm/pgtable.h @@ -313,8 +313,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#define kern_addr_valid(addr) (1) - #define pte_ERROR(e) \ printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) #define pmd_ERROR(e) \ diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h index b23be557403e3dfefbfe77c72ef445fa5b9a1584..515e82db519fe332bfefd08646f6757f5c4b24d4 100644 --- a/arch/arc/include/asm/pgtable-bits-arcv2.h +++ b/arch/arc/include/asm/pgtable-bits-arcv2.h @@ -120,8 +120,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#define kern_addr_valid(addr) (1) - #ifdef CONFIG_TRANSPARENT_HUGEPAGE #include #endif diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 65d0daf6326218c1b0c8101aca08dadc6f5aa8ca..43c7773b89ae7972d82eb3ab15937bb787d715ed 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -345,12 +345,14 @@ comment "CPU Core family selection" config ARCH_MULTI_V4 bool "ARMv4 based platforms (FA526, StrongARM)" depends on !ARCH_MULTI_V6_V7 + depends on !LD_IS_LLD select ARCH_MULTI_V4_V5 select CPU_FA526 if !(CPU_SA110 || CPU_SA1100) config ARCH_MULTI_V4T bool "ARMv4T based platforms (ARM720T, ARM920T, ...)" depends on !ARCH_MULTI_V6_V7 + depends on !LD_IS_LLD select ARCH_MULTI_V4_V5 select CPU_ARM920T if !(CPU_ARM7TDMI || CPU_ARM720T || \ CPU_ARM740T || 
CPU_ARM9TDMI || CPU_ARM922T || \ @@ -1158,27 +1160,6 @@ config ARM_PSCI 0022A ("Power State Coordination Interface System Software on ARM processors"). -# The GPIO number here must be sorted by descending number. In case of -# a multiplatform kernel, we just want the highest value required by the -# selected platforms. -config ARCH_NR_GPIO - int - default 2048 if ARCH_INTEL_SOCFPGA - default 1024 if ARCH_BRCMSTB || ARCH_RENESAS || ARCH_TEGRA || \ - ARCH_ZYNQ || ARCH_ASPEED - default 512 if ARCH_EXYNOS || ARCH_KEYSTONE || SOC_OMAP5 || \ - SOC_DRA7XX || ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210 - default 416 if ARCH_SUNXI - default 392 if ARCH_U8500 - default 352 if ARCH_VT8500 - default 288 if ARCH_ROCKCHIP - default 264 if MACH_H4700 - default 0 - help - Maximum number of GPIOs in the system. - - If unsure, leave the default value. - config HZ_FIXED int default 128 if SOC_AT91RM9200 diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 8c63f0a26f56b04c1b27b8b8ef597a59d6836eec..2ef651a78fa2a979cdf3712c01f8481b69789093 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -124,7 +124,7 @@ LDFLAGS_vmlinux += --no-undefined LDFLAGS_vmlinux += -X # Report orphan sections ifdef CONFIG_LD_ORPHAN_WARN -LDFLAGS_vmlinux += --orphan-handling=warn +LDFLAGS_vmlinux += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL) endif # Next argument is a linker script LDFLAGS_vmlinux += -T diff --git a/arch/arm/boot/dts/spear300.dtsi b/arch/arm/boot/dts/spear300.dtsi index b39bd5a226276f089595704b3b4a71c9bdba3c52..f1135e887f7b8c9f885823b5582cf41879e3abac 100644 --- a/arch/arm/boot/dts/spear300.dtsi +++ b/arch/arm/boot/dts/spear300.dtsi @@ -46,7 +46,7 @@ status = "disabled"; }; - shirq: interrupt-controller@0x50000000 { + shirq: interrupt-controller@50000000 { compatible = "st,spear300-shirq"; reg = <0x50000000 0x1000>; interrupts = <28>; diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi index 77570833d46b292fb29204091f861bd0810c275c..ce08d8820940356ae2dbf6ce946a9e275850f68c 100644 --- a/arch/arm/boot/dts/spear310.dtsi +++ b/arch/arm/boot/dts/spear310.dtsi @@ -34,7 +34,7 @@ status = "disabled"; }; - shirq: interrupt-controller@0xb4000000 { + shirq: interrupt-controller@b4000000 { compatible = "st,spear310-shirq"; reg = <0xb4000000 0x1000>; interrupts = <28 29 30 1>; diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi index b12474446a48732e44a3fa9af2d350ac002efede..56f141297ea35f4be2013491a49e6e3ac9ddd04c 100644 --- a/arch/arm/boot/dts/spear320.dtsi +++ b/arch/arm/boot/dts/spear320.dtsi @@ -49,7 +49,7 @@ status = "disabled"; }; - shirq: interrupt-controller@0xb3000000 { + shirq: interrupt-controller@b3000000 { compatible = "st,spear320-shirq"; reg = <0xb3000000 0x1000>; interrupts = <30 28 29 1>; diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index 3858c4d4cb98854d023075626878bc928062a42d..7b2b7d043d9be9e6f5c432badf2adb9938883eaf 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -18,7 +18,7 @@ config CRYPTO_GHASH_ARM_CE depends on KERNEL_MODE_NEON select CRYPTO_HASH select CRYPTO_CRYPTD - select CRYPTO_GF128MUL + select CRYPTO_LIB_GF128MUL help GCM GHASH function (NIST SP800-38D) diff --git a/arch/arm/crypto/aes-cipher-glue.c b/arch/arm/crypto/aes-cipher-glue.c index 8cd00f56800e7f97574a7f7756c896f546c7284c..6dfaef2d8f913c539f484f00c7f08fe77232f85d 100644 --- a/arch/arm/crypto/aes-cipher-glue.c +++ b/arch/arm/crypto/aes-cipher-glue.c @@ -7,7 +7,7 @@ */ 
#include -#include +#include #include asmlinkage void __aes_arm_encrypt(u32 *rk, int rounds, const u8 *in, u8 *out); diff --git a/arch/arm/crypto/nh-neon-core.S b/arch/arm/crypto/nh-neon-core.S index 434d80ab531c2a600fbcffc89c21e6a8ad5ef284..01620a0782ca93f2aa34730b526fe25f1c273e83 100644 --- a/arch/arm/crypto/nh-neon-core.S +++ b/arch/arm/crypto/nh-neon-core.S @@ -69,7 +69,7 @@ /* * void nh_neon(const u32 *key, const u8 *message, size_t message_len, - * u8 hash[NH_HASH_BYTES]) + * __le64 hash[NH_NUM_PASSES]) * * It's guaranteed that message_len % 16 == 0. */ diff --git a/arch/arm/crypto/nhpoly1305-neon-glue.c b/arch/arm/crypto/nhpoly1305-neon-glue.c index ffa8d73fe722c231720e86d3d6aeb91ab283754b..e93e41ff265665a094efd8cc2c791bef5479e7dd 100644 --- a/arch/arm/crypto/nhpoly1305-neon-glue.c +++ b/arch/arm/crypto/nhpoly1305-neon-glue.c @@ -14,14 +14,7 @@ #include asmlinkage void nh_neon(const u32 *key, const u8 *message, size_t message_len, - u8 hash[NH_HASH_BYTES]); - -/* wrapper to avoid indirect call to assembly, which doesn't work with CFI */ -static void _nh_neon(const u32 *key, const u8 *message, size_t message_len, - __le64 hash[NH_NUM_PASSES]) -{ - nh_neon(key, message, message_len, (u8 *)hash); -} + __le64 hash[NH_NUM_PASSES]); static int nhpoly1305_neon_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) @@ -33,7 +26,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc, unsigned int n = min_t(unsigned int, srclen, SZ_4K); kernel_neon_begin(); - crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon); + crypto_nhpoly1305_update_helper(desc, src, n, nh_neon); kernel_neon_end(); src += n; srclen -= n; diff --git a/arch/arm/include/asm/gpio.h b/arch/arm/include/asm/gpio.h index f3bb8a2bf788eff111986bd8c2ae0a9f4da8aeda..4ebbb58f06ea62954d004f39562ef141e6f5c462 100644 --- a/arch/arm/include/asm/gpio.h +++ b/arch/arm/include/asm/gpio.h @@ -2,7 +2,6 @@ #ifndef _ARCH_ARM_GPIO_H #define _ARCH_ARM_GPIO_H -/* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */ #include /* The trivial gpiolib dispatchers */ diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h index 090011394477f1090abe0c21ab0095b48da73ab9..61480d096054d32d41dd8f56ec31a4ce62a213a2 100644 --- a/arch/arm/include/asm/pgtable-nommu.h +++ b/arch/arm/include/asm/pgtable-nommu.h @@ -21,8 +21,6 @@ #define pgd_none(pgd) (0) #define pgd_bad(pgd) (0) #define pgd_clear(pgdp) -#define kern_addr_valid(addr) (1) -/* FIXME */ /* * PMD_SHIFT determines the size of the area a second-level page table can map * PGDIR_SHIFT determines what a third-level page table entry can map diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index ef48a55e9af83bd56c5f4ab21f684a1876b0ae44..f049072b2e8586089cad1e9c26390d028b59286e 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -300,10 +300,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) */ #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS) -/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ -/* FIXME: this is not correct */ -#define kern_addr_valid(addr) (1) - /* * We provide our own arch_get_unmapped_area to cope with VIPT caches. 
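On the nhpoly1305 hunk above: the deleted _nh_neon() trampoline existed only because kCFI rejects indirect calls into assembly that carries no type information; once the assembly entry point is emitted with type metadata (SYM_TYPED_FUNC_START on the arm64 side of this series), the symbol can be passed as the callback directly. A self-contained sketch of the pattern, using illustrative names rather than the kernel API::

    #include <stddef.h>
    #include <stdint.h>

    /* Under kCFI, an indirect call site checks a hash of the callee's
     * prototype, so the pointer type and the definition must agree. */
    typedef void (*nh_fn)(const uint32_t *key, const uint8_t *msg,
                          size_t len, uint64_t hash[4]);

    static void nh_impl(const uint32_t *key, const uint8_t *msg,
                        size_t len, uint64_t hash[4])
    {
            int i;

            for (i = 0; i < 4; i++)         /* stand-in for the NEON body */
                    hash[i] = key[i] ^ (uint64_t)len;
            (void)msg;
    }

    static void update(nh_fn fn, const uint32_t *key, const uint8_t *msg,
                       size_t len, uint64_t hash[4])
    {
            fn(key, msg, len, hash);        /* indirect, CFI-checked call */
    }

    int main(void)
    {
            uint32_t key[4] = { 1, 2, 3, 4 };
            uint64_t hash[4];

            update(nh_impl, key, NULL, 0, hash);    /* no trampoline */
            return hash[0] == 1 ? 0 : 1;
    }
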
*/ diff --git a/arch/arm/mach-omap1/sram-init.c b/arch/arm/mach-omap1/sram-init.c index 27c42e2a21cc1b52ea23864f7113875dbcbb91c5..dabf0c4defebf1c76dd7691cd799be509fdafbf6 100644 --- a/arch/arm/mach-omap1/sram-init.c +++ b/arch/arm/mach-omap1/sram-init.c @@ -10,11 +10,11 @@ #include #include #include +#include #include #include #include -#include #include @@ -74,8 +74,7 @@ void *omap_sram_push(void *funcp, unsigned long size) dst = fncpy(sram, funcp, size); - set_memory_ro(base, pages); - set_memory_x(base, pages); + set_memory_rox(base, pages); return dst; } @@ -126,8 +125,7 @@ static void __init omap_detect_and_map_sram(void) base = (unsigned long)omap_sram_base; pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE; - set_memory_ro(base, pages); - set_memory_x(base, pages); + set_memory_rox(base, pages); } static void (*_omap_sram_reprogram_clock)(u32 dpllctl, u32 ckctl); diff --git a/arch/arm/mach-omap2/sram.c b/arch/arm/mach-omap2/sram.c index 39cf270da718a753210b65f4864c1e12164dc8ef..815d390109d21bef4a9af992138c63ff64a1daba 100644 --- a/arch/arm/mach-omap2/sram.c +++ b/arch/arm/mach-omap2/sram.c @@ -14,11 +14,11 @@ #include #include #include +#include #include #include #include -#include #include @@ -96,8 +96,7 @@ void *omap_sram_push(void *funcp, unsigned long size) dst = fncpy(sram, funcp, size); - set_memory_ro(base, pages); - set_memory_x(base, pages); + set_memory_rox(base, pages); return dst; } @@ -217,8 +216,7 @@ static void __init omap2_map_sram(void) base = (unsigned long)omap_sram_base; pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE; - set_memory_ro(base, pages); - set_memory_x(base, pages); + set_memory_rox(base, pages); } static void (*_omap2_sram_ddr_init)(u32 *slow_dll_ctrl, u32 fast_dll_ctrl, diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index afbf6ace954fee1c0bf2faaab889864ee47d01f5..eea507fd5095f47548ea5e69485551d61bed69b2 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c @@ -133,8 +133,12 @@ void pxa27x_cpu_pm_enter(suspend_state_t state) #ifndef CONFIG_IWMMXT u64 acc0; +#ifndef CONFIG_AS_IS_LLVM asm volatile(".arch_extension xscale\n\t" "mra %Q0, %R0, acc0" : "=r" (acc0)); +#else + asm volatile("mrrc p0, 0, %Q0, %R0, c0" : "=r" (acc0)); +#endif #endif /* ensure voltage-change sequencer not initiated, which hangs */ @@ -153,8 +157,12 @@ void pxa27x_cpu_pm_enter(suspend_state_t state) case PM_SUSPEND_MEM: cpu_suspend(pwrmode, pxa27x_finish_suspend); #ifndef CONFIG_IWMMXT +#ifndef CONFIG_AS_IS_LLVM asm volatile(".arch_extension xscale\n\t" "mar acc0, %Q0, %R0" : "=r" (acc0)); +#else + asm volatile("mcrr p0, 0, %Q0, %R0, c0" :: "r" (acc0)); +#endif #endif break; } diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index 979642aa7ffe01e29f516ac9c15d3ceb7797af8e..b26f00fc75d54020e62fe038ed58cd35294b8e4c 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c @@ -108,8 +108,12 @@ static void pxa3xx_cpu_pm_suspend(void) #ifndef CONFIG_IWMMXT u64 acc0; +#ifdef CONFIG_CC_IS_GCC asm volatile(".arch_extension xscale\n\t" "mra %Q0, %R0, acc0" : "=r" (acc0)); +#else + asm volatile("mrrc p0, 0, %Q0, %R0, c0" : "=r" (acc0)); +#endif #endif /* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */ @@ -137,8 +141,12 @@ static void pxa3xx_cpu_pm_suspend(void) AD3ER = 0; #ifndef CONFIG_IWMMXT +#ifndef CONFIG_AS_IS_LLVM asm volatile(".arch_extension xscale\n\t" "mar acc0, %Q0, %R0" : "=r" (acc0)); +#else + asm volatile("mcrr p0, 0, %Q0, %R0, c0" :: "r" (acc0)); +#endif #endif } diff --git 
a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7fc3457a8891c22d05ba23baa6ebf5b4c3bdb5cb..03934808b2ed0717cbc616dbcc49d962b75f0090 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -972,22 +972,6 @@ config ARM64_ERRATUM_2457168 If unsure, say Y. -config ARM64_ERRATUM_2645198 - bool "Cortex-A715: 2645198: Workaround possible [ESR|FAR]_ELx corruption" - default y - help - This option adds the workaround for ARM Cortex-A715 erratum 2645198. - - If a Cortex-A715 cpu sees a page mapping permissions change from executable - to non-executable, it may corrupt the ESR_ELx and FAR_ELx registers on the - next instruction abort caused by permission fault. - - Only user-space does executable to non-executable permission transition via - mprotect() system call. Workaround the problem by doing a break-before-make - TLB invalidation, for all changes to executable user space mappings. - - If unsure, say Y. - config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y @@ -1988,6 +1972,7 @@ config ARM64_MTE depends on ARM64_PAN select ARCH_HAS_SUBPAGE_FAULTS select ARCH_USES_HIGH_VMA_FLAGS + select ARCH_USES_PG_ARCH_X help Memory Tagging (part of the ARMv8.5 Extensions) provides architectural support for run-time, always-on detection of @@ -2168,18 +2153,6 @@ config STACKPROTECTOR_PER_TASK def_bool y depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_SYSREG -# The GPIO number here must be sorted by descending number. In case of -# a multiplatform kernel, we just want the highest value required by the -# selected platforms. -config ARCH_NR_GPIO - int - default 2048 if ARCH_APPLE - default 0 - help - Maximum number of GPIOs in the system. - - If unsure, leave the default value. - config UNWIND_PATCH_PAC_INTO_SCS bool "Enable shadow call stack dynamically using code patching" # needs Clang with https://reviews.llvm.org/D111780 incorporated diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi index a70b669c49baa3ccc3314320c0c4d2c8841f5c42..402136bfd5350b044d7fb74c6338c3273c14e0f5 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi @@ -1678,7 +1678,7 @@ ; interrupt-names = "job", "mmu", "gpu"; - clocks = <&topckgen CLK_TOP_MFGPLL_CK>; + clocks = <&mfgcfg CLK_MFG_BG3D>; power-domains = <&spm MT8183_POWER_DOMAIN_MFG_CORE0>, diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts index 4fbd99eb496a2a19a3c14974af6ec573307ce06d..dec85d2548384ca5ed1ac8e4868d3d20c4f8cc85 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts +++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts @@ -56,10 +56,10 @@ #size-cells = <2>; ranges; - /* 192 KiB reserved for ARM Trusted Firmware (BL31) */ + /* 2 MiB reserved for ARM Trusted Firmware (BL31) */ bl31_secmon_reserved: secmon@54600000 { no-map; - reg = <0 0x54600000 0x0 0x30000>; + reg = <0 0x54600000 0x0 0x200000>; }; /* 12 MiB reserved for OP-TEE (BL32) diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 8bd80508a710d1120afc320701111b8397f34f86..6d06b448a66e0ca6d62c728e04754ac2cbb65f6e 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -6,8 +6,8 @@ config CRYPTO_GHASH_ARM64_CE tristate "Hash functions: GHASH (ARMv8 Crypto Extensions)" depends on KERNEL_MODE_NEON select CRYPTO_HASH - select CRYPTO_GF128MUL select CRYPTO_LIB_AES + select CRYPTO_LIB_GF128MUL select CRYPTO_AEAD help GCM GHASH function (NIST SP800-38D) @@ -96,6 +96,17 @@ config CRYPTO_SHA3_ARM64 Architecture: 
arm64 using: - ARMv8.2 Crypto Extensions +config CRYPTO_SM3_NEON + tristate "Hash functions: SM3 (NEON)" + depends on KERNEL_MODE_NEON + select CRYPTO_HASH + select CRYPTO_SM3 + help + SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012) + + Architecture: arm64 using: + - NEON (Advanced SIMD) extensions + config CRYPTO_SM3_ARM64_CE tristate "Hash functions: SM3 (ARMv8.2 Crypto Extensions)" depends on KERNEL_MODE_NEON @@ -220,7 +231,7 @@ config CRYPTO_SM4_ARM64_CE - NEON (Advanced SIMD) extensions config CRYPTO_SM4_ARM64_CE_BLK - tristate "Ciphers: SM4, modes: ECB/CBC/CFB/CTR (ARMv8 Crypto Extensions)" + tristate "Ciphers: SM4, modes: ECB/CBC/CFB/CTR/XTS (ARMv8 Crypto Extensions)" depends on KERNEL_MODE_NEON select CRYPTO_SKCIPHER select CRYPTO_SM4 @@ -231,6 +242,8 @@ config CRYPTO_SM4_ARM64_CE_BLK - CBC (Cipher Block Chaining) mode (NIST SP800-38A) - CFB (Cipher Feedback) mode (NIST SP800-38A) - CTR (Counter) mode (NIST SP800-38A) + - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E + and IEEE 1619) Architecture: arm64 using: - ARMv8 Crypto Extensions @@ -268,6 +281,38 @@ config CRYPTO_AES_ARM64_CE_CCM - ARMv8 Crypto Extensions - NEON (Advanced SIMD) extensions +config CRYPTO_SM4_ARM64_CE_CCM + tristate "AEAD cipher: SM4 in CCM mode (ARMv8 Crypto Extensions)" + depends on KERNEL_MODE_NEON + select CRYPTO_ALGAPI + select CRYPTO_AEAD + select CRYPTO_SM4 + select CRYPTO_SM4_ARM64_CE_BLK + help + AEAD cipher: SM4 cipher algorithms (OSCCA GB/T 32907-2016) with + CCM (Counter with Cipher Block Chaining-Message Authentication Code) + authenticated encryption mode (NIST SP800-38C) + + Architecture: arm64 using: + - ARMv8 Crypto Extensions + - NEON (Advanced SIMD) extensions + +config CRYPTO_SM4_ARM64_CE_GCM + tristate "AEAD cipher: SM4 in GCM mode (ARMv8 Crypto Extensions)" + depends on KERNEL_MODE_NEON + select CRYPTO_ALGAPI + select CRYPTO_AEAD + select CRYPTO_SM4 + select CRYPTO_SM4_ARM64_CE_BLK + help + AEAD cipher: SM4 cipher algorithms (OSCCA GB/T 32907-2016) with + GCM (Galois/Counter Mode) authenticated encryption mode (NIST SP800-38D) + + Architecture: arm64 using: + - ARMv8 Crypto Extensions + - PMULL (Polynomial Multiply Long) instructions + - NEON (Advanced SIMD) extensions + config CRYPTO_CRCT10DIF_ARM64_CE tristate "CRCT10DIF (PMULL)" depends on KERNEL_MODE_NEON && CRC_T10DIF diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index 24bb0c4610de24f52e06f2230a8d87da34eeee7e..4818e204c2aca2d28e65e38b290b5d9879991436 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -17,6 +17,9 @@ sha512-ce-y := sha512-ce-glue.o sha512-ce-core.o obj-$(CONFIG_CRYPTO_SHA3_ARM64) += sha3-ce.o sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o +obj-$(CONFIG_CRYPTO_SM3_NEON) += sm3-neon.o +sm3-neon-y := sm3-neon-glue.o sm3-neon-core.o + obj-$(CONFIG_CRYPTO_SM3_ARM64_CE) += sm3-ce.o sm3-ce-y := sm3-ce-glue.o sm3-ce-core.o @@ -26,6 +29,12 @@ sm4-ce-cipher-y := sm4-ce-cipher-glue.o sm4-ce-cipher-core.o obj-$(CONFIG_CRYPTO_SM4_ARM64_CE_BLK) += sm4-ce.o sm4-ce-y := sm4-ce-glue.o sm4-ce-core.o +obj-$(CONFIG_CRYPTO_SM4_ARM64_CE_CCM) += sm4-ce-ccm.o +sm4-ce-ccm-y := sm4-ce-ccm-glue.o sm4-ce-ccm-core.o + +obj-$(CONFIG_CRYPTO_SM4_ARM64_CE_GCM) += sm4-ce-gcm.o +sm4-ce-gcm-y := sm4-ce-gcm-glue.o sm4-ce-gcm-core.o + obj-$(CONFIG_CRYPTO_SM4_ARM64_NEON_BLK) += sm4-neon.o sm4-neon-y := sm4-neon-glue.o sm4-neon-core.o diff --git a/arch/arm64/crypto/aes-ce-glue.c b/arch/arm64/crypto/aes-ce-glue.c index 
56a5f6f0b0c123ac8668b0125b4b04bb9af052ed..e921823ca103a4e4081282f88a68fb9dfd09b72d 100644 --- a/arch/arm64/crypto/aes-ce-glue.c +++ b/arch/arm64/crypto/aes-ce-glue.c @@ -9,9 +9,9 @@ #include #include #include +#include #include #include -#include #include #include "aes-ce-setkey.h" diff --git a/arch/arm64/crypto/aes-cipher-glue.c b/arch/arm64/crypto/aes-cipher-glue.c index 8caf6dfefce88ec91cd0b3f5e510a30ebcde7849..4ec55e568941c0306cbb1809334c4e10827968bf 100644 --- a/arch/arm64/crypto/aes-cipher-glue.c +++ b/arch/arm64/crypto/aes-cipher-glue.c @@ -6,7 +6,7 @@ */ #include -#include +#include #include asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds); diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index 5abc834271f4a61097e7a71d365d2fef8d51b12b..0e834a2c062cf2659ba974f0546ba416fad556d1 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S @@ -52,8 +52,7 @@ SYM_FUNC_END(aes_decrypt_block5x) */ AES_FUNC_START(aes_ecb_encrypt) - stp x29, x30, [sp, #-16]! - mov x29, sp + frame_push 0 enc_prepare w3, x2, x5 @@ -77,14 +76,13 @@ ST5( st1 {v4.16b}, [x0], #16 ) subs w4, w4, #1 bne .Lecbencloop .Lecbencout: - ldp x29, x30, [sp], #16 + frame_pop ret AES_FUNC_END(aes_ecb_encrypt) AES_FUNC_START(aes_ecb_decrypt) - stp x29, x30, [sp, #-16]! - mov x29, sp + frame_push 0 dec_prepare w3, x2, x5 @@ -108,7 +106,7 @@ ST5( st1 {v4.16b}, [x0], #16 ) subs w4, w4, #1 bne .Lecbdecloop .Lecbdecout: - ldp x29, x30, [sp], #16 + frame_pop ret AES_FUNC_END(aes_ecb_decrypt) @@ -171,9 +169,6 @@ AES_FUNC_END(aes_cbc_encrypt) AES_FUNC_END(aes_essiv_cbc_encrypt) AES_FUNC_START(aes_essiv_cbc_decrypt) - stp x29, x30, [sp, #-16]! - mov x29, sp - ld1 {cbciv.16b}, [x5] /* get iv */ mov w8, #14 /* AES-256: 14 rounds */ @@ -182,11 +177,9 @@ AES_FUNC_START(aes_essiv_cbc_decrypt) b .Lessivcbcdecstart AES_FUNC_START(aes_cbc_decrypt) - stp x29, x30, [sp, #-16]! - mov x29, sp - ld1 {cbciv.16b}, [x5] /* get iv */ .Lessivcbcdecstart: + frame_push 0 dec_prepare w3, x2, x6 .LcbcdecloopNx: @@ -236,7 +229,7 @@ ST5( st1 {v4.16b}, [x0], #16 ) bne .Lcbcdecloop .Lcbcdecout: st1 {cbciv.16b}, [x5] /* return iv */ - ldp x29, x30, [sp], #16 + frame_pop ret AES_FUNC_END(aes_cbc_decrypt) AES_FUNC_END(aes_essiv_cbc_decrypt) @@ -337,8 +330,7 @@ AES_FUNC_END(aes_cbc_cts_decrypt) BLOCKS .req x13 BLOCKS_W .req w13 - stp x29, x30, [sp, #-16]! - mov x29, sp + frame_push 0 enc_prepare ROUNDS_W, KEY, IV_PART ld1 {vctr.16b}, [IV] @@ -481,7 +473,7 @@ ST5( st1 {v4.16b}, [OUT], #16 ) .if !\xctr st1 {vctr.16b}, [IV] /* return next CTR value */ .endif - ldp x29, x30, [sp], #16 + frame_pop ret .Lctrtail\xctr: @@ -645,8 +637,7 @@ AES_FUNC_END(aes_xctr_encrypt) .endm AES_FUNC_START(aes_xts_encrypt) - stp x29, x30, [sp, #-16]! - mov x29, sp + frame_push 0 ld1 {v4.16b}, [x6] xts_load_mask v8 @@ -704,7 +695,7 @@ AES_FUNC_START(aes_xts_encrypt) st1 {v0.16b}, [x0] .Lxtsencret: st1 {v4.16b}, [x6] - ldp x29, x30, [sp], #16 + frame_pop ret .LxtsencctsNx: @@ -732,8 +723,7 @@ AES_FUNC_START(aes_xts_encrypt) AES_FUNC_END(aes_xts_encrypt) AES_FUNC_START(aes_xts_decrypt) - stp x29, x30, [sp, #-16]! 
- mov x29, sp + frame_push 0 /* subtract 16 bytes if we are doing CTS */ sub w8, w4, #0x10 @@ -794,7 +784,7 @@ AES_FUNC_START(aes_xts_decrypt) b .Lxtsdecloop .Lxtsdecout: st1 {v4.16b}, [x6] - ldp x29, x30, [sp], #16 + frame_pop ret .Lxtsdeccts: diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S index d427f4556b6eb2d8ddd3afb7dcf577ff3e6882ff..7278a37c2d5cd0005f935810703bd94bcaa7ee32 100644 --- a/arch/arm64/crypto/aes-neonbs-core.S +++ b/arch/arm64/crypto/aes-neonbs-core.S @@ -760,7 +760,7 @@ SYM_FUNC_START_LOCAL(__xts_crypt8) eor v6.16b, v6.16b, v31.16b eor v7.16b, v7.16b, v16.16b - stp q16, q17, [sp, #16] + stp q16, q17, [x6] mov bskey, x2 mov rounds, x3 @@ -768,8 +768,8 @@ SYM_FUNC_START_LOCAL(__xts_crypt8) SYM_FUNC_END(__xts_crypt8) .macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7 - stp x29, x30, [sp, #-48]! - mov x29, sp + frame_push 0, 32 + add x6, sp, #.Lframe_local_offset ld1 {v25.16b}, [x5] @@ -781,7 +781,7 @@ SYM_FUNC_END(__xts_crypt8) eor v18.16b, \o2\().16b, v27.16b eor v19.16b, \o3\().16b, v28.16b - ldp q24, q25, [sp, #16] + ldp q24, q25, [x6] eor v20.16b, \o4\().16b, v29.16b eor v21.16b, \o5\().16b, v30.16b @@ -795,7 +795,7 @@ SYM_FUNC_END(__xts_crypt8) b.gt 0b st1 {v25.16b}, [x5] - ldp x29, x30, [sp], #48 + frame_pop ret .endm @@ -820,9 +820,7 @@ SYM_FUNC_END(aesbs_xts_decrypt) * int rounds, int blocks, u8 iv[]) */ SYM_FUNC_START(aesbs_ctr_encrypt) - stp x29, x30, [sp, #-16]! - mov x29, sp - + frame_push 0 ldp x7, x8, [x5] ld1 {v0.16b}, [x5] CPU_LE( rev x7, x7 ) @@ -862,6 +860,6 @@ CPU_LE( rev x8, x8 ) b.gt 0b st1 {v0.16b}, [x5] - ldp x29, x30, [sp], #16 + frame_pop ret SYM_FUNC_END(aesbs_ctr_encrypt) diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S index dce6dcebfca189ee4f99d8c7ee03c00cc879f878..5604de61d06d04eeac8fb616859098b53a8cedd8 100644 --- a/arch/arm64/crypto/crct10dif-ce-core.S +++ b/arch/arm64/crypto/crct10dif-ce-core.S @@ -429,7 +429,7 @@ CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) umov w0, v0.h[0] .ifc \p, p8 - ldp x29, x30, [sp], #16 + frame_pop .endif ret @@ -466,8 +466,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) // Assumes len >= 16. // SYM_FUNC_START(crc_t10dif_pmull_p8) - stp x29, x30, [sp, #-16]! - mov x29, sp + frame_push 1 crc_t10dif_pmull p8 SYM_FUNC_END(crc_t10dif_pmull_p8) diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S index ebe5558929b7bba68fa2802906fbf88a759de077..23ee9a5eaf27c23c5b30ead46d7761e1909cd4a5 100644 --- a/arch/arm64/crypto/ghash-ce-core.S +++ b/arch/arm64/crypto/ghash-ce-core.S @@ -436,9 +436,7 @@ SYM_FUNC_END(pmull_ghash_update_p8) .align 6 .macro pmull_gcm_do_crypt, enc - stp x29, x30, [sp, #-32]! - mov x29, sp - str x19, [sp, #24] + frame_push 1 load_round_keys x7, x6, x8 @@ -529,7 +527,7 @@ CPU_LE( rev w8, w8 ) .endif bne 0b -3: ldp x19, x10, [sp, #24] +3: ldr x10, [sp, #.Lframe_local_offset] cbz x10, 5f // output tag? 
ld1 {INP3.16b}, [x10] // load lengths[] @@ -562,7 +560,7 @@ CPU_LE( rev w8, w8 ) smov w0, v0.b[0] // return b0 .endif -4: ldp x29, x30, [sp], #32 +4: frame_pop ret 5: diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 15794fe21a0b2eca5360bd6f793b6b4b7fb36181..e5e9adc1fcf4e0279e1a9e368d1c53e506aeccd8 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -508,7 +508,7 @@ static void __exit ghash_ce_mod_exit(void) crypto_unregister_shash(&ghash_alg); } -static const struct cpu_feature ghash_cpu_feature[] = { +static const struct cpu_feature __maybe_unused ghash_cpu_feature[] = { { cpu_feature(PMULL) }, { } }; MODULE_DEVICE_TABLE(cpu, ghash_cpu_feature); diff --git a/arch/arm64/crypto/nh-neon-core.S b/arch/arm64/crypto/nh-neon-core.S index 51c0a534ef87ccf1083f48051b1e617913479c46..13eda08fda1e568d3f657f5128a58188a9f08b1a 100644 --- a/arch/arm64/crypto/nh-neon-core.S +++ b/arch/arm64/crypto/nh-neon-core.S @@ -8,6 +8,7 @@ */ #include +#include KEY .req x0 MESSAGE .req x1 @@ -58,11 +59,11 @@ /* * void nh_neon(const u32 *key, const u8 *message, size_t message_len, - * u8 hash[NH_HASH_BYTES]) + * __le64 hash[NH_NUM_PASSES]) * * It's guaranteed that message_len % 16 == 0. */ -SYM_FUNC_START(nh_neon) +SYM_TYPED_FUNC_START(nh_neon) ld1 {K0.4s,K1.4s}, [KEY], #32 movi PASS0_SUMS.2d, #0 diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c index c5405e6a6db76f7cbbbccda47cc3a116c1c11e45..cd882c35d9252d8608d69e4fe0d9d6978681fb37 100644 --- a/arch/arm64/crypto/nhpoly1305-neon-glue.c +++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c @@ -14,14 +14,7 @@ #include asmlinkage void nh_neon(const u32 *key, const u8 *message, size_t message_len, - u8 hash[NH_HASH_BYTES]); - -/* wrapper to avoid indirect call to assembly, which doesn't work with CFI */ -static void _nh_neon(const u32 *key, const u8 *message, size_t message_len, - __le64 hash[NH_NUM_PASSES]) -{ - nh_neon(key, message, message_len, (u8 *)hash); -} + __le64 hash[NH_NUM_PASSES]); static int nhpoly1305_neon_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) @@ -33,7 +26,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc, unsigned int n = min_t(unsigned int, srclen, SZ_4K); kernel_neon_begin(); - crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon); + crypto_nhpoly1305_update_helper(desc, src, n, nh_neon); kernel_neon_end(); src += n; srclen -= n; diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c index ee98954ae8ca682651a00b6de2241bd61210813a..54bf6ebcfffb1567562af61f188c9bf6ccade166 100644 --- a/arch/arm64/crypto/sm3-ce-glue.c +++ b/arch/arm64/crypto/sm3-ce-glue.c @@ -84,7 +84,7 @@ static struct shash_alg sm3_alg = { .base.cra_driver_name = "sm3-ce", .base.cra_blocksize = SM3_BLOCK_SIZE, .base.cra_module = THIS_MODULE, - .base.cra_priority = 200, + .base.cra_priority = 400, }; static int __init sm3_ce_mod_init(void) diff --git a/arch/arm64/crypto/sm3-neon-core.S b/arch/arm64/crypto/sm3-neon-core.S new file mode 100644 index 0000000000000000000000000000000000000000..4357e0e51be3881bd44e7d5837294c222f0a787b --- /dev/null +++ b/arch/arm64/crypto/sm3-neon-core.S @@ -0,0 +1,601 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * sm3-neon-core.S - SM3 secure hash using NEON instructions + * + * Linux/arm64 port of the libgcrypt SM3 implementation for AArch64 + * + * Copyright (C) 2021 Jussi Kivilinna + * Copyright (c) 2022 Tianjia Zhang + */ + +#include +#include +#include + +/* 
Context structure */ + +#define state_h0 0 +#define state_h1 4 +#define state_h2 8 +#define state_h3 12 +#define state_h4 16 +#define state_h5 20 +#define state_h6 24 +#define state_h7 28 + +/* Stack structure */ + +#define STACK_W_SIZE (32 * 2 * 3) + +#define STACK_W (0) +#define STACK_SIZE (STACK_W + STACK_W_SIZE) + +/* Register macros */ + +#define RSTATE x0 +#define RDATA x1 +#define RNBLKS x2 +#define RKPTR x28 +#define RFRAME x29 + +#define ra w3 +#define rb w4 +#define rc w5 +#define rd w6 +#define re w7 +#define rf w8 +#define rg w9 +#define rh w10 + +#define t0 w11 +#define t1 w12 +#define t2 w13 +#define t3 w14 +#define t4 w15 +#define t5 w16 +#define t6 w17 + +#define k_even w19 +#define k_odd w20 + +#define addr0 x21 +#define addr1 x22 + +#define s0 w23 +#define s1 w24 +#define s2 w25 +#define s3 w26 + +#define W0 v0 +#define W1 v1 +#define W2 v2 +#define W3 v3 +#define W4 v4 +#define W5 v5 + +#define XTMP0 v6 +#define XTMP1 v7 +#define XTMP2 v16 +#define XTMP3 v17 +#define XTMP4 v18 +#define XTMP5 v19 +#define XTMP6 v20 + +/* Helper macros. */ + +#define _(...) /*_*/ + +#define clear_vec(x) \ + movi x.8h, #0; + +#define rolw(o, a, n) \ + ror o, a, #(32 - n); + +/* Round function macros. */ + +#define GG1_1(x, y, z, o, t) \ + eor o, x, y; +#define GG1_2(x, y, z, o, t) \ + eor o, o, z; +#define GG1_3(x, y, z, o, t) + +#define FF1_1(x, y, z, o, t) GG1_1(x, y, z, o, t) +#define FF1_2(x, y, z, o, t) +#define FF1_3(x, y, z, o, t) GG1_2(x, y, z, o, t) + +#define GG2_1(x, y, z, o, t) \ + bic o, z, x; +#define GG2_2(x, y, z, o, t) \ + and t, y, x; +#define GG2_3(x, y, z, o, t) \ + eor o, o, t; + +#define FF2_1(x, y, z, o, t) \ + eor o, x, y; +#define FF2_2(x, y, z, o, t) \ + and t, x, y; \ + and o, o, z; +#define FF2_3(x, y, z, o, t) \ + eor o, o, t; + +#define R(i, a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \ + K_LOAD(round); \ + ldr t5, [sp, #(wtype##_W1_ADDR(round, widx))]; \ + rolw(t0, a, 12); /* rol(a, 12) => t0 */ \ + IOP(1, iop_param); \ + FF##i##_1(a, b, c, t1, t2); \ + ldr t6, [sp, #(wtype##_W1W2_ADDR(round, widx))]; \ + add k, k, e; \ + IOP(2, iop_param); \ + GG##i##_1(e, f, g, t3, t4); \ + FF##i##_2(a, b, c, t1, t2); \ + IOP(3, iop_param); \ + add k, k, t0; \ + add h, h, t5; \ + add d, d, t6; /* w1w2 + d => d */ \ + IOP(4, iop_param); \ + rolw(k, k, 7); /* rol (t0 + e + t), 7) => k */ \ + GG##i##_2(e, f, g, t3, t4); \ + add h, h, k; /* h + w1 + k => h */ \ + IOP(5, iop_param); \ + FF##i##_3(a, b, c, t1, t2); \ + eor t0, t0, k; /* k ^ t0 => t0 */ \ + GG##i##_3(e, f, g, t3, t4); \ + add d, d, t1; /* FF(a,b,c) + d => d */ \ + IOP(6, iop_param); \ + add t3, t3, h; /* GG(e,f,g) + h => t3 */ \ + rolw(b, b, 9); /* rol(b, 9) => b */ \ + eor h, t3, t3, ror #(32-9); \ + IOP(7, iop_param); \ + add d, d, t0; /* t0 + d => d */ \ + rolw(f, f, 19); /* rol(f, 19) => f */ \ + IOP(8, iop_param); \ + eor h, h, t3, ror #(32-17); /* P0(t3) => h */ + +#define R1(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \ + R(1, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param) + +#define R2(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \ + R(2, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param) + +#define KL(round) \ + ldp k_even, k_odd, [RKPTR, #(4*(round))]; + +/* Input expansion macros. */ + +/* Byte-swapped input address. */ +#define IW_W_ADDR(round, widx, offs) \ + (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4)) + +/* Expanded input address. 
*/ +#define XW_W_ADDR(round, widx, offs) \ + (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4)) + +/* Rounds 1-12, byte-swapped input block addresses. */ +#define IW_W1_ADDR(round, widx) IW_W_ADDR(round, widx, 32) +#define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 48) + +/* Rounds 1-12, expanded input block addresses. */ +#define XW_W1_ADDR(round, widx) XW_W_ADDR(round, widx, 0) +#define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 16) + +/* Input block loading. + * Interleaving within round function needed for in-order CPUs. */ +#define LOAD_W_VEC_1_1() \ + add addr0, sp, #IW_W1_ADDR(0, 0); +#define LOAD_W_VEC_1_2() \ + add addr1, sp, #IW_W1_ADDR(4, 0); +#define LOAD_W_VEC_1_3() \ + ld1 {W0.16b}, [RDATA], #16; +#define LOAD_W_VEC_1_4() \ + ld1 {W1.16b}, [RDATA], #16; +#define LOAD_W_VEC_1_5() \ + ld1 {W2.16b}, [RDATA], #16; +#define LOAD_W_VEC_1_6() \ + ld1 {W3.16b}, [RDATA], #16; +#define LOAD_W_VEC_1_7() \ + rev32 XTMP0.16b, W0.16b; +#define LOAD_W_VEC_1_8() \ + rev32 XTMP1.16b, W1.16b; +#define LOAD_W_VEC_2_1() \ + rev32 XTMP2.16b, W2.16b; +#define LOAD_W_VEC_2_2() \ + rev32 XTMP3.16b, W3.16b; +#define LOAD_W_VEC_2_3() \ + eor XTMP4.16b, XTMP1.16b, XTMP0.16b; +#define LOAD_W_VEC_2_4() \ + eor XTMP5.16b, XTMP2.16b, XTMP1.16b; +#define LOAD_W_VEC_2_5() \ + st1 {XTMP0.16b}, [addr0], #16; +#define LOAD_W_VEC_2_6() \ + st1 {XTMP4.16b}, [addr0]; \ + add addr0, sp, #IW_W1_ADDR(8, 0); +#define LOAD_W_VEC_2_7() \ + eor XTMP6.16b, XTMP3.16b, XTMP2.16b; +#define LOAD_W_VEC_2_8() \ + ext W0.16b, XTMP0.16b, XTMP0.16b, #8; /* W0: xx, w0, xx, xx */ +#define LOAD_W_VEC_3_1() \ + mov W2.16b, XTMP1.16b; /* W2: xx, w6, w5, w4 */ +#define LOAD_W_VEC_3_2() \ + st1 {XTMP1.16b}, [addr1], #16; +#define LOAD_W_VEC_3_3() \ + st1 {XTMP5.16b}, [addr1]; \ + ext W1.16b, XTMP0.16b, XTMP0.16b, #4; /* W1: xx, w3, w2, w1 */ +#define LOAD_W_VEC_3_4() \ + ext W3.16b, XTMP1.16b, XTMP2.16b, #12; /* W3: xx, w9, w8, w7 */ +#define LOAD_W_VEC_3_5() \ + ext W4.16b, XTMP2.16b, XTMP3.16b, #8; /* W4: xx, w12, w11, w10 */ +#define LOAD_W_VEC_3_6() \ + st1 {XTMP2.16b}, [addr0], #16; +#define LOAD_W_VEC_3_7() \ + st1 {XTMP6.16b}, [addr0]; +#define LOAD_W_VEC_3_8() \ + ext W5.16b, XTMP3.16b, XTMP3.16b, #4; /* W5: xx, w15, w14, w13 */ + +#define LOAD_W_VEC_1(iop_num, ...) \ + LOAD_W_VEC_1_##iop_num() +#define LOAD_W_VEC_2(iop_num, ...) \ + LOAD_W_VEC_2_##iop_num() +#define LOAD_W_VEC_3(iop_num, ...) \ + LOAD_W_VEC_3_##iop_num() + +/* Message scheduling. Note: 3 words per vector register. + * Interleaving within round function needed for in-order CPUs. 
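The SCHED_W_* macros below evaluate, three words per vector register, the standard SM3 message expansion; the IW_*/XW_* addresses above only decide whether a round reads byte-swapped input words or expanded ones. A plain-C reference, with rol32() again from <linux/bitops.h> and sm3_expand an illustrative name:

/* Illustrative sketch, not from this patch. */
#define P1(x)	((x) ^ rol32((x), 15) ^ rol32((x), 23))

/* w[0..15] = the 16 byte-swapped words of the 64-byte input block */
static void sm3_expand(u32 w[68], u32 wp[64])
{
	int j;

	for (j = 16; j < 68; j++)
		w[j] = P1(w[j - 16] ^ w[j - 9] ^ rol32(w[j - 3], 15)) ^
		       rol32(w[j - 13], 7) ^ w[j - 6];

	for (j = 0; j < 64; j++)
		wp[j] = w[j] ^ w[j + 4];	/* the "W1W2" values */
}
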
*/ +#define SCHED_W_1_1(round, w0, w1, w2, w3, w4, w5) \ + /* Load (w[i - 16]) => XTMP0 */ \ + /* Load (w[i - 13]) => XTMP5 */ \ + ext XTMP0.16b, w0.16b, w0.16b, #12; /* XTMP0: w0, xx, xx, xx */ +#define SCHED_W_1_2(round, w0, w1, w2, w3, w4, w5) \ + ext XTMP5.16b, w1.16b, w1.16b, #12; +#define SCHED_W_1_3(round, w0, w1, w2, w3, w4, w5) \ + ext XTMP0.16b, XTMP0.16b, w1.16b, #12; /* XTMP0: xx, w2, w1, w0 */ +#define SCHED_W_1_4(round, w0, w1, w2, w3, w4, w5) \ + ext XTMP5.16b, XTMP5.16b, w2.16b, #12; +#define SCHED_W_1_5(round, w0, w1, w2, w3, w4, w5) \ + /* w[i - 9] == w3 */ \ + /* W3 ^ XTMP0 => XTMP0 */ \ + eor XTMP0.16b, XTMP0.16b, w3.16b; +#define SCHED_W_1_6(round, w0, w1, w2, w3, w4, w5) \ + /* w[i - 3] == w5 */ \ + /* rol(XMM5, 15) ^ XTMP0 => XTMP0 */ \ + /* rol(XTMP5, 7) => XTMP1 */ \ + add addr0, sp, #XW_W1_ADDR((round), 0); \ + shl XTMP2.4s, w5.4s, #15; +#define SCHED_W_1_7(round, w0, w1, w2, w3, w4, w5) \ + shl XTMP1.4s, XTMP5.4s, #7; +#define SCHED_W_1_8(round, w0, w1, w2, w3, w4, w5) \ + sri XTMP2.4s, w5.4s, #(32-15); +#define SCHED_W_2_1(round, w0, w1, w2, w3, w4, w5) \ + sri XTMP1.4s, XTMP5.4s, #(32-7); +#define SCHED_W_2_2(round, w0, w1, w2, w3, w4, w5) \ + eor XTMP0.16b, XTMP0.16b, XTMP2.16b; +#define SCHED_W_2_3(round, w0, w1, w2, w3, w4, w5) \ + /* w[i - 6] == W4 */ \ + /* W4 ^ XTMP1 => XTMP1 */ \ + eor XTMP1.16b, XTMP1.16b, w4.16b; +#define SCHED_W_2_4(round, w0, w1, w2, w3, w4, w5) \ + /* P1(XTMP0) ^ XTMP1 => W0 */ \ + shl XTMP3.4s, XTMP0.4s, #15; +#define SCHED_W_2_5(round, w0, w1, w2, w3, w4, w5) \ + shl XTMP4.4s, XTMP0.4s, #23; +#define SCHED_W_2_6(round, w0, w1, w2, w3, w4, w5) \ + eor w0.16b, XTMP1.16b, XTMP0.16b; +#define SCHED_W_2_7(round, w0, w1, w2, w3, w4, w5) \ + sri XTMP3.4s, XTMP0.4s, #(32-15); +#define SCHED_W_2_8(round, w0, w1, w2, w3, w4, w5) \ + sri XTMP4.4s, XTMP0.4s, #(32-23); +#define SCHED_W_3_1(round, w0, w1, w2, w3, w4, w5) \ + eor w0.16b, w0.16b, XTMP3.16b; +#define SCHED_W_3_2(round, w0, w1, w2, w3, w4, w5) \ + /* Load (w[i - 3]) => XTMP2 */ \ + ext XTMP2.16b, w4.16b, w4.16b, #12; +#define SCHED_W_3_3(round, w0, w1, w2, w3, w4, w5) \ + eor w0.16b, w0.16b, XTMP4.16b; +#define SCHED_W_3_4(round, w0, w1, w2, w3, w4, w5) \ + ext XTMP2.16b, XTMP2.16b, w5.16b, #12; +#define SCHED_W_3_5(round, w0, w1, w2, w3, w4, w5) \ + /* W1 ^ W2 => XTMP3 */ \ + eor XTMP3.16b, XTMP2.16b, w0.16b; +#define SCHED_W_3_6(round, w0, w1, w2, w3, w4, w5) +#define SCHED_W_3_7(round, w0, w1, w2, w3, w4, w5) \ + st1 {XTMP2.16b-XTMP3.16b}, [addr0]; +#define SCHED_W_3_8(round, w0, w1, w2, w3, w4, w5) + +#define SCHED_W_W0W1W2W3W4W5_1(iop_num, round) \ + SCHED_W_1_##iop_num(round, W0, W1, W2, W3, W4, W5) +#define SCHED_W_W0W1W2W3W4W5_2(iop_num, round) \ + SCHED_W_2_##iop_num(round, W0, W1, W2, W3, W4, W5) +#define SCHED_W_W0W1W2W3W4W5_3(iop_num, round) \ + SCHED_W_3_##iop_num(round, W0, W1, W2, W3, W4, W5) + +#define SCHED_W_W1W2W3W4W5W0_1(iop_num, round) \ + SCHED_W_1_##iop_num(round, W1, W2, W3, W4, W5, W0) +#define SCHED_W_W1W2W3W4W5W0_2(iop_num, round) \ + SCHED_W_2_##iop_num(round, W1, W2, W3, W4, W5, W0) +#define SCHED_W_W1W2W3W4W5W0_3(iop_num, round) \ + SCHED_W_3_##iop_num(round, W1, W2, W3, W4, W5, W0) + +#define SCHED_W_W2W3W4W5W0W1_1(iop_num, round) \ + SCHED_W_1_##iop_num(round, W2, W3, W4, W5, W0, W1) +#define SCHED_W_W2W3W4W5W0W1_2(iop_num, round) \ + SCHED_W_2_##iop_num(round, W2, W3, W4, W5, W0, W1) +#define SCHED_W_W2W3W4W5W0W1_3(iop_num, round) \ + SCHED_W_3_##iop_num(round, W2, W3, W4, W5, W0, W1) + +#define SCHED_W_W3W4W5W0W1W2_1(iop_num, round) \ + 
SCHED_W_1_##iop_num(round, W3, W4, W5, W0, W1, W2) +#define SCHED_W_W3W4W5W0W1W2_2(iop_num, round) \ + SCHED_W_2_##iop_num(round, W3, W4, W5, W0, W1, W2) +#define SCHED_W_W3W4W5W0W1W2_3(iop_num, round) \ + SCHED_W_3_##iop_num(round, W3, W4, W5, W0, W1, W2) + +#define SCHED_W_W4W5W0W1W2W3_1(iop_num, round) \ + SCHED_W_1_##iop_num(round, W4, W5, W0, W1, W2, W3) +#define SCHED_W_W4W5W0W1W2W3_2(iop_num, round) \ + SCHED_W_2_##iop_num(round, W4, W5, W0, W1, W2, W3) +#define SCHED_W_W4W5W0W1W2W3_3(iop_num, round) \ + SCHED_W_3_##iop_num(round, W4, W5, W0, W1, W2, W3) + +#define SCHED_W_W5W0W1W2W3W4_1(iop_num, round) \ + SCHED_W_1_##iop_num(round, W5, W0, W1, W2, W3, W4) +#define SCHED_W_W5W0W1W2W3W4_2(iop_num, round) \ + SCHED_W_2_##iop_num(round, W5, W0, W1, W2, W3, W4) +#define SCHED_W_W5W0W1W2W3W4_3(iop_num, round) \ + SCHED_W_3_##iop_num(round, W5, W0, W1, W2, W3, W4) + + + /* + * Transform blocks*64 bytes (blocks*16 32-bit words) at 'src'. + * + * void sm3_neon_transform(struct sm3_state *sst, u8 const *src, + * int blocks) + */ + .text +.align 3 +SYM_TYPED_FUNC_START(sm3_neon_transform) + ldp ra, rb, [RSTATE, #0] + ldp rc, rd, [RSTATE, #8] + ldp re, rf, [RSTATE, #16] + ldp rg, rh, [RSTATE, #24] + + stp x28, x29, [sp, #-16]! + stp x19, x20, [sp, #-16]! + stp x21, x22, [sp, #-16]! + stp x23, x24, [sp, #-16]! + stp x25, x26, [sp, #-16]! + mov RFRAME, sp + + sub addr0, sp, #STACK_SIZE + adr_l RKPTR, .LKtable + and sp, addr0, #(~63) + + /* Preload first block. */ + LOAD_W_VEC_1(1, 0) + LOAD_W_VEC_1(2, 0) + LOAD_W_VEC_1(3, 0) + LOAD_W_VEC_1(4, 0) + LOAD_W_VEC_1(5, 0) + LOAD_W_VEC_1(6, 0) + LOAD_W_VEC_1(7, 0) + LOAD_W_VEC_1(8, 0) + LOAD_W_VEC_2(1, 0) + LOAD_W_VEC_2(2, 0) + LOAD_W_VEC_2(3, 0) + LOAD_W_VEC_2(4, 0) + LOAD_W_VEC_2(5, 0) + LOAD_W_VEC_2(6, 0) + LOAD_W_VEC_2(7, 0) + LOAD_W_VEC_2(8, 0) + LOAD_W_VEC_3(1, 0) + LOAD_W_VEC_3(2, 0) + LOAD_W_VEC_3(3, 0) + LOAD_W_VEC_3(4, 0) + LOAD_W_VEC_3(5, 0) + LOAD_W_VEC_3(6, 0) + LOAD_W_VEC_3(7, 0) + LOAD_W_VEC_3(8, 0) + +.balign 16 +.Loop: + /* Transform 0-3 */ + R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 0, 0, IW, _, 0) + R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 1, 1, IW, _, 0) + R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 2, 2, IW, _, 0) + R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 3, 3, IW, _, 0) + + /* Transform 4-7 + Precalc 12-14 */ + R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 4, 0, IW, _, 0) + R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 5, 1, IW, _, 0) + R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 6, 2, IW, SCHED_W_W0W1W2W3W4W5_1, 12) + R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 7, 3, IW, SCHED_W_W0W1W2W3W4W5_2, 12) + + /* Transform 8-11 + Precalc 12-17 */ + R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 8, 0, IW, SCHED_W_W0W1W2W3W4W5_3, 12) + R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 9, 1, IW, SCHED_W_W1W2W3W4W5W0_1, 15) + R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 10, 2, IW, SCHED_W_W1W2W3W4W5W0_2, 15) + R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 11, 3, IW, SCHED_W_W1W2W3W4W5W0_3, 15) + + /* Transform 12-14 + Precalc 18-20 */ + R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 12, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 18) + R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 13, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 18) + R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 14, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 18) + + /* Transform 15-17 + Precalc 21-23 */ + R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 15, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 21) + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 16, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 21) + R2(rd, ra, rb, rc, 
rh, re, rf, rg, k_odd, _, 17, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 21) + + /* Transform 18-20 + Precalc 24-26 */ + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 18, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 24) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 19, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 24) + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 20, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 24) + + /* Transform 21-23 + Precalc 27-29 */ + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 21, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 27) + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 22, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 27) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 23, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 27) + + /* Transform 24-26 + Precalc 30-32 */ + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 24, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 30) + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 25, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 30) + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 26, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 30) + + /* Transform 27-29 + Precalc 33-35 */ + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 27, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 33) + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 28, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 33) + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 29, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 33) + + /* Transform 30-32 + Precalc 36-38 */ + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 30, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 36) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 31, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 36) + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 32, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 36) + + /* Transform 33-35 + Precalc 39-41 */ + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 33, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 39) + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 34, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 39) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 35, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 39) + + /* Transform 36-38 + Precalc 42-44 */ + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 36, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 42) + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 37, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 42) + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 38, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 42) + + /* Transform 39-41 + Precalc 45-47 */ + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 39, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 45) + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 40, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 45) + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 41, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 45) + + /* Transform 42-44 + Precalc 48-50 */ + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 42, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 48) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 43, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 48) + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 44, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 48) + + /* Transform 45-47 + Precalc 51-53 */ + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 45, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 51) + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 46, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 51) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 47, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 51) + + /* Transform 48-50 + Precalc 54-56 */ + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 48, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 54) + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 49, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 54) + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 50, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 54) + + /* Transform 51-53 + Precalc 57-59 */ + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 51, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 57) + R2(ra, rb, rc, rd, re, 
rf, rg, rh, k_even, KL, 52, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 57) + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 53, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 57) + + /* Transform 54-56 + Precalc 60-62 */ + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 54, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 60) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 55, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 60) + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 56, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 60) + + /* Transform 57-59 + Precalc 63 */ + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 57, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 63) + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 58, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 63) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 59, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 63) + + /* Transform 60 */ + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 60, 0, XW, _, _) + subs RNBLKS, RNBLKS, #1 + b.eq .Lend + + /* Transform 61-63 + Preload next block */ + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 61, 1, XW, LOAD_W_VEC_1, _) + ldp s0, s1, [RSTATE, #0] + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, LOAD_W_VEC_2, _) + ldp s2, s3, [RSTATE, #8] + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 63, 0, XW, LOAD_W_VEC_3, _) + + /* Update the chaining variables. */ + eor ra, ra, s0 + eor rb, rb, s1 + ldp s0, s1, [RSTATE, #16] + eor rc, rc, s2 + ldp k_even, k_odd, [RSTATE, #24] + eor rd, rd, s3 + eor re, re, s0 + stp ra, rb, [RSTATE, #0] + eor rf, rf, s1 + stp rc, rd, [RSTATE, #8] + eor rg, rg, k_even + stp re, rf, [RSTATE, #16] + eor rh, rh, k_odd + stp rg, rh, [RSTATE, #24] + b .Loop + +.Lend: + /* Transform 61-63 */ + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 61, 1, XW, _, _) + ldp s0, s1, [RSTATE, #0] + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, _, _) + ldp s2, s3, [RSTATE, #8] + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 63, 0, XW, _, _) + + /* Update the chaining variables. 
*/ + eor ra, ra, s0 + clear_vec(W0) + eor rb, rb, s1 + clear_vec(W1) + ldp s0, s1, [RSTATE, #16] + clear_vec(W2) + eor rc, rc, s2 + clear_vec(W3) + ldp k_even, k_odd, [RSTATE, #24] + clear_vec(W4) + eor rd, rd, s3 + clear_vec(W5) + eor re, re, s0 + clear_vec(XTMP0) + stp ra, rb, [RSTATE, #0] + clear_vec(XTMP1) + eor rf, rf, s1 + clear_vec(XTMP2) + stp rc, rd, [RSTATE, #8] + clear_vec(XTMP3) + eor rg, rg, k_even + clear_vec(XTMP4) + stp re, rf, [RSTATE, #16] + clear_vec(XTMP5) + eor rh, rh, k_odd + clear_vec(XTMP6) + stp rg, rh, [RSTATE, #24] + + /* Clear message expansion area */ + add addr0, sp, #STACK_W + st1 {W0.16b-W3.16b}, [addr0], #64 + st1 {W0.16b-W3.16b}, [addr0], #64 + st1 {W0.16b-W3.16b}, [addr0] + + mov sp, RFRAME + + ldp x25, x26, [sp], #16 + ldp x23, x24, [sp], #16 + ldp x21, x22, [sp], #16 + ldp x19, x20, [sp], #16 + ldp x28, x29, [sp], #16 + + ret +SYM_FUNC_END(sm3_neon_transform) + + + .section ".rodata", "a" + + .align 4 +.LKtable: + .long 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb + .long 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc + .long 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce + .long 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6 + .long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c + .long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce + .long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec + .long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5 + .long 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53 + .long 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d + .long 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4 + .long 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43 + .long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c + .long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce + .long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec + .long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5 diff --git a/arch/arm64/crypto/sm3-neon-glue.c b/arch/arm64/crypto/sm3-neon-glue.c new file mode 100644 index 0000000000000000000000000000000000000000..7182ee683f14ace6ea8c23ec9af0ab40fd3db97c --- /dev/null +++ b/arch/arm64/crypto/sm3-neon-glue.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * sm3-neon-glue.c - SM3 secure hash using NEON instructions + * + * Copyright (C) 2022 Tianjia Zhang + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +asmlinkage void sm3_neon_transform(struct sm3_state *sst, u8 const *src, + int blocks); + +static int sm3_neon_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + if (!crypto_simd_usable()) { + sm3_update(shash_desc_ctx(desc), data, len); + return 0; + } + + kernel_neon_begin(); + sm3_base_do_update(desc, data, len, sm3_neon_transform); + kernel_neon_end(); + + return 0; +} + +static int sm3_neon_final(struct shash_desc *desc, u8 *out) +{ + if (!crypto_simd_usable()) { + sm3_final(shash_desc_ctx(desc), out); + return 0; + } + + kernel_neon_begin(); + sm3_base_do_finalize(desc, sm3_neon_transform); + kernel_neon_end(); + + return sm3_base_finish(desc, out); +} + +static int sm3_neon_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + if (!crypto_simd_usable()) { + struct sm3_state *sctx = shash_desc_ctx(desc); + + if (len) + sm3_update(sctx, data, len); + sm3_final(sctx, out); + return 0; + } + + kernel_neon_begin(); + if (len) + sm3_base_do_update(desc, data, len, sm3_neon_transform); + sm3_base_do_finalize(desc, sm3_neon_transform); + kernel_neon_end(); + + return sm3_base_finish(desc, out); +} + +static struct 
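The .LKtable constants above are the SM3 round constants T_j pre-rotated by their round index, letting the KL macro load the finished rol32(T_j, j) value instead of rotating at run time; rows 13-16 repeat rows 5-8 because the rotation is taken mod 32. A sketch of how the table contents are derived, with sm3_gen_ktable an illustrative name:

/* Illustrative sketch, not from this patch. */
static void sm3_gen_ktable(u32 k[64])
{
	int j;

	for (j = 0; j < 64; j++)	/* T_j: 0x79cc4519, then 0x7a879d8a */
		k[j] = rol32(j < 16 ? 0x79cc4519 : 0x7a879d8a, j % 32);
}
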
shash_alg sm3_alg = { + .digestsize = SM3_DIGEST_SIZE, + .init = sm3_base_init, + .update = sm3_neon_update, + .final = sm3_neon_final, + .finup = sm3_neon_finup, + .descsize = sizeof(struct sm3_state), + .base.cra_name = "sm3", + .base.cra_driver_name = "sm3-neon", + .base.cra_blocksize = SM3_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .base.cra_priority = 200, +}; + +static int __init sm3_neon_init(void) +{ + return crypto_register_shash(&sm3_alg); +} + +static void __exit sm3_neon_fini(void) +{ + crypto_unregister_shash(&sm3_alg); +} + +module_init(sm3_neon_init); +module_exit(sm3_neon_fini); + +MODULE_DESCRIPTION("SM3 secure hash using NEON instructions"); +MODULE_AUTHOR("Jussi Kivilinna "); +MODULE_AUTHOR("Tianjia Zhang "); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm64/crypto/sm4-ce-asm.h b/arch/arm64/crypto/sm4-ce-asm.h new file mode 100644 index 0000000000000000000000000000000000000000..7ea98e42e779cd2d586eb99a07db9168d4c52b24 --- /dev/null +++ b/arch/arm64/crypto/sm4-ce-asm.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4 helper macros for Crypto Extensions + * Copyright (C) 2022 Tianjia Zhang + */ + +#define SM4_PREPARE(ptr) \ + ld1 {v24.16b-v27.16b}, [ptr], #64; \ + ld1 {v28.16b-v31.16b}, [ptr]; + +#define SM4_CRYPT_BLK_BE(b0) \ + sm4e b0.4s, v24.4s; \ + sm4e b0.4s, v25.4s; \ + sm4e b0.4s, v26.4s; \ + sm4e b0.4s, v27.4s; \ + sm4e b0.4s, v28.4s; \ + sm4e b0.4s, v29.4s; \ + sm4e b0.4s, v30.4s; \ + sm4e b0.4s, v31.4s; \ + rev64 b0.4s, b0.4s; \ + ext b0.16b, b0.16b, b0.16b, #8; \ + rev32 b0.16b, b0.16b; + +#define SM4_CRYPT_BLK(b0) \ + rev32 b0.16b, b0.16b; \ + SM4_CRYPT_BLK_BE(b0); + +#define SM4_CRYPT_BLK2_BE(b0, b1) \ + sm4e b0.4s, v24.4s; \ + sm4e b1.4s, v24.4s; \ + sm4e b0.4s, v25.4s; \ + sm4e b1.4s, v25.4s; \ + sm4e b0.4s, v26.4s; \ + sm4e b1.4s, v26.4s; \ + sm4e b0.4s, v27.4s; \ + sm4e b1.4s, v27.4s; \ + sm4e b0.4s, v28.4s; \ + sm4e b1.4s, v28.4s; \ + sm4e b0.4s, v29.4s; \ + sm4e b1.4s, v29.4s; \ + sm4e b0.4s, v30.4s; \ + sm4e b1.4s, v30.4s; \ + sm4e b0.4s, v31.4s; \ + sm4e b1.4s, v31.4s; \ + rev64 b0.4s, b0.4s; \ + rev64 b1.4s, b1.4s; \ + ext b0.16b, b0.16b, b0.16b, #8; \ + ext b1.16b, b1.16b, b1.16b, #8; \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + +#define SM4_CRYPT_BLK2(b0, b1) \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + SM4_CRYPT_BLK2_BE(b0, b1); + +#define SM4_CRYPT_BLK4_BE(b0, b1, b2, b3) \ + sm4e b0.4s, v24.4s; \ + sm4e b1.4s, v24.4s; \ + sm4e b2.4s, v24.4s; \ + sm4e b3.4s, v24.4s; \ + sm4e b0.4s, v25.4s; \ + sm4e b1.4s, v25.4s; \ + sm4e b2.4s, v25.4s; \ + sm4e b3.4s, v25.4s; \ + sm4e b0.4s, v26.4s; \ + sm4e b1.4s, v26.4s; \ + sm4e b2.4s, v26.4s; \ + sm4e b3.4s, v26.4s; \ + sm4e b0.4s, v27.4s; \ + sm4e b1.4s, v27.4s; \ + sm4e b2.4s, v27.4s; \ + sm4e b3.4s, v27.4s; \ + sm4e b0.4s, v28.4s; \ + sm4e b1.4s, v28.4s; \ + sm4e b2.4s, v28.4s; \ + sm4e b3.4s, v28.4s; \ + sm4e b0.4s, v29.4s; \ + sm4e b1.4s, v29.4s; \ + sm4e b2.4s, v29.4s; \ + sm4e b3.4s, v29.4s; \ + sm4e b0.4s, v30.4s; \ + sm4e b1.4s, v30.4s; \ + sm4e b2.4s, v30.4s; \ + sm4e b3.4s, v30.4s; \ + sm4e b0.4s, v31.4s; \ + sm4e b1.4s, v31.4s; \ + sm4e b2.4s, v31.4s; \ + sm4e b3.4s, v31.4s; \ + rev64 b0.4s, b0.4s; \ + rev64 b1.4s, b1.4s; \ + rev64 b2.4s, b2.4s; \ + rev64 b3.4s, b3.4s; \ + ext b0.16b, b0.16b, b0.16b, #8; \ + ext b1.16b, b1.16b, b1.16b, #8; \ + ext b2.16b, b2.16b, b2.16b, #8; \ + ext b3.16b, b3.16b, b3.16b, #8; \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; + +#define 
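The rev64 + ext #8 + rev32 tail shared by the SM4_CRYPT_BLK* macros above implements the SM4 reverse transform R: the ciphertext is the last four state words in reverse order, written out big-endian. The scalar equivalent looks roughly like the following sketch; sm4_store_output is an illustrative name and put_unaligned_be32 comes from <asm/unaligned.h>:

/* Illustrative sketch, not from this patch. */
static void sm4_store_output(const u32 x[4] /* X32..X35 */, u8 out[16])
{
	int i;

	for (i = 0; i < 4; i++)		/* output = (X35, X34, X33, X32) */
		put_unaligned_be32(x[3 - i], out + 4 * i);
}
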
SM4_CRYPT_BLK4(b0, b1, b2, b3) \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + SM4_CRYPT_BLK4_BE(b0, b1, b2, b3); + +#define SM4_CRYPT_BLK8_BE(b0, b1, b2, b3, b4, b5, b6, b7) \ + sm4e b0.4s, v24.4s; \ + sm4e b1.4s, v24.4s; \ + sm4e b2.4s, v24.4s; \ + sm4e b3.4s, v24.4s; \ + sm4e b4.4s, v24.4s; \ + sm4e b5.4s, v24.4s; \ + sm4e b6.4s, v24.4s; \ + sm4e b7.4s, v24.4s; \ + sm4e b0.4s, v25.4s; \ + sm4e b1.4s, v25.4s; \ + sm4e b2.4s, v25.4s; \ + sm4e b3.4s, v25.4s; \ + sm4e b4.4s, v25.4s; \ + sm4e b5.4s, v25.4s; \ + sm4e b6.4s, v25.4s; \ + sm4e b7.4s, v25.4s; \ + sm4e b0.4s, v26.4s; \ + sm4e b1.4s, v26.4s; \ + sm4e b2.4s, v26.4s; \ + sm4e b3.4s, v26.4s; \ + sm4e b4.4s, v26.4s; \ + sm4e b5.4s, v26.4s; \ + sm4e b6.4s, v26.4s; \ + sm4e b7.4s, v26.4s; \ + sm4e b0.4s, v27.4s; \ + sm4e b1.4s, v27.4s; \ + sm4e b2.4s, v27.4s; \ + sm4e b3.4s, v27.4s; \ + sm4e b4.4s, v27.4s; \ + sm4e b5.4s, v27.4s; \ + sm4e b6.4s, v27.4s; \ + sm4e b7.4s, v27.4s; \ + sm4e b0.4s, v28.4s; \ + sm4e b1.4s, v28.4s; \ + sm4e b2.4s, v28.4s; \ + sm4e b3.4s, v28.4s; \ + sm4e b4.4s, v28.4s; \ + sm4e b5.4s, v28.4s; \ + sm4e b6.4s, v28.4s; \ + sm4e b7.4s, v28.4s; \ + sm4e b0.4s, v29.4s; \ + sm4e b1.4s, v29.4s; \ + sm4e b2.4s, v29.4s; \ + sm4e b3.4s, v29.4s; \ + sm4e b4.4s, v29.4s; \ + sm4e b5.4s, v29.4s; \ + sm4e b6.4s, v29.4s; \ + sm4e b7.4s, v29.4s; \ + sm4e b0.4s, v30.4s; \ + sm4e b1.4s, v30.4s; \ + sm4e b2.4s, v30.4s; \ + sm4e b3.4s, v30.4s; \ + sm4e b4.4s, v30.4s; \ + sm4e b5.4s, v30.4s; \ + sm4e b6.4s, v30.4s; \ + sm4e b7.4s, v30.4s; \ + sm4e b0.4s, v31.4s; \ + sm4e b1.4s, v31.4s; \ + sm4e b2.4s, v31.4s; \ + sm4e b3.4s, v31.4s; \ + sm4e b4.4s, v31.4s; \ + sm4e b5.4s, v31.4s; \ + sm4e b6.4s, v31.4s; \ + sm4e b7.4s, v31.4s; \ + rev64 b0.4s, b0.4s; \ + rev64 b1.4s, b1.4s; \ + rev64 b2.4s, b2.4s; \ + rev64 b3.4s, b3.4s; \ + rev64 b4.4s, b4.4s; \ + rev64 b5.4s, b5.4s; \ + rev64 b6.4s, b6.4s; \ + rev64 b7.4s, b7.4s; \ + ext b0.16b, b0.16b, b0.16b, #8; \ + ext b1.16b, b1.16b, b1.16b, #8; \ + ext b2.16b, b2.16b, b2.16b, #8; \ + ext b3.16b, b3.16b, b3.16b, #8; \ + ext b4.16b, b4.16b, b4.16b, #8; \ + ext b5.16b, b5.16b, b5.16b, #8; \ + ext b6.16b, b6.16b, b6.16b, #8; \ + ext b7.16b, b7.16b, b7.16b, #8; \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + rev32 b4.16b, b4.16b; \ + rev32 b5.16b, b5.16b; \ + rev32 b6.16b, b6.16b; \ + rev32 b7.16b, b7.16b; + +#define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + rev32 b4.16b, b4.16b; \ + rev32 b5.16b, b5.16b; \ + rev32 b6.16b, b6.16b; \ + rev32 b7.16b, b7.16b; \ + SM4_CRYPT_BLK8_BE(b0, b1, b2, b3, b4, b5, b6, b7); diff --git a/arch/arm64/crypto/sm4-ce-ccm-core.S b/arch/arm64/crypto/sm4-ce-ccm-core.S new file mode 100644 index 0000000000000000000000000000000000000000..028207c4afd0f76037336cf030375dd9b3ab4011 --- /dev/null +++ b/arch/arm64/crypto/sm4-ce-ccm-core.S @@ -0,0 +1,328 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4-CCM AEAD Algorithm using ARMv8 Crypto Extensions + * as specified in rfc8998 + * https://datatracker.ietf.org/doc/html/rfc8998 + * + * Copyright (C) 2022 Tianjia Zhang + */ + +#include +#include +#include "sm4-ce-asm.h" + +.arch armv8-a+crypto + +.irp b, 0, 1, 8, 9, 10, 11, 12, 13, 14, 15, 16, 24, 25, 26, 27, 28, 29, 30, 31 + .set .Lv\b\().4s, \b +.endr + +.macro sm4e, vd, vn + .inst 0xcec08400 | (.L\vn << 5) | .L\vd +.endm + +/* Register 
macros */ + +#define RMAC v16 + +/* Helper macros. */ + +#define inc_le128(vctr) \ + mov vctr.d[1], x8; \ + mov vctr.d[0], x7; \ + adds x8, x8, #1; \ + rev64 vctr.16b, vctr.16b; \ + adc x7, x7, xzr; + + +.align 3 +SYM_FUNC_START(sm4_ce_cbcmac_update) + /* input: + * x0: round key array, CTX + * x1: mac + * x2: src + * w3: nblocks + */ + SM4_PREPARE(x0) + + ld1 {RMAC.16b}, [x1] + +.Lcbcmac_loop_4x: + cmp w3, #4 + blt .Lcbcmac_loop_1x + + sub w3, w3, #4 + + ld1 {v0.16b-v3.16b}, [x2], #64 + + SM4_CRYPT_BLK(RMAC) + eor RMAC.16b, RMAC.16b, v0.16b + SM4_CRYPT_BLK(RMAC) + eor RMAC.16b, RMAC.16b, v1.16b + SM4_CRYPT_BLK(RMAC) + eor RMAC.16b, RMAC.16b, v2.16b + SM4_CRYPT_BLK(RMAC) + eor RMAC.16b, RMAC.16b, v3.16b + + cbz w3, .Lcbcmac_end + b .Lcbcmac_loop_4x + +.Lcbcmac_loop_1x: + sub w3, w3, #1 + + ld1 {v0.16b}, [x2], #16 + + SM4_CRYPT_BLK(RMAC) + eor RMAC.16b, RMAC.16b, v0.16b + + cbnz w3, .Lcbcmac_loop_1x + +.Lcbcmac_end: + st1 {RMAC.16b}, [x1] + ret +SYM_FUNC_END(sm4_ce_cbcmac_update) + +.align 3 +SYM_FUNC_START(sm4_ce_ccm_final) + /* input: + * x0: round key array, CTX + * x1: ctr0 (big endian, 128 bit) + * x2: mac + */ + SM4_PREPARE(x0) + + ld1 {RMAC.16b}, [x2] + ld1 {v0.16b}, [x1] + + SM4_CRYPT_BLK2(RMAC, v0) + + /* en-/decrypt the mac with ctr0 */ + eor RMAC.16b, RMAC.16b, v0.16b + st1 {RMAC.16b}, [x2] + + ret +SYM_FUNC_END(sm4_ce_ccm_final) + +.align 3 +SYM_FUNC_START(sm4_ce_ccm_enc) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: ctr (big endian, 128 bit) + * w4: nbytes + * x5: mac + */ + SM4_PREPARE(x0) + + ldp x7, x8, [x3] + rev x7, x7 + rev x8, x8 + + ld1 {RMAC.16b}, [x5] + +.Lccm_enc_loop_4x: + cmp w4, #(4 * 16) + blt .Lccm_enc_loop_1x + + sub w4, w4, #(4 * 16) + + /* construct CTRs */ + inc_le128(v8) /* +0 */ + inc_le128(v9) /* +1 */ + inc_le128(v10) /* +2 */ + inc_le128(v11) /* +3 */ + + ld1 {v0.16b-v3.16b}, [x2], #64 + + SM4_CRYPT_BLK2(v8, RMAC) + eor v8.16b, v8.16b, v0.16b + eor RMAC.16b, RMAC.16b, v0.16b + SM4_CRYPT_BLK2(v9, RMAC) + eor v9.16b, v9.16b, v1.16b + eor RMAC.16b, RMAC.16b, v1.16b + SM4_CRYPT_BLK2(v10, RMAC) + eor v10.16b, v10.16b, v2.16b + eor RMAC.16b, RMAC.16b, v2.16b + SM4_CRYPT_BLK2(v11, RMAC) + eor v11.16b, v11.16b, v3.16b + eor RMAC.16b, RMAC.16b, v3.16b + + st1 {v8.16b-v11.16b}, [x1], #64 + + cbz w4, .Lccm_enc_end + b .Lccm_enc_loop_4x + +.Lccm_enc_loop_1x: + cmp w4, #16 + blt .Lccm_enc_tail + + sub w4, w4, #16 + + /* construct CTRs */ + inc_le128(v8) + + ld1 {v0.16b}, [x2], #16 + + SM4_CRYPT_BLK2(v8, RMAC) + eor v8.16b, v8.16b, v0.16b + eor RMAC.16b, RMAC.16b, v0.16b + + st1 {v8.16b}, [x1], #16 + + cbz w4, .Lccm_enc_end + b .Lccm_enc_loop_1x + +.Lccm_enc_tail: + /* construct CTRs */ + inc_le128(v8) + + SM4_CRYPT_BLK2(RMAC, v8) + + /* store new MAC */ + st1 {RMAC.16b}, [x5] + +.Lccm_enc_tail_loop: + ldrb w0, [x2], #1 /* get 1 byte from input */ + umov w9, v8.b[0] /* get top crypted CTR byte */ + umov w6, RMAC.b[0] /* get top MAC byte */ + + eor w9, w9, w0 /* w9 = CTR ^ input */ + eor w6, w6, w0 /* w6 = MAC ^ input */ + + strb w9, [x1], #1 /* store out byte */ + strb w6, [x5], #1 /* store MAC byte */ + + subs w4, w4, #1 + beq .Lccm_enc_ret + + /* shift out one byte */ + ext RMAC.16b, RMAC.16b, RMAC.16b, #1 + ext v8.16b, v8.16b, v8.16b, #1 + + b .Lccm_enc_tail_loop + +.Lccm_enc_end: + /* store new MAC */ + st1 {RMAC.16b}, [x5] + + /* store new CTR */ + rev x7, x7 + rev x8, x8 + stp x7, x8, [x3] + +.Lccm_enc_ret: + ret +SYM_FUNC_END(sm4_ce_ccm_enc) + +.align 3 +SYM_FUNC_START(sm4_ce_ccm_dec) + /* input: + * x0: round key array, CTX + * x1: 
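The inc_le128 helper above keeps the 128-bit big-endian counter in x7:x8 as native integers, emits the current value into the vector lane via a byte reversal, then increments with carry. A scalar equivalent, with ctr_emit_and_inc an illustrative name and put_unaligned_be64 from <asm/unaligned.h>:

/* Illustrative sketch, not from this patch. */
static void ctr_emit_and_inc(u64 *hi, u64 *lo, u8 block[16])
{
	put_unaligned_be64(*hi, block);		/* emit current value ... */
	put_unaligned_be64(*lo, block + 8);

	if (++*lo == 0)				/* ... then increment, 128-bit */
		++*hi;
}
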
dst + * x2: src + * x3: ctr (big endian, 128 bit) + * w4: nbytes + * x5: mac + */ + SM4_PREPARE(x0) + + ldp x7, x8, [x3] + rev x7, x7 + rev x8, x8 + + ld1 {RMAC.16b}, [x5] + +.Lccm_dec_loop_4x: + cmp w4, #(4 * 16) + blt .Lccm_dec_loop_1x + + sub w4, w4, #(4 * 16) + + /* construct CTRs */ + inc_le128(v8) /* +0 */ + inc_le128(v9) /* +1 */ + inc_le128(v10) /* +2 */ + inc_le128(v11) /* +3 */ + + ld1 {v0.16b-v3.16b}, [x2], #64 + + SM4_CRYPT_BLK2(v8, RMAC) + eor v8.16b, v8.16b, v0.16b + eor RMAC.16b, RMAC.16b, v8.16b + SM4_CRYPT_BLK2(v9, RMAC) + eor v9.16b, v9.16b, v1.16b + eor RMAC.16b, RMAC.16b, v9.16b + SM4_CRYPT_BLK2(v10, RMAC) + eor v10.16b, v10.16b, v2.16b + eor RMAC.16b, RMAC.16b, v10.16b + SM4_CRYPT_BLK2(v11, RMAC) + eor v11.16b, v11.16b, v3.16b + eor RMAC.16b, RMAC.16b, v11.16b + + st1 {v8.16b-v11.16b}, [x1], #64 + + cbz w4, .Lccm_dec_end + b .Lccm_dec_loop_4x + +.Lccm_dec_loop_1x: + cmp w4, #16 + blt .Lccm_dec_tail + + sub w4, w4, #16 + + /* construct CTRs */ + inc_le128(v8) + + ld1 {v0.16b}, [x2], #16 + + SM4_CRYPT_BLK2(v8, RMAC) + eor v8.16b, v8.16b, v0.16b + eor RMAC.16b, RMAC.16b, v8.16b + + st1 {v8.16b}, [x1], #16 + + cbz w4, .Lccm_dec_end + b .Lccm_dec_loop_1x + +.Lccm_dec_tail: + /* construct CTRs */ + inc_le128(v8) + + SM4_CRYPT_BLK2(RMAC, v8) + + /* store new MAC */ + st1 {RMAC.16b}, [x5] + +.Lccm_dec_tail_loop: + ldrb w0, [x2], #1 /* get 1 byte from input */ + umov w9, v8.b[0] /* get top crypted CTR byte */ + umov w6, RMAC.b[0] /* get top MAC byte */ + + eor w9, w9, w0 /* w9 = CTR ^ input */ + eor w6, w6, w9 /* w6 = MAC ^ output */ + + strb w9, [x1], #1 /* store out byte */ + strb w6, [x5], #1 /* store MAC byte */ + + subs w4, w4, #1 + beq .Lccm_dec_ret + + /* shift out one byte */ + ext RMAC.16b, RMAC.16b, RMAC.16b, #1 + ext v8.16b, v8.16b, v8.16b, #1 + + b .Lccm_dec_tail_loop + +.Lccm_dec_end: + /* store new MAC */ + st1 {RMAC.16b}, [x5] + + /* store new CTR */ + rev x7, x7 + rev x8, x8 + stp x7, x8, [x3] + +.Lccm_dec_ret: + ret +SYM_FUNC_END(sm4_ce_ccm_dec) diff --git a/arch/arm64/crypto/sm4-ce-ccm-glue.c b/arch/arm64/crypto/sm4-ce-ccm-glue.c new file mode 100644 index 0000000000000000000000000000000000000000..f2cec7b52efcd15f9d1dd59194384524e1a0d6e1 --- /dev/null +++ b/arch/arm64/crypto/sm4-ce-ccm-glue.c @@ -0,0 +1,303 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4-CCM AEAD Algorithm using ARMv8 Crypto Extensions + * as specified in rfc8998 + * https://datatracker.ietf.org/doc/html/rfc8998 + * + * Copyright (C) 2022 Tianjia Zhang + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sm4-ce.h" + +asmlinkage void sm4_ce_cbcmac_update(const u32 *rkey_enc, u8 *mac, + const u8 *src, unsigned int nblocks); +asmlinkage void sm4_ce_ccm_enc(const u32 *rkey_enc, u8 *dst, const u8 *src, + u8 *iv, unsigned int nbytes, u8 *mac); +asmlinkage void sm4_ce_ccm_dec(const u32 *rkey_enc, u8 *dst, const u8 *src, + u8 *iv, unsigned int nbytes, u8 *mac); +asmlinkage void sm4_ce_ccm_final(const u32 *rkey_enc, u8 *iv, u8 *mac); + + +static int ccm_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int key_len) +{ + struct sm4_ctx *ctx = crypto_aead_ctx(tfm); + + if (key_len != SM4_KEY_SIZE) + return -EINVAL; + + kernel_neon_begin(); + sm4_ce_expand_key(key, ctx->rkey_enc, ctx->rkey_dec, + crypto_sm4_fk, crypto_sm4_ck); + kernel_neon_end(); + + return 0; +} + +static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + if ((authsize & 1) || authsize < 4) + return -EINVAL; + return 0; +} + +static int 
ccm_format_input(u8 info[], struct aead_request *req, + unsigned int msglen) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + unsigned int l = req->iv[0] + 1; + unsigned int m; + __be32 len; + + /* verify that CCM dimension 'L': 2 <= L <= 8 */ + if (l < 2 || l > 8) + return -EINVAL; + if (l < 4 && msglen >> (8 * l)) + return -EOVERFLOW; + + memset(&req->iv[SM4_BLOCK_SIZE - l], 0, l); + + memcpy(info, req->iv, SM4_BLOCK_SIZE); + + m = crypto_aead_authsize(aead); + + /* format flags field per RFC 3610/NIST 800-38C */ + *info |= ((m - 2) / 2) << 3; + if (req->assoclen) + *info |= (1 << 6); + + /* + * format message length field, + * Linux uses a u32 type to represent msglen + */ + if (l >= 4) + l = 4; + + len = cpu_to_be32(msglen); + memcpy(&info[SM4_BLOCK_SIZE - l], (u8 *)&len + 4 - l, l); + + return 0; +} + +static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct sm4_ctx *ctx = crypto_aead_ctx(aead); + struct __packed { __be16 l; __be32 h; } aadlen; + u32 assoclen = req->assoclen; + struct scatter_walk walk; + unsigned int len; + + if (assoclen < 0xff00) { + aadlen.l = cpu_to_be16(assoclen); + len = 2; + } else { + aadlen.l = cpu_to_be16(0xfffe); + put_unaligned_be32(assoclen, &aadlen.h); + len = 6; + } + + sm4_ce_crypt_block(ctx->rkey_enc, mac, mac); + crypto_xor(mac, (const u8 *)&aadlen, len); + + scatterwalk_start(&walk, req->src); + + do { + u32 n = scatterwalk_clamp(&walk, assoclen); + u8 *p, *ptr; + + if (!n) { + scatterwalk_start(&walk, sg_next(walk.sg)); + n = scatterwalk_clamp(&walk, assoclen); + } + + p = ptr = scatterwalk_map(&walk); + assoclen -= n; + scatterwalk_advance(&walk, n); + + while (n > 0) { + unsigned int l, nblocks; + + if (len == SM4_BLOCK_SIZE) { + if (n < SM4_BLOCK_SIZE) { + sm4_ce_crypt_block(ctx->rkey_enc, + mac, mac); + + len = 0; + } else { + nblocks = n / SM4_BLOCK_SIZE; + sm4_ce_cbcmac_update(ctx->rkey_enc, + mac, ptr, nblocks); + + ptr += nblocks * SM4_BLOCK_SIZE; + n %= SM4_BLOCK_SIZE; + + continue; + } + } + + l = min(n, SM4_BLOCK_SIZE - len); + if (l) { + crypto_xor(mac + len, ptr, l); + len += l; + ptr += l; + n -= l; + } + } + + scatterwalk_unmap(p); + scatterwalk_done(&walk, 0, assoclen); + } while (assoclen); +} + +static int ccm_crypt(struct aead_request *req, struct skcipher_walk *walk, + u32 *rkey_enc, u8 mac[], + void (*sm4_ce_ccm_crypt)(const u32 *rkey_enc, u8 *dst, + const u8 *src, u8 *iv, + unsigned int nbytes, u8 *mac)) +{ + u8 __aligned(8) ctr0[SM4_BLOCK_SIZE]; + int err; + + /* preserve the initial ctr0 for the TAG */ + memcpy(ctr0, walk->iv, SM4_BLOCK_SIZE); + crypto_inc(walk->iv, SM4_BLOCK_SIZE); + + kernel_neon_begin(); + + if (req->assoclen) + ccm_calculate_auth_mac(req, mac); + + do { + unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE; + const u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + + if (walk->nbytes == walk->total) + tail = 0; + + if (walk->nbytes - tail) + sm4_ce_ccm_crypt(rkey_enc, dst, src, walk->iv, + walk->nbytes - tail, mac); + + if (walk->nbytes == walk->total) + sm4_ce_ccm_final(rkey_enc, ctr0, mac); + + kernel_neon_end(); + + if (walk->nbytes) { + err = skcipher_walk_done(walk, tail); + if (err) + return err; + if (walk->nbytes) + kernel_neon_begin(); + } + } while (walk->nbytes > 0); + + return 0; +} + +static int ccm_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct sm4_ctx *ctx = crypto_aead_ctx(aead); + u8 __aligned(8) mac[SM4_BLOCK_SIZE]; + struct 
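ccm_format_input() above builds the CCM B0 block of RFC 3610 section 2.2: req->iv[0] carries L - 1 (where L = 15 - nonce length), the tag length M is encoded as (M - 2) / 2 in bits 3-5, and bit 6 flags the presence of associated data. A worked sketch, with ccm_b0_flags an illustrative helper, not from this patch:

/* Illustrative sketch, not from this patch. */
static u8 ccm_b0_flags(bool have_aad, unsigned int m, unsigned int l)
{
	return (have_aad ? 1 << 6 : 0) | (((m - 2) / 2) << 3) | (l - 1);
}

/*
 * e.g. a 13-byte nonce gives l = 2 (so iv[0] = 1); with a 16-byte tag
 * and non-empty AAD:
 *	ccm_b0_flags(true, 16, 2) == 0x40 | 0x38 | 0x01 == 0x79
 */
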
skcipher_walk walk; + int err; + + err = ccm_format_input(mac, req, req->cryptlen); + if (err) + return err; + + err = skcipher_walk_aead_encrypt(&walk, req, false); + if (err) + return err; + + err = ccm_crypt(req, &walk, ctx->rkey_enc, mac, sm4_ce_ccm_enc); + if (err) + return err; + + /* copy authtag to end of dst */ + scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen, + crypto_aead_authsize(aead), 1); + + return 0; +} + +static int ccm_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + unsigned int authsize = crypto_aead_authsize(aead); + struct sm4_ctx *ctx = crypto_aead_ctx(aead); + u8 __aligned(8) mac[SM4_BLOCK_SIZE]; + u8 authtag[SM4_BLOCK_SIZE]; + struct skcipher_walk walk; + int err; + + err = ccm_format_input(mac, req, req->cryptlen - authsize); + if (err) + return err; + + err = skcipher_walk_aead_decrypt(&walk, req, false); + if (err) + return err; + + err = ccm_crypt(req, &walk, ctx->rkey_enc, mac, sm4_ce_ccm_dec); + if (err) + return err; + + /* compare calculated auth tag with the stored one */ + scatterwalk_map_and_copy(authtag, req->src, + req->assoclen + req->cryptlen - authsize, + authsize, 0); + + if (crypto_memneq(authtag, mac, authsize)) + return -EBADMSG; + + return 0; +} + +static struct aead_alg sm4_ccm_alg = { + .base = { + .cra_name = "ccm(sm4)", + .cra_driver_name = "ccm-sm4-ce", + .cra_priority = 400, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .maxauthsize = SM4_BLOCK_SIZE, + .setkey = ccm_setkey, + .setauthsize = ccm_setauthsize, + .encrypt = ccm_encrypt, + .decrypt = ccm_decrypt, +}; + +static int __init sm4_ce_ccm_init(void) +{ + return crypto_register_aead(&sm4_ccm_alg); +} + +static void __exit sm4_ce_ccm_exit(void) +{ + crypto_unregister_aead(&sm4_ccm_alg); +} + +module_cpu_feature_match(SM4, sm4_ce_ccm_init); +module_exit(sm4_ce_ccm_exit); + +MODULE_DESCRIPTION("Synchronous SM4 in CCM mode using ARMv8 Crypto Extensions"); +MODULE_ALIAS_CRYPTO("ccm(sm4)"); +MODULE_AUTHOR("Tianjia Zhang "); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm64/crypto/sm4-ce-cipher-glue.c b/arch/arm64/crypto/sm4-ce-cipher-glue.c index 76a34ef4abbbf198d1fe62e898c37da4c4d3e365..c31d76fb5a17707440b299d164c68a41e2308f57 100644 --- a/arch/arm64/crypto/sm4-ce-cipher-glue.c +++ b/arch/arm64/crypto/sm4-ce-cipher-glue.c @@ -2,11 +2,11 @@ #include #include +#include #include #include #include #include -#include #include MODULE_ALIAS_CRYPTO("sm4"); diff --git a/arch/arm64/crypto/sm4-ce-core.S b/arch/arm64/crypto/sm4-ce-core.S index 934e0f093279968362d7ddc2e4c1e7774c34077e..877b80c54a0d3aa8610a28256964847387585609 100644 --- a/arch/arm64/crypto/sm4-ce-core.S +++ b/arch/arm64/crypto/sm4-ce-core.S @@ -10,10 +10,12 @@ #include #include +#include "sm4-ce-asm.h" .arch armv8-a+crypto -.irp b, 0, 1, 2, 3, 4, 5, 6, 7, 16, 20, 24, 25, 26, 27, 28, 29, 30, 31 +.irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, \ + 20, 24, 25, 26, 27, 28, 29, 30, 31 .set .Lv\b\().4s, \b .endr @@ -33,174 +35,8 @@ #define RTMP3 v19 #define RIV v20 - -/* Helper macros. 
*/ - -#define PREPARE \ - ld1 {v24.16b-v27.16b}, [x0], #64; \ - ld1 {v28.16b-v31.16b}, [x0]; - -#define SM4_CRYPT_BLK(b0) \ - rev32 b0.16b, b0.16b; \ - sm4e b0.4s, v24.4s; \ - sm4e b0.4s, v25.4s; \ - sm4e b0.4s, v26.4s; \ - sm4e b0.4s, v27.4s; \ - sm4e b0.4s, v28.4s; \ - sm4e b0.4s, v29.4s; \ - sm4e b0.4s, v30.4s; \ - sm4e b0.4s, v31.4s; \ - rev64 b0.4s, b0.4s; \ - ext b0.16b, b0.16b, b0.16b, #8; \ - rev32 b0.16b, b0.16b; - -#define SM4_CRYPT_BLK4(b0, b1, b2, b3) \ - rev32 b0.16b, b0.16b; \ - rev32 b1.16b, b1.16b; \ - rev32 b2.16b, b2.16b; \ - rev32 b3.16b, b3.16b; \ - sm4e b0.4s, v24.4s; \ - sm4e b1.4s, v24.4s; \ - sm4e b2.4s, v24.4s; \ - sm4e b3.4s, v24.4s; \ - sm4e b0.4s, v25.4s; \ - sm4e b1.4s, v25.4s; \ - sm4e b2.4s, v25.4s; \ - sm4e b3.4s, v25.4s; \ - sm4e b0.4s, v26.4s; \ - sm4e b1.4s, v26.4s; \ - sm4e b2.4s, v26.4s; \ - sm4e b3.4s, v26.4s; \ - sm4e b0.4s, v27.4s; \ - sm4e b1.4s, v27.4s; \ - sm4e b2.4s, v27.4s; \ - sm4e b3.4s, v27.4s; \ - sm4e b0.4s, v28.4s; \ - sm4e b1.4s, v28.4s; \ - sm4e b2.4s, v28.4s; \ - sm4e b3.4s, v28.4s; \ - sm4e b0.4s, v29.4s; \ - sm4e b1.4s, v29.4s; \ - sm4e b2.4s, v29.4s; \ - sm4e b3.4s, v29.4s; \ - sm4e b0.4s, v30.4s; \ - sm4e b1.4s, v30.4s; \ - sm4e b2.4s, v30.4s; \ - sm4e b3.4s, v30.4s; \ - sm4e b0.4s, v31.4s; \ - sm4e b1.4s, v31.4s; \ - sm4e b2.4s, v31.4s; \ - sm4e b3.4s, v31.4s; \ - rev64 b0.4s, b0.4s; \ - rev64 b1.4s, b1.4s; \ - rev64 b2.4s, b2.4s; \ - rev64 b3.4s, b3.4s; \ - ext b0.16b, b0.16b, b0.16b, #8; \ - ext b1.16b, b1.16b, b1.16b, #8; \ - ext b2.16b, b2.16b, b2.16b, #8; \ - ext b3.16b, b3.16b, b3.16b, #8; \ - rev32 b0.16b, b0.16b; \ - rev32 b1.16b, b1.16b; \ - rev32 b2.16b, b2.16b; \ - rev32 b3.16b, b3.16b; - -#define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \ - rev32 b0.16b, b0.16b; \ - rev32 b1.16b, b1.16b; \ - rev32 b2.16b, b2.16b; \ - rev32 b3.16b, b3.16b; \ - rev32 b4.16b, b4.16b; \ - rev32 b5.16b, b5.16b; \ - rev32 b6.16b, b6.16b; \ - rev32 b7.16b, b7.16b; \ - sm4e b0.4s, v24.4s; \ - sm4e b1.4s, v24.4s; \ - sm4e b2.4s, v24.4s; \ - sm4e b3.4s, v24.4s; \ - sm4e b4.4s, v24.4s; \ - sm4e b5.4s, v24.4s; \ - sm4e b6.4s, v24.4s; \ - sm4e b7.4s, v24.4s; \ - sm4e b0.4s, v25.4s; \ - sm4e b1.4s, v25.4s; \ - sm4e b2.4s, v25.4s; \ - sm4e b3.4s, v25.4s; \ - sm4e b4.4s, v25.4s; \ - sm4e b5.4s, v25.4s; \ - sm4e b6.4s, v25.4s; \ - sm4e b7.4s, v25.4s; \ - sm4e b0.4s, v26.4s; \ - sm4e b1.4s, v26.4s; \ - sm4e b2.4s, v26.4s; \ - sm4e b3.4s, v26.4s; \ - sm4e b4.4s, v26.4s; \ - sm4e b5.4s, v26.4s; \ - sm4e b6.4s, v26.4s; \ - sm4e b7.4s, v26.4s; \ - sm4e b0.4s, v27.4s; \ - sm4e b1.4s, v27.4s; \ - sm4e b2.4s, v27.4s; \ - sm4e b3.4s, v27.4s; \ - sm4e b4.4s, v27.4s; \ - sm4e b5.4s, v27.4s; \ - sm4e b6.4s, v27.4s; \ - sm4e b7.4s, v27.4s; \ - sm4e b0.4s, v28.4s; \ - sm4e b1.4s, v28.4s; \ - sm4e b2.4s, v28.4s; \ - sm4e b3.4s, v28.4s; \ - sm4e b4.4s, v28.4s; \ - sm4e b5.4s, v28.4s; \ - sm4e b6.4s, v28.4s; \ - sm4e b7.4s, v28.4s; \ - sm4e b0.4s, v29.4s; \ - sm4e b1.4s, v29.4s; \ - sm4e b2.4s, v29.4s; \ - sm4e b3.4s, v29.4s; \ - sm4e b4.4s, v29.4s; \ - sm4e b5.4s, v29.4s; \ - sm4e b6.4s, v29.4s; \ - sm4e b7.4s, v29.4s; \ - sm4e b0.4s, v30.4s; \ - sm4e b1.4s, v30.4s; \ - sm4e b2.4s, v30.4s; \ - sm4e b3.4s, v30.4s; \ - sm4e b4.4s, v30.4s; \ - sm4e b5.4s, v30.4s; \ - sm4e b6.4s, v30.4s; \ - sm4e b7.4s, v30.4s; \ - sm4e b0.4s, v31.4s; \ - sm4e b1.4s, v31.4s; \ - sm4e b2.4s, v31.4s; \ - sm4e b3.4s, v31.4s; \ - sm4e b4.4s, v31.4s; \ - sm4e b5.4s, v31.4s; \ - sm4e b6.4s, v31.4s; \ - sm4e b7.4s, v31.4s; \ - rev64 b0.4s, b0.4s; \ - rev64 b1.4s, b1.4s; \ - rev64 b2.4s, 
b2.4s; \ - rev64 b3.4s, b3.4s; \ - rev64 b4.4s, b4.4s; \ - rev64 b5.4s, b5.4s; \ - rev64 b6.4s, b6.4s; \ - rev64 b7.4s, b7.4s; \ - ext b0.16b, b0.16b, b0.16b, #8; \ - ext b1.16b, b1.16b, b1.16b, #8; \ - ext b2.16b, b2.16b, b2.16b, #8; \ - ext b3.16b, b3.16b, b3.16b, #8; \ - ext b4.16b, b4.16b, b4.16b, #8; \ - ext b5.16b, b5.16b, b5.16b, #8; \ - ext b6.16b, b6.16b, b6.16b, #8; \ - ext b7.16b, b7.16b, b7.16b, #8; \ - rev32 b0.16b, b0.16b; \ - rev32 b1.16b, b1.16b; \ - rev32 b2.16b, b2.16b; \ - rev32 b3.16b, b3.16b; \ - rev32 b4.16b, b4.16b; \ - rev32 b5.16b, b5.16b; \ - rev32 b6.16b, b6.16b; \ - rev32 b7.16b, b7.16b; +#define RMAC v20 +#define RMASK v21 .align 3 @@ -231,32 +67,23 @@ SYM_FUNC_START(sm4_ce_expand_key) sm4ekey v6.4s, v5.4s, v30.4s; sm4ekey v7.4s, v6.4s, v31.4s; + adr_l x5, .Lbswap128_mask + ld1 {v24.16b}, [x5] + st1 {v0.16b-v3.16b}, [x1], #64; st1 {v4.16b-v7.16b}, [x1]; - rev64 v7.4s, v7.4s; - rev64 v6.4s, v6.4s; - rev64 v5.4s, v5.4s; - rev64 v4.4s, v4.4s; - rev64 v3.4s, v3.4s; - rev64 v2.4s, v2.4s; - rev64 v1.4s, v1.4s; - rev64 v0.4s, v0.4s; - ext v7.16b, v7.16b, v7.16b, #8; - ext v6.16b, v6.16b, v6.16b, #8; - ext v5.16b, v5.16b, v5.16b, #8; - ext v4.16b, v4.16b, v4.16b, #8; - ext v3.16b, v3.16b, v3.16b, #8; - ext v2.16b, v2.16b, v2.16b, #8; - ext v1.16b, v1.16b, v1.16b, #8; - ext v0.16b, v0.16b, v0.16b, #8; - st1 {v7.16b}, [x2], #16; - st1 {v6.16b}, [x2], #16; - st1 {v5.16b}, [x2], #16; - st1 {v4.16b}, [x2], #16; - st1 {v3.16b}, [x2], #16; - st1 {v2.16b}, [x2], #16; - st1 {v1.16b}, [x2], #16; - st1 {v0.16b}, [x2]; + + tbl v16.16b, {v7.16b}, v24.16b + tbl v17.16b, {v6.16b}, v24.16b + tbl v18.16b, {v5.16b}, v24.16b + tbl v19.16b, {v4.16b}, v24.16b + tbl v20.16b, {v3.16b}, v24.16b + tbl v21.16b, {v2.16b}, v24.16b + tbl v22.16b, {v1.16b}, v24.16b + tbl v23.16b, {v0.16b}, v24.16b + + st1 {v16.16b-v19.16b}, [x2], #64 + st1 {v20.16b-v23.16b}, [x2] ret; SYM_FUNC_END(sm4_ce_expand_key) @@ -268,7 +95,7 @@ SYM_FUNC_START(sm4_ce_crypt_block) * x1: dst * x2: src */ - PREPARE; + SM4_PREPARE(x0) ld1 {v0.16b}, [x2]; SM4_CRYPT_BLK(v0); @@ -285,7 +112,7 @@ SYM_FUNC_START(sm4_ce_crypt) * x2: src * w3: nblocks */ - PREPARE; + SM4_PREPARE(x0) .Lcrypt_loop_blk: sub w3, w3, #8; @@ -337,26 +164,50 @@ SYM_FUNC_START(sm4_ce_cbc_enc) * x3: iv (big endian, 128 bit) * w4: nblocks */ - PREPARE; + SM4_PREPARE(x0) + + ld1 {RIV.16b}, [x3] + +.Lcbc_enc_loop_4x: + cmp w4, #4 + blt .Lcbc_enc_loop_1x + + sub w4, w4, #4 - ld1 {RIV.16b}, [x3]; + ld1 {v0.16b-v3.16b}, [x2], #64 -.Lcbc_enc_loop: - sub w4, w4, #1; + eor v0.16b, v0.16b, RIV.16b + SM4_CRYPT_BLK(v0) + eor v1.16b, v1.16b, v0.16b + SM4_CRYPT_BLK(v1) + eor v2.16b, v2.16b, v1.16b + SM4_CRYPT_BLK(v2) + eor v3.16b, v3.16b, v2.16b + SM4_CRYPT_BLK(v3) - ld1 {RTMP0.16b}, [x2], #16; - eor RIV.16b, RIV.16b, RTMP0.16b; + st1 {v0.16b-v3.16b}, [x1], #64 + mov RIV.16b, v3.16b - SM4_CRYPT_BLK(RIV); + cbz w4, .Lcbc_enc_end + b .Lcbc_enc_loop_4x - st1 {RIV.16b}, [x1], #16; +.Lcbc_enc_loop_1x: + sub w4, w4, #1 - cbnz w4, .Lcbc_enc_loop; + ld1 {v0.16b}, [x2], #16 + eor RIV.16b, RIV.16b, v0.16b + SM4_CRYPT_BLK(RIV) + + st1 {RIV.16b}, [x1], #16 + + cbnz w4, .Lcbc_enc_loop_1x + +.Lcbc_enc_end: /* store new IV */ - st1 {RIV.16b}, [x3]; + st1 {RIV.16b}, [x3] - ret; + ret SYM_FUNC_END(sm4_ce_cbc_enc) .align 3 @@ -368,81 +219,189 @@ SYM_FUNC_START(sm4_ce_cbc_dec) * x3: iv (big endian, 128 bit) * w4: nblocks */ - PREPARE; + SM4_PREPARE(x0) - ld1 {RIV.16b}, [x3]; + ld1 {RIV.16b}, [x3] -.Lcbc_loop_blk: - sub w4, w4, #8; - tbnz w4, #31, .Lcbc_tail8; +.Lcbc_dec_loop_8x: + sub w4, w4, 
#8 + tbnz w4, #31, .Lcbc_dec_4x - ld1 {v0.16b-v3.16b}, [x2], #64; - ld1 {v4.16b-v7.16b}, [x2]; + ld1 {v0.16b-v3.16b}, [x2], #64 + ld1 {v4.16b-v7.16b}, [x2], #64 - SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + rev32 v8.16b, v0.16b + rev32 v9.16b, v1.16b + rev32 v10.16b, v2.16b + rev32 v11.16b, v3.16b + rev32 v12.16b, v4.16b + rev32 v13.16b, v5.16b + rev32 v14.16b, v6.16b + rev32 v15.16b, v7.16b - sub x2, x2, #64; - eor v0.16b, v0.16b, RIV.16b; - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v1.16b, v1.16b, RTMP0.16b; - eor v2.16b, v2.16b, RTMP1.16b; - eor v3.16b, v3.16b, RTMP2.16b; - st1 {v0.16b-v3.16b}, [x1], #64; + SM4_CRYPT_BLK8_BE(v8, v9, v10, v11, v12, v13, v14, v15) - eor v4.16b, v4.16b, RTMP3.16b; - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v5.16b, v5.16b, RTMP0.16b; - eor v6.16b, v6.16b, RTMP1.16b; - eor v7.16b, v7.16b, RTMP2.16b; + eor v8.16b, v8.16b, RIV.16b + eor v9.16b, v9.16b, v0.16b + eor v10.16b, v10.16b, v1.16b + eor v11.16b, v11.16b, v2.16b + eor v12.16b, v12.16b, v3.16b + eor v13.16b, v13.16b, v4.16b + eor v14.16b, v14.16b, v5.16b + eor v15.16b, v15.16b, v6.16b - mov RIV.16b, RTMP3.16b; - st1 {v4.16b-v7.16b}, [x1], #64; + st1 {v8.16b-v11.16b}, [x1], #64 + st1 {v12.16b-v15.16b}, [x1], #64 - cbz w4, .Lcbc_end; - b .Lcbc_loop_blk; + mov RIV.16b, v7.16b -.Lcbc_tail8: - add w4, w4, #8; - cmp w4, #4; - blt .Lcbc_tail4; + cbz w4, .Lcbc_dec_end + b .Lcbc_dec_loop_8x - sub w4, w4, #4; +.Lcbc_dec_4x: + add w4, w4, #8 + cmp w4, #4 + blt .Lcbc_dec_loop_1x - ld1 {v0.16b-v3.16b}, [x2]; + sub w4, w4, #4 - SM4_CRYPT_BLK4(v0, v1, v2, v3); + ld1 {v0.16b-v3.16b}, [x2], #64 - eor v0.16b, v0.16b, RIV.16b; - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v1.16b, v1.16b, RTMP0.16b; - eor v2.16b, v2.16b, RTMP1.16b; - eor v3.16b, v3.16b, RTMP2.16b; + rev32 v8.16b, v0.16b + rev32 v9.16b, v1.16b + rev32 v10.16b, v2.16b + rev32 v11.16b, v3.16b - mov RIV.16b, RTMP3.16b; - st1 {v0.16b-v3.16b}, [x1], #64; + SM4_CRYPT_BLK4_BE(v8, v9, v10, v11) - cbz w4, .Lcbc_end; + eor v8.16b, v8.16b, RIV.16b + eor v9.16b, v9.16b, v0.16b + eor v10.16b, v10.16b, v1.16b + eor v11.16b, v11.16b, v2.16b -.Lcbc_tail4: - sub w4, w4, #1; + st1 {v8.16b-v11.16b}, [x1], #64 - ld1 {v0.16b}, [x2]; + mov RIV.16b, v3.16b - SM4_CRYPT_BLK(v0); + cbz w4, .Lcbc_dec_end - eor v0.16b, v0.16b, RIV.16b; - ld1 {RIV.16b}, [x2], #16; - st1 {v0.16b}, [x1], #16; +.Lcbc_dec_loop_1x: + sub w4, w4, #1 + + ld1 {v0.16b}, [x2], #16 - cbnz w4, .Lcbc_tail4; + rev32 v8.16b, v0.16b -.Lcbc_end: + SM4_CRYPT_BLK_BE(v8) + + eor v8.16b, v8.16b, RIV.16b + st1 {v8.16b}, [x1], #16 + + mov RIV.16b, v0.16b + + cbnz w4, .Lcbc_dec_loop_1x + +.Lcbc_dec_end: /* store new IV */ - st1 {RIV.16b}, [x3]; + st1 {RIV.16b}, [x3] - ret; + ret SYM_FUNC_END(sm4_ce_cbc_dec) +.align 3 +SYM_FUNC_START(sm4_ce_cbc_cts_enc) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: iv (big endian, 128 bit) + * w4: nbytes + */ + SM4_PREPARE(x0) + + sub w5, w4, #16 + uxtw x5, w5 + + ld1 {RIV.16b}, [x3] + + ld1 {v0.16b}, [x2] + eor RIV.16b, RIV.16b, v0.16b + SM4_CRYPT_BLK(RIV) + + /* load permute table */ + adr_l x6, .Lcts_permute_table + add x7, x6, #32 + add x6, x6, x5 + sub x7, x7, x5 + ld1 {v3.16b}, [x6] + ld1 {v4.16b}, [x7] + + /* overlapping loads */ + add x2, x2, x5 + ld1 {v1.16b}, [x2] + + /* create Cn from En-1 */ + tbl v0.16b, {RIV.16b}, v3.16b + /* padding Pn with zeros */ + tbl v1.16b, {v1.16b}, v4.16b + + eor v1.16b, v1.16b, RIV.16b + SM4_CRYPT_BLK(v1) + + /* overlapping stores */ + add x5, x1, x5 + st1 {v0.16b}, [x5] + st1 {v1.16b}, [x1] + + ret 
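sm4_ce_cbc_cts_enc above is CBC with ciphertext stealing as used by the kernel's "cts" template: the partial final block Pn is zero-padded, and the last two ciphertext blocks come out swapped, with Cn truncated to the partial length. Setting aside the permute-table tricks and overlapping loads/stores, the scalar logic is roughly the sketch below; cbc_cts_enc_tail and the enc callback are illustrative, the crypto_xor helpers are from <crypto/algapi.h>:

/* Illustrative sketch, not from this patch. */
static void cbc_cts_enc_tail(u8 *dst, const u8 *src, const u8 *iv,
			     unsigned int nbytes, void (*enc)(u8 *))
{
	unsigned int tail = nbytes - 16;	/* 16 < nbytes <= 32 */
	u8 en1[16], pn[16];

	crypto_xor_cpy(en1, src, iv, 16);	/* E(n-1) = Enc(P(n-1) ^ IV) */
	enc(en1);

	memset(pn, 0, 16);			/* Pn, zero-padded */
	memcpy(pn, src + 16, tail);

	memcpy(dst + 16, en1, tail);		/* Cn = head of E(n-1) */
	crypto_xor(pn, en1, 16);
	enc(pn);				/* C(n-1) = Enc(Pn ^ E(n-1)) */
	memcpy(dst, pn, 16);
}
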
+SYM_FUNC_END(sm4_ce_cbc_cts_enc) + +.align 3 +SYM_FUNC_START(sm4_ce_cbc_cts_dec) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: iv (big endian, 128 bit) + * w4: nbytes + */ + SM4_PREPARE(x0) + + sub w5, w4, #16 + uxtw x5, w5 + + ld1 {RIV.16b}, [x3] + + /* load permute table */ + adr_l x6, .Lcts_permute_table + add x7, x6, #32 + add x6, x6, x5 + sub x7, x7, x5 + ld1 {v3.16b}, [x6] + ld1 {v4.16b}, [x7] + + /* overlapping loads */ + ld1 {v0.16b}, [x2], x5 + ld1 {v1.16b}, [x2] + + SM4_CRYPT_BLK(v0) + /* select the first Ln bytes of Xn to create Pn */ + tbl v2.16b, {v0.16b}, v3.16b + eor v2.16b, v2.16b, v1.16b + + /* overwrite the first Ln bytes with Cn to create En-1 */ + tbx v0.16b, {v1.16b}, v4.16b + SM4_CRYPT_BLK(v0) + eor v0.16b, v0.16b, RIV.16b + + /* overlapping stores */ + add x5, x1, x5 + st1 {v2.16b}, [x5] + st1 {v0.16b}, [x1] + + ret +SYM_FUNC_END(sm4_ce_cbc_cts_dec) + .align 3 SYM_FUNC_START(sm4_ce_cfb_enc) /* input: @@ -452,25 +411,57 @@ SYM_FUNC_START(sm4_ce_cfb_enc) * x3: iv (big endian, 128 bit) * w4: nblocks */ - PREPARE; + SM4_PREPARE(x0) + + ld1 {RIV.16b}, [x3] + +.Lcfb_enc_loop_4x: + cmp w4, #4 + blt .Lcfb_enc_loop_1x + + sub w4, w4, #4 + + ld1 {v0.16b-v3.16b}, [x2], #64 + + rev32 v8.16b, RIV.16b + SM4_CRYPT_BLK_BE(v8) + eor v0.16b, v0.16b, v8.16b + + rev32 v8.16b, v0.16b + SM4_CRYPT_BLK_BE(v8) + eor v1.16b, v1.16b, v8.16b + + rev32 v8.16b, v1.16b + SM4_CRYPT_BLK_BE(v8) + eor v2.16b, v2.16b, v8.16b + + rev32 v8.16b, v2.16b + SM4_CRYPT_BLK_BE(v8) + eor v3.16b, v3.16b, v8.16b + + st1 {v0.16b-v3.16b}, [x1], #64 + mov RIV.16b, v3.16b + + cbz w4, .Lcfb_enc_end + b .Lcfb_enc_loop_4x - ld1 {RIV.16b}, [x3]; +.Lcfb_enc_loop_1x: + sub w4, w4, #1 -.Lcfb_enc_loop: - sub w4, w4, #1; + ld1 {v0.16b}, [x2], #16 - SM4_CRYPT_BLK(RIV); + SM4_CRYPT_BLK(RIV) + eor RIV.16b, RIV.16b, v0.16b - ld1 {RTMP0.16b}, [x2], #16; - eor RIV.16b, RIV.16b, RTMP0.16b; - st1 {RIV.16b}, [x1], #16; + st1 {RIV.16b}, [x1], #16 - cbnz w4, .Lcfb_enc_loop; + cbnz w4, .Lcfb_enc_loop_1x +.Lcfb_enc_end: /* store new IV */ - st1 {RIV.16b}, [x3]; + st1 {RIV.16b}, [x3] - ret; + ret SYM_FUNC_END(sm4_ce_cfb_enc) .align 3 @@ -482,79 +473,91 @@ SYM_FUNC_START(sm4_ce_cfb_dec) * x3: iv (big endian, 128 bit) * w4: nblocks */ - PREPARE; + SM4_PREPARE(x0) - ld1 {v0.16b}, [x3]; + ld1 {RIV.16b}, [x3] -.Lcfb_loop_blk: - sub w4, w4, #8; - tbnz w4, #31, .Lcfb_tail8; +.Lcfb_dec_loop_8x: + sub w4, w4, #8 + tbnz w4, #31, .Lcfb_dec_4x - ld1 {v1.16b, v2.16b, v3.16b}, [x2], #48; - ld1 {v4.16b-v7.16b}, [x2]; + ld1 {v0.16b-v3.16b}, [x2], #64 + ld1 {v4.16b-v7.16b}, [x2], #64 - SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + rev32 v8.16b, RIV.16b + rev32 v9.16b, v0.16b + rev32 v10.16b, v1.16b + rev32 v11.16b, v2.16b + rev32 v12.16b, v3.16b + rev32 v13.16b, v4.16b + rev32 v14.16b, v5.16b + rev32 v15.16b, v6.16b - sub x2, x2, #48; - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v0.16b, v0.16b, RTMP0.16b; - eor v1.16b, v1.16b, RTMP1.16b; - eor v2.16b, v2.16b, RTMP2.16b; - eor v3.16b, v3.16b, RTMP3.16b; - st1 {v0.16b-v3.16b}, [x1], #64; + SM4_CRYPT_BLK8_BE(v8, v9, v10, v11, v12, v13, v14, v15) - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v4.16b, v4.16b, RTMP0.16b; - eor v5.16b, v5.16b, RTMP1.16b; - eor v6.16b, v6.16b, RTMP2.16b; - eor v7.16b, v7.16b, RTMP3.16b; - st1 {v4.16b-v7.16b}, [x1], #64; + mov RIV.16b, v7.16b - mov v0.16b, RTMP3.16b; + eor v0.16b, v0.16b, v8.16b + eor v1.16b, v1.16b, v9.16b + eor v2.16b, v2.16b, v10.16b + eor v3.16b, v3.16b, v11.16b + eor v4.16b, v4.16b, v12.16b + eor v5.16b, v5.16b, v13.16b + eor 
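Note the asymmetry in the CFB rewrite here: encryption must chain, since each keystream block is the encryption of the ciphertext block just produced, so its unrolled loop only amortizes loads, whereas decryption has every ciphertext block up front and can feed eight keystream computations to SM4_CRYPT_BLK8_BE at once. Scalar form of the decrypt path, with cfb_dec_scalar and enc illustrative names:

/* Illustrative sketch, not from this patch. */
static void cfb_dec_scalar(u8 *dst, const u8 *src, u8 *iv, int nblocks,
			   void (*enc)(u8 *))
{
	u8 ks[16];

	while (nblocks--) {
		memcpy(ks, iv, 16);
		enc(ks);			/* keystream = Enc(C[i-1]) */
		memcpy(iv, src, 16);		/* chain on ciphertext only */
		crypto_xor_cpy(dst, src, ks, 16);
		src += 16;
		dst += 16;
	}
}
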
v6.16b, v6.16b, v14.16b + eor v7.16b, v7.16b, v15.16b - cbz w4, .Lcfb_end; - b .Lcfb_loop_blk; + st1 {v0.16b-v3.16b}, [x1], #64 + st1 {v4.16b-v7.16b}, [x1], #64 -.Lcfb_tail8: - add w4, w4, #8; - cmp w4, #4; - blt .Lcfb_tail4; + cbz w4, .Lcfb_dec_end + b .Lcfb_dec_loop_8x - sub w4, w4, #4; +.Lcfb_dec_4x: + add w4, w4, #8 + cmp w4, #4 + blt .Lcfb_dec_loop_1x - ld1 {v1.16b, v2.16b, v3.16b}, [x2]; + sub w4, w4, #4 - SM4_CRYPT_BLK4(v0, v1, v2, v3); + ld1 {v0.16b-v3.16b}, [x2], #64 - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v0.16b, v0.16b, RTMP0.16b; - eor v1.16b, v1.16b, RTMP1.16b; - eor v2.16b, v2.16b, RTMP2.16b; - eor v3.16b, v3.16b, RTMP3.16b; - st1 {v0.16b-v3.16b}, [x1], #64; + rev32 v8.16b, RIV.16b + rev32 v9.16b, v0.16b + rev32 v10.16b, v1.16b + rev32 v11.16b, v2.16b - mov v0.16b, RTMP3.16b; + SM4_CRYPT_BLK4_BE(v8, v9, v10, v11) - cbz w4, .Lcfb_end; + mov RIV.16b, v3.16b -.Lcfb_tail4: - sub w4, w4, #1; + eor v0.16b, v0.16b, v8.16b + eor v1.16b, v1.16b, v9.16b + eor v2.16b, v2.16b, v10.16b + eor v3.16b, v3.16b, v11.16b - SM4_CRYPT_BLK(v0); + st1 {v0.16b-v3.16b}, [x1], #64 - ld1 {RTMP0.16b}, [x2], #16; - eor v0.16b, v0.16b, RTMP0.16b; - st1 {v0.16b}, [x1], #16; + cbz w4, .Lcfb_dec_end + +.Lcfb_dec_loop_1x: + sub w4, w4, #1 + + ld1 {v0.16b}, [x2], #16 + + SM4_CRYPT_BLK(RIV) - mov v0.16b, RTMP0.16b; + eor RIV.16b, RIV.16b, v0.16b + st1 {RIV.16b}, [x1], #16 - cbnz w4, .Lcfb_tail4; + mov RIV.16b, v0.16b -.Lcfb_end: + cbnz w4, .Lcfb_dec_loop_1x + +.Lcfb_dec_end: /* store new IV */ - st1 {v0.16b}, [x3]; + st1 {RIV.16b}, [x3] - ret; + ret SYM_FUNC_END(sm4_ce_cfb_dec) .align 3 @@ -566,95 +569,525 @@ SYM_FUNC_START(sm4_ce_ctr_enc) * x3: ctr (big endian, 128 bit) * w4: nblocks */ - PREPARE; + SM4_PREPARE(x0) - ldp x7, x8, [x3]; - rev x7, x7; - rev x8, x8; + ldp x7, x8, [x3] + rev x7, x7 + rev x8, x8 -.Lctr_loop_blk: - sub w4, w4, #8; - tbnz w4, #31, .Lctr_tail8; +.Lctr_loop_8x: + sub w4, w4, #8 + tbnz w4, #31, .Lctr_4x -#define inc_le128(vctr) \ - mov vctr.d[1], x8; \ - mov vctr.d[0], x7; \ - adds x8, x8, #1; \ - adc x7, x7, xzr; \ - rev64 vctr.16b, vctr.16b; +#define inc_le128(vctr) \ + mov vctr.d[1], x8; \ + mov vctr.d[0], x7; \ + adds x8, x8, #1; \ + rev64 vctr.16b, vctr.16b; \ + adc x7, x7, xzr; /* construct CTRs */ - inc_le128(v0); /* +0 */ - inc_le128(v1); /* +1 */ - inc_le128(v2); /* +2 */ - inc_le128(v3); /* +3 */ - inc_le128(v4); /* +4 */ - inc_le128(v5); /* +5 */ - inc_le128(v6); /* +6 */ - inc_le128(v7); /* +7 */ + inc_le128(v0) /* +0 */ + inc_le128(v1) /* +1 */ + inc_le128(v2) /* +2 */ + inc_le128(v3) /* +3 */ + inc_le128(v4) /* +4 */ + inc_le128(v5) /* +5 */ + inc_le128(v6) /* +6 */ + inc_le128(v7) /* +7 */ + + ld1 {v8.16b-v11.16b}, [x2], #64 + ld1 {v12.16b-v15.16b}, [x2], #64 + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7) + + eor v0.16b, v0.16b, v8.16b + eor v1.16b, v1.16b, v9.16b + eor v2.16b, v2.16b, v10.16b + eor v3.16b, v3.16b, v11.16b + eor v4.16b, v4.16b, v12.16b + eor v5.16b, v5.16b, v13.16b + eor v6.16b, v6.16b, v14.16b + eor v7.16b, v7.16b, v15.16b + + st1 {v0.16b-v3.16b}, [x1], #64 + st1 {v4.16b-v7.16b}, [x1], #64 + + cbz w4, .Lctr_end + b .Lctr_loop_8x + +.Lctr_4x: + add w4, w4, #8 + cmp w4, #4 + blt .Lctr_loop_1x + + sub w4, w4, #4 - SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + /* construct CTRs */ + inc_le128(v0) /* +0 */ + inc_le128(v1) /* +1 */ + inc_le128(v2) /* +2 */ + inc_le128(v3) /* +3 */ - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v0.16b, v0.16b, RTMP0.16b; - eor v1.16b, v1.16b, RTMP1.16b; - eor v2.16b, v2.16b, RTMP2.16b; - eor v3.16b, v3.16b, 
RTMP3.16b; - st1 {v0.16b-v3.16b}, [x1], #64; + ld1 {v8.16b-v11.16b}, [x2], #64 - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v4.16b, v4.16b, RTMP0.16b; - eor v5.16b, v5.16b, RTMP1.16b; - eor v6.16b, v6.16b, RTMP2.16b; - eor v7.16b, v7.16b, RTMP3.16b; - st1 {v4.16b-v7.16b}, [x1], #64; + SM4_CRYPT_BLK4(v0, v1, v2, v3) + + eor v0.16b, v0.16b, v8.16b + eor v1.16b, v1.16b, v9.16b + eor v2.16b, v2.16b, v10.16b + eor v3.16b, v3.16b, v11.16b - cbz w4, .Lctr_end; - b .Lctr_loop_blk; + st1 {v0.16b-v3.16b}, [x1], #64 -.Lctr_tail8: - add w4, w4, #8; - cmp w4, #4; - blt .Lctr_tail4; + cbz w4, .Lctr_end - sub w4, w4, #4; +.Lctr_loop_1x: + sub w4, w4, #1 /* construct CTRs */ - inc_le128(v0); /* +0 */ - inc_le128(v1); /* +1 */ - inc_le128(v2); /* +2 */ - inc_le128(v3); /* +3 */ + inc_le128(v0) - SM4_CRYPT_BLK4(v0, v1, v2, v3); + ld1 {v8.16b}, [x2], #16 - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v0.16b, v0.16b, RTMP0.16b; - eor v1.16b, v1.16b, RTMP1.16b; - eor v2.16b, v2.16b, RTMP2.16b; - eor v3.16b, v3.16b, RTMP3.16b; - st1 {v0.16b-v3.16b}, [x1], #64; + SM4_CRYPT_BLK(v0) - cbz w4, .Lctr_end; + eor v0.16b, v0.16b, v8.16b + st1 {v0.16b}, [x1], #16 -.Lctr_tail4: - sub w4, w4, #1; + cbnz w4, .Lctr_loop_1x - /* construct CTRs */ - inc_le128(v0); +.Lctr_end: + /* store new CTR */ + rev x7, x7 + rev x8, x8 + stp x7, x8, [x3] - SM4_CRYPT_BLK(v0); + ret +SYM_FUNC_END(sm4_ce_ctr_enc) - ld1 {RTMP0.16b}, [x2], #16; - eor v0.16b, v0.16b, RTMP0.16b; - st1 {v0.16b}, [x1], #16; - cbnz w4, .Lctr_tail4; +#define tweak_next(vt, vin, RTMP) \ + sshr RTMP.2d, vin.2d, #63; \ + and RTMP.16b, RTMP.16b, RMASK.16b; \ + add vt.2d, vin.2d, vin.2d; \ + ext RTMP.16b, RTMP.16b, RTMP.16b, #8; \ + eor vt.16b, vt.16b, RTMP.16b; -.Lctr_end: - /* store new CTR */ - rev x7, x7; - rev x8, x8; - stp x7, x8, [x3]; +.align 3 +SYM_FUNC_START(sm4_ce_xts_enc) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: tweak (big endian, 128 bit) + * w4: nbytes + * x5: round key array for IV + */ + ld1 {v8.16b}, [x3] - ret; -SYM_FUNC_END(sm4_ce_ctr_enc) + cbz x5, .Lxts_enc_nofirst + + SM4_PREPARE(x5) + + /* Generate first tweak */ + SM4_CRYPT_BLK(v8) + +.Lxts_enc_nofirst: + SM4_PREPARE(x0) + + ands w5, w4, #15 + lsr w4, w4, #4 + sub w6, w4, #1 + csel w4, w4, w6, eq + uxtw x5, w5 + + movi RMASK.2s, #0x1 + movi RTMP0.2s, #0x87 + uzp1 RMASK.4s, RMASK.4s, RTMP0.4s + + cbz w4, .Lxts_enc_cts + +.Lxts_enc_loop_8x: + sub w4, w4, #8 + tbnz w4, #31, .Lxts_enc_4x + + tweak_next( v9, v8, RTMP0) + tweak_next(v10, v9, RTMP1) + tweak_next(v11, v10, RTMP2) + tweak_next(v12, v11, RTMP3) + tweak_next(v13, v12, RTMP0) + tweak_next(v14, v13, RTMP1) + tweak_next(v15, v14, RTMP2) + + ld1 {v0.16b-v3.16b}, [x2], #64 + ld1 {v4.16b-v7.16b}, [x2], #64 + eor v0.16b, v0.16b, v8.16b + eor v1.16b, v1.16b, v9.16b + eor v2.16b, v2.16b, v10.16b + eor v3.16b, v3.16b, v11.16b + eor v4.16b, v4.16b, v12.16b + eor v5.16b, v5.16b, v13.16b + eor v6.16b, v6.16b, v14.16b + eor v7.16b, v7.16b, v15.16b + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7) + + eor v0.16b, v0.16b, v8.16b + eor v1.16b, v1.16b, v9.16b + eor v2.16b, v2.16b, v10.16b + eor v3.16b, v3.16b, v11.16b + eor v4.16b, v4.16b, v12.16b + eor v5.16b, v5.16b, v13.16b + eor v6.16b, v6.16b, v14.16b + eor v7.16b, v7.16b, v15.16b + st1 {v0.16b-v3.16b}, [x1], #64 + st1 {v4.16b-v7.16b}, [x1], #64 + + tweak_next(v8, v15, RTMP3) + + cbz w4, .Lxts_enc_cts + b .Lxts_enc_loop_8x + +.Lxts_enc_4x: + add w4, w4, #8 + cmp w4, #4 + blt .Lxts_enc_loop_1x + + sub w4, w4, #4 + + tweak_next( v9, v8, RTMP0) + tweak_next(v10, v9, RTMP1) + 
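/*
 * Editor's note (illustration, not part of the original patch): each
 * tweak_next() in this chain multiplies the 128-bit tweak by x in
 * GF(2^128), reducing by the XTS polynomial x^128 + x^7 + x^2 + x + 1
 * (hence the 0x87 constant folded into RMASK). Over the register's
 * little-endian lane order this is the familiar doubling step, sketched
 * in C with a hypothetical helper name:
 *
 *	static void xts_next_tweak(u8 t[16])
 *	{
 *		u64 lo = get_unaligned_le64(t);
 *		u64 hi = get_unaligned_le64(t + 8);
 *		u64 carry = hi >> 63;
 *
 *		hi = (hi << 1) | (lo >> 63);
 *		lo = (lo << 1) ^ (carry ? 0x87 : 0);
 *		put_unaligned_le64(lo, t);
 *		put_unaligned_le64(hi, t + 8);
 *	}
 */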
tweak_next(v11, v10, RTMP2) + + ld1 {v0.16b-v3.16b}, [x2], #64 + eor v0.16b, v0.16b, v8.16b + eor v1.16b, v1.16b, v9.16b + eor v2.16b, v2.16b, v10.16b + eor v3.16b, v3.16b, v11.16b + + SM4_CRYPT_BLK4(v0, v1, v2, v3) + + eor v0.16b, v0.16b, v8.16b + eor v1.16b, v1.16b, v9.16b + eor v2.16b, v2.16b, v10.16b + eor v3.16b, v3.16b, v11.16b + st1 {v0.16b-v3.16b}, [x1], #64 + + tweak_next(v8, v11, RTMP3) + + cbz w4, .Lxts_enc_cts + +.Lxts_enc_loop_1x: + sub w4, w4, #1 + + ld1 {v0.16b}, [x2], #16 + eor v0.16b, v0.16b, v8.16b + + SM4_CRYPT_BLK(v0) + + eor v0.16b, v0.16b, v8.16b + st1 {v0.16b}, [x1], #16 + + tweak_next(v8, v8, RTMP0) + + cbnz w4, .Lxts_enc_loop_1x + +.Lxts_enc_cts: + cbz x5, .Lxts_enc_end + + /* cipher text stealing */ + + tweak_next(v9, v8, RTMP0) + ld1 {v0.16b}, [x2] + eor v0.16b, v0.16b, v8.16b + SM4_CRYPT_BLK(v0) + eor v0.16b, v0.16b, v8.16b + + /* load permute table */ + adr_l x6, .Lcts_permute_table + add x7, x6, #32 + add x6, x6, x5 + sub x7, x7, x5 + ld1 {v3.16b}, [x6] + ld1 {v4.16b}, [x7] + + /* overlapping loads */ + add x2, x2, x5 + ld1 {v1.16b}, [x2] + + /* create Cn from En-1 */ + tbl v2.16b, {v0.16b}, v3.16b + /* padding Pn with En-1 at the end */ + tbx v0.16b, {v1.16b}, v4.16b + + eor v0.16b, v0.16b, v9.16b + SM4_CRYPT_BLK(v0) + eor v0.16b, v0.16b, v9.16b + + + /* overlapping stores */ + add x5, x1, x5 + st1 {v2.16b}, [x5] + st1 {v0.16b}, [x1] + + b .Lxts_enc_ret + +.Lxts_enc_end: + /* store new tweak */ + st1 {v8.16b}, [x3] + +.Lxts_enc_ret: + ret +SYM_FUNC_END(sm4_ce_xts_enc) + +.align 3 +SYM_FUNC_START(sm4_ce_xts_dec) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: tweak (big endian, 128 bit) + * w4: nbytes + * x5: round key array for IV + */ + ld1 {v8.16b}, [x3] + + cbz x5, .Lxts_dec_nofirst + + SM4_PREPARE(x5) + + /* Generate first tweak */ + SM4_CRYPT_BLK(v8) + +.Lxts_dec_nofirst: + SM4_PREPARE(x0) + + ands w5, w4, #15 + lsr w4, w4, #4 + sub w6, w4, #1 + csel w4, w4, w6, eq + uxtw x5, w5 + + movi RMASK.2s, #0x1 + movi RTMP0.2s, #0x87 + uzp1 RMASK.4s, RMASK.4s, RTMP0.4s + + cbz w4, .Lxts_dec_cts + +.Lxts_dec_loop_8x: + sub w4, w4, #8 + tbnz w4, #31, .Lxts_dec_4x + + tweak_next( v9, v8, RTMP0) + tweak_next(v10, v9, RTMP1) + tweak_next(v11, v10, RTMP2) + tweak_next(v12, v11, RTMP3) + tweak_next(v13, v12, RTMP0) + tweak_next(v14, v13, RTMP1) + tweak_next(v15, v14, RTMP2) + + ld1 {v0.16b-v3.16b}, [x2], #64 + ld1 {v4.16b-v7.16b}, [x2], #64 + eor v0.16b, v0.16b, v8.16b + eor v1.16b, v1.16b, v9.16b + eor v2.16b, v2.16b, v10.16b + eor v3.16b, v3.16b, v11.16b + eor v4.16b, v4.16b, v12.16b + eor v5.16b, v5.16b, v13.16b + eor v6.16b, v6.16b, v14.16b + eor v7.16b, v7.16b, v15.16b + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7) + + eor v0.16b, v0.16b, v8.16b + eor v1.16b, v1.16b, v9.16b + eor v2.16b, v2.16b, v10.16b + eor v3.16b, v3.16b, v11.16b + eor v4.16b, v4.16b, v12.16b + eor v5.16b, v5.16b, v13.16b + eor v6.16b, v6.16b, v14.16b + eor v7.16b, v7.16b, v15.16b + st1 {v0.16b-v3.16b}, [x1], #64 + st1 {v4.16b-v7.16b}, [x1], #64 + + tweak_next(v8, v15, RTMP3) + + cbz w4, .Lxts_dec_cts + b .Lxts_dec_loop_8x + +.Lxts_dec_4x: + add w4, w4, #8 + cmp w4, #4 + blt .Lxts_dec_loop_1x + + sub w4, w4, #4 + + tweak_next( v9, v8, RTMP0) + tweak_next(v10, v9, RTMP1) + tweak_next(v11, v10, RTMP2) + + ld1 {v0.16b-v3.16b}, [x2], #64 + eor v0.16b, v0.16b, v8.16b + eor v1.16b, v1.16b, v9.16b + eor v2.16b, v2.16b, v10.16b + eor v3.16b, v3.16b, v11.16b + + SM4_CRYPT_BLK4(v0, v1, v2, v3) + + eor v0.16b, v0.16b, v8.16b + eor v1.16b, v1.16b, v9.16b + eor v2.16b, v2.16b, 
v10.16b + eor v3.16b, v3.16b, v11.16b + st1 {v0.16b-v3.16b}, [x1], #64 + + tweak_next(v8, v11, RTMP3) + + cbz w4, .Lxts_dec_cts + +.Lxts_dec_loop_1x: + sub w4, w4, #1 + + ld1 {v0.16b}, [x2], #16 + eor v0.16b, v0.16b, v8.16b + + SM4_CRYPT_BLK(v0) + + eor v0.16b, v0.16b, v8.16b + st1 {v0.16b}, [x1], #16 + + tweak_next(v8, v8, RTMP0) + + cbnz w4, .Lxts_dec_loop_1x + +.Lxts_dec_cts: + cbz x5, .Lxts_dec_end + + /* cipher text stealing */ + + tweak_next(v9, v8, RTMP0) + ld1 {v0.16b}, [x2] + eor v0.16b, v0.16b, v9.16b + SM4_CRYPT_BLK(v0) + eor v0.16b, v0.16b, v9.16b + + /* load permute table */ + adr_l x6, .Lcts_permute_table + add x7, x6, #32 + add x6, x6, x5 + sub x7, x7, x5 + ld1 {v3.16b}, [x6] + ld1 {v4.16b}, [x7] + + /* overlapping loads */ + add x2, x2, x5 + ld1 {v1.16b}, [x2] + + /* create Cn from En-1 */ + tbl v2.16b, {v0.16b}, v3.16b + /* padding Pn with En-1 at the end */ + tbx v0.16b, {v1.16b}, v4.16b + + eor v0.16b, v0.16b, v8.16b + SM4_CRYPT_BLK(v0) + eor v0.16b, v0.16b, v8.16b + + + /* overlapping stores */ + add x5, x1, x5 + st1 {v2.16b}, [x5] + st1 {v0.16b}, [x1] + + b .Lxts_dec_ret + +.Lxts_dec_end: + /* store new tweak */ + st1 {v8.16b}, [x3] + +.Lxts_dec_ret: + ret +SYM_FUNC_END(sm4_ce_xts_dec) + +.align 3 +SYM_FUNC_START(sm4_ce_mac_update) + /* input: + * x0: round key array, CTX + * x1: digest + * x2: src + * w3: nblocks + * w4: enc_before + * w5: enc_after + */ + SM4_PREPARE(x0) + + ld1 {RMAC.16b}, [x1] + + cbz w4, .Lmac_update + + SM4_CRYPT_BLK(RMAC) + +.Lmac_update: + cbz w3, .Lmac_ret + + sub w6, w3, #1 + cmp w5, wzr + csel w3, w3, w6, ne + + cbz w3, .Lmac_end + +.Lmac_loop_4x: + cmp w3, #4 + blt .Lmac_loop_1x + + sub w3, w3, #4 + + ld1 {v0.16b-v3.16b}, [x2], #64 + + eor RMAC.16b, RMAC.16b, v0.16b + SM4_CRYPT_BLK(RMAC) + eor RMAC.16b, RMAC.16b, v1.16b + SM4_CRYPT_BLK(RMAC) + eor RMAC.16b, RMAC.16b, v2.16b + SM4_CRYPT_BLK(RMAC) + eor RMAC.16b, RMAC.16b, v3.16b + SM4_CRYPT_BLK(RMAC) + + cbz w3, .Lmac_end + b .Lmac_loop_4x + +.Lmac_loop_1x: + sub w3, w3, #1 + + ld1 {v0.16b}, [x2], #16 + + eor RMAC.16b, RMAC.16b, v0.16b + SM4_CRYPT_BLK(RMAC) + + cbnz w3, .Lmac_loop_1x + + +.Lmac_end: + cbnz w5, .Lmac_ret + + ld1 {v0.16b}, [x2], #16 + eor RMAC.16b, RMAC.16b, v0.16b + +.Lmac_ret: + st1 {RMAC.16b}, [x1] + ret +SYM_FUNC_END(sm4_ce_mac_update) + + + .section ".rodata", "a" + .align 4 +.Lbswap128_mask: + .byte 0x0c, 0x0d, 0x0e, 0x0f, 0x08, 0x09, 0x0a, 0x0b + .byte 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03 + +.Lcts_permute_table: + .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 + .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf + .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff diff --git a/arch/arm64/crypto/sm4-ce-gcm-core.S b/arch/arm64/crypto/sm4-ce-gcm-core.S new file mode 100644 index 0000000000000000000000000000000000000000..7aa3ec18a28912bc13d74c6b187b55a5b2d2cf2e --- /dev/null +++ b/arch/arm64/crypto/sm4-ce-gcm-core.S @@ -0,0 +1,741 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4-GCM AEAD Algorithm using ARMv8 Crypto Extensions + * as specified in rfc8998 + * https://datatracker.ietf.org/doc/html/rfc8998 + * + * Copyright (C) 2016 Jussi Kivilinna + * Copyright (C) 2022 Tianjia Zhang + */ + +#include +#include +#include "sm4-ce-asm.h" + +.arch armv8-a+crypto + +.irp b, 0, 1, 2, 3, 24, 25, 26, 27, 28, 29, 30, 31 + .set .Lv\b\().4s, \b +.endr + +.macro sm4e, vd, vn + .inst 0xcec08400 | (.L\vn << 
5) | .L\vd +.endm + +/* Register macros */ + +/* Used for both encryption and decryption */ +#define RHASH v21 +#define RRCONST v22 +#define RZERO v23 + +/* Helper macros. */ + +/* + * input: m0, m1 + * output: r0:r1 (low 128-bits in r0, high in r1) + */ +#define PMUL_128x128(r0, r1, m0, m1, T0, T1) \ + ext T0.16b, m1.16b, m1.16b, #8; \ + pmull r0.1q, m0.1d, m1.1d; \ + pmull T1.1q, m0.1d, T0.1d; \ + pmull2 T0.1q, m0.2d, T0.2d; \ + pmull2 r1.1q, m0.2d, m1.2d; \ + eor T0.16b, T0.16b, T1.16b; \ + ext T1.16b, RZERO.16b, T0.16b, #8; \ + ext T0.16b, T0.16b, RZERO.16b, #8; \ + eor r0.16b, r0.16b, T1.16b; \ + eor r1.16b, r1.16b, T0.16b; + +#define PMUL_128x128_4x(r0, r1, m0, m1, T0, T1, \ + r2, r3, m2, m3, T2, T3, \ + r4, r5, m4, m5, T4, T5, \ + r6, r7, m6, m7, T6, T7) \ + ext T0.16b, m1.16b, m1.16b, #8; \ + ext T2.16b, m3.16b, m3.16b, #8; \ + ext T4.16b, m5.16b, m5.16b, #8; \ + ext T6.16b, m7.16b, m7.16b, #8; \ + pmull r0.1q, m0.1d, m1.1d; \ + pmull r2.1q, m2.1d, m3.1d; \ + pmull r4.1q, m4.1d, m5.1d; \ + pmull r6.1q, m6.1d, m7.1d; \ + pmull T1.1q, m0.1d, T0.1d; \ + pmull T3.1q, m2.1d, T2.1d; \ + pmull T5.1q, m4.1d, T4.1d; \ + pmull T7.1q, m6.1d, T6.1d; \ + pmull2 T0.1q, m0.2d, T0.2d; \ + pmull2 T2.1q, m2.2d, T2.2d; \ + pmull2 T4.1q, m4.2d, T4.2d; \ + pmull2 T6.1q, m6.2d, T6.2d; \ + pmull2 r1.1q, m0.2d, m1.2d; \ + pmull2 r3.1q, m2.2d, m3.2d; \ + pmull2 r5.1q, m4.2d, m5.2d; \ + pmull2 r7.1q, m6.2d, m7.2d; \ + eor T0.16b, T0.16b, T1.16b; \ + eor T2.16b, T2.16b, T3.16b; \ + eor T4.16b, T4.16b, T5.16b; \ + eor T6.16b, T6.16b, T7.16b; \ + ext T1.16b, RZERO.16b, T0.16b, #8; \ + ext T3.16b, RZERO.16b, T2.16b, #8; \ + ext T5.16b, RZERO.16b, T4.16b, #8; \ + ext T7.16b, RZERO.16b, T6.16b, #8; \ + ext T0.16b, T0.16b, RZERO.16b, #8; \ + ext T2.16b, T2.16b, RZERO.16b, #8; \ + ext T4.16b, T4.16b, RZERO.16b, #8; \ + ext T6.16b, T6.16b, RZERO.16b, #8; \ + eor r0.16b, r0.16b, T1.16b; \ + eor r2.16b, r2.16b, T3.16b; \ + eor r4.16b, r4.16b, T5.16b; \ + eor r6.16b, r6.16b, T7.16b; \ + eor r1.16b, r1.16b, T0.16b; \ + eor r3.16b, r3.16b, T2.16b; \ + eor r5.16b, r5.16b, T4.16b; \ + eor r7.16b, r7.16b, T6.16b; + +/* + * input: r0:r1 (low 128-bits in r0, high in r1) + * output: a + */ +#define REDUCTION(a, r0, r1, rconst, T0, T1) \ + pmull2 T0.1q, r1.2d, rconst.2d; \ + ext T1.16b, T0.16b, RZERO.16b, #8; \ + ext T0.16b, RZERO.16b, T0.16b, #8; \ + eor r1.16b, r1.16b, T1.16b; \ + eor r0.16b, r0.16b, T0.16b; \ + pmull T0.1q, r1.1d, rconst.1d; \ + eor a.16b, r0.16b, T0.16b; + +#define SM4_CRYPT_PMUL_128x128_BLK(b0, r0, r1, m0, m1, T0, T1) \ + rev32 b0.16b, b0.16b; \ + ext T0.16b, m1.16b, m1.16b, #8; \ + sm4e b0.4s, v24.4s; \ + pmull r0.1q, m0.1d, m1.1d; \ + sm4e b0.4s, v25.4s; \ + pmull T1.1q, m0.1d, T0.1d; \ + sm4e b0.4s, v26.4s; \ + pmull2 T0.1q, m0.2d, T0.2d; \ + sm4e b0.4s, v27.4s; \ + pmull2 r1.1q, m0.2d, m1.2d; \ + sm4e b0.4s, v28.4s; \ + eor T0.16b, T0.16b, T1.16b; \ + sm4e b0.4s, v29.4s; \ + ext T1.16b, RZERO.16b, T0.16b, #8; \ + sm4e b0.4s, v30.4s; \ + ext T0.16b, T0.16b, RZERO.16b, #8; \ + sm4e b0.4s, v31.4s; \ + eor r0.16b, r0.16b, T1.16b; \ + rev64 b0.4s, b0.4s; \ + eor r1.16b, r1.16b, T0.16b; \ + ext b0.16b, b0.16b, b0.16b, #8; \ + rev32 b0.16b, b0.16b; + +#define SM4_CRYPT_PMUL_128x128_BLK3(b0, b1, b2, \ + r0, r1, m0, m1, T0, T1, \ + r2, r3, m2, m3, T2, T3, \ + r4, r5, m4, m5, T4, T5) \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + ext T0.16b, m1.16b, m1.16b, #8; \ + ext T2.16b, m3.16b, m3.16b, #8; \ + ext T4.16b, m5.16b, m5.16b, #8; \ + sm4e b0.4s, v24.4s; \ + sm4e b1.4s, 
v24.4s; \ + sm4e b2.4s, v24.4s; \ + pmull r0.1q, m0.1d, m1.1d; \ + pmull r2.1q, m2.1d, m3.1d; \ + pmull r4.1q, m4.1d, m5.1d; \ + sm4e b0.4s, v25.4s; \ + sm4e b1.4s, v25.4s; \ + sm4e b2.4s, v25.4s; \ + pmull T1.1q, m0.1d, T0.1d; \ + pmull T3.1q, m2.1d, T2.1d; \ + pmull T5.1q, m4.1d, T4.1d; \ + sm4e b0.4s, v26.4s; \ + sm4e b1.4s, v26.4s; \ + sm4e b2.4s, v26.4s; \ + pmull2 T0.1q, m0.2d, T0.2d; \ + pmull2 T2.1q, m2.2d, T2.2d; \ + pmull2 T4.1q, m4.2d, T4.2d; \ + sm4e b0.4s, v27.4s; \ + sm4e b1.4s, v27.4s; \ + sm4e b2.4s, v27.4s; \ + pmull2 r1.1q, m0.2d, m1.2d; \ + pmull2 r3.1q, m2.2d, m3.2d; \ + pmull2 r5.1q, m4.2d, m5.2d; \ + sm4e b0.4s, v28.4s; \ + sm4e b1.4s, v28.4s; \ + sm4e b2.4s, v28.4s; \ + eor T0.16b, T0.16b, T1.16b; \ + eor T2.16b, T2.16b, T3.16b; \ + eor T4.16b, T4.16b, T5.16b; \ + sm4e b0.4s, v29.4s; \ + sm4e b1.4s, v29.4s; \ + sm4e b2.4s, v29.4s; \ + ext T1.16b, RZERO.16b, T0.16b, #8; \ + ext T3.16b, RZERO.16b, T2.16b, #8; \ + ext T5.16b, RZERO.16b, T4.16b, #8; \ + sm4e b0.4s, v30.4s; \ + sm4e b1.4s, v30.4s; \ + sm4e b2.4s, v30.4s; \ + ext T0.16b, T0.16b, RZERO.16b, #8; \ + ext T2.16b, T2.16b, RZERO.16b, #8; \ + ext T4.16b, T4.16b, RZERO.16b, #8; \ + sm4e b0.4s, v31.4s; \ + sm4e b1.4s, v31.4s; \ + sm4e b2.4s, v31.4s; \ + eor r0.16b, r0.16b, T1.16b; \ + eor r2.16b, r2.16b, T3.16b; \ + eor r4.16b, r4.16b, T5.16b; \ + rev64 b0.4s, b0.4s; \ + rev64 b1.4s, b1.4s; \ + rev64 b2.4s, b2.4s; \ + eor r1.16b, r1.16b, T0.16b; \ + eor r3.16b, r3.16b, T2.16b; \ + eor r5.16b, r5.16b, T4.16b; \ + ext b0.16b, b0.16b, b0.16b, #8; \ + ext b1.16b, b1.16b, b1.16b, #8; \ + ext b2.16b, b2.16b, b2.16b, #8; \ + eor r0.16b, r0.16b, r2.16b; \ + eor r1.16b, r1.16b, r3.16b; \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + eor r0.16b, r0.16b, r4.16b; \ + eor r1.16b, r1.16b, r5.16b; + +#define inc32_le128(vctr) \ + mov vctr.d[1], x9; \ + add w6, w9, #1; \ + mov vctr.d[0], x8; \ + bfi x9, x6, #0, #32; \ + rev64 vctr.16b, vctr.16b; + +#define GTAG_HASH_LENGTHS(vctr0, vlen) \ + ld1 {vlen.16b}, [x7]; \ + /* construct CTR0 */ \ + /* the lower 32-bits of initial IV is always be32(1) */ \ + mov x6, #0x1; \ + bfi x9, x6, #0, #32; \ + mov vctr0.d[0], x8; \ + mov vctr0.d[1], x9; \ + rbit vlen.16b, vlen.16b; \ + rev64 vctr0.16b, vctr0.16b; \ + /* authtag = GCTR(CTR0, GHASH) */ \ + eor RHASH.16b, RHASH.16b, vlen.16b; \ + SM4_CRYPT_PMUL_128x128_BLK(vctr0, RR0, RR1, RHASH, RH1, \ + RTMP0, RTMP1); \ + REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP2, RTMP3); \ + rbit RHASH.16b, RHASH.16b; \ + eor RHASH.16b, RHASH.16b, vctr0.16b; + + +/* Register macros for encrypt and ghash */ + +/* can be the same as input v0-v3 */ +#define RR1 v0 +#define RR3 v1 +#define RR5 v2 +#define RR7 v3 + +#define RR0 v4 +#define RR2 v5 +#define RR4 v6 +#define RR6 v7 + +#define RTMP0 v8 +#define RTMP1 v9 +#define RTMP2 v10 +#define RTMP3 v11 +#define RTMP4 v12 +#define RTMP5 v13 +#define RTMP6 v14 +#define RTMP7 v15 + +#define RH1 v16 +#define RH2 v17 +#define RH3 v18 +#define RH4 v19 + +.align 3 +SYM_FUNC_START(sm4_ce_pmull_ghash_setup) + /* input: + * x0: round key array, CTX + * x1: ghash table + */ + SM4_PREPARE(x0) + + adr_l x2, .Lghash_rconst + ld1r {RRCONST.2d}, [x2] + + eor RZERO.16b, RZERO.16b, RZERO.16b + + /* H = E(K, 0^128) */ + rev32 v0.16b, RZERO.16b + SM4_CRYPT_BLK_BE(v0) + + /* H ^ 1 */ + rbit RH1.16b, v0.16b + + /* H ^ 2 */ + PMUL_128x128(RR0, RR1, RH1, RH1, RTMP0, RTMP1) + REDUCTION(RH2, RR0, RR1, RRCONST, RTMP2, RTMP3) + + /* H ^ 3 */ + PMUL_128x128(RR0, RR1, RH2, RH1, RTMP0, RTMP1) + REDUCTION(RH3, RR0, 
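/*
 * Editor's note (illustration, not part of the original patch): GHASH is
 * defined over bit-reflected polynomials, so this setup routine rbit-s H
 * and the per-block routines rbit their inputs; the reflected reduction
 * then needs only the constant 0x87 (from x^128 == x^7 + x^2 + x + 1)
 * loaded from .Lghash_rconst. H itself is E(K, 0^128), and the powers
 * H^2..H^4 being built here let the 4-way loops fold four blocks into a
 * single REDUCTION():
 *
 *	S' = (S ^ B1)*H^4 ^ B2*H^3 ^ B3*H^2 ^ B4*H
 */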
RR1, RRCONST, RTMP2, RTMP3) + + /* H ^ 4 */ + PMUL_128x128(RR0, RR1, RH2, RH2, RTMP0, RTMP1) + REDUCTION(RH4, RR0, RR1, RRCONST, RTMP2, RTMP3) + + st1 {RH1.16b-RH4.16b}, [x1] + + ret +SYM_FUNC_END(sm4_ce_pmull_ghash_setup) + +.align 3 +SYM_FUNC_START(pmull_ghash_update) + /* input: + * x0: ghash table + * x1: ghash result + * x2: src + * w3: nblocks + */ + ld1 {RH1.16b-RH4.16b}, [x0] + + ld1 {RHASH.16b}, [x1] + rbit RHASH.16b, RHASH.16b + + adr_l x4, .Lghash_rconst + ld1r {RRCONST.2d}, [x4] + + eor RZERO.16b, RZERO.16b, RZERO.16b + +.Lghash_loop_4x: + cmp w3, #4 + blt .Lghash_loop_1x + + sub w3, w3, #4 + + ld1 {v0.16b-v3.16b}, [x2], #64 + + rbit v0.16b, v0.16b + rbit v1.16b, v1.16b + rbit v2.16b, v2.16b + rbit v3.16b, v3.16b + + /* + * (in0 ^ HASH) * H^4 => rr0:rr1 + * (in1) * H^3 => rr2:rr3 + * (in2) * H^2 => rr4:rr5 + * (in3) * H^1 => rr6:rr7 + */ + eor RHASH.16b, RHASH.16b, v0.16b + + PMUL_128x128_4x(RR0, RR1, RHASH, RH4, RTMP0, RTMP1, + RR2, RR3, v1, RH3, RTMP2, RTMP3, + RR4, RR5, v2, RH2, RTMP4, RTMP5, + RR6, RR7, v3, RH1, RTMP6, RTMP7) + + eor RR0.16b, RR0.16b, RR2.16b + eor RR1.16b, RR1.16b, RR3.16b + eor RR0.16b, RR0.16b, RR4.16b + eor RR1.16b, RR1.16b, RR5.16b + eor RR0.16b, RR0.16b, RR6.16b + eor RR1.16b, RR1.16b, RR7.16b + + REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP0, RTMP1) + + cbz w3, .Lghash_end + b .Lghash_loop_4x + +.Lghash_loop_1x: + sub w3, w3, #1 + + ld1 {v0.16b}, [x2], #16 + rbit v0.16b, v0.16b + eor RHASH.16b, RHASH.16b, v0.16b + + PMUL_128x128(RR0, RR1, RHASH, RH1, RTMP0, RTMP1) + REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP2, RTMP3) + + cbnz w3, .Lghash_loop_1x + +.Lghash_end: + rbit RHASH.16b, RHASH.16b + st1 {RHASH.2d}, [x1] + + ret +SYM_FUNC_END(pmull_ghash_update) + +.align 3 +SYM_FUNC_START(sm4_ce_pmull_gcm_enc) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: ctr (big endian, 128 bit) + * w4: nbytes + * x5: ghash result + * x6: ghash table + * x7: lengths (only for last block) + */ + SM4_PREPARE(x0) + + ldp x8, x9, [x3] + rev x8, x8 + rev x9, x9 + + ld1 {RH1.16b-RH4.16b}, [x6] + + ld1 {RHASH.16b}, [x5] + rbit RHASH.16b, RHASH.16b + + adr_l x6, .Lghash_rconst + ld1r {RRCONST.2d}, [x6] + + eor RZERO.16b, RZERO.16b, RZERO.16b + + cbz w4, .Lgcm_enc_hash_len + +.Lgcm_enc_loop_4x: + cmp w4, #(4 * 16) + blt .Lgcm_enc_loop_1x + + sub w4, w4, #(4 * 16) + + /* construct CTRs */ + inc32_le128(v0) /* +0 */ + inc32_le128(v1) /* +1 */ + inc32_le128(v2) /* +2 */ + inc32_le128(v3) /* +3 */ + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64 + + SM4_CRYPT_BLK4(v0, v1, v2, v3) + + eor v0.16b, v0.16b, RTMP0.16b + eor v1.16b, v1.16b, RTMP1.16b + eor v2.16b, v2.16b, RTMP2.16b + eor v3.16b, v3.16b, RTMP3.16b + st1 {v0.16b-v3.16b}, [x1], #64 + + /* ghash update */ + + rbit v0.16b, v0.16b + rbit v1.16b, v1.16b + rbit v2.16b, v2.16b + rbit v3.16b, v3.16b + + /* + * (in0 ^ HASH) * H^4 => rr0:rr1 + * (in1) * H^3 => rr2:rr3 + * (in2) * H^2 => rr4:rr5 + * (in3) * H^1 => rr6:rr7 + */ + eor RHASH.16b, RHASH.16b, v0.16b + + PMUL_128x128_4x(RR0, RR1, RHASH, RH4, RTMP0, RTMP1, + RR2, RR3, v1, RH3, RTMP2, RTMP3, + RR4, RR5, v2, RH2, RTMP4, RTMP5, + RR6, RR7, v3, RH1, RTMP6, RTMP7) + + eor RR0.16b, RR0.16b, RR2.16b + eor RR1.16b, RR1.16b, RR3.16b + eor RR0.16b, RR0.16b, RR4.16b + eor RR1.16b, RR1.16b, RR5.16b + eor RR0.16b, RR0.16b, RR6.16b + eor RR1.16b, RR1.16b, RR7.16b + + REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP0, RTMP1) + + cbz w4, .Lgcm_enc_hash_len + b .Lgcm_enc_loop_4x + +.Lgcm_enc_loop_1x: + cmp w4, #16 + blt .Lgcm_enc_tail + + sub w4, w4, #16 + + /* construct CTRs */ + 
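/*
 * Editor's note (illustration, not part of the original patch): GCM uses
 * a 32-bit big-endian block counter, so inc32_le128() increments only
 * the low 32 bits of the counter block and lets them wrap, unlike the
 * full 128-bit inc_le128() used by sm4_ce_ctr_enc. The equivalent C on
 * the in-memory big-endian counter block would be:
 *
 *	u32 c = get_unaligned_be32(ctr + 12);
 *
 *	put_unaligned_be32(c + 1, ctr + 12);
 */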
inc32_le128(v0) + + ld1 {RTMP0.16b}, [x2], #16 + + SM4_CRYPT_BLK(v0) + + eor v0.16b, v0.16b, RTMP0.16b + st1 {v0.16b}, [x1], #16 + + /* ghash update */ + rbit v0.16b, v0.16b + eor RHASH.16b, RHASH.16b, v0.16b + PMUL_128x128(RR0, RR1, RHASH, RH1, RTMP0, RTMP1) + REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP2, RTMP3) + + cbz w4, .Lgcm_enc_hash_len + b .Lgcm_enc_loop_1x + +.Lgcm_enc_tail: + /* construct CTRs */ + inc32_le128(v0) + SM4_CRYPT_BLK(v0) + + /* load permute table */ + adr_l x0, .Lcts_permute_table + add x0, x0, #32 + sub x0, x0, w4, uxtw + ld1 {v3.16b}, [x0] + +.Lgcm_enc_tail_loop: + /* do encrypt */ + ldrb w0, [x2], #1 /* get 1 byte from input */ + umov w6, v0.b[0] /* get top crypted byte */ + eor w6, w6, w0 /* w6 = CTR ^ input */ + strb w6, [x1], #1 /* store out byte */ + + /* shift right out one byte */ + ext v0.16b, v0.16b, v0.16b, #1 + /* the last ciphertext is placed in high bytes */ + ins v0.b[15], w6 + + subs w4, w4, #1 + bne .Lgcm_enc_tail_loop + + /* padding last block with zeros */ + tbl v0.16b, {v0.16b}, v3.16b + + /* ghash update */ + rbit v0.16b, v0.16b + eor RHASH.16b, RHASH.16b, v0.16b + PMUL_128x128(RR0, RR1, RHASH, RH1, RTMP0, RTMP1) + REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP2, RTMP3) + +.Lgcm_enc_hash_len: + cbz x7, .Lgcm_enc_end + + GTAG_HASH_LENGTHS(v1, v3) + + b .Lgcm_enc_ret + +.Lgcm_enc_end: + /* store new CTR */ + rev x8, x8 + rev x9, x9 + stp x8, x9, [x3] + + rbit RHASH.16b, RHASH.16b + +.Lgcm_enc_ret: + /* store new MAC */ + st1 {RHASH.2d}, [x5] + + ret +SYM_FUNC_END(sm4_ce_pmull_gcm_enc) + +#undef RR1 +#undef RR3 +#undef RR5 +#undef RR7 +#undef RR0 +#undef RR2 +#undef RR4 +#undef RR6 +#undef RTMP0 +#undef RTMP1 +#undef RTMP2 +#undef RTMP3 +#undef RTMP4 +#undef RTMP5 +#undef RTMP6 +#undef RTMP7 +#undef RH1 +#undef RH2 +#undef RH3 +#undef RH4 + + +/* Register macros for decrypt */ + +/* v0-v2 for building CTRs, v3-v5 for saving inputs */ + +#define RR1 v6 +#define RR3 v7 +#define RR5 v8 + +#define RR0 v9 +#define RR2 v10 +#define RR4 v11 + +#define RTMP0 v12 +#define RTMP1 v13 +#define RTMP2 v14 +#define RTMP3 v15 +#define RTMP4 v16 +#define RTMP5 v17 + +#define RH1 v18 +#define RH2 v19 +#define RH3 v20 + +.align 3 +SYM_FUNC_START(sm4_ce_pmull_gcm_dec) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: ctr (big endian, 128 bit) + * w4: nbytes + * x5: ghash result + * x6: ghash table + * x7: lengths (only for last block) + */ + SM4_PREPARE(x0) + + ldp x8, x9, [x3] + rev x8, x8 + rev x9, x9 + + ld1 {RH1.16b-RH3.16b}, [x6] + + ld1 {RHASH.16b}, [x5] + rbit RHASH.16b, RHASH.16b + + adr_l x6, .Lghash_rconst + ld1r {RRCONST.2d}, [x6] + + eor RZERO.16b, RZERO.16b, RZERO.16b + + cbz w4, .Lgcm_dec_hash_len + +.Lgcm_dec_loop_3x: + cmp w4, #(3 * 16) + blt .Lgcm_dec_loop_1x + + sub w4, w4, #(3 * 16) + + ld1 {v3.16b-v5.16b}, [x2], #(3 * 16) + + /* construct CTRs */ + inc32_le128(v0) /* +0 */ + rbit v6.16b, v3.16b + inc32_le128(v1) /* +1 */ + rbit v7.16b, v4.16b + inc32_le128(v2) /* +2 */ + rbit v8.16b, v5.16b + + eor RHASH.16b, RHASH.16b, v6.16b + + /* decrypt & ghash update */ + SM4_CRYPT_PMUL_128x128_BLK3(v0, v1, v2, + RR0, RR1, RHASH, RH3, RTMP0, RTMP1, + RR2, RR3, v7, RH2, RTMP2, RTMP3, + RR4, RR5, v8, RH1, RTMP4, RTMP5) + + eor v0.16b, v0.16b, v3.16b + eor v1.16b, v1.16b, v4.16b + eor v2.16b, v2.16b, v5.16b + + REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP0, RTMP1) + + st1 {v0.16b-v2.16b}, [x1], #(3 * 16) + + cbz w4, .Lgcm_dec_hash_len + b .Lgcm_dec_loop_3x + +.Lgcm_dec_loop_1x: + cmp w4, #16 + blt .Lgcm_dec_tail + + sub w4, w4, #16 + + ld1 {v3.16b}, 
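/*
 * Editor's note (illustration, not part of the original patch): the
 * decrypt path can hash and decrypt in a single pass because GHASH runs
 * over the *ciphertext*, which is available up front; the
 * SM4_CRYPT_PMUL_128x128_BLK macros interleave the sm4e rounds of the
 * counter block with the pmull/eor steps of the GHASH multiply to hide
 * instruction latency. The encrypt path above cannot fuse this way,
 * since it must produce the ciphertext before it can hash it.
 */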
[x2], #16 + + /* construct CTRs */ + inc32_le128(v0) + rbit v6.16b, v3.16b + + eor RHASH.16b, RHASH.16b, v6.16b + + SM4_CRYPT_PMUL_128x128_BLK(v0, RR0, RR1, RHASH, RH1, RTMP0, RTMP1) + + eor v0.16b, v0.16b, v3.16b + + REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP2, RTMP3) + + st1 {v0.16b}, [x1], #16 + + cbz w4, .Lgcm_dec_hash_len + b .Lgcm_dec_loop_1x + +.Lgcm_dec_tail: + /* construct CTRs */ + inc32_le128(v0) + SM4_CRYPT_BLK(v0) + + /* load permute table */ + adr_l x0, .Lcts_permute_table + add x0, x0, #32 + sub x0, x0, w4, uxtw + ld1 {v3.16b}, [x0] + +.Lgcm_dec_tail_loop: + /* do decrypt */ + ldrb w0, [x2], #1 /* get 1 byte from input */ + umov w6, v0.b[0] /* get top crypted byte */ + eor w6, w6, w0 /* w6 = CTR ^ input */ + strb w6, [x1], #1 /* store out byte */ + + /* shift right out one byte */ + ext v0.16b, v0.16b, v0.16b, #1 + /* the last ciphertext is placed in high bytes */ + ins v0.b[15], w0 + + subs w4, w4, #1 + bne .Lgcm_dec_tail_loop + + /* padding last block with zeros */ + tbl v0.16b, {v0.16b}, v3.16b + + /* ghash update */ + rbit v0.16b, v0.16b + eor RHASH.16b, RHASH.16b, v0.16b + PMUL_128x128(RR0, RR1, RHASH, RH1, RTMP0, RTMP1) + REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP2, RTMP3) + +.Lgcm_dec_hash_len: + cbz x7, .Lgcm_dec_end + + GTAG_HASH_LENGTHS(v1, v3) + + b .Lgcm_dec_ret + +.Lgcm_dec_end: + /* store new CTR */ + rev x8, x8 + rev x9, x9 + stp x8, x9, [x3] + + rbit RHASH.16b, RHASH.16b + +.Lgcm_dec_ret: + /* store new MAC */ + st1 {RHASH.2d}, [x5] + + ret +SYM_FUNC_END(sm4_ce_pmull_gcm_dec) + + .section ".rodata", "a" + .align 4 +.Lcts_permute_table: + .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 + .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf + .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + +.Lghash_rconst: + .quad 0x87 diff --git a/arch/arm64/crypto/sm4-ce-gcm-glue.c b/arch/arm64/crypto/sm4-ce-gcm-glue.c new file mode 100644 index 0000000000000000000000000000000000000000..c450a2025ca9aff2bed43c93c784f22ac066e495 --- /dev/null +++ b/arch/arm64/crypto/sm4-ce-gcm-glue.c @@ -0,0 +1,286 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4-GCM AEAD Algorithm using ARMv8 Crypto Extensions + * as specified in rfc8998 + * https://datatracker.ietf.org/doc/html/rfc8998 + * + * Copyright (C) 2022 Tianjia Zhang + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sm4-ce.h" + +asmlinkage void sm4_ce_pmull_ghash_setup(const u32 *rkey_enc, u8 *ghash_table); +asmlinkage void pmull_ghash_update(const u8 *ghash_table, u8 *ghash, + const u8 *src, unsigned int nblocks); +asmlinkage void sm4_ce_pmull_gcm_enc(const u32 *rkey_enc, u8 *dst, + const u8 *src, u8 *iv, + unsigned int nbytes, u8 *ghash, + const u8 *ghash_table, const u8 *lengths); +asmlinkage void sm4_ce_pmull_gcm_dec(const u32 *rkey_enc, u8 *dst, + const u8 *src, u8 *iv, + unsigned int nbytes, u8 *ghash, + const u8 *ghash_table, const u8 *lengths); + +#define GHASH_BLOCK_SIZE 16 +#define GCM_IV_SIZE 12 + +struct sm4_gcm_ctx { + struct sm4_ctx key; + u8 ghash_table[16 * 4]; +}; + + +static int gcm_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int key_len) +{ + struct sm4_gcm_ctx *ctx = crypto_aead_ctx(tfm); + + if (key_len != SM4_KEY_SIZE) + return -EINVAL; + + kernel_neon_begin(); + + sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec, + crypto_sm4_fk, crypto_sm4_ck); + 
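	/*
	 * Editor's note (illustration, not part of the original patch):
	 * sm4_ce_pmull_ghash_setup() derives the hash key H = E(K, 0^128)
	 * and precomputes H^2..H^4, so the GHASH loops can fold four
	 * blocks per modular reduction:
	 *
	 *	S' = (S ^ B1)*H^4 ^ B2*H^3 ^ B3*H^2 ^ B4*H
	 *
	 * Both calls execute SM4/PMULL instructions, which is why they sit
	 * inside a single kernel_neon_begin()/kernel_neon_end() pair: the
	 * kernel does not preserve NEON state for in-kernel users unless
	 * it is explicitly claimed.
	 */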
sm4_ce_pmull_ghash_setup(ctx->key.rkey_enc, ctx->ghash_table); + + kernel_neon_end(); + return 0; +} + +static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + switch (authsize) { + case 4: + case 8: + case 12 ... 16: + return 0; + default: + return -EINVAL; + } +} + +static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[]) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead); + u8 __aligned(8) buffer[GHASH_BLOCK_SIZE]; + u32 assoclen = req->assoclen; + struct scatter_walk walk; + unsigned int buflen = 0; + + scatterwalk_start(&walk, req->src); + + do { + u32 n = scatterwalk_clamp(&walk, assoclen); + u8 *p, *ptr; + + if (!n) { + scatterwalk_start(&walk, sg_next(walk.sg)); + n = scatterwalk_clamp(&walk, assoclen); + } + + p = ptr = scatterwalk_map(&walk); + assoclen -= n; + scatterwalk_advance(&walk, n); + + if (n + buflen < GHASH_BLOCK_SIZE) { + memcpy(&buffer[buflen], ptr, n); + buflen += n; + } else { + unsigned int nblocks; + + if (buflen) { + unsigned int l = GHASH_BLOCK_SIZE - buflen; + + memcpy(&buffer[buflen], ptr, l); + ptr += l; + n -= l; + + pmull_ghash_update(ctx->ghash_table, ghash, + buffer, 1); + } + + nblocks = n / GHASH_BLOCK_SIZE; + if (nblocks) { + pmull_ghash_update(ctx->ghash_table, ghash, + ptr, nblocks); + ptr += nblocks * GHASH_BLOCK_SIZE; + } + + buflen = n % GHASH_BLOCK_SIZE; + if (buflen) + memcpy(&buffer[0], ptr, buflen); + } + + scatterwalk_unmap(p); + scatterwalk_done(&walk, 0, assoclen); + } while (assoclen); + + /* padding with '0' */ + if (buflen) { + memset(&buffer[buflen], 0, GHASH_BLOCK_SIZE - buflen); + pmull_ghash_update(ctx->ghash_table, ghash, buffer, 1); + } +} + +static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk, + struct sm4_gcm_ctx *ctx, u8 ghash[], + void (*sm4_ce_pmull_gcm_crypt)(const u32 *rkey_enc, + u8 *dst, const u8 *src, u8 *iv, + unsigned int nbytes, u8 *ghash, + const u8 *ghash_table, const u8 *lengths)) +{ + u8 __aligned(8) iv[SM4_BLOCK_SIZE]; + be128 __aligned(8) lengths; + int err; + + memset(ghash, 0, SM4_BLOCK_SIZE); + + lengths.a = cpu_to_be64(req->assoclen * 8); + lengths.b = cpu_to_be64(walk->total * 8); + + memcpy(iv, walk->iv, GCM_IV_SIZE); + put_unaligned_be32(2, iv + GCM_IV_SIZE); + + kernel_neon_begin(); + + if (req->assoclen) + gcm_calculate_auth_mac(req, ghash); + + do { + unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE; + const u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + + if (walk->nbytes == walk->total) { + tail = 0; + + sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv, + walk->nbytes, ghash, + ctx->ghash_table, + (const u8 *)&lengths); + } else if (walk->nbytes - tail) { + sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv, + walk->nbytes - tail, ghash, + ctx->ghash_table, NULL); + } + + kernel_neon_end(); + + err = skcipher_walk_done(walk, tail); + if (err) + return err; + if (walk->nbytes) + kernel_neon_begin(); + } while (walk->nbytes > 0); + + return 0; +} + +static int gcm_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead); + u8 __aligned(8) ghash[SM4_BLOCK_SIZE]; + struct skcipher_walk walk; + int err; + + err = skcipher_walk_aead_encrypt(&walk, req, false); + if (err) + return err; + + err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_enc); + if (err) + return err; + + /* copy authtag to end of dst */ + scatterwalk_map_and_copy(ghash, req->dst, req->assoclen + 
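	/*
	 * Editor's note (illustration, not part of the original patch): on
	 * the final chunk gcm_crypt() passed the encoded bit lengths of the
	 * AAD and plaintext, so the assembly finished the tag computation:
	 *
	 *	tag = E(K, CTR0) ^ GHASH(AAD, C, len(AAD) || len(C))
	 *
	 * where CTR0 is the IV with a 32-bit counter value of 1. What is
	 * copied here is therefore the complete authentication tag,
	 * appended directly after the ciphertext in the destination
	 * scatterlist.
	 */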
req->cryptlen, + crypto_aead_authsize(aead), 1); + + return 0; +} + +static int gcm_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + unsigned int authsize = crypto_aead_authsize(aead); + struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead); + u8 __aligned(8) ghash[SM4_BLOCK_SIZE]; + u8 authtag[SM4_BLOCK_SIZE]; + struct skcipher_walk walk; + int err; + + err = skcipher_walk_aead_decrypt(&walk, req, false); + if (err) + return err; + + err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_dec); + if (err) + return err; + + /* compare calculated auth tag with the stored one */ + scatterwalk_map_and_copy(authtag, req->src, + req->assoclen + req->cryptlen - authsize, + authsize, 0); + + if (crypto_memneq(authtag, ghash, authsize)) + return -EBADMSG; + + return 0; +} + +static struct aead_alg sm4_gcm_alg = { + .base = { + .cra_name = "gcm(sm4)", + .cra_driver_name = "gcm-sm4-ce", + .cra_priority = 400, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct sm4_gcm_ctx), + .cra_module = THIS_MODULE, + }, + .ivsize = GCM_IV_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .maxauthsize = SM4_BLOCK_SIZE, + .setkey = gcm_setkey, + .setauthsize = gcm_setauthsize, + .encrypt = gcm_encrypt, + .decrypt = gcm_decrypt, +}; + +static int __init sm4_ce_gcm_init(void) +{ + if (!cpu_have_named_feature(PMULL)) + return -ENODEV; + + return crypto_register_aead(&sm4_gcm_alg); +} + +static void __exit sm4_ce_gcm_exit(void) +{ + crypto_unregister_aead(&sm4_gcm_alg); +} + +static const struct cpu_feature __maybe_unused sm4_ce_gcm_cpu_feature[] = { + { cpu_feature(PMULL) }, + {} +}; +MODULE_DEVICE_TABLE(cpu, sm4_ce_gcm_cpu_feature); + +module_cpu_feature_match(SM4, sm4_ce_gcm_init); +module_exit(sm4_ce_gcm_exit); + +MODULE_DESCRIPTION("Synchronous SM4 in GCM mode using ARMv8 Crypto Extensions"); +MODULE_ALIAS_CRYPTO("gcm(sm4)"); +MODULE_AUTHOR("Tianjia Zhang "); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c index 496d55c0d01a4617ca3c014382ad3e9ea246f866..0a2d32ed3bdec4504ef3546fd1b356431a83a088 100644 --- a/arch/arm64/crypto/sm4-ce-glue.c +++ b/arch/arm64/crypto/sm4-ce-glue.c @@ -14,8 +14,12 @@ #include #include #include +#include #include #include +#include +#include +#include #include #define BYTES2BLKS(nbytes) ((nbytes) >> 4) @@ -26,15 +30,48 @@ asmlinkage void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src); asmlinkage void sm4_ce_crypt(const u32 *rkey, u8 *dst, const u8 *src, unsigned int nblks); asmlinkage void sm4_ce_cbc_enc(const u32 *rkey, u8 *dst, const u8 *src, - u8 *iv, unsigned int nblks); + u8 *iv, unsigned int nblocks); asmlinkage void sm4_ce_cbc_dec(const u32 *rkey, u8 *dst, const u8 *src, - u8 *iv, unsigned int nblks); + u8 *iv, unsigned int nblocks); +asmlinkage void sm4_ce_cbc_cts_enc(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nbytes); +asmlinkage void sm4_ce_cbc_cts_dec(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nbytes); asmlinkage void sm4_ce_cfb_enc(const u32 *rkey, u8 *dst, const u8 *src, u8 *iv, unsigned int nblks); asmlinkage void sm4_ce_cfb_dec(const u32 *rkey, u8 *dst, const u8 *src, u8 *iv, unsigned int nblks); asmlinkage void sm4_ce_ctr_enc(const u32 *rkey, u8 *dst, const u8 *src, u8 *iv, unsigned int nblks); +asmlinkage void sm4_ce_xts_enc(const u32 *rkey1, u8 *dst, const u8 *src, + u8 *tweak, unsigned int nbytes, + const u32 *rkey2_enc); +asmlinkage void sm4_ce_xts_dec(const u32 *rkey1, u8 *dst, const u8 *src, + u8 *tweak, unsigned int nbytes, + 
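	/*
	 * Editor's note (illustration, not part of the original patch):
	 * unlike the block-mode helpers above, which take a whole number
	 * of blocks, the XTS and CBC-CTS entry points take a byte count:
	 * the assembly consumes the partial tail itself via the
	 * .Lcts_permute_table overlapping load/store trick, so the C glue
	 * never has to round the length down to a block boundary.
	 */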
const u32 *rkey2_enc); +asmlinkage void sm4_ce_mac_update(const u32 *rkey_enc, u8 *digest, + const u8 *src, unsigned int nblocks, + bool enc_before, bool enc_after); + +EXPORT_SYMBOL(sm4_ce_expand_key); +EXPORT_SYMBOL(sm4_ce_crypt_block); +EXPORT_SYMBOL(sm4_ce_cbc_enc); +EXPORT_SYMBOL(sm4_ce_cfb_enc); + +struct sm4_xts_ctx { + struct sm4_ctx key1; + struct sm4_ctx key2; +}; + +struct sm4_mac_tfm_ctx { + struct sm4_ctx key; + u8 __aligned(8) consts[]; +}; + +struct sm4_mac_desc_ctx { + unsigned int len; + u8 digest[SM4_BLOCK_SIZE]; +}; static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int key_len) @@ -44,8 +81,33 @@ static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key, if (key_len != SM4_KEY_SIZE) return -EINVAL; + kernel_neon_begin(); sm4_ce_expand_key(key, ctx->rkey_enc, ctx->rkey_dec, crypto_sm4_fk, crypto_sm4_ck); + kernel_neon_end(); + return 0; +} + +static int sm4_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct sm4_xts_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret; + + if (key_len != SM4_KEY_SIZE * 2) + return -EINVAL; + + ret = xts_verify_key(tfm, key, key_len); + if (ret) + return ret; + + kernel_neon_begin(); + sm4_ce_expand_key(key, ctx->key1.rkey_enc, + ctx->key1.rkey_dec, crypto_sm4_fk, crypto_sm4_ck); + sm4_ce_expand_key(&key[SM4_KEY_SIZE], ctx->key2.rkey_enc, + ctx->key2.rkey_dec, crypto_sm4_fk, crypto_sm4_ck); + kernel_neon_end(); + return 0; } @@ -94,66 +156,128 @@ static int sm4_ecb_decrypt(struct skcipher_request *req) return sm4_ecb_do_crypt(req, ctx->rkey_dec); } -static int sm4_cbc_encrypt(struct skcipher_request *req) +static int sm4_cbc_crypt(struct skcipher_request *req, + struct sm4_ctx *ctx, bool encrypt) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, false); + if (err) + return err; while ((nbytes = walk.nbytes) > 0) { const u8 *src = walk.src.virt.addr; u8 *dst = walk.dst.virt.addr; - unsigned int nblks; + unsigned int nblocks; - kernel_neon_begin(); + nblocks = nbytes / SM4_BLOCK_SIZE; + if (nblocks) { + kernel_neon_begin(); - nblks = BYTES2BLKS(nbytes); - if (nblks) { - sm4_ce_cbc_enc(ctx->rkey_enc, dst, src, walk.iv, nblks); - nbytes -= nblks * SM4_BLOCK_SIZE; - } + if (encrypt) + sm4_ce_cbc_enc(ctx->rkey_enc, dst, src, + walk.iv, nblocks); + else + sm4_ce_cbc_dec(ctx->rkey_dec, dst, src, + walk.iv, nblocks); - kernel_neon_end(); + kernel_neon_end(); + } - err = skcipher_walk_done(&walk, nbytes); + err = skcipher_walk_done(&walk, nbytes % SM4_BLOCK_SIZE); } return err; } +static int sm4_cbc_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + return sm4_cbc_crypt(req, ctx, true); +} + static int sm4_cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + return sm4_cbc_crypt(req, ctx, false); +} + +static int sm4_cbc_cts_crypt(struct skcipher_request *req, bool encrypt) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct scatterlist *src = req->src; + struct scatterlist *dst = req->dst; + struct scatterlist sg_src[2], sg_dst[2]; + struct skcipher_request subreq; struct skcipher_walk walk; - unsigned int nbytes; + int cbc_blocks; int err; - err = 
skcipher_walk_virt(&walk, req, false); + if (req->cryptlen < SM4_BLOCK_SIZE) + return -EINVAL; - while ((nbytes = walk.nbytes) > 0) { - const u8 *src = walk.src.virt.addr; - u8 *dst = walk.dst.virt.addr; - unsigned int nblks; + if (req->cryptlen == SM4_BLOCK_SIZE) + return sm4_cbc_crypt(req, ctx, encrypt); - kernel_neon_begin(); + skcipher_request_set_tfm(&subreq, tfm); + skcipher_request_set_callback(&subreq, skcipher_request_flags(req), + NULL, NULL); - nblks = BYTES2BLKS(nbytes); - if (nblks) { - sm4_ce_cbc_dec(ctx->rkey_dec, dst, src, walk.iv, nblks); - nbytes -= nblks * SM4_BLOCK_SIZE; - } + /* handle the CBC cryption part */ + cbc_blocks = DIV_ROUND_UP(req->cryptlen, SM4_BLOCK_SIZE) - 2; + if (cbc_blocks) { + skcipher_request_set_crypt(&subreq, src, dst, + cbc_blocks * SM4_BLOCK_SIZE, + req->iv); - kernel_neon_end(); + err = sm4_cbc_crypt(&subreq, ctx, encrypt); + if (err) + return err; - err = skcipher_walk_done(&walk, nbytes); + dst = src = scatterwalk_ffwd(sg_src, src, subreq.cryptlen); + if (req->dst != req->src) + dst = scatterwalk_ffwd(sg_dst, req->dst, + subreq.cryptlen); } - return err; + /* handle ciphertext stealing */ + skcipher_request_set_crypt(&subreq, src, dst, + req->cryptlen - cbc_blocks * SM4_BLOCK_SIZE, + req->iv); + + err = skcipher_walk_virt(&walk, &subreq, false); + if (err) + return err; + + kernel_neon_begin(); + + if (encrypt) + sm4_ce_cbc_cts_enc(ctx->rkey_enc, walk.dst.virt.addr, + walk.src.virt.addr, walk.iv, walk.nbytes); + else + sm4_ce_cbc_cts_dec(ctx->rkey_dec, walk.dst.virt.addr, + walk.src.virt.addr, walk.iv, walk.nbytes); + + kernel_neon_end(); + + return skcipher_walk_done(&walk, 0); +} + +static int sm4_cbc_cts_encrypt(struct skcipher_request *req) +{ + return sm4_cbc_cts_crypt(req, true); +} + +static int sm4_cbc_cts_decrypt(struct skcipher_request *req) +{ + return sm4_cbc_cts_crypt(req, false); } static int sm4_cfb_encrypt(struct skcipher_request *req) @@ -283,6 +407,111 @@ static int sm4_ctr_crypt(struct skcipher_request *req) return err; } +static int sm4_xts_crypt(struct skcipher_request *req, bool encrypt) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_xts_ctx *ctx = crypto_skcipher_ctx(tfm); + int tail = req->cryptlen % SM4_BLOCK_SIZE; + const u32 *rkey2_enc = ctx->key2.rkey_enc; + struct scatterlist sg_src[2], sg_dst[2]; + struct skcipher_request subreq; + struct scatterlist *src, *dst; + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + if (req->cryptlen < SM4_BLOCK_SIZE) + return -EINVAL; + + err = skcipher_walk_virt(&walk, req, false); + if (err) + return err; + + if (unlikely(tail > 0 && walk.nbytes < walk.total)) { + int nblocks = DIV_ROUND_UP(req->cryptlen, SM4_BLOCK_SIZE) - 2; + + skcipher_walk_abort(&walk); + + skcipher_request_set_tfm(&subreq, tfm); + skcipher_request_set_callback(&subreq, + skcipher_request_flags(req), + NULL, NULL); + skcipher_request_set_crypt(&subreq, req->src, req->dst, + nblocks * SM4_BLOCK_SIZE, req->iv); + + err = skcipher_walk_virt(&walk, &subreq, false); + if (err) + return err; + } else { + tail = 0; + } + + while ((nbytes = walk.nbytes) >= SM4_BLOCK_SIZE) { + if (nbytes < walk.total) + nbytes &= ~(SM4_BLOCK_SIZE - 1); + + kernel_neon_begin(); + + if (encrypt) + sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr, + walk.src.virt.addr, walk.iv, nbytes, + rkey2_enc); + else + sm4_ce_xts_dec(ctx->key1.rkey_dec, walk.dst.virt.addr, + walk.src.virt.addr, walk.iv, nbytes, + rkey2_enc); + + kernel_neon_end(); + + rkey2_enc = NULL; + + err = 
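		/*
		 * Editor's note (illustration, not part of the original
		 * patch): rkey2_enc is handed to the assembly only on the
		 * first chunk; it encrypts the initial tweak with key2
		 * once and thereafter derives each tweak by doubling, so
		 * later chunks pass NULL. When cryptlen is not
		 * block-aligned, the final two blocks were peeled off into
		 * subreq above and reach the assembly's CTS tail, which
		 * performs the usual IEEE P1619 ciphertext stealing,
		 * roughly:
		 *
		 *	CC     = E(T[m-1], P[m-1])
		 *	C[m]   = first `tail` bytes of CC
		 *	C[m-1] = E(T[m], P[m] || remaining bytes of CC)
		 */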
skcipher_walk_done(&walk, walk.nbytes - nbytes); + if (err) + return err; + } + + if (likely(tail == 0)) + return 0; + + /* handle ciphertext stealing */ + + dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen); + if (req->dst != req->src) + dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen); + + skcipher_request_set_crypt(&subreq, src, dst, SM4_BLOCK_SIZE + tail, + req->iv); + + err = skcipher_walk_virt(&walk, &subreq, false); + if (err) + return err; + + kernel_neon_begin(); + + if (encrypt) + sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr, + walk.src.virt.addr, walk.iv, walk.nbytes, + rkey2_enc); + else + sm4_ce_xts_dec(ctx->key1.rkey_dec, walk.dst.virt.addr, + walk.src.virt.addr, walk.iv, walk.nbytes, + rkey2_enc); + + kernel_neon_end(); + + return skcipher_walk_done(&walk, 0); +} + +static int sm4_xts_encrypt(struct skcipher_request *req) +{ + return sm4_xts_crypt(req, true); +} + +static int sm4_xts_decrypt(struct skcipher_request *req) +{ + return sm4_xts_crypt(req, false); +} + static struct skcipher_alg sm4_algs[] = { { .base = { @@ -345,28 +574,312 @@ static struct skcipher_alg sm4_algs[] = { .setkey = sm4_setkey, .encrypt = sm4_ctr_crypt, .decrypt = sm4_ctr_crypt, + }, { + .base = { + .cra_name = "cts(cbc(sm4))", + .cra_driver_name = "cts-cbc-sm4-ce", + .cra_priority = 400, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .walksize = SM4_BLOCK_SIZE * 2, + .setkey = sm4_setkey, + .encrypt = sm4_cbc_cts_encrypt, + .decrypt = sm4_cbc_cts_decrypt, + }, { + .base = { + .cra_name = "xts(sm4)", + .cra_driver_name = "xts-sm4-ce", + .cra_priority = 400, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_xts_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE * 2, + .max_keysize = SM4_KEY_SIZE * 2, + .ivsize = SM4_BLOCK_SIZE, + .walksize = SM4_BLOCK_SIZE * 2, + .setkey = sm4_xts_setkey, + .encrypt = sm4_xts_encrypt, + .decrypt = sm4_xts_decrypt, + } +}; + +static int sm4_cbcmac_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int key_len) +{ + struct sm4_mac_tfm_ctx *ctx = crypto_shash_ctx(tfm); + + if (key_len != SM4_KEY_SIZE) + return -EINVAL; + + kernel_neon_begin(); + sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec, + crypto_sm4_fk, crypto_sm4_ck); + kernel_neon_end(); + + return 0; +} + +static int sm4_cmac_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int key_len) +{ + struct sm4_mac_tfm_ctx *ctx = crypto_shash_ctx(tfm); + be128 *consts = (be128 *)ctx->consts; + u64 a, b; + + if (key_len != SM4_KEY_SIZE) + return -EINVAL; + + memset(consts, 0, SM4_BLOCK_SIZE); + + kernel_neon_begin(); + + sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec, + crypto_sm4_fk, crypto_sm4_ck); + + /* encrypt the zero block */ + sm4_ce_crypt_block(ctx->key.rkey_enc, (u8 *)consts, (const u8 *)consts); + + kernel_neon_end(); + + /* gf(2^128) multiply zero-ciphertext with u and u^2 */ + a = be64_to_cpu(consts[0].a); + b = be64_to_cpu(consts[0].b); + consts[0].a = cpu_to_be64((a << 1) | (b >> 63)); + consts[0].b = cpu_to_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0)); + + a = be64_to_cpu(consts[0].a); + b = be64_to_cpu(consts[0].b); + consts[1].a = cpu_to_be64((a << 1) | (b >> 63)); + consts[1].b = cpu_to_be64((b << 1) ^ ((a >> 63) ? 
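	/*
	 * Editor's note (illustration, not part of the original patch):
	 * this is the standard CMAC subkey derivation. With L = E(K, 0^128)
	 * already in consts[0], the two doublings compute K1 = L*x and
	 * K2 = L*x^2 in GF(2^128) (big-endian), reducing by 0x87. As a
	 * hypothetical helper it would read:
	 *
	 *	static void gf128_dbl_be(be128 *x)
	 *	{
	 *		u64 a = be64_to_cpu(x->a);
	 *		u64 b = be64_to_cpu(x->b);
	 *
	 *		x->a = cpu_to_be64((a << 1) | (b >> 63));
	 *		x->b = cpu_to_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0));
	 *	}
	 */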
0x87 : 0)); + + return 0; +} + +static int sm4_xcbc_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int key_len) +{ + struct sm4_mac_tfm_ctx *ctx = crypto_shash_ctx(tfm); + u8 __aligned(8) key2[SM4_BLOCK_SIZE]; + static u8 const ks[3][SM4_BLOCK_SIZE] = { + { [0 ... SM4_BLOCK_SIZE - 1] = 0x1}, + { [0 ... SM4_BLOCK_SIZE - 1] = 0x2}, + { [0 ... SM4_BLOCK_SIZE - 1] = 0x3}, + }; + + if (key_len != SM4_KEY_SIZE) + return -EINVAL; + + kernel_neon_begin(); + + sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec, + crypto_sm4_fk, crypto_sm4_ck); + + sm4_ce_crypt_block(ctx->key.rkey_enc, key2, ks[0]); + sm4_ce_crypt(ctx->key.rkey_enc, ctx->consts, ks[1], 2); + + sm4_ce_expand_key(key2, ctx->key.rkey_enc, ctx->key.rkey_dec, + crypto_sm4_fk, crypto_sm4_ck); + + kernel_neon_end(); + + return 0; +} + +static int sm4_mac_init(struct shash_desc *desc) +{ + struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc); + + memset(ctx->digest, 0, SM4_BLOCK_SIZE); + ctx->len = 0; + + return 0; +} + +static int sm4_mac_update(struct shash_desc *desc, const u8 *p, + unsigned int len) +{ + struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc); + unsigned int l, nblocks; + + if (len == 0) + return 0; + + if (ctx->len || ctx->len + len < SM4_BLOCK_SIZE) { + l = min(len, SM4_BLOCK_SIZE - ctx->len); + + crypto_xor(ctx->digest + ctx->len, p, l); + ctx->len += l; + len -= l; + p += l; + } + + if (len && (ctx->len % SM4_BLOCK_SIZE) == 0) { + kernel_neon_begin(); + + if (len < SM4_BLOCK_SIZE && ctx->len == SM4_BLOCK_SIZE) { + sm4_ce_crypt_block(tctx->key.rkey_enc, + ctx->digest, ctx->digest); + ctx->len = 0; + } else { + nblocks = len / SM4_BLOCK_SIZE; + len %= SM4_BLOCK_SIZE; + + sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, p, + nblocks, (ctx->len == SM4_BLOCK_SIZE), + (len != 0)); + + p += nblocks * SM4_BLOCK_SIZE; + + if (len == 0) + ctx->len = SM4_BLOCK_SIZE; + } + + kernel_neon_end(); + + if (len) { + crypto_xor(ctx->digest, p, len); + ctx->len = len; + } + } + + return 0; +} + +static int sm4_cmac_final(struct shash_desc *desc, u8 *out) +{ + struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc); + const u8 *consts = tctx->consts; + + if (ctx->len != SM4_BLOCK_SIZE) { + ctx->digest[ctx->len] ^= 0x80; + consts += SM4_BLOCK_SIZE; + } + + kernel_neon_begin(); + sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, consts, 1, + false, true); + kernel_neon_end(); + + memcpy(out, ctx->digest, SM4_BLOCK_SIZE); + + return 0; +} + +static int sm4_cbcmac_final(struct shash_desc *desc, u8 *out) +{ + struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc); + + if (ctx->len) { + kernel_neon_begin(); + sm4_ce_crypt_block(tctx->key.rkey_enc, ctx->digest, + ctx->digest); + kernel_neon_end(); + } + + memcpy(out, ctx->digest, SM4_BLOCK_SIZE); + + return 0; +} + +static struct shash_alg sm4_mac_algs[] = { + { + .base = { + .cra_name = "cmac(sm4)", + .cra_driver_name = "cmac-sm4-ce", + .cra_priority = 400, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx) + + SM4_BLOCK_SIZE * 2, + .cra_module = THIS_MODULE, + }, + .digestsize = SM4_BLOCK_SIZE, + .init = sm4_mac_init, + .update = sm4_mac_update, + .final = sm4_cmac_final, + .setkey = sm4_cmac_setkey, + .descsize = sizeof(struct sm4_mac_desc_ctx), + }, { + .base = { + .cra_name = "xcbc(sm4)", + .cra_driver_name = "xcbc-sm4-ce", + .cra_priority = 400, + .cra_blocksize 
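	/*
	 * Editor's note (illustration, not part of the original patch):
	 * sm4_mac_update() above buffers partial input by XOR-ing it into
	 * ctx->digest and defers the block encryption: enc_before tells
	 * sm4_ce_mac_update() that a full buffered block is still pending,
	 * and enc_after is false whenever the last block could be the
	 * final one, since CMAC/XCBC must XOR a subkey into that block
	 * before the last encryption, which happens in sm4_cmac_final().
	 */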
= SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx) + + SM4_BLOCK_SIZE * 2, + .cra_module = THIS_MODULE, + }, + .digestsize = SM4_BLOCK_SIZE, + .init = sm4_mac_init, + .update = sm4_mac_update, + .final = sm4_cmac_final, + .setkey = sm4_xcbc_setkey, + .descsize = sizeof(struct sm4_mac_desc_ctx), + }, { + .base = { + .cra_name = "cbcmac(sm4)", + .cra_driver_name = "cbcmac-sm4-ce", + .cra_priority = 400, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx), + .cra_module = THIS_MODULE, + }, + .digestsize = SM4_BLOCK_SIZE, + .init = sm4_mac_init, + .update = sm4_mac_update, + .final = sm4_cbcmac_final, + .setkey = sm4_cbcmac_setkey, + .descsize = sizeof(struct sm4_mac_desc_ctx), } }; static int __init sm4_init(void) { - return crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); + int err; + + err = crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); + if (err) + return err; + + err = crypto_register_shashes(sm4_mac_algs, ARRAY_SIZE(sm4_mac_algs)); + if (err) + goto out_err; + + return 0; + +out_err: + crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); + return err; } static void __exit sm4_exit(void) { + crypto_unregister_shashes(sm4_mac_algs, ARRAY_SIZE(sm4_mac_algs)); crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); } module_cpu_feature_match(SM4, sm4_init); module_exit(sm4_exit); -MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR using ARMv8 Crypto Extensions"); +MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR/XTS using ARMv8 Crypto Extensions"); MODULE_ALIAS_CRYPTO("sm4-ce"); MODULE_ALIAS_CRYPTO("sm4"); MODULE_ALIAS_CRYPTO("ecb(sm4)"); MODULE_ALIAS_CRYPTO("cbc(sm4)"); MODULE_ALIAS_CRYPTO("cfb(sm4)"); MODULE_ALIAS_CRYPTO("ctr(sm4)"); +MODULE_ALIAS_CRYPTO("cts(cbc(sm4))"); +MODULE_ALIAS_CRYPTO("xts(sm4)"); +MODULE_ALIAS_CRYPTO("cmac(sm4)"); +MODULE_ALIAS_CRYPTO("xcbc(sm4)"); +MODULE_ALIAS_CRYPTO("cbcmac(sm4)"); MODULE_AUTHOR("Tianjia Zhang "); MODULE_LICENSE("GPL v2"); diff --git a/arch/arm64/crypto/sm4-ce.h b/arch/arm64/crypto/sm4-ce.h new file mode 100644 index 0000000000000000000000000000000000000000..109c21b37590d39a7309e4aa5cadf813e454a736 --- /dev/null +++ b/arch/arm64/crypto/sm4-ce.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4 common functions for Crypto Extensions + * Copyright (C) 2022 Tianjia Zhang + */ + +void sm4_ce_expand_key(const u8 *key, u32 *rkey_enc, u32 *rkey_dec, + const u32 *fk, const u32 *ck); + +void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src); + +void sm4_ce_cbc_enc(const u32 *rkey_enc, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblocks); + +void sm4_ce_cfb_enc(const u32 *rkey_enc, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblocks); diff --git a/arch/arm64/crypto/sm4-neon-core.S b/arch/arm64/crypto/sm4-neon-core.S index 3d5256b354d27fb2f5464fc810f595ab17b1a115..f295b4b7d70a3ee9e02bfa3c2df2ea14d3d72a8b 100644 --- a/arch/arm64/crypto/sm4-neon-core.S +++ b/arch/arm64/crypto/sm4-neon-core.S @@ -18,6 +18,11 @@ #define RTMP2 v10 #define RTMP3 v11 +#define RTMP4 v12 +#define RTMP5 v13 +#define RTMP6 v14 +#define RTMP7 v15 + #define RX0 v12 #define RX1 v13 #define RKEY v14 @@ -25,7 +30,7 @@ /* Helper macros. 
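 *
 * Editor's note (illustration, not part of the original patch): the NEON
 * fallback has no single-block SM4 instruction, so it bit-slices four
 * blocks at a time. transpose_4x4() regroups four loaded blocks so that
 * each vector register holds the same 32-bit word of all four blocks:
 *
 *	in:  b0 = {A0,A1,A2,A3}  b1 = {B0,B1,B2,B3}
 *	     b2 = {C0,C1,C2,C3}  b3 = {D0,D1,D2,D3}
 *	out: b0 = {A0,B0,C0,D0}  b1 = {A1,B1,C1,D1}
 *	     b2 = {A2,B2,C2,D2}  b3 = {A3,B3,C3,D3}
 *
 * The _2x variants below do the same for eight blocks, and
 * rotate_clockwise_4x4() undoes the grouping (including the word
 * rotation the SM4 rounds leave behind) before results are stored.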
*/ -#define PREPARE \ +#define SM4_PREPARE() \ adr_l x5, crypto_sm4_sbox; \ ld1 {v16.16b-v19.16b}, [x5], #64; \ ld1 {v20.16b-v23.16b}, [x5], #64; \ @@ -42,7 +47,25 @@ zip1 s2.2d, RTMP2.2d, RTMP3.2d; \ zip2 s3.2d, RTMP2.2d, RTMP3.2d; -#define rotate_clockwise_90(s0, s1, s2, s3) \ +#define transpose_4x4_2x(s0, s1, s2, s3, s4, s5, s6, s7) \ + zip1 RTMP0.4s, s0.4s, s1.4s; \ + zip1 RTMP1.4s, s2.4s, s3.4s; \ + zip2 RTMP2.4s, s0.4s, s1.4s; \ + zip2 RTMP3.4s, s2.4s, s3.4s; \ + zip1 RTMP4.4s, s4.4s, s5.4s; \ + zip1 RTMP5.4s, s6.4s, s7.4s; \ + zip2 RTMP6.4s, s4.4s, s5.4s; \ + zip2 RTMP7.4s, s6.4s, s7.4s; \ + zip1 s0.2d, RTMP0.2d, RTMP1.2d; \ + zip2 s1.2d, RTMP0.2d, RTMP1.2d; \ + zip1 s2.2d, RTMP2.2d, RTMP3.2d; \ + zip2 s3.2d, RTMP2.2d, RTMP3.2d; \ + zip1 s4.2d, RTMP4.2d, RTMP5.2d; \ + zip2 s5.2d, RTMP4.2d, RTMP5.2d; \ + zip1 s6.2d, RTMP6.2d, RTMP7.2d; \ + zip2 s7.2d, RTMP6.2d, RTMP7.2d; + +#define rotate_clockwise_4x4(s0, s1, s2, s3) \ zip1 RTMP0.4s, s1.4s, s0.4s; \ zip2 RTMP1.4s, s1.4s, s0.4s; \ zip1 RTMP2.4s, s3.4s, s2.4s; \ @@ -52,6 +75,24 @@ zip1 s2.2d, RTMP3.2d, RTMP1.2d; \ zip2 s3.2d, RTMP3.2d, RTMP1.2d; +#define rotate_clockwise_4x4_2x(s0, s1, s2, s3, s4, s5, s6, s7) \ + zip1 RTMP0.4s, s1.4s, s0.4s; \ + zip1 RTMP2.4s, s3.4s, s2.4s; \ + zip2 RTMP1.4s, s1.4s, s0.4s; \ + zip2 RTMP3.4s, s3.4s, s2.4s; \ + zip1 RTMP4.4s, s5.4s, s4.4s; \ + zip1 RTMP6.4s, s7.4s, s6.4s; \ + zip2 RTMP5.4s, s5.4s, s4.4s; \ + zip2 RTMP7.4s, s7.4s, s6.4s; \ + zip1 s0.2d, RTMP2.2d, RTMP0.2d; \ + zip2 s1.2d, RTMP2.2d, RTMP0.2d; \ + zip1 s2.2d, RTMP3.2d, RTMP1.2d; \ + zip2 s3.2d, RTMP3.2d, RTMP1.2d; \ + zip1 s4.2d, RTMP6.2d, RTMP4.2d; \ + zip2 s5.2d, RTMP6.2d, RTMP4.2d; \ + zip1 s6.2d, RTMP7.2d, RTMP5.2d; \ + zip2 s7.2d, RTMP7.2d, RTMP5.2d; + #define ROUND4(round, s0, s1, s2, s3) \ dup RX0.4s, RKEY.s[round]; \ /* rk ^ s1 ^ s2 ^ s3 */ \ @@ -87,14 +128,7 @@ /* s0 ^= RTMP3 */ \ eor s0.16b, s0.16b, RTMP3.16b; -#define SM4_CRYPT_BLK4(b0, b1, b2, b3) \ - rev32 b0.16b, b0.16b; \ - rev32 b1.16b, b1.16b; \ - rev32 b2.16b, b2.16b; \ - rev32 b3.16b, b3.16b; \ - \ - transpose_4x4(b0, b1, b2, b3); \ - \ +#define SM4_CRYPT_BLK4_BE(b0, b1, b2, b3) \ mov x6, 8; \ 4: \ ld1 {RKEY.4s}, [x0], #16; \ @@ -107,15 +141,23 @@ \ bne 4b; \ \ - rotate_clockwise_90(b0, b1, b2, b3); \ rev32 b0.16b, b0.16b; \ rev32 b1.16b, b1.16b; \ rev32 b2.16b, b2.16b; \ rev32 b3.16b, b3.16b; \ \ + rotate_clockwise_4x4(b0, b1, b2, b3); \ + \ /* repoint to rkey */ \ sub x0, x0, #128; +#define SM4_CRYPT_BLK4(b0, b1, b2, b3) \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + SM4_CRYPT_BLK4_BE(b0, b1, b2, b3); + #define ROUND8(round, s0, s1, s2, s3, t0, t1, t2, t3) \ /* rk ^ s1 ^ s2 ^ s3 */ \ dup RX0.4s, RKEY.s[round]; \ @@ -175,7 +217,7 @@ eor s0.16b, s0.16b, RTMP0.16b; \ eor t0.16b, t0.16b, RTMP1.16b; -#define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \ +#define SM4_CRYPT_BLK8_norotate(b0, b1, b2, b3, b4, b5, b6, b7) \ rev32 b0.16b, b0.16b; \ rev32 b1.16b, b1.16b; \ rev32 b2.16b, b2.16b; \ @@ -185,9 +227,6 @@ rev32 b6.16b, b6.16b; \ rev32 b7.16b, b7.16b; \ \ - transpose_4x4(b0, b1, b2, b3); \ - transpose_4x4(b4, b5, b6, b7); \ - \ mov x6, 8; \ 8: \ ld1 {RKEY.4s}, [x0], #16; \ @@ -200,8 +239,6 @@ \ bne 8b; \ \ - rotate_clockwise_90(b0, b1, b2, b3); \ - rotate_clockwise_90(b4, b5, b6, b7); \ rev32 b0.16b, b0.16b; \ rev32 b1.16b, b1.16b; \ rev32 b2.16b, b2.16b; \ @@ -214,274 +251,429 @@ /* repoint to rkey */ \ sub x0, x0, #128; +#define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \ + SM4_CRYPT_BLK8_norotate(b0, 
b1, b2, b3, b4, b5, b6, b7); \ + rotate_clockwise_4x4_2x(b0, b1, b2, b3, b4, b5, b6, b7); \ -.align 3 -SYM_FUNC_START_LOCAL(__sm4_neon_crypt_blk1_4) - /* input: - * x0: round key array, CTX - * x1: dst - * x2: src - * w3: num blocks (1..4) - */ - PREPARE; - - ld1 {v0.16b}, [x2], #16; - mov v1.16b, v0.16b; - mov v2.16b, v0.16b; - mov v3.16b, v0.16b; - cmp w3, #2; - blt .Lblk4_load_input_done; - ld1 {v1.16b}, [x2], #16; - beq .Lblk4_load_input_done; - ld1 {v2.16b}, [x2], #16; - cmp w3, #3; - beq .Lblk4_load_input_done; - ld1 {v3.16b}, [x2]; - -.Lblk4_load_input_done: - SM4_CRYPT_BLK4(v0, v1, v2, v3); - - st1 {v0.16b}, [x1], #16; - cmp w3, #2; - blt .Lblk4_store_output_done; - st1 {v1.16b}, [x1], #16; - beq .Lblk4_store_output_done; - st1 {v2.16b}, [x1], #16; - cmp w3, #3; - beq .Lblk4_store_output_done; - st1 {v3.16b}, [x1]; - -.Lblk4_store_output_done: - ret; -SYM_FUNC_END(__sm4_neon_crypt_blk1_4) .align 3 -SYM_FUNC_START(sm4_neon_crypt_blk1_8) +SYM_FUNC_START(sm4_neon_crypt) /* input: * x0: round key array, CTX * x1: dst * x2: src - * w3: num blocks (1..8) + * w3: nblocks */ - cmp w3, #5; - blt __sm4_neon_crypt_blk1_4; - - PREPARE; - - ld1 {v0.16b-v3.16b}, [x2], #64; - ld1 {v4.16b}, [x2], #16; - mov v5.16b, v4.16b; - mov v6.16b, v4.16b; - mov v7.16b, v4.16b; - beq .Lblk8_load_input_done; - ld1 {v5.16b}, [x2], #16; - cmp w3, #7; - blt .Lblk8_load_input_done; - ld1 {v6.16b}, [x2], #16; - beq .Lblk8_load_input_done; - ld1 {v7.16b}, [x2]; - -.Lblk8_load_input_done: - SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); - - cmp w3, #6; - st1 {v0.16b-v3.16b}, [x1], #64; - st1 {v4.16b}, [x1], #16; - blt .Lblk8_store_output_done; - st1 {v5.16b}, [x1], #16; - beq .Lblk8_store_output_done; - st1 {v6.16b}, [x1], #16; - cmp w3, #7; - beq .Lblk8_store_output_done; - st1 {v7.16b}, [x1]; - -.Lblk8_store_output_done: - ret; -SYM_FUNC_END(sm4_neon_crypt_blk1_8) + SM4_PREPARE() -.align 3 -SYM_FUNC_START(sm4_neon_crypt_blk8) - /* input: - * x0: round key array, CTX - * x1: dst - * x2: src - * w3: nblocks (multiples of 8) - */ - PREPARE; +.Lcrypt_loop_8x: + sub w3, w3, #8 + tbnz w3, #31, .Lcrypt_4x + + ld4 {v0.4s-v3.4s}, [x2], #64 + ld4 {v4.4s-v7.4s}, [x2], #64 -.Lcrypt_loop_blk: - subs w3, w3, #8; - bmi .Lcrypt_end; + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7) - ld1 {v0.16b-v3.16b}, [x2], #64; - ld1 {v4.16b-v7.16b}, [x2], #64; + st1 {v0.16b-v3.16b}, [x1], #64 + st1 {v4.16b-v7.16b}, [x1], #64 - SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + cbz w3, .Lcrypt_end + b .Lcrypt_loop_8x - st1 {v0.16b-v3.16b}, [x1], #64; - st1 {v4.16b-v7.16b}, [x1], #64; +.Lcrypt_4x: + add w3, w3, #8 + cmp w3, #4 + blt .Lcrypt_tail - b .Lcrypt_loop_blk; + sub w3, w3, #4 + + ld4 {v0.4s-v3.4s}, [x2], #64 + + SM4_CRYPT_BLK4(v0, v1, v2, v3) + + st1 {v0.16b-v3.16b}, [x1], #64 + + cbz w3, .Lcrypt_end + +.Lcrypt_tail: + cmp w3, #2 + ld1 {v0.16b}, [x2], #16 + blt .Lcrypt_tail_load_done + ld1 {v1.16b}, [x2], #16 + beq .Lcrypt_tail_load_done + ld1 {v2.16b}, [x2], #16 + +.Lcrypt_tail_load_done: + transpose_4x4(v0, v1, v2, v3) + + SM4_CRYPT_BLK4(v0, v1, v2, v3) + + cmp w3, #2 + st1 {v0.16b}, [x1], #16 + blt .Lcrypt_end + st1 {v1.16b}, [x1], #16 + beq .Lcrypt_end + st1 {v2.16b}, [x1], #16 .Lcrypt_end: - ret; -SYM_FUNC_END(sm4_neon_crypt_blk8) + ret +SYM_FUNC_END(sm4_neon_crypt) .align 3 -SYM_FUNC_START(sm4_neon_cbc_dec_blk8) +SYM_FUNC_START(sm4_neon_cbc_dec) /* input: * x0: round key array, CTX * x1: dst * x2: src * x3: iv (big endian, 128 bit) - * w4: nblocks (multiples of 8) + * w4: nblocks */ - PREPARE; + SM4_PREPARE() + + ld1 {RIV.16b}, 
[x3] + +.Lcbc_dec_loop_8x: + sub w4, w4, #8 + tbnz w4, #31, .Lcbc_dec_4x + + ld4 {v0.4s-v3.4s}, [x2], #64 + ld4 {v4.4s-v7.4s}, [x2] + + SM4_CRYPT_BLK8_norotate(v0, v1, v2, v3, v4, v5, v6, v7) + + /* Avoid overwriting the RIV register */ + rotate_clockwise_4x4(v0, v1, v2, v3) + rotate_clockwise_4x4(v4, v5, v6, v7) + + sub x2, x2, #64 + + eor v0.16b, v0.16b, RIV.16b - ld1 {RIV.16b}, [x3]; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64 + ld1 {RTMP4.16b-RTMP7.16b}, [x2], #64 -.Lcbc_loop_blk: - subs w4, w4, #8; - bmi .Lcbc_end; + eor v1.16b, v1.16b, RTMP0.16b + eor v2.16b, v2.16b, RTMP1.16b + eor v3.16b, v3.16b, RTMP2.16b + eor v4.16b, v4.16b, RTMP3.16b + eor v5.16b, v5.16b, RTMP4.16b + eor v6.16b, v6.16b, RTMP5.16b + eor v7.16b, v7.16b, RTMP6.16b - ld1 {v0.16b-v3.16b}, [x2], #64; - ld1 {v4.16b-v7.16b}, [x2]; + mov RIV.16b, RTMP7.16b - SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + st1 {v0.16b-v3.16b}, [x1], #64 + st1 {v4.16b-v7.16b}, [x1], #64 - sub x2, x2, #64; - eor v0.16b, v0.16b, RIV.16b; - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v1.16b, v1.16b, RTMP0.16b; - eor v2.16b, v2.16b, RTMP1.16b; - eor v3.16b, v3.16b, RTMP2.16b; - st1 {v0.16b-v3.16b}, [x1], #64; + cbz w4, .Lcbc_dec_end + b .Lcbc_dec_loop_8x - eor v4.16b, v4.16b, RTMP3.16b; - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v5.16b, v5.16b, RTMP0.16b; - eor v6.16b, v6.16b, RTMP1.16b; - eor v7.16b, v7.16b, RTMP2.16b; +.Lcbc_dec_4x: + add w4, w4, #8 + cmp w4, #4 + blt .Lcbc_dec_tail - mov RIV.16b, RTMP3.16b; - st1 {v4.16b-v7.16b}, [x1], #64; + sub w4, w4, #4 - b .Lcbc_loop_blk; + ld1 {v0.16b-v3.16b}, [x2], #64 -.Lcbc_end: + rev32 v4.16b, v0.16b + rev32 v5.16b, v1.16b + rev32 v6.16b, v2.16b + rev32 v7.16b, v3.16b + + transpose_4x4(v4, v5, v6, v7) + + SM4_CRYPT_BLK4_BE(v4, v5, v6, v7) + + eor v4.16b, v4.16b, RIV.16b + eor v5.16b, v5.16b, v0.16b + eor v6.16b, v6.16b, v1.16b + eor v7.16b, v7.16b, v2.16b + + mov RIV.16b, v3.16b + + st1 {v4.16b-v7.16b}, [x1], #64 + + cbz w4, .Lcbc_dec_end + +.Lcbc_dec_tail: + cmp w4, #2 + ld1 {v0.16b}, [x2], #16 + blt .Lcbc_dec_tail_load_done + ld1 {v1.16b}, [x2], #16 + beq .Lcbc_dec_tail_load_done + ld1 {v2.16b}, [x2], #16 + +.Lcbc_dec_tail_load_done: + rev32 v4.16b, v0.16b + rev32 v5.16b, v1.16b + rev32 v6.16b, v2.16b + + transpose_4x4(v4, v5, v6, v7) + + SM4_CRYPT_BLK4_BE(v4, v5, v6, v7) + + cmp w4, #2 + eor v4.16b, v4.16b, RIV.16b + mov RIV.16b, v0.16b + st1 {v4.16b}, [x1], #16 + blt .Lcbc_dec_end + + eor v5.16b, v5.16b, v0.16b + mov RIV.16b, v1.16b + st1 {v5.16b}, [x1], #16 + beq .Lcbc_dec_end + + eor v6.16b, v6.16b, v1.16b + mov RIV.16b, v2.16b + st1 {v6.16b}, [x1], #16 + +.Lcbc_dec_end: /* store new IV */ - st1 {RIV.16b}, [x3]; + st1 {RIV.16b}, [x3] - ret; -SYM_FUNC_END(sm4_neon_cbc_dec_blk8) + ret +SYM_FUNC_END(sm4_neon_cbc_dec) .align 3 -SYM_FUNC_START(sm4_neon_cfb_dec_blk8) +SYM_FUNC_START(sm4_neon_cfb_dec) /* input: * x0: round key array, CTX * x1: dst * x2: src * x3: iv (big endian, 128 bit) - * w4: nblocks (multiples of 8) + * w4: nblocks */ - PREPARE; + SM4_PREPARE() + + ld1 {v0.16b}, [x3] + +.Lcfb_dec_loop_8x: + sub w4, w4, #8 + tbnz w4, #31, .Lcfb_dec_4x + + ld1 {v1.16b-v3.16b}, [x2], #48 + ld4 {v4.4s-v7.4s}, [x2] + + transpose_4x4(v0, v1, v2, v3) + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7) + + sub x2, x2, #48 + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64 + ld1 {RTMP4.16b-RTMP7.16b}, [x2], #64 + + eor v0.16b, v0.16b, RTMP0.16b + eor v1.16b, v1.16b, RTMP1.16b + eor v2.16b, v2.16b, RTMP2.16b + eor v3.16b, v3.16b, RTMP3.16b + eor v4.16b, v4.16b, RTMP4.16b + eor v5.16b, v5.16b, RTMP5.16b + eor v6.16b, 
v6.16b, RTMP6.16b + eor v7.16b, v7.16b, RTMP7.16b + + st1 {v0.16b-v3.16b}, [x1], #64 + st1 {v4.16b-v7.16b}, [x1], #64 + + mov v0.16b, RTMP7.16b + + cbz w4, .Lcfb_dec_end + b .Lcfb_dec_loop_8x + +.Lcfb_dec_4x: + add w4, w4, #8 + cmp w4, #4 + blt .Lcfb_dec_tail + + sub w4, w4, #4 + + ld1 {v4.16b-v7.16b}, [x2], #64 + + rev32 v0.16b, v0.16b /* v0 is IV register */ + rev32 v1.16b, v4.16b + rev32 v2.16b, v5.16b + rev32 v3.16b, v6.16b + + transpose_4x4(v0, v1, v2, v3) + + SM4_CRYPT_BLK4_BE(v0, v1, v2, v3) - ld1 {v0.16b}, [x3]; + eor v0.16b, v0.16b, v4.16b + eor v1.16b, v1.16b, v5.16b + eor v2.16b, v2.16b, v6.16b + eor v3.16b, v3.16b, v7.16b -.Lcfb_loop_blk: - subs w4, w4, #8; - bmi .Lcfb_end; + st1 {v0.16b-v3.16b}, [x1], #64 - ld1 {v1.16b, v2.16b, v3.16b}, [x2], #48; - ld1 {v4.16b-v7.16b}, [x2]; + mov v0.16b, v7.16b - SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + cbz w4, .Lcfb_dec_end - sub x2, x2, #48; - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v0.16b, v0.16b, RTMP0.16b; - eor v1.16b, v1.16b, RTMP1.16b; - eor v2.16b, v2.16b, RTMP2.16b; - eor v3.16b, v3.16b, RTMP3.16b; - st1 {v0.16b-v3.16b}, [x1], #64; +.Lcfb_dec_tail: + cmp w4, #2 + ld1 {v4.16b}, [x2], #16 + blt .Lcfb_dec_tail_load_done + ld1 {v5.16b}, [x2], #16 + beq .Lcfb_dec_tail_load_done + ld1 {v6.16b}, [x2], #16 - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v4.16b, v4.16b, RTMP0.16b; - eor v5.16b, v5.16b, RTMP1.16b; - eor v6.16b, v6.16b, RTMP2.16b; - eor v7.16b, v7.16b, RTMP3.16b; - st1 {v4.16b-v7.16b}, [x1], #64; +.Lcfb_dec_tail_load_done: + rev32 v0.16b, v0.16b /* v0 is IV register */ + rev32 v1.16b, v4.16b + rev32 v2.16b, v5.16b - mov v0.16b, RTMP3.16b; + transpose_4x4(v0, v1, v2, v3) - b .Lcfb_loop_blk; + SM4_CRYPT_BLK4_BE(v0, v1, v2, v3) -.Lcfb_end: + cmp w4, #2 + eor v0.16b, v0.16b, v4.16b + st1 {v0.16b}, [x1], #16 + mov v0.16b, v4.16b + blt .Lcfb_dec_end + + eor v1.16b, v1.16b, v5.16b + st1 {v1.16b}, [x1], #16 + mov v0.16b, v5.16b + beq .Lcfb_dec_end + + eor v2.16b, v2.16b, v6.16b + st1 {v2.16b}, [x1], #16 + mov v0.16b, v6.16b + +.Lcfb_dec_end: /* store new IV */ - st1 {v0.16b}, [x3]; + st1 {v0.16b}, [x3] - ret; -SYM_FUNC_END(sm4_neon_cfb_dec_blk8) + ret +SYM_FUNC_END(sm4_neon_cfb_dec) .align 3 -SYM_FUNC_START(sm4_neon_ctr_enc_blk8) +SYM_FUNC_START(sm4_neon_ctr_crypt) /* input: * x0: round key array, CTX * x1: dst * x2: src * x3: ctr (big endian, 128 bit) - * w4: nblocks (multiples of 8) + * w4: nblocks */ - PREPARE; + SM4_PREPARE() - ldp x7, x8, [x3]; - rev x7, x7; - rev x8, x8; + ldp x7, x8, [x3] + rev x7, x7 + rev x8, x8 -.Lctr_loop_blk: - subs w4, w4, #8; - bmi .Lctr_end; +.Lctr_crypt_loop_8x: + sub w4, w4, #8 + tbnz w4, #31, .Lctr_crypt_4x -#define inc_le128(vctr) \ - mov vctr.d[1], x8; \ - mov vctr.d[0], x7; \ - adds x8, x8, #1; \ - adc x7, x7, xzr; \ - rev64 vctr.16b, vctr.16b; +#define inc_le128(vctr) \ + mov vctr.d[1], x8; \ + mov vctr.d[0], x7; \ + adds x8, x8, #1; \ + rev64 vctr.16b, vctr.16b; \ + adc x7, x7, xzr; /* construct CTRs */ - inc_le128(v0); /* +0 */ - inc_le128(v1); /* +1 */ - inc_le128(v2); /* +2 */ - inc_le128(v3); /* +3 */ - inc_le128(v4); /* +4 */ - inc_le128(v5); /* +5 */ - inc_le128(v6); /* +6 */ - inc_le128(v7); /* +7 */ - - SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); - - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v0.16b, v0.16b, RTMP0.16b; - eor v1.16b, v1.16b, RTMP1.16b; - eor v2.16b, v2.16b, RTMP2.16b; - eor v3.16b, v3.16b, RTMP3.16b; - st1 {v0.16b-v3.16b}, [x1], #64; - - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; - eor v4.16b, v4.16b, RTMP0.16b; - eor v5.16b, v5.16b, RTMP1.16b; - eor v6.16b, 
v6.16b, RTMP2.16b; - eor v7.16b, v7.16b, RTMP3.16b; - st1 {v4.16b-v7.16b}, [x1], #64; - - b .Lctr_loop_blk; - -.Lctr_end: + inc_le128(v0) /* +0 */ + inc_le128(v1) /* +1 */ + inc_le128(v2) /* +2 */ + inc_le128(v3) /* +3 */ + inc_le128(v4) /* +4 */ + inc_le128(v5) /* +5 */ + inc_le128(v6) /* +6 */ + inc_le128(v7) /* +7 */ + + transpose_4x4_2x(v0, v1, v2, v3, v4, v5, v6, v7) + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7) + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64 + ld1 {RTMP4.16b-RTMP7.16b}, [x2], #64 + + eor v0.16b, v0.16b, RTMP0.16b + eor v1.16b, v1.16b, RTMP1.16b + eor v2.16b, v2.16b, RTMP2.16b + eor v3.16b, v3.16b, RTMP3.16b + eor v4.16b, v4.16b, RTMP4.16b + eor v5.16b, v5.16b, RTMP5.16b + eor v6.16b, v6.16b, RTMP6.16b + eor v7.16b, v7.16b, RTMP7.16b + + st1 {v0.16b-v3.16b}, [x1], #64 + st1 {v4.16b-v7.16b}, [x1], #64 + + cbz w4, .Lctr_crypt_end + b .Lctr_crypt_loop_8x + +.Lctr_crypt_4x: + add w4, w4, #8 + cmp w4, #4 + blt .Lctr_crypt_tail + + sub w4, w4, #4 + + /* construct CTRs */ + inc_le128(v0) /* +0 */ + inc_le128(v1) /* +1 */ + inc_le128(v2) /* +2 */ + inc_le128(v3) /* +3 */ + + ld1 {v4.16b-v7.16b}, [x2], #64 + + transpose_4x4(v0, v1, v2, v3) + + SM4_CRYPT_BLK4(v0, v1, v2, v3) + + eor v0.16b, v0.16b, v4.16b + eor v1.16b, v1.16b, v5.16b + eor v2.16b, v2.16b, v6.16b + eor v3.16b, v3.16b, v7.16b + + st1 {v0.16b-v3.16b}, [x1], #64 + + cbz w4, .Lctr_crypt_end + +.Lctr_crypt_tail: + /* inc_le128 will change the sign bit */ + ld1 {v4.16b}, [x2], #16 + inc_le128(v0) + cmp w4, #2 + blt .Lctr_crypt_tail_load_done + + ld1 {v5.16b}, [x2], #16 + inc_le128(v1) + cmp w4, #2 + beq .Lctr_crypt_tail_load_done + + ld1 {v6.16b}, [x2], #16 + inc_le128(v2) + +.Lctr_crypt_tail_load_done: + transpose_4x4(v0, v1, v2, v3) + + SM4_CRYPT_BLK4(v0, v1, v2, v3) + + cmp w4, #2 + + eor v0.16b, v0.16b, v4.16b + st1 {v0.16b}, [x1], #16 + blt .Lctr_crypt_end + + eor v1.16b, v1.16b, v5.16b + st1 {v1.16b}, [x1], #16 + beq .Lctr_crypt_end + + eor v2.16b, v2.16b, v6.16b + st1 {v2.16b}, [x1], #16 + +.Lctr_crypt_end: /* store new CTR */ - rev x7, x7; - rev x8, x8; - stp x7, x8, [x3]; + rev x7, x7 + rev x8, x8 + stp x7, x8, [x3] - ret; -SYM_FUNC_END(sm4_neon_ctr_enc_blk8) + ret +SYM_FUNC_END(sm4_neon_ctr_crypt) diff --git a/arch/arm64/crypto/sm4-neon-glue.c b/arch/arm64/crypto/sm4-neon-glue.c index 03a6a6866a3112f0dc10dc2fc119f7d53c867a1d..7b19accf5c0374d2db5eee4b1ebe3632cd3c8410 100644 --- a/arch/arm64/crypto/sm4-neon-glue.c +++ b/arch/arm64/crypto/sm4-neon-glue.c @@ -18,19 +18,14 @@ #include #include -#define BYTES2BLKS(nbytes) ((nbytes) >> 4) -#define BYTES2BLK8(nbytes) (((nbytes) >> 4) & ~(8 - 1)) - -asmlinkage void sm4_neon_crypt_blk1_8(const u32 *rkey, u8 *dst, const u8 *src, - unsigned int nblks); -asmlinkage void sm4_neon_crypt_blk8(const u32 *rkey, u8 *dst, const u8 *src, - unsigned int nblks); -asmlinkage void sm4_neon_cbc_dec_blk8(const u32 *rkey, u8 *dst, const u8 *src, - u8 *iv, unsigned int nblks); -asmlinkage void sm4_neon_cfb_dec_blk8(const u32 *rkey, u8 *dst, const u8 *src, - u8 *iv, unsigned int nblks); -asmlinkage void sm4_neon_ctr_enc_blk8(const u32 *rkey, u8 *dst, const u8 *src, - u8 *iv, unsigned int nblks); +asmlinkage void sm4_neon_crypt(const u32 *rkey, u8 *dst, const u8 *src, + unsigned int nblocks); +asmlinkage void sm4_neon_cbc_dec(const u32 *rkey_dec, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblocks); +asmlinkage void sm4_neon_cfb_dec(const u32 *rkey_enc, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblocks); +asmlinkage void sm4_neon_ctr_crypt(const u32 *rkey_enc, u8 *dst, const u8 
*src, + u8 *iv, unsigned int nblocks); static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int key_len) @@ -51,27 +46,18 @@ static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey) while ((nbytes = walk.nbytes) > 0) { const u8 *src = walk.src.virt.addr; u8 *dst = walk.dst.virt.addr; - unsigned int nblks; + unsigned int nblocks; - kernel_neon_begin(); + nblocks = nbytes / SM4_BLOCK_SIZE; + if (nblocks) { + kernel_neon_begin(); - nblks = BYTES2BLK8(nbytes); - if (nblks) { - sm4_neon_crypt_blk8(rkey, dst, src, nblks); - dst += nblks * SM4_BLOCK_SIZE; - src += nblks * SM4_BLOCK_SIZE; - nbytes -= nblks * SM4_BLOCK_SIZE; - } + sm4_neon_crypt(rkey, dst, src, nblocks); - nblks = BYTES2BLKS(nbytes); - if (nblks) { - sm4_neon_crypt_blk1_8(rkey, dst, src, nblks); - nbytes -= nblks * SM4_BLOCK_SIZE; + kernel_neon_end(); } - kernel_neon_end(); - - err = skcipher_walk_done(&walk, nbytes); + err = skcipher_walk_done(&walk, nbytes % SM4_BLOCK_SIZE); } return err; @@ -138,48 +124,19 @@ static int sm4_cbc_decrypt(struct skcipher_request *req) while ((nbytes = walk.nbytes) > 0) { const u8 *src = walk.src.virt.addr; u8 *dst = walk.dst.virt.addr; - unsigned int nblks; + unsigned int nblocks; - kernel_neon_begin(); + nblocks = nbytes / SM4_BLOCK_SIZE; + if (nblocks) { + kernel_neon_begin(); - nblks = BYTES2BLK8(nbytes); - if (nblks) { - sm4_neon_cbc_dec_blk8(ctx->rkey_dec, dst, src, - walk.iv, nblks); - dst += nblks * SM4_BLOCK_SIZE; - src += nblks * SM4_BLOCK_SIZE; - nbytes -= nblks * SM4_BLOCK_SIZE; - } + sm4_neon_cbc_dec(ctx->rkey_dec, dst, src, + walk.iv, nblocks); - nblks = BYTES2BLKS(nbytes); - if (nblks) { - u8 keystream[SM4_BLOCK_SIZE * 8]; - u8 iv[SM4_BLOCK_SIZE]; - int i; - - sm4_neon_crypt_blk1_8(ctx->rkey_dec, keystream, - src, nblks); - - src += ((int)nblks - 2) * SM4_BLOCK_SIZE; - dst += (nblks - 1) * SM4_BLOCK_SIZE; - memcpy(iv, src + SM4_BLOCK_SIZE, SM4_BLOCK_SIZE); - - for (i = nblks - 1; i > 0; i--) { - crypto_xor_cpy(dst, src, - &keystream[i * SM4_BLOCK_SIZE], - SM4_BLOCK_SIZE); - src -= SM4_BLOCK_SIZE; - dst -= SM4_BLOCK_SIZE; - } - crypto_xor_cpy(dst, walk.iv, - keystream, SM4_BLOCK_SIZE); - memcpy(walk.iv, iv, SM4_BLOCK_SIZE); - nbytes -= nblks * SM4_BLOCK_SIZE; + kernel_neon_end(); } - kernel_neon_end(); - - err = skcipher_walk_done(&walk, nbytes); + err = skcipher_walk_done(&walk, nbytes % SM4_BLOCK_SIZE); } return err; @@ -238,41 +195,21 @@ static int sm4_cfb_decrypt(struct skcipher_request *req) while ((nbytes = walk.nbytes) > 0) { const u8 *src = walk.src.virt.addr; u8 *dst = walk.dst.virt.addr; - unsigned int nblks; + unsigned int nblocks; - kernel_neon_begin(); + nblocks = nbytes / SM4_BLOCK_SIZE; + if (nblocks) { + kernel_neon_begin(); - nblks = BYTES2BLK8(nbytes); - if (nblks) { - sm4_neon_cfb_dec_blk8(ctx->rkey_enc, dst, src, - walk.iv, nblks); - dst += nblks * SM4_BLOCK_SIZE; - src += nblks * SM4_BLOCK_SIZE; - nbytes -= nblks * SM4_BLOCK_SIZE; - } + sm4_neon_cfb_dec(ctx->rkey_enc, dst, src, + walk.iv, nblocks); - nblks = BYTES2BLKS(nbytes); - if (nblks) { - u8 keystream[SM4_BLOCK_SIZE * 8]; - - memcpy(keystream, walk.iv, SM4_BLOCK_SIZE); - if (nblks > 1) - memcpy(&keystream[SM4_BLOCK_SIZE], src, - (nblks - 1) * SM4_BLOCK_SIZE); - memcpy(walk.iv, src + (nblks - 1) * SM4_BLOCK_SIZE, - SM4_BLOCK_SIZE); - - sm4_neon_crypt_blk1_8(ctx->rkey_enc, keystream, - keystream, nblks); - - crypto_xor_cpy(dst, src, keystream, - nblks * SM4_BLOCK_SIZE); - dst += nblks * SM4_BLOCK_SIZE; - src += nblks * SM4_BLOCK_SIZE; - nbytes -= nblks * SM4_BLOCK_SIZE; - 
} + kernel_neon_end(); - kernel_neon_end(); + dst += nblocks * SM4_BLOCK_SIZE; + src += nblocks * SM4_BLOCK_SIZE; + nbytes -= nblocks * SM4_BLOCK_SIZE; + } /* tail */ if (walk.nbytes == walk.total && nbytes > 0) { @@ -302,40 +239,21 @@ static int sm4_ctr_crypt(struct skcipher_request *req) while ((nbytes = walk.nbytes) > 0) { const u8 *src = walk.src.virt.addr; u8 *dst = walk.dst.virt.addr; - unsigned int nblks; + unsigned int nblocks; - kernel_neon_begin(); + nblocks = nbytes / SM4_BLOCK_SIZE; + if (nblocks) { + kernel_neon_begin(); - nblks = BYTES2BLK8(nbytes); - if (nblks) { - sm4_neon_ctr_enc_blk8(ctx->rkey_enc, dst, src, - walk.iv, nblks); - dst += nblks * SM4_BLOCK_SIZE; - src += nblks * SM4_BLOCK_SIZE; - nbytes -= nblks * SM4_BLOCK_SIZE; - } + sm4_neon_ctr_crypt(ctx->rkey_enc, dst, src, + walk.iv, nblocks); - nblks = BYTES2BLKS(nbytes); - if (nblks) { - u8 keystream[SM4_BLOCK_SIZE * 8]; - int i; - - for (i = 0; i < nblks; i++) { - memcpy(&keystream[i * SM4_BLOCK_SIZE], - walk.iv, SM4_BLOCK_SIZE); - crypto_inc(walk.iv, SM4_BLOCK_SIZE); - } - sm4_neon_crypt_blk1_8(ctx->rkey_enc, keystream, - keystream, nblks); - - crypto_xor_cpy(dst, src, keystream, - nblks * SM4_BLOCK_SIZE); - dst += nblks * SM4_BLOCK_SIZE; - src += nblks * SM4_BLOCK_SIZE; - nbytes -= nblks * SM4_BLOCK_SIZE; - } + kernel_neon_end(); - kernel_neon_end(); + dst += nblocks * SM4_BLOCK_SIZE; + src += nblocks * SM4_BLOCK_SIZE; + nbytes -= nblocks * SM4_BLOCK_SIZE; + } /* tail */ if (walk.nbytes == walk.total && nbytes > 0) { diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h index 6a4a1ab8eb238f148177a30e2c32f7f912e8bd31..d20f5da2d76fa3ef876e99b14c28705da0d0ace1 100644 --- a/arch/arm64/include/asm/hugetlb.h +++ b/arch/arm64/include/asm/hugetlb.h @@ -49,15 +49,6 @@ extern pte_t huge_ptep_get(pte_t *ptep); void __init arm64_hugetlb_cma_reserve(void); -#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start -extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep); - -#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit -extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep, - pte_t old_pte, pte_t new_pte); - #include #endif /* __ASM_HUGETLB_H */ diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 8aa8492dafc0f43c4ff6b5d4fa93315ef3f4bc1a..0df3fc3a017371cf402efc50c32fe326a4e14711 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -135,7 +135,7 @@ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are * not known to exist and will break with this configuration. * - * The VTCR_EL2 is configured per VM and is initialised in kvm_arm_setup_stage2(). + * The VTCR_EL2 is configured per VM and is initialised in kvm_init_stage2_mmu. * * Note that when using 4K pages, we concatenate two first level page tables * together. With 16K pages, we concatenate 16 first level page tables. @@ -340,9 +340,13 @@ * We have * PAR [PA_Shift - 1 : 12] = PA [PA_Shift - 1 : 12] * HPFAR [PA_Shift - 9 : 4] = FIPA [PA_Shift - 1 : 12] + * + * Always assume 52 bit PA since at this point, we don't know how many PA bits + * the page table has been set up for. This should be safe since unused address + * bits in PAR are res0. 
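+ * + * For example, with PA_Shift == 48 the rule above gives HPFAR[39:4] = + * FIPA[47:12]; masking PAR[51:12] rather than PAR[47:12] is then harmless, + * as the extra PAR[51:48] bits read as zero on such a system.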
*/ #define PAR_TO_HPFAR(par) \ - (((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8) + (((par) & GENMASK_ULL(52 - 1, 12)) >> 8) #define ECN(x) { ESR_ELx_EC_##x, #x } diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 53035763e48e891e6e2c0ff43c740c8a26e3d3af..43c3bc0f9544d9a3f477427315d07e6d17fafb94 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -76,6 +76,9 @@ enum __kvm_host_smccc_func { __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs, __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs, __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps, + __KVM_HOST_SMCCC_FUNC___pkvm_init_vm, + __KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu, + __KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm, }; #define DECLARE_KVM_VHE_SYM(sym) extern char sym[] @@ -106,7 +109,7 @@ enum __kvm_host_smccc_func { #define per_cpu_ptr_nvhe_sym(sym, cpu) \ ({ \ unsigned long base, off; \ - base = kvm_arm_hyp_percpu_base[cpu]; \ + base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu]; \ off = (unsigned long)&CHOOSE_NVHE_SYM(sym) - \ (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start); \ base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL; \ @@ -211,7 +214,7 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector); #define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init) #define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector) -extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS]; +extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[]; DECLARE_KVM_NVHE_SYM(__per_cpu_start); DECLARE_KVM_NVHE_SYM(__per_cpu_end); diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index fd34ab155d0b7c599613b848b4fed6cff8542a00..35a159d131b5f862c16b52842ef6b0ba8088fb00 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -73,6 +73,63 @@ u32 __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu); +struct kvm_hyp_memcache { + phys_addr_t head; + unsigned long nr_pages; +}; + +static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc, + phys_addr_t *p, + phys_addr_t (*to_pa)(void *virt)) +{ + *p = mc->head; + mc->head = to_pa(p); + mc->nr_pages++; +} + +static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc, + void *(*to_va)(phys_addr_t phys)) +{ + phys_addr_t *p = to_va(mc->head); + + if (!mc->nr_pages) + return NULL; + + mc->head = *p; + mc->nr_pages--; + + return p; +} + +static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc, + unsigned long min_pages, + void *(*alloc_fn)(void *arg), + phys_addr_t (*to_pa)(void *virt), + void *arg) +{ + while (mc->nr_pages < min_pages) { + phys_addr_t *p = alloc_fn(arg); + + if (!p) + return -ENOMEM; + push_hyp_memcache(mc, p, to_pa); + } + + return 0; +} + +static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc, + void (*free_fn)(void *virt, void *arg), + void *(*to_va)(phys_addr_t phys), + void *arg) +{ + while (mc->nr_pages) + free_fn(pop_hyp_memcache(mc, to_va), arg); +} + +void free_hyp_memcache(struct kvm_hyp_memcache *mc); +int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages); + struct kvm_vmid { atomic64_t id; }; @@ -115,6 +172,13 @@ struct kvm_smccc_features { unsigned long vendor_hyp_bmap; }; +typedef unsigned int pkvm_handle_t; + +struct kvm_protected_vm { + pkvm_handle_t handle; + struct kvm_hyp_memcache teardown_mc; +}; + struct kvm_arch { struct kvm_s2_mmu mmu; @@ -163,9 +227,19 @@ struct kvm_arch { u8 pfr0_csv2; u8 pfr0_csv3; + struct { + u8 imp:4; + u8 
unimp:4; + } dfr0_pmuver; /* Hypercall features firmware registers' descriptor */ struct kvm_smccc_features smccc_feat; + + /* + * For an untrusted host VM, 'pkvm.handle' is used to look up + * the associated pKVM instance in the hypervisor. + */ + struct kvm_protected_vm pkvm; }; struct kvm_vcpu_fault_info { @@ -925,8 +999,6 @@ int kvm_set_ipa_limit(void); #define __KVM_HAVE_ARCH_VM_ALLOC struct kvm *kvm_arch_alloc_vm(void); -int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type); - static inline bool kvm_vm_is_protected(struct kvm *kvm) { return false; diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index aa7fa2a08f0604af5b25f2eb0f334f4a716b4431..6797eafe7890b596ec97b65beba1a5312143a2f9 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -123,4 +123,7 @@ extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val); extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val); extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val); +extern unsigned long kvm_nvhe_sym(__icache_flags); +extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits); + #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 7784081088e78f84c7aad3aaf9553a276a6d1b8a..e4a7e636949983e7331289ba519c6fefcb594cca 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -166,7 +166,7 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, void free_hyp_pgds(void); void stage2_unmap_vm(struct kvm *kvm); -int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu); +int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type); void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu); int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, phys_addr_t pa, unsigned long size, bool writable); diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 3252eb50ecfe59e2df67caf19d4bf790eb22a4b8..63f81b27a4e302cede0605430ca0759d308f6f17 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -42,6 +42,8 @@ typedef u64 kvm_pte_t; #define KVM_PTE_ADDR_MASK GENMASK(47, PAGE_SHIFT) #define KVM_PTE_ADDR_51_48 GENMASK(15, 12) +#define KVM_PHYS_INVALID (-1ULL) + static inline bool kvm_pte_valid(kvm_pte_t pte) { return pte & KVM_PTE_VALID; @@ -57,6 +59,18 @@ static inline u64 kvm_pte_to_phys(kvm_pte_t pte) return pa; } +static inline kvm_pte_t kvm_phys_to_pte(u64 pa) +{ + kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK; + + if (PAGE_SHIFT == 16) { + pa &= GENMASK(51, 48); + pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48); + } + + return pte; +} + static inline u64 kvm_granule_shift(u32 level) { /* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */ @@ -85,6 +99,8 @@ static inline bool kvm_level_supports_block_mapping(u32 level) * allocation is physically contiguous. * @free_pages_exact: Free an exact number of memory pages previously * allocated by zalloc_pages_exact. + * @free_removed_table: Free a removed paging structure by unlinking and + * dropping references. * @get_page: Increment the refcount on a page. * @put_page: Decrement the refcount on a page.
When the * refcount reaches 0 the page is automatically @@ -103,6 +119,7 @@ struct kvm_pgtable_mm_ops { void* (*zalloc_page)(void *arg); void* (*zalloc_pages_exact)(size_t size); void (*free_pages_exact)(void *addr, size_t size); + void (*free_removed_table)(void *addr, u32 level); void (*get_page)(void *addr); void (*put_page)(void *addr); int (*page_count)(void *addr); @@ -161,29 +178,6 @@ enum kvm_pgtable_prot { typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end, enum kvm_pgtable_prot prot); -/** - * struct kvm_pgtable - KVM page-table. - * @ia_bits: Maximum input address size, in bits. - * @start_level: Level at which the page-table walk starts. - * @pgd: Pointer to the first top-level entry of the page-table. - * @mm_ops: Memory management callbacks. - * @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables. - * @flags: Stage-2 page-table flags. - * @force_pte_cb: Function that returns true if page level mappings must - * be used instead of block mappings. - */ -struct kvm_pgtable { - u32 ia_bits; - u32 start_level; - kvm_pte_t *pgd; - struct kvm_pgtable_mm_ops *mm_ops; - - /* Stage-2 only */ - struct kvm_s2_mmu *mmu; - enum kvm_pgtable_stage2_flags flags; - kvm_pgtable_force_pte_cb_t force_pte_cb; -}; - /** * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk. * @KVM_PGTABLE_WALK_LEAF: Visit leaf entries, including invalid @@ -192,17 +186,34 @@ struct kvm_pgtable { * children. * @KVM_PGTABLE_WALK_TABLE_POST: Visit table entries after their * children. + * @KVM_PGTABLE_WALK_SHARED: Indicates the page-tables may be shared + * with other software walkers. */ enum kvm_pgtable_walk_flags { KVM_PGTABLE_WALK_LEAF = BIT(0), KVM_PGTABLE_WALK_TABLE_PRE = BIT(1), KVM_PGTABLE_WALK_TABLE_POST = BIT(2), + KVM_PGTABLE_WALK_SHARED = BIT(3), +}; + +struct kvm_pgtable_visit_ctx { + kvm_pte_t *ptep; + kvm_pte_t old; + void *arg; + struct kvm_pgtable_mm_ops *mm_ops; + u64 addr; + u64 end; + u32 level; + enum kvm_pgtable_walk_flags flags; }; -typedef int (*kvm_pgtable_visitor_fn_t)(u64 addr, u64 end, u32 level, - kvm_pte_t *ptep, - enum kvm_pgtable_walk_flags flag, - void * const arg); +typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit); + +static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx) +{ + return ctx->flags & KVM_PGTABLE_WALK_SHARED; +} /** * struct kvm_pgtable_walker - Hook into a page-table walk. @@ -217,6 +228,94 @@ struct kvm_pgtable_walker { const enum kvm_pgtable_walk_flags flags; }; +/* + * RCU cannot be used in a non-kernel context such as the hyp. As such, page + * table walkers used in hyp do not call into RCU and instead use other + * synchronization mechanisms (such as a spinlock). + */ +#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__) + +typedef kvm_pte_t *kvm_pteref_t; + +static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker, + kvm_pteref_t pteref) +{ + return pteref; +} + +static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker) +{ + /* + * Due to the lack of RCU (or a similar protection scheme), only + * non-shared table walkers are allowed in the hypervisor. 
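+ * Walkers that do need concurrency at EL2 must serialise through another + * mechanism instead, such as the spinlock mentioned in the comment above.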
+ */ + if (walker->flags & KVM_PGTABLE_WALK_SHARED) + return -EPERM; + + return 0; +} + +static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {} + +static inline bool kvm_pgtable_walk_lock_held(void) +{ + return true; +} + +#else + +typedef kvm_pte_t __rcu *kvm_pteref_t; + +static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker, + kvm_pteref_t pteref) +{ + return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED)); +} + +static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker) +{ + if (walker->flags & KVM_PGTABLE_WALK_SHARED) + rcu_read_lock(); + + return 0; +} + +static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) +{ + if (walker->flags & KVM_PGTABLE_WALK_SHARED) + rcu_read_unlock(); +} + +static inline bool kvm_pgtable_walk_lock_held(void) +{ + return rcu_read_lock_held(); +} + +#endif + +/** + * struct kvm_pgtable - KVM page-table. + * @ia_bits: Maximum input address size, in bits. + * @start_level: Level at which the page-table walk starts. + * @pgd: Pointer to the first top-level entry of the page-table. + * @mm_ops: Memory management callbacks. + * @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables. + * @flags: Stage-2 page-table flags. + * @force_pte_cb: Function that returns true if page level mappings must + * be used instead of block mappings. + */ +struct kvm_pgtable { + u32 ia_bits; + u32 start_level; + kvm_pteref_t pgd; + struct kvm_pgtable_mm_ops *mm_ops; + + /* Stage-2 only */ + struct kvm_s2_mmu *mmu; + enum kvm_pgtable_stage2_flags flags; + kvm_pgtable_force_pte_cb_t force_pte_cb; +}; + /** * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table. * @pgt: Uninitialised page-table structure to initialise. @@ -296,6 +395,14 @@ u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size); */ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift); +/** + * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD + * @vtcr: Content of the VTCR register. + * + * Return: the size (in bytes) of the stage-2 PGD + */ +size_t kvm_pgtable_stage2_pgd_size(u64 vtcr); + /** * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table. * @pgt: Uninitialised page-table structure to initialise. @@ -324,6 +431,17 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, */ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt); +/** + * kvm_pgtable_stage2_free_removed() - Free a removed stage-2 paging structure. + * @mm_ops: Memory management callbacks. + * @pgtable: Unlinked stage-2 paging structure to be freed. + * @level: Level of the stage-2 paging structure to be freed. + * + * The page-table is assumed to be unreachable by any hardware walkers prior to + * freeing and therefore no TLB invalidation is performed. + */ +void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level); + /** * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table. * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). @@ -333,6 +451,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt); * @prot: Permissions and attributes for the mapping. * @mc: Cache of pre-allocated and zeroed memory from which to allocate * page-table pages. + * @flags: Flags to control the page-table walk (ex. 
a shared walk) * * The offset of @addr within a page is ignored, @size is rounded-up to * the next page boundary and @phys is rounded-down to the previous page @@ -354,7 +473,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt); */ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot, - void *mc); + void *mc, enum kvm_pgtable_walk_flags flags); /** * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h index 9f4ad2a8df59c046e267e0b6ff0c3f788d11977b..01129b0d4c689e46c149b6d91dd5b3bce99f64d6 100644 --- a/arch/arm64/include/asm/kvm_pkvm.h +++ b/arch/arm64/include/asm/kvm_pkvm.h @@ -9,11 +9,49 @@ #include #include +/* Maximum number of VMs that can co-exist under pKVM. */ +#define KVM_MAX_PVMS 255 + #define HYP_MEMBLOCK_REGIONS 128 +int pkvm_init_host_vm(struct kvm *kvm); +int pkvm_create_hyp_vm(struct kvm *kvm); +void pkvm_destroy_hyp_vm(struct kvm *kvm); + extern struct memblock_region kvm_nvhe_sym(hyp_memory)[]; extern unsigned int kvm_nvhe_sym(hyp_memblock_nr); +static inline unsigned long +hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size) +{ + unsigned long nr_pages = reg->size >> PAGE_SHIFT; + unsigned long start, end; + + start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size; + end = start + nr_pages * vmemmap_entry_size; + start = ALIGN_DOWN(start, PAGE_SIZE); + end = ALIGN(end, PAGE_SIZE); + + return end - start; +} + +static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size) +{ + unsigned long res = 0, i; + + for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) { + res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i], + vmemmap_entry_size); + } + + return res >> PAGE_SHIFT; +} + +static inline unsigned long hyp_vm_table_pages(void) +{ + return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT; +} + static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages) { unsigned long total = 0, i; diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h index 760c62f8e22f84f95c48de4f1470d9cea67ca536..20dd06d70af5f39c05497d8b138fb15e1323851b 100644 --- a/arch/arm64/include/asm/mte.h +++ b/arch/arm64/include/asm/mte.h @@ -25,7 +25,7 @@ unsigned long mte_copy_tags_to_user(void __user *to, void *from, unsigned long n); int mte_save_tags(struct page *page); void mte_save_page_tags(const void *page_addr, void *tag_storage); -bool mte_restore_tags(swp_entry_t entry, struct page *page); +void mte_restore_tags(swp_entry_t entry, struct page *page); void mte_restore_page_tags(void *page_addr, const void *tag_storage); void mte_invalidate_tags(int type, pgoff_t offset); void mte_invalidate_tags_area(int type); @@ -36,6 +36,58 @@ void mte_free_tag_storage(char *storage); /* track which pages have valid allocation tags */ #define PG_mte_tagged PG_arch_2 +/* simple lock to avoid multiple threads tagging the same page */ +#define PG_mte_lock PG_arch_3 + +static inline void set_page_mte_tagged(struct page *page) +{ + /* + * Ensure that the tags written prior to this function are visible + * before the page flags update. + */ + smp_wmb(); + set_bit(PG_mte_tagged, &page->flags); +} + +static inline bool page_mte_tagged(struct page *page) +{ + bool ret = test_bit(PG_mte_tagged, &page->flags); + + /* + * If the page is tagged, ensure ordering with a likely subsequent + * read of the tags. 
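+ * The smp_rmb() below pairs with the smp_wmb() in set_page_mte_tagged().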
+ */ + if (ret) + smp_rmb(); + return ret; +} + +/* + * Lock the page for tagging and return 'true' if the page can be tagged, + * 'false' if already tagged. PG_mte_tagged is never cleared and therefore the + * locking only happens once for page initialisation. + * + * The page MTE lock state: + * + * Locked: PG_mte_lock && !PG_mte_tagged + * Unlocked: !PG_mte_lock || PG_mte_tagged + * + * Acquire semantics only if the page is tagged (returning 'false'). + */ +static inline bool try_page_mte_tagging(struct page *page) +{ + if (!test_and_set_bit(PG_mte_lock, &page->flags)) + return true; + + /* + * The tags are either being initialised or may have been initialised + * already. Check if the PG_mte_tagged flag has been set or wait + * otherwise. + */ + smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged)); + + return false; +} void mte_zero_clear_page_tags(void *addr); void mte_sync_tags(pte_t old_pte, pte_t pte); @@ -56,6 +108,17 @@ size_t mte_probe_user_range(const char __user *uaddr, size_t size); /* unused if !CONFIG_ARM64_MTE, silence the compiler */ #define PG_mte_tagged 0 +static inline void set_page_mte_tagged(struct page *page) +{ +} +static inline bool page_mte_tagged(struct page *page) +{ + return false; +} +static inline bool try_page_mte_tagging(struct page *page) +{ + return false; +} static inline void mte_zero_clear_page_tags(void *addr) { } diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index c36d56dbf940f726247a289c85fbe886316eff9a..b4bbeed80fb6ed1d37334a35b6ed49ce2bc0dcbc 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -1020,8 +1020,6 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, */ #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS) -extern int kern_addr_valid(unsigned long addr); - #ifdef CONFIG_ARM64_MTE #define __HAVE_ARCH_PREPARE_TO_SWAP @@ -1048,8 +1046,8 @@ static inline void arch_swap_invalidate_area(int type) #define __HAVE_ARCH_SWAP_RESTORE static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio) { - if (system_supports_mte() && mte_restore_tags(entry, &folio->page)) - set_bit(PG_mte_tagged, &folio->flags); + if (system_supports_mte()) + mte_restore_tags(entry, &folio->page); } #endif /* CONFIG_ARM64_MTE */ @@ -1095,15 +1093,6 @@ static inline bool pud_sect_supported(void) } -#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION -#define ptep_modify_prot_start ptep_modify_prot_start -extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep); - -#define ptep_modify_prot_commit ptep_modify_prot_commit -extern void ptep_modify_prot_commit(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep, - pte_t old_pte, pte_t new_pte); #endif /* !__ASSEMBLY__ */ #endif /* __ASM_PGTABLE_H */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 316917b9870704de245f002cb4261c46f8a2fea4..a7a857f1784d80d6264eeebecbeb371c7d13ba5b 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -43,6 +43,7 @@ #define __KVM_HAVE_VCPU_EVENTS #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_DIRTY_LOG_PAGE_OFFSET 64 #define KVM_REG_SIZE(id) \ (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 307faa2b4395ed9fcacdbf9f992111cd376dc601..89ac00084f38a4584924bd2da8e2e0e9c5341a07 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ 
b/arch/arm64/kernel/cpu_errata.c @@ -661,13 +661,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = { CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus), }, #endif -#ifdef CONFIG_ARM64_ERRATUM_2645198 - { - .desc = "ARM erratum 2645198", - .capability = ARM64_WORKAROUND_2645198, - ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715) - }, -#endif #ifdef CONFIG_ARM64_ERRATUM_2077057 { .desc = "ARM erratum 2077057", diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 7e76e1fda2a1f8ac62ed56b1fede327aa262c3e6..a77315b338e6113c4525ce8a24bae485ea869541 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2076,8 +2076,10 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) * Clear the tags in the zero page. This needs to be done via the * linear map which has the Tagged attribute. */ - if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags)) + if (try_page_mte_tagging(ZERO_PAGE(0))) { mte_clear_page_tags(lm_alias(empty_zero_page)); + set_page_mte_tagged(ZERO_PAGE(0)); + } kasan_init_hw_tags_cpu(); } diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c index 27ef7ad3ffd2e27a5c6fdc1e1a63c179df2e56e2..353009d7f307f40e982adf627a3ad9e408c80a50 100644 --- a/arch/arm64/kernel/elfcore.c +++ b/arch/arm64/kernel/elfcore.c @@ -47,7 +47,7 @@ static int mte_dump_tag_range(struct coredump_params *cprm, * Pages mapped in user space as !pte_access_permitted() (e.g. * PROT_EXEC only) may not have the PG_mte_tagged flag set. */ - if (!test_bit(PG_mte_tagged, &page->flags)) { + if (!page_mte_tagged(page)) { put_page(page); dump_skip(cprm, MTE_PAGE_TAG_STORAGE); continue; diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index af5df48ba915b9274211f66c33f4e61fcbe89a1e..788597a6b6a2f01ddfa7eae4c67802b3d50d359e 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -271,7 +271,7 @@ static int swsusp_mte_save_tags(void) if (!page) continue; - if (!test_bit(PG_mte_tagged, &page->flags)) + if (!page_mte_tagged(page)) continue; ret = save_tags(page, pfn); diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index f31130ba02331060fb394fb27e5d9b2cd75cc60e..d0e9bb5c91fccad6c22c16d7ab3e24559a85b2b1 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -63,12 +63,6 @@ KVM_NVHE_ALIAS(nvhe_hyp_panic_handler); /* Vectors installed by hyp-init on reset HVC. */ KVM_NVHE_ALIAS(__hyp_stub_vectors); -/* Kernel symbol used by icache_is_vpipt(). */ -KVM_NVHE_ALIAS(__icache_flags); - -/* VMID bits set by the KVM VMID allocator */ -KVM_NVHE_ALIAS(kvm_arm_vmid_bits); - /* Static keys which are set if a vGIC trap should be handled in hyp. */ KVM_NVHE_ALIAS(vgic_v2_cpuif_trap); KVM_NVHE_ALIAS(vgic_v3_cpuif_trap); @@ -84,9 +78,6 @@ KVM_NVHE_ALIAS(gic_nonsecure_priorities); KVM_NVHE_ALIAS(__start___kvm_ex_table); KVM_NVHE_ALIAS(__stop___kvm_ex_table); -/* Array containing bases of nVHE per-CPU memory regions. 
*/ -KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base); - /* PMU available static key */ #ifdef CONFIG_HW_PERF_EVENTS KVM_NVHE_ALIAS(kvm_arm_pmu_available); @@ -103,12 +94,6 @@ KVM_NVHE_ALIAS_HYP(__memcpy, __pi_memcpy); KVM_NVHE_ALIAS_HYP(__memset, __pi_memset); #endif -/* Kernel memory sections */ -KVM_NVHE_ALIAS(__start_rodata); -KVM_NVHE_ALIAS(__end_rodata); -KVM_NVHE_ALIAS(__bss_start); -KVM_NVHE_ALIAS(__bss_stop); - /* Hyp memory sections */ KVM_NVHE_ALIAS(__hyp_idmap_text_start); KVM_NVHE_ALIAS(__hyp_idmap_text_end); diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 7467217c1eaf372d19a0eed8398e2e89e04f44c7..f5bcb0dc6267385e3401ed2735429b45af56e8e7 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -41,19 +41,17 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte, if (check_swap && is_swap_pte(old_pte)) { swp_entry_t entry = pte_to_swp_entry(old_pte); - if (!non_swap_entry(entry) && mte_restore_tags(entry, page)) - return; + if (!non_swap_entry(entry)) + mte_restore_tags(entry, page); } if (!pte_is_tagged) return; - /* - * Test PG_mte_tagged again in case it was racing with another - * set_pte_at(). - */ - if (!test_and_set_bit(PG_mte_tagged, &page->flags)) + if (try_page_mte_tagging(page)) { mte_clear_page_tags(page_address(page)); + set_page_mte_tagged(page); + } } void mte_sync_tags(pte_t old_pte, pte_t pte) @@ -69,9 +67,11 @@ void mte_sync_tags(pte_t old_pte, pte_t pte) /* if PG_mte_tagged is set, tags have already been initialised */ for (i = 0; i < nr_pages; i++, page++) { - if (!test_bit(PG_mte_tagged, &page->flags)) + if (!page_mte_tagged(page)) { mte_sync_page_tags(page, old_pte, check_swap, pte_is_tagged); + set_page_mte_tagged(page); + } } /* ensure the tags are visible before the PTE is set */ @@ -96,8 +96,7 @@ int memcmp_pages(struct page *page1, struct page *page2) * pages is tagged, set_pte_at() may zero or change the tags of the * other page via mte_sync_tags(). 
*/ - if (test_bit(PG_mte_tagged, &page1->flags) || - test_bit(PG_mte_tagged, &page2->flags)) + if (page_mte_tagged(page1) || page_mte_tagged(page2)) return addr1 != addr2; return ret; @@ -454,7 +453,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr, put_page(page); break; } - WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags)); + WARN_ON_ONCE(!page_mte_tagged(page)); /* limit access to the end of the page */ offset = offset_in_page(addr); diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index 619e2dc7ee14cce1b906c1962c5429194260ad80..beaf9586338f5c10569764a9728209289a06756c 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -27,7 +27,7 @@ ldflags-y := -shared -soname=linux-vdso.so.1 --hash-style=sysv \ -Bsymbolic --build-id=sha1 -n $(btildflags-y) ifdef CONFIG_LD_ORPHAN_WARN - ldflags-y += --orphan-handling=warn + ldflags-y += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL) endif ldflags-y += -T diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 36c8f66cad251ac3711f73f5b94d353311bde0b7..f59bd1a4ead6b46d31f0a7a6e3d5bf08212b1891 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -104,7 +104,7 @@ VDSO_AFLAGS += -D__ASSEMBLY__ VDSO_LDFLAGS += -Bsymbolic --no-undefined -soname=linux-vdso.so.1 VDSO_LDFLAGS += -z max-page-size=4096 -z common-page-size=4096 VDSO_LDFLAGS += -shared --hash-style=sysv --build-id=sha1 -VDSO_LDFLAGS += --orphan-handling=warn +VDSO_LDFLAGS += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL) # Borrow vdsomunge.c from the arm vDSO diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 815cc118c675f3e0fc96e58df229ffe3f7ce928b..05da3c8f7e88f6a640ef744d8c401e695e490553 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -32,6 +32,8 @@ menuconfig KVM select KVM_VFIO select HAVE_KVM_EVENTFD select HAVE_KVM_IRQFD + select HAVE_KVM_DIRTY_RING_ACQ_REL + select NEED_KVM_DIRTY_RING_WITH_BITMAP select HAVE_KVM_MSI select HAVE_KVM_IRQCHIP select HAVE_KVM_IRQ_ROUTING diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 94d33e296e10c57c34008af96d7a9adcb72d99ef..9c5573bc4614569ac62ea80457d13173f3db11fa 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -50,7 +51,6 @@ DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized); DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector); DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); -unsigned long kvm_arm_hyp_percpu_base[NR_CPUS]; DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params); static bool vgic_present; @@ -138,24 +138,24 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret; - ret = kvm_arm_setup_stage2(kvm, type); - if (ret) - return ret; - - ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu); + ret = kvm_share_hyp(kvm, kvm + 1); if (ret) return ret; - ret = kvm_share_hyp(kvm, kvm + 1); + ret = pkvm_init_host_vm(kvm); if (ret) - goto out_free_stage2_pgd; + goto err_unshare_kvm; if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL)) { ret = -ENOMEM; - goto out_free_stage2_pgd; + goto err_unshare_kvm; } cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask); + ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type); + if (ret) + goto err_free_cpumask; + kvm_vgic_early_init(kvm); /* The maximum number of VCPUs is limited by the host's GIC model */ @@ -164,9 +164,18 @@ int kvm_arch_init_vm(struct kvm 
*kvm, unsigned long type) set_default_spectre(kvm); kvm_arm_init_hypercalls(kvm); - return ret; -out_free_stage2_pgd: - kvm_free_stage2_pgd(&kvm->arch.mmu); + /* + * Initialise the default PMUver before there is a chance to + * create an actual PMU. + */ + kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit(); + + return 0; + +err_free_cpumask: + free_cpumask_var(kvm->arch.supported_cpus); +err_unshare_kvm: + kvm_unshare_hyp(kvm, kvm + 1); return ret; } @@ -187,6 +196,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_vgic_destroy(kvm); + if (is_protected_kvm_enabled()) + pkvm_destroy_hyp_vm(kvm); + kvm_destroy_vcpus(kvm); kvm_unshare_hyp(kvm, kvm + 1); @@ -569,6 +581,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) if (ret) return ret; + if (is_protected_kvm_enabled()) { + ret = pkvm_create_hyp_vm(kvm); + if (ret) + return ret; + } + if (!irqchip_in_kernel(kvm)) { /* * Tell the rest of the code that there are userspace irqchip @@ -746,6 +764,9 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_SUSPEND, vcpu)) return kvm_vcpu_suspend(vcpu); + + if (kvm_dirty_ring_check_request(vcpu)) + return 0; } return 1; @@ -1518,7 +1539,7 @@ static int kvm_init_vector_slots(void) return 0; } -static void cpu_prepare_hyp_mode(int cpu) +static void cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits) { struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); unsigned long tcr; @@ -1534,23 +1555,9 @@ static void cpu_prepare_hyp_mode(int cpu) params->mair_el2 = read_sysreg(mair_el1); - /* - * The ID map may be configured to use an extended virtual address - * range. This is only the case if system RAM is out of range for the - * currently configured page size and VA_BITS, in which case we will - * also need the extended virtual range for the HYP ID map, or we won't - * be able to enable the EL2 MMU. - * - * However, at EL2, there is only one TTBR register, and we can't switch - * between translation tables *and* update TCR_EL2.T0SZ at the same - * time. Bottom line: we need to use the extended range with *both* our - * translation tables. - * - * So use the same T0SZ value we use for the ID map. 
- */ tcr = (read_sysreg(tcr_el1) & TCR_EL2_MASK) | TCR_EL2_RES1; tcr &= ~TCR_T0SZ_MASK; - tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET; + tcr |= TCR_T0SZ(hyp_va_bits); params->tcr_el2 = tcr; params->pgd_pa = kvm_mmu_get_httbr(); @@ -1844,13 +1851,13 @@ static void teardown_hyp_mode(void) free_hyp_pgds(); for_each_possible_cpu(cpu) { free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); - free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order()); + free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order()); } } static int do_pkvm_init(u32 hyp_va_bits) { - void *per_cpu_base = kvm_ksym_ref(kvm_arm_hyp_percpu_base); + void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)); int ret; preempt_disable(); @@ -1870,11 +1877,8 @@ static int do_pkvm_init(u32 hyp_va_bits) return ret; } -static int kvm_hyp_init_protection(u32 hyp_va_bits) +static void kvm_hyp_init_symbols(void) { - void *addr = phys_to_virt(hyp_mem_base); - int ret; - kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1); kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1); @@ -1883,6 +1887,14 @@ static int kvm_hyp_init_protection(u32 hyp_va_bits) kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1); + kvm_nvhe_sym(__icache_flags) = __icache_flags; + kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits; +} + +static int kvm_hyp_init_protection(u32 hyp_va_bits) +{ + void *addr = phys_to_virt(hyp_mem_base); + int ret; ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP); if (ret) @@ -1950,7 +1962,7 @@ static int init_hyp_mode(void) page_addr = page_address(page); memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size()); - kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr; + kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr; } /* @@ -2043,7 +2055,7 @@ static int init_hyp_mode(void) } for_each_possible_cpu(cpu) { - char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu]; + char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu]; char *percpu_end = percpu_begin + nvhe_percpu_size(); /* Map Hyp percpu pages */ @@ -2054,9 +2066,11 @@ static int init_hyp_mode(void) } /* Prepare the CPU initialization parameters */ - cpu_prepare_hyp_mode(cpu); + cpu_prepare_hyp_mode(cpu, hyp_va_bits); } + kvm_hyp_init_symbols(); + if (is_protected_kvm_enabled()) { init_cpu_logical_map(); @@ -2064,9 +2078,7 @@ static int init_hyp_mode(void) err = -ENODEV; goto out_err; } - } - if (is_protected_kvm_enabled()) { err = kvm_hyp_init_protection(hyp_va_bits); if (err) { kvm_err("Failed to init hyp memory protection\n"); @@ -2130,6 +2142,11 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) return NULL; } +bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) +{ + return irqchip_in_kernel(kvm); +} + bool kvm_arch_has_irq_bypass(void) { return true; diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 2ff13a3f847966c2268cb21c91045e1609869265..5626ddb540ce3333d561f9e61c13d3a695f859fe 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -1059,7 +1059,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, maddr = page_address(page); if (!write) { - 
if (test_bit(PG_mte_tagged, &page->flags)) + if (page_mte_tagged(page)) num_tags = mte_copy_tags_to_user(tags, maddr, MTE_GRANULES_PER_PAGE); else @@ -1068,15 +1068,19 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, clear_user(tags, MTE_GRANULES_PER_PAGE); kvm_release_pfn_clean(pfn); } else { + /* + * Only locking to serialise with a concurrent + * set_pte_at() in the VMM but still overriding the + * tags, hence ignoring the return value. + */ + try_page_mte_tagging(page); num_tags = mte_copy_tags_from_user(maddr, tags, MTE_GRANULES_PER_PAGE); - /* - * Set the flag after checking the write - * completed fully - */ - if (num_tags == MTE_GRANULES_PER_PAGE) - set_bit(PG_mte_tagged, &page->flags); + /* uaccess failed, don't leave stale tags */ + if (num_tags != MTE_GRANULES_PER_PAGE) + mte_clear_page_tags(page); + set_page_mte_tagged(page); kvm_release_pfn_dirty(pfn); } diff --git a/arch/arm64/kvm/hyp/hyp-constants.c b/arch/arm64/kvm/hyp/hyp-constants.c index b3742a6691e87450865eefb61f0018ee1db17158..b257a3b4bfc5c6c7c9fdcba16b1d7c09bc4d76fd 100644 --- a/arch/arm64/kvm/hyp/hyp-constants.c +++ b/arch/arm64/kvm/hyp/hyp-constants.c @@ -2,9 +2,12 @@ #include #include +#include int main(void) { DEFINE(STRUCT_HYP_PAGE_SIZE, sizeof(struct hyp_page)); + DEFINE(PKVM_HYP_VM_SIZE, sizeof(struct pkvm_hyp_vm)); + DEFINE(PKVM_HYP_VCPU_SIZE, sizeof(struct pkvm_hyp_vcpu)); return 0; } diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h index 80e99836eac792427fd62e1c2a5be1b762ab5b26..b7bdbe63deed876f238f97cfd1f03f98497d8aaa 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h +++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h @@ -8,8 +8,10 @@ #define __KVM_NVHE_MEM_PROTECT__ #include #include +#include #include #include +#include #include /* @@ -43,30 +45,45 @@ static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot) return prot & PKVM_PAGE_STATE_PROT_MASK; } -struct host_kvm { +struct host_mmu { struct kvm_arch arch; struct kvm_pgtable pgt; struct kvm_pgtable_mm_ops mm_ops; hyp_spinlock_t lock; }; -extern struct host_kvm host_kvm; +extern struct host_mmu host_mmu; -extern const u8 pkvm_hyp_id; +/* This corresponds to page-table locking order */ +enum pkvm_component_id { + PKVM_ID_HOST, + PKVM_ID_HYP, +}; + +extern unsigned long hyp_nr_cpus; int __pkvm_prot_finalize(void); int __pkvm_host_share_hyp(u64 pfn); int __pkvm_host_unshare_hyp(u64 pfn); +int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages); +int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages); bool addr_is_memory(phys_addr_t phys); int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot); int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id); int kvm_host_prepare_stage2(void *pgt_pool_base); +int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd); void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt); +int hyp_pin_shared_mem(void *from, void *to); +void hyp_unpin_shared_mem(void *from, void *to); +void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc); +int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages, + struct kvm_hyp_memcache *host_mc); + static __always_inline void __load_host_stage2(void) { if (static_branch_likely(&kvm_protected_mode_initialized)) - __load_stage2(&host_kvm.arch.mmu, &host_kvm.arch); + __load_stage2(&host_mmu.arch.mmu, &host_mmu.arch); else write_sysreg(0, vttbr_el2); } diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h 
b/arch/arm64/kvm/hyp/include/nvhe/memory.h index 592b7edb3edb4cdcb3a43855b3edf3a53c37e346..ab205c4d67748f688c95ed63d00c18b1eca1c7ef 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/memory.h +++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h @@ -38,6 +38,10 @@ static inline phys_addr_t hyp_virt_to_phys(void *addr) #define hyp_page_to_virt(page) __hyp_va(hyp_page_to_phys(page)) #define hyp_page_to_pool(page) (((struct hyp_page *)page)->pool) +/* + * Refcounting for 'struct hyp_page'. + * hyp_pool::lock must be held if atomic access to the refcount is required. + */ static inline int hyp_page_count(void *addr) { struct hyp_page *p = hyp_virt_to_page(addr); @@ -45,4 +49,27 @@ static inline int hyp_page_count(void *addr) return p->refcount; } +static inline void hyp_page_ref_inc(struct hyp_page *p) +{ + BUG_ON(p->refcount == USHRT_MAX); + p->refcount++; +} + +static inline void hyp_page_ref_dec(struct hyp_page *p) +{ + BUG_ON(!p->refcount); + p->refcount--; +} + +static inline int hyp_page_ref_dec_and_test(struct hyp_page *p) +{ + hyp_page_ref_dec(p); + return (p->refcount == 0); +} + +static inline void hyp_set_page_refcounted(struct hyp_page *p) +{ + BUG_ON(p->refcount); + p->refcount = 1; +} #endif /* __KVM_HYP_MEMORY_H */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h index 42d8eb9bfe725b49c808b22e4333d69bb7a47ece..d5ec972b5c1ec8a3506d4e5851d249e7f8ea3b8b 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/mm.h +++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h @@ -13,9 +13,13 @@ extern struct kvm_pgtable pkvm_pgtable; extern hyp_spinlock_t pkvm_pgd_lock; +int hyp_create_pcpu_fixmap(void); +void *hyp_fixmap_map(phys_addr_t phys); +void hyp_fixmap_unmap(void); + int hyp_create_idmap(u32 hyp_va_bits); int hyp_map_vectors(void); -int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back); +int hyp_back_vmemmap(phys_addr_t back); int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot); int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot); int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot); @@ -24,16 +28,4 @@ int __pkvm_create_private_mapping(phys_addr_t phys, size_t size, unsigned long *haddr); int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr); -static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size, - unsigned long *start, unsigned long *end) -{ - unsigned long nr_pages = size >> PAGE_SHIFT; - struct hyp_page *p = hyp_phys_to_page(phys); - - *start = (unsigned long)p; - *end = *start + nr_pages * sizeof(struct hyp_page); - *start = ALIGN_DOWN(*start, PAGE_SIZE); - *end = ALIGN(*end, PAGE_SIZE); -} - #endif /* __KVM_HYP_MM_H */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h new file mode 100644 index 0000000000000000000000000000000000000000..82b3d62538a6111cc3c3d6d65d99d5e613d14fae --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Google LLC + * Author: Fuad Tabba + */ + +#ifndef __ARM64_KVM_NVHE_PKVM_H__ +#define __ARM64_KVM_NVHE_PKVM_H__ + +#include + +#include +#include + +/* + * Holds the relevant data for maintaining the vcpu state completely at hyp. + */ +struct pkvm_hyp_vcpu { + struct kvm_vcpu vcpu; + + /* Backpointer to the host's (untrusted) vCPU instance. */ + struct kvm_vcpu *host_vcpu; +}; + +/* + * Holds the relevant data for running a protected vm. 
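+ * + * A rough sketch of the lifecycle, inferred from the declarations below + * rather than spelled out by this patch: __pkvm_init_vm() installs an + * instance in the hyp VM table, __pkvm_init_vcpu() populates vcpus[], and + * __pkvm_teardown_vm() releases the instance again.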
+ */ +struct pkvm_hyp_vm { + struct kvm kvm; + + /* Backpointer to the host's (untrusted) KVM instance. */ + struct kvm *host_kvm; + + /* The guest's stage-2 page-table managed by the hypervisor. */ + struct kvm_pgtable pgt; + struct kvm_pgtable_mm_ops mm_ops; + struct hyp_pool pool; + hyp_spinlock_t lock; + + /* + * The number of vcpus initialized and ready to run. + * Modifying this is protected by 'vm_table_lock'. + */ + unsigned int nr_vcpus; + + /* Array of the hyp vCPU structures for this VM. */ + struct pkvm_hyp_vcpu *vcpus[]; +}; + +static inline struct pkvm_hyp_vm * +pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu) +{ + return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm); +} + +void pkvm_hyp_vm_table_init(void *tbl); + +int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva, + unsigned long pgd_hva); +int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu, + unsigned long vcpu_hva); +int __pkvm_teardown_vm(pkvm_handle_t handle); + +struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle, + unsigned int vcpu_idx); +void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu); + +#endif /* __ARM64_KVM_NVHE_PKVM_H__ */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/spinlock.h b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h index 4652fd04bdbee5b94f4922f010585bc4fa6a65e6..7c7ea8c55405f589f741a414f71d486120710092 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/spinlock.h +++ b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h @@ -28,9 +28,17 @@ typedef union hyp_spinlock { }; } hyp_spinlock_t; +#define __HYP_SPIN_LOCK_INITIALIZER \ + { .__val = 0 } + +#define __HYP_SPIN_LOCK_UNLOCKED \ + ((hyp_spinlock_t) __HYP_SPIN_LOCK_INITIALIZER) + +#define DEFINE_HYP_SPINLOCK(x) hyp_spinlock_t x = __HYP_SPIN_LOCK_UNLOCKED + #define hyp_spin_lock_init(l) \ do { \ - *(l) = (hyp_spinlock_t){ .__val = 0 }; \ + *(l) = __HYP_SPIN_LOCK_UNLOCKED; \ } while (0) static inline void hyp_spin_lock(hyp_spinlock_t *lock) diff --git a/arch/arm64/kvm/hyp/nvhe/cache.S b/arch/arm64/kvm/hyp/nvhe/cache.S index 0c367eb5f4e28dce0dcd0986c10413b5361f5faf..85936c17ae4076df6a03b87efddb86f2bec9da98 100644 --- a/arch/arm64/kvm/hyp/nvhe/cache.S +++ b/arch/arm64/kvm/hyp/nvhe/cache.S @@ -12,3 +12,14 @@ SYM_FUNC_START(__pi_dcache_clean_inval_poc) ret SYM_FUNC_END(__pi_dcache_clean_inval_poc) SYM_FUNC_ALIAS(dcache_clean_inval_poc, __pi_dcache_clean_inval_poc) + +SYM_FUNC_START(__pi_icache_inval_pou) +alternative_if ARM64_HAS_CACHE_DIC + isb + ret +alternative_else_nop_endif + + invalidate_icache_by_line x0, x1, x2, x3 + ret +SYM_FUNC_END(__pi_icache_inval_pou) +SYM_FUNC_ALIAS(icache_inval_pou, __pi_icache_inval_pou) diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 3cea4b6ac23ec114c037348fee1363ad25edeff6..728e01d4536b02d519a591c7aecbc61a75642d47 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -15,17 +15,93 @@ #include #include +#include #include DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params); void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt); +static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu) +{ + struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu; + + hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt; + + hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state); + hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl; + + hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu; + + hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2; + hyp_vcpu->vcpu.arch.mdcr_el2 = 
host_vcpu->arch.mdcr_el2; + hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2; + + hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags; + hyp_vcpu->vcpu.arch.fp_state = host_vcpu->arch.fp_state; + + hyp_vcpu->vcpu.arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr); + hyp_vcpu->vcpu.arch.host_fpsimd_state = host_vcpu->arch.host_fpsimd_state; + + hyp_vcpu->vcpu.arch.vsesr_el2 = host_vcpu->arch.vsesr_el2; + + hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3; +} + +static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu) +{ + struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu; + struct vgic_v3_cpu_if *hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3; + struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3; + unsigned int i; + + host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt; + + host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2; + host_vcpu->arch.cptr_el2 = hyp_vcpu->vcpu.arch.cptr_el2; + + host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault; + + host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags; + host_vcpu->arch.fp_state = hyp_vcpu->vcpu.arch.fp_state; + + host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr; + for (i = 0; i < hyp_cpu_if->used_lrs; ++i) + host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i]; +} + static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt) { - DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1); + DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1); + int ret; - cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu)); + host_vcpu = kern_hyp_va(host_vcpu); + + if (unlikely(is_protected_kvm_enabled())) { + struct pkvm_hyp_vcpu *hyp_vcpu; + struct kvm *host_kvm; + + host_kvm = kern_hyp_va(host_vcpu->kvm); + hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle, + host_vcpu->vcpu_idx); + if (!hyp_vcpu) { + ret = -EINVAL; + goto out; + } + + flush_hyp_vcpu(hyp_vcpu); + + ret = __kvm_vcpu_run(&hyp_vcpu->vcpu); + + sync_hyp_vcpu(hyp_vcpu); + pkvm_put_hyp_vcpu(hyp_vcpu); + } else { + /* The host is fully trusted, run its vCPU directly. 
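+ * No pkvm_hyp_vcpu shadow state exists in this case, so none of + * the flush_hyp_vcpu()/sync_hyp_vcpu() copying above is needed.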
*/ + ret = __kvm_vcpu_run(host_vcpu); + } + +out: + cpu_reg(host_ctxt, 1) = ret; } static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt) @@ -191,6 +267,33 @@ static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt) __pkvm_vcpu_init_traps(kern_hyp_va(vcpu)); } +static void handle___pkvm_init_vm(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(struct kvm *, host_kvm, host_ctxt, 1); + DECLARE_REG(unsigned long, vm_hva, host_ctxt, 2); + DECLARE_REG(unsigned long, pgd_hva, host_ctxt, 3); + + host_kvm = kern_hyp_va(host_kvm); + cpu_reg(host_ctxt, 1) = __pkvm_init_vm(host_kvm, vm_hva, pgd_hva); +} + +static void handle___pkvm_init_vcpu(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1); + DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 2); + DECLARE_REG(unsigned long, vcpu_hva, host_ctxt, 3); + + host_vcpu = kern_hyp_va(host_vcpu); + cpu_reg(host_ctxt, 1) = __pkvm_init_vcpu(handle, host_vcpu, vcpu_hva); +} + +static void handle___pkvm_teardown_vm(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1); + + cpu_reg(host_ctxt, 1) = __pkvm_teardown_vm(handle); +} + typedef void (*hcall_t)(struct kvm_cpu_context *); #define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x @@ -220,6 +323,9 @@ static const hcall_t host_hcall[] = { HANDLE_FUNC(__vgic_v3_save_aprs), HANDLE_FUNC(__vgic_v3_restore_aprs), HANDLE_FUNC(__pkvm_vcpu_init_traps), + HANDLE_FUNC(__pkvm_init_vm), + HANDLE_FUNC(__pkvm_init_vcpu), + HANDLE_FUNC(__pkvm_teardown_vm), }; static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c index 9f54833af400972670ceddec0a7904d190a005e5..04d194583f1e47a42079726276bc941cdc146641 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c @@ -23,6 +23,8 @@ u64 cpu_logical_map(unsigned int cpu) return hyp_cpu_logical_map[cpu]; } +unsigned long __ro_after_init kvm_arm_hyp_percpu_base[NR_CPUS]; + unsigned long __hyp_per_cpu_offset(unsigned int cpu) { unsigned long *cpu_base_array; diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 07f9dc9848ef114849103110ee97aef91e95d5b6..552653fa18be34b2cde379d04ad6acb27395e3c8 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -21,21 +21,33 @@ #define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP) -extern unsigned long hyp_nr_cpus; -struct host_kvm host_kvm; +struct host_mmu host_mmu; static struct hyp_pool host_s2_pool; -const u8 pkvm_hyp_id = 1; +static DEFINE_PER_CPU(struct pkvm_hyp_vm *, __current_vm); +#define current_vm (*this_cpu_ptr(&__current_vm)) + +static void guest_lock_component(struct pkvm_hyp_vm *vm) +{ + hyp_spin_lock(&vm->lock); + current_vm = vm; +} + +static void guest_unlock_component(struct pkvm_hyp_vm *vm) +{ + current_vm = NULL; + hyp_spin_unlock(&vm->lock); +} static void host_lock_component(void) { - hyp_spin_lock(&host_kvm.lock); + hyp_spin_lock(&host_mmu.lock); } static void host_unlock_component(void) { - hyp_spin_unlock(&host_kvm.lock); + hyp_spin_unlock(&host_mmu.lock); } static void hyp_lock_component(void) @@ -79,6 +91,11 @@ static void host_s2_put_page(void *addr) hyp_put_page(&host_s2_pool, addr); } +static void host_s2_free_removed_table(void *addr, u32 level) +{ + kvm_pgtable_stage2_free_removed(&host_mmu.mm_ops, addr, level); +} + static int prepare_s2_pool(void *pgt_pool_base) { 
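/* * Feed the page-table pages donated by the host into 'host_s2_pool' * (declared above); it backs the host_s2_zalloc_*() callbacks used for * the host's stage-2 page-tables. */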
unsigned long nr_pages, pfn; @@ -90,9 +107,10 @@ static int prepare_s2_pool(void *pgt_pool_base) if (ret) return ret; - host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) { + host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) { .zalloc_pages_exact = host_s2_zalloc_pages_exact, .zalloc_page = host_s2_zalloc_page, + .free_removed_table = host_s2_free_removed_table, .phys_to_virt = hyp_phys_to_virt, .virt_to_phys = hyp_virt_to_phys, .page_count = hyp_page_count, @@ -111,7 +129,7 @@ static void prepare_host_vtcr(void) parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val); phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange); - host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val, + host_mmu.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val, id_aa64mmfr1_el1_sys_val, phys_shift); } @@ -119,45 +137,170 @@ static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot pr int kvm_host_prepare_stage2(void *pgt_pool_base) { - struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu; + struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu; int ret; prepare_host_vtcr(); - hyp_spin_lock_init(&host_kvm.lock); - mmu->arch = &host_kvm.arch; + hyp_spin_lock_init(&host_mmu.lock); + mmu->arch = &host_mmu.arch; ret = prepare_s2_pool(pgt_pool_base); if (ret) return ret; - ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu, - &host_kvm.mm_ops, KVM_HOST_S2_FLAGS, + ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu, + &host_mmu.mm_ops, KVM_HOST_S2_FLAGS, host_stage2_force_pte_cb); if (ret) return ret; - mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd); - mmu->pgt = &host_kvm.pgt; + mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd); + mmu->pgt = &host_mmu.pgt; atomic64_set(&mmu->vmid.id, 0); return 0; } +static bool guest_stage2_force_pte_cb(u64 addr, u64 end, + enum kvm_pgtable_prot prot) +{ + return true; +} + +static void *guest_s2_zalloc_pages_exact(size_t size) +{ + void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size)); + + WARN_ON(size != (PAGE_SIZE << get_order(size))); + hyp_split_page(hyp_virt_to_page(addr)); + + return addr; +} + +static void guest_s2_free_pages_exact(void *addr, unsigned long size) +{ + u8 order = get_order(size); + unsigned int i; + + for (i = 0; i < (1 << order); i++) + hyp_put_page(&current_vm->pool, addr + (i * PAGE_SIZE)); +} + +static void *guest_s2_zalloc_page(void *mc) +{ + struct hyp_page *p; + void *addr; + + addr = hyp_alloc_pages(&current_vm->pool, 0); + if (addr) + return addr; + + addr = pop_hyp_memcache(mc, hyp_phys_to_virt); + if (!addr) + return addr; + + memset(addr, 0, PAGE_SIZE); + p = hyp_virt_to_page(addr); + memset(p, 0, sizeof(*p)); + p->refcount = 1; + + return addr; +} + +static void guest_s2_get_page(void *addr) +{ + hyp_get_page(&current_vm->pool, addr); +} + +static void guest_s2_put_page(void *addr) +{ + hyp_put_page(&current_vm->pool, addr); +} + +static void clean_dcache_guest_page(void *va, size_t size) +{ + __clean_dcache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size); + hyp_fixmap_unmap(); +} + +static void invalidate_icache_guest_page(void *va, size_t size) +{ + __invalidate_icache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size); + hyp_fixmap_unmap(); +} + +int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd) +{ + struct kvm_s2_mmu *mmu = &vm->kvm.arch.mmu; + unsigned long nr_pages; + int ret; + + nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT; + ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0); + if (ret) + return ret; + + hyp_spin_lock_init(&vm->lock); + vm->mm_ops = (struct kvm_pgtable_mm_ops) { + .zalloc_pages_exact =
guest_s2_zalloc_pages_exact, + .free_pages_exact = guest_s2_free_pages_exact, + .zalloc_page = guest_s2_zalloc_page, + .phys_to_virt = hyp_phys_to_virt, + .virt_to_phys = hyp_virt_to_phys, + .page_count = hyp_page_count, + .get_page = guest_s2_get_page, + .put_page = guest_s2_put_page, + .dcache_clean_inval_poc = clean_dcache_guest_page, + .icache_inval_pou = invalidate_icache_guest_page, + }; + + guest_lock_component(vm); + ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, + guest_stage2_force_pte_cb); + guest_unlock_component(vm); + if (ret) + return ret; + + vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd); + + return 0; +} + +void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc) +{ + void *addr; + + /* Dump all pgtable pages in the hyp_pool */ + guest_lock_component(vm); + kvm_pgtable_stage2_destroy(&vm->pgt); + vm->kvm.arch.mmu.pgd_phys = 0ULL; + guest_unlock_component(vm); + + /* Drain the hyp_pool into the memcache */ + addr = hyp_alloc_pages(&vm->pool, 0); + while (addr) { + memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page)); + push_hyp_memcache(mc, addr, hyp_virt_to_phys); + WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1)); + addr = hyp_alloc_pages(&vm->pool, 0); + } +} + int __pkvm_prot_finalize(void) { - struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu; + struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu; struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params); if (params->hcr_el2 & HCR_VM) return -EPERM; params->vttbr = kvm_get_vttbr(mmu); - params->vtcr = host_kvm.arch.vtcr; + params->vtcr = host_mmu.arch.vtcr; params->hcr_el2 |= HCR_VM; kvm_flush_dcache_to_poc(params, sizeof(*params)); write_sysreg(params->hcr_el2, hcr_el2); - __load_stage2(&host_kvm.arch.mmu, &host_kvm.arch); + __load_stage2(&host_mmu.arch.mmu, &host_mmu.arch); /* * Make sure to have an ISB before the TLB maintenance below but only @@ -175,7 +318,7 @@ int __pkvm_prot_finalize(void) static int host_stage2_unmap_dev_all(void) { - struct kvm_pgtable *pgt = &host_kvm.pgt; + struct kvm_pgtable *pgt = &host_mmu.pgt; struct memblock_region *reg; u64 addr = 0; int i, ret; @@ -195,7 +338,7 @@ struct kvm_mem_range { u64 end; }; -static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range) +static struct memblock_region *find_mem_range(phys_addr_t addr, struct kvm_mem_range *range) { int cur, left = 0, right = hyp_memblock_nr; struct memblock_region *reg; @@ -218,18 +361,28 @@ static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range) } else { range->start = reg->base; range->end = end; - return true; + return reg; } } - return false; + return NULL; } bool addr_is_memory(phys_addr_t phys) { struct kvm_mem_range range; - return find_mem_range(phys, &range); + return !!find_mem_range(phys, &range); +} + +static bool addr_is_allowed_memory(phys_addr_t phys) +{ + struct memblock_region *reg; + struct kvm_mem_range range; + + reg = find_mem_range(phys, &range); + + return reg && !(reg->flags & MEMBLOCK_NOMAP); } static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range) @@ -250,8 +403,8 @@ static bool range_is_memory(u64 start, u64 end) static inline int __host_stage2_idmap(u64 start, u64 end, enum kvm_pgtable_prot prot) { - return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start, - prot, &host_s2_pool); + return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start, + prot, &host_s2_pool, 0); } /* @@ -263,7 +416,7 @@ static inline int __host_stage2_idmap(u64 start, u64 end, #define host_stage2_try(fn, ...) 
\ ({ \ int __ret; \ - hyp_assert_lock_held(&host_kvm.lock); \ + hyp_assert_lock_held(&host_mmu.lock); \ __ret = fn(__VA_ARGS__); \ if (__ret == -ENOMEM) { \ __ret = host_stage2_unmap_dev_all(); \ @@ -286,8 +439,8 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range) u32 level; int ret; - hyp_assert_lock_held(&host_kvm.lock); - ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level); + hyp_assert_lock_held(&host_mmu.lock); + ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level); if (ret) return ret; @@ -319,7 +472,7 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size, int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id) { - return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt, + return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt, addr, size, &host_s2_pool, owner_id); } @@ -348,7 +501,7 @@ static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot pr static int host_stage2_idmap(u64 addr) { struct kvm_mem_range range; - bool is_memory = find_mem_range(addr, &range); + bool is_memory = !!find_mem_range(addr, &range); enum kvm_pgtable_prot prot; int ret; @@ -380,12 +533,6 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt) BUG_ON(ret && ret != -EAGAIN); } -/* This corresponds to locking order */ -enum pkvm_component_id { - PKVM_ID_HOST, - PKVM_ID_HYP, -}; - struct pkvm_mem_transition { u64 nr_pages; @@ -399,6 +546,9 @@ struct pkvm_mem_transition { /* Address in the completer's address space */ u64 completer_addr; } host; + struct { + u64 completer_addr; + } hyp; }; } initiator; @@ -412,23 +562,24 @@ struct pkvm_mem_share { const enum kvm_pgtable_prot completer_prot; }; +struct pkvm_mem_donation { + const struct pkvm_mem_transition tx; +}; + struct check_walk_data { enum pkvm_page_state desired; enum pkvm_page_state (*get_page_state)(kvm_pte_t pte); }; -static int __check_page_state_visitor(u64 addr, u64 end, u32 level, - kvm_pte_t *ptep, - enum kvm_pgtable_walk_flags flag, - void * const arg) +static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) { - struct check_walk_data *d = arg; - kvm_pte_t pte = *ptep; + struct check_walk_data *d = ctx->arg; - if (kvm_pte_valid(pte) && !addr_is_memory(kvm_pte_to_phys(pte))) + if (kvm_pte_valid(ctx->old) && !addr_is_allowed_memory(kvm_pte_to_phys(ctx->old))) return -EINVAL; - return d->get_page_state(pte) == d->desired ? 0 : -EPERM; + return d->get_page_state(ctx->old) == d->desired ? 
0 : -EPERM; } static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size, @@ -459,8 +610,8 @@ static int __host_check_page_state_range(u64 addr, u64 size, .get_page_state = host_get_page_state, }; - hyp_assert_lock_held(&host_kvm.lock); - return check_page_state_range(&host_kvm.pgt, addr, size, &d); + hyp_assert_lock_held(&host_mmu.lock); + return check_page_state_range(&host_mmu.pgt, addr, size, &d); } static int __host_set_page_state_range(u64 addr, u64 size, @@ -511,6 +662,46 @@ static int host_initiate_unshare(u64 *completer_addr, return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED); } +static int host_initiate_donation(u64 *completer_addr, + const struct pkvm_mem_transition *tx) +{ + u8 owner_id = tx->completer.id; + u64 size = tx->nr_pages * PAGE_SIZE; + + *completer_addr = tx->initiator.host.completer_addr; + return host_stage2_set_owner_locked(tx->initiator.addr, size, owner_id); +} + +static bool __host_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx) +{ + return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) || + tx->initiator.id != PKVM_ID_HYP); +} + +static int __host_ack_transition(u64 addr, const struct pkvm_mem_transition *tx, + enum pkvm_page_state state) +{ + u64 size = tx->nr_pages * PAGE_SIZE; + + if (__host_ack_skip_pgtable_check(tx)) + return 0; + + return __host_check_page_state_range(addr, size, state); +} + +static int host_ack_donation(u64 addr, const struct pkvm_mem_transition *tx) +{ + return __host_ack_transition(addr, tx, PKVM_NOPAGE); +} + +static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx) +{ + u64 size = tx->nr_pages * PAGE_SIZE; + u8 host_id = tx->completer.id; + + return host_stage2_set_owner_locked(addr, size, host_id); +} + static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte) { if (!kvm_pte_valid(pte)) @@ -531,6 +722,27 @@ static int __hyp_check_page_state_range(u64 addr, u64 size, return check_page_state_range(&pkvm_pgtable, addr, size, &d); } +static int hyp_request_donation(u64 *completer_addr, + const struct pkvm_mem_transition *tx) +{ + u64 size = tx->nr_pages * PAGE_SIZE; + u64 addr = tx->initiator.addr; + + *completer_addr = tx->initiator.hyp.completer_addr; + return __hyp_check_page_state_range(addr, size, PKVM_PAGE_OWNED); +} + +static int hyp_initiate_donation(u64 *completer_addr, + const struct pkvm_mem_transition *tx) +{ + u64 size = tx->nr_pages * PAGE_SIZE; + int ret; + + *completer_addr = tx->initiator.hyp.completer_addr; + ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, tx->initiator.addr, size); + return (ret != size) ? 
-EFAULT : 0; +} + static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx) { return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) || @@ -555,6 +767,9 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx) { u64 size = tx->nr_pages * PAGE_SIZE; + if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr)) + return -EBUSY; + if (__hyp_ack_skip_pgtable_check(tx)) return 0; @@ -562,6 +777,16 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx) PKVM_PAGE_SHARED_BORROWED); } +static int hyp_ack_donation(u64 addr, const struct pkvm_mem_transition *tx) +{ + u64 size = tx->nr_pages * PAGE_SIZE; + + if (__hyp_ack_skip_pgtable_check(tx)) + return 0; + + return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE); +} + static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx, enum kvm_pgtable_prot perms) { @@ -580,6 +805,15 @@ static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx) return (ret != size) ? -EFAULT : 0; } +static int hyp_complete_donation(u64 addr, + const struct pkvm_mem_transition *tx) +{ + void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE); + enum kvm_pgtable_prot prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED); + + return pkvm_create_mappings_locked(start, end, prot); +} + static int check_share(struct pkvm_mem_share *share) { const struct pkvm_mem_transition *tx = &share->tx; @@ -732,6 +966,94 @@ static int do_unshare(struct pkvm_mem_share *share) return WARN_ON(__do_unshare(share)); } +static int check_donation(struct pkvm_mem_donation *donation) +{ + const struct pkvm_mem_transition *tx = &donation->tx; + u64 completer_addr; + int ret; + + switch (tx->initiator.id) { + case PKVM_ID_HOST: + ret = host_request_owned_transition(&completer_addr, tx); + break; + case PKVM_ID_HYP: + ret = hyp_request_donation(&completer_addr, tx); + break; + default: + ret = -EINVAL; + } + + if (ret) + return ret; + + switch (tx->completer.id) { + case PKVM_ID_HOST: + ret = host_ack_donation(completer_addr, tx); + break; + case PKVM_ID_HYP: + ret = hyp_ack_donation(completer_addr, tx); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static int __do_donate(struct pkvm_mem_donation *donation) +{ + const struct pkvm_mem_transition *tx = &donation->tx; + u64 completer_addr; + int ret; + + switch (tx->initiator.id) { + case PKVM_ID_HOST: + ret = host_initiate_donation(&completer_addr, tx); + break; + case PKVM_ID_HYP: + ret = hyp_initiate_donation(&completer_addr, tx); + break; + default: + ret = -EINVAL; + } + + if (ret) + return ret; + + switch (tx->completer.id) { + case PKVM_ID_HOST: + ret = host_complete_donation(completer_addr, tx); + break; + case PKVM_ID_HYP: + ret = hyp_complete_donation(completer_addr, tx); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +/* + * do_donate(): + * + * The page owner transfers ownership to another component, losing access + * as a consequence. 
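+ * (Both __pkvm_host_donate_hyp() and __pkvm_hyp_donate_host(), later in + * this file, are thin wrappers around this helper; each takes the host + * and hyp component locks, in that order, around the call.)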
+ * + * Initiator: OWNED => NOPAGE + * Completer: NOPAGE => OWNED + */ +static int do_donate(struct pkvm_mem_donation *donation) +{ + int ret; + + ret = check_donation(donation); + if (ret) + return ret; + + return WARN_ON(__do_donate(donation)); +} + int __pkvm_host_share_hyp(u64 pfn) { int ret; @@ -797,3 +1119,112 @@ int __pkvm_host_unshare_hyp(u64 pfn) return ret; } + +int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages) +{ + int ret; + u64 host_addr = hyp_pfn_to_phys(pfn); + u64 hyp_addr = (u64)__hyp_va(host_addr); + struct pkvm_mem_donation donation = { + .tx = { + .nr_pages = nr_pages, + .initiator = { + .id = PKVM_ID_HOST, + .addr = host_addr, + .host = { + .completer_addr = hyp_addr, + }, + }, + .completer = { + .id = PKVM_ID_HYP, + }, + }, + }; + + host_lock_component(); + hyp_lock_component(); + + ret = do_donate(&donation); + + hyp_unlock_component(); + host_unlock_component(); + + return ret; +} + +int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages) +{ + int ret; + u64 host_addr = hyp_pfn_to_phys(pfn); + u64 hyp_addr = (u64)__hyp_va(host_addr); + struct pkvm_mem_donation donation = { + .tx = { + .nr_pages = nr_pages, + .initiator = { + .id = PKVM_ID_HYP, + .addr = hyp_addr, + .hyp = { + .completer_addr = host_addr, + }, + }, + .completer = { + .id = PKVM_ID_HOST, + }, + }, + }; + + host_lock_component(); + hyp_lock_component(); + + ret = do_donate(&donation); + + hyp_unlock_component(); + host_unlock_component(); + + return ret; +} + +int hyp_pin_shared_mem(void *from, void *to) +{ + u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE); + u64 end = PAGE_ALIGN((u64)to); + u64 size = end - start; + int ret; + + host_lock_component(); + hyp_lock_component(); + + ret = __host_check_page_state_range(__hyp_pa(start), size, + PKVM_PAGE_SHARED_OWNED); + if (ret) + goto unlock; + + ret = __hyp_check_page_state_range(start, size, + PKVM_PAGE_SHARED_BORROWED); + if (ret) + goto unlock; + + for (cur = start; cur < end; cur += PAGE_SIZE) + hyp_page_ref_inc(hyp_virt_to_page(cur)); + +unlock: + hyp_unlock_component(); + host_unlock_component(); + + return ret; +} + +void hyp_unpin_shared_mem(void *from, void *to) +{ + u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE); + u64 end = PAGE_ALIGN((u64)to); + + host_lock_component(); + hyp_lock_component(); + + for (cur = start; cur < end; cur += PAGE_SIZE) + hyp_page_ref_dec(hyp_virt_to_page(cur)); + + hyp_unlock_component(); + host_unlock_component(); +} diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c index 96193cb31a399da80598f9517a7c3e0259676b58..318298eb3d6b20eeda1854b10adf06ce676ee790 100644 --- a/arch/arm64/kvm/hyp/nvhe/mm.c +++ b/arch/arm64/kvm/hyp/nvhe/mm.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -25,6 +26,12 @@ unsigned int hyp_memblock_nr; static u64 __io_map_base; +struct hyp_fixmap_slot { + u64 addr; + kvm_pte_t *ptep; +}; +static DEFINE_PER_CPU(struct hyp_fixmap_slot, fixmap_slots); + static int __pkvm_create_mappings(unsigned long start, unsigned long size, unsigned long phys, enum kvm_pgtable_prot prot) { @@ -129,13 +136,36 @@ int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot) return ret; } -int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back) +int hyp_back_vmemmap(phys_addr_t back) { - unsigned long start, end; + unsigned long i, start, size, end = 0; + int ret; - hyp_vmemmap_range(phys, size, &start, &end); + for (i = 0; i < hyp_memblock_nr; i++) { + start = hyp_memory[i].base; + start = ALIGN_DOWN((u64)hyp_phys_to_page(start), 
PAGE_SIZE); + /* + * The beginning of the hyp_vmemmap region for the current + * memblock may already be backed by the page backing the end + * of the previous region, so avoid mapping it twice. + */ + start = max(start, end); + + end = hyp_memory[i].base + hyp_memory[i].size; + end = PAGE_ALIGN((u64)hyp_phys_to_page(end)); + if (start >= end) + continue; + + size = end - start; + ret = __pkvm_create_mappings(start, size, back, PAGE_HYP); + if (ret) + return ret; + + memset(hyp_phys_to_virt(back), 0, size); + back += size; + } - return __pkvm_create_mappings(start, end - start, back, PAGE_HYP); + return 0; } static void *__hyp_bp_vect_base; @@ -189,6 +219,102 @@ int hyp_map_vectors(void) return 0; } +void *hyp_fixmap_map(phys_addr_t phys) +{ + struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots); + kvm_pte_t pte, *ptep = slot->ptep; + + pte = *ptep; + pte &= ~kvm_phys_to_pte(KVM_PHYS_INVALID); + pte |= kvm_phys_to_pte(phys) | KVM_PTE_VALID; + WRITE_ONCE(*ptep, pte); + dsb(ishst); + + return (void *)slot->addr; +} + +static void fixmap_clear_slot(struct hyp_fixmap_slot *slot) +{ + kvm_pte_t *ptep = slot->ptep; + u64 addr = slot->addr; + + WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID); + + /* + * Irritatingly, the architecture requires that we use inner-shareable + * broadcast TLB invalidation here in case another CPU speculates + * through our fixmap and decides to create an "amalgamation of the + * values held in the TLB" due to the apparent lack of a + * break-before-make sequence. + * + * https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03 + */ + dsb(ishst); + __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), (KVM_PGTABLE_MAX_LEVELS - 1)); + dsb(ish); + isb(); +} + +void hyp_fixmap_unmap(void) +{ + fixmap_clear_slot(this_cpu_ptr(&fixmap_slots)); +} + +static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) +{ + struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)ctx->arg); + + if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_MAX_LEVELS - 1) + return -EINVAL; + + slot->addr = ctx->addr; + slot->ptep = ctx->ptep; + + /* + * Clear the PTE, but keep the page-table page refcount elevated to + * prevent it from ever being freed. This lets us manipulate the PTEs + * by hand safely without ever needing to allocate memory.
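+ * From then on, hyp_fixmap_map() and hyp_fixmap_unmap() above only + * flip this pinned PTE between valid and invalid; no page-table walk + * or allocation happens on that path.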
+ */ + fixmap_clear_slot(slot); + + return 0; +} + +static int create_fixmap_slot(u64 addr, u64 cpu) +{ + struct kvm_pgtable_walker walker = { + .cb = __create_fixmap_slot_cb, + .flags = KVM_PGTABLE_WALK_LEAF, + .arg = (void *)cpu, + }; + + return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker); +} + +int hyp_create_pcpu_fixmap(void) +{ + unsigned long addr, i; + int ret; + + for (i = 0; i < hyp_nr_cpus; i++) { + ret = pkvm_alloc_private_va_range(PAGE_SIZE, &addr); + if (ret) + return ret; + + ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE, + __hyp_pa(__hyp_bss_start), PAGE_HYP); + if (ret) + return ret; + + ret = create_fixmap_slot(addr, i); + if (ret) + return ret; + } + + return 0; +} + int hyp_create_idmap(u32 hyp_va_bits) { unsigned long start, end; @@ -213,3 +339,36 @@ int hyp_create_idmap(u32 hyp_va_bits) return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC); } + +static void *admit_host_page(void *arg) +{ + struct kvm_hyp_memcache *host_mc = arg; + + if (!host_mc->nr_pages) + return NULL; + + /* + * The host still owns the pages in its memcache, so we need to go + * through a full host-to-hyp donation cycle to change it. Fortunately, + * __pkvm_host_donate_hyp() takes care of races for us, so if it + * succeeds we're good to go. + */ + if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1)) + return NULL; + + return pop_hyp_memcache(host_mc, hyp_phys_to_virt); +} + +/* Refill our local memcache by popping pages from the one provided by the host. */ +int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages, + struct kvm_hyp_memcache *host_mc) +{ + struct kvm_hyp_memcache tmp = *host_mc; + int ret; + + ret = __topup_hyp_memcache(mc, min_pages, admit_host_page, + hyp_virt_to_phys, &tmp); + *host_mc = tmp; + + return ret; +} diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c index d40f0b30b534701d2b35500469e0f79533c9a8c4..803ba3222e75fc7fd226004466b198042b0cb13c 100644 --- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c +++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c @@ -93,11 +93,16 @@ static inline struct hyp_page *node_to_page(struct list_head *node) static void __hyp_attach_page(struct hyp_pool *pool, struct hyp_page *p) { + phys_addr_t phys = hyp_page_to_phys(p); unsigned short order = p->order; struct hyp_page *buddy; memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order); + /* Skip coalescing for 'external' pages being freed into the pool. */ + if (phys < pool->range_start || phys >= pool->range_end) + goto insert; + /* * Only the first struct hyp_page of a high-order page (otherwise known * as the 'head') should have p->order set.
The non-head pages should @@ -116,6 +121,7 @@ static void __hyp_attach_page(struct hyp_pool *pool, p = min(p, buddy); } +insert: /* Mark the new head, and insert it */ p->order = order; page_add_to_list(p, &pool->free_area[order]); @@ -144,25 +150,6 @@ static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool, return p; } -static inline void hyp_page_ref_inc(struct hyp_page *p) -{ - BUG_ON(p->refcount == USHRT_MAX); - p->refcount++; -} - -static inline int hyp_page_ref_dec_and_test(struct hyp_page *p) -{ - BUG_ON(!p->refcount); - p->refcount--; - return (p->refcount == 0); -} - -static inline void hyp_set_page_refcounted(struct hyp_page *p) -{ - BUG_ON(p->refcount); - p->refcount = 1; -} - static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p) { if (hyp_page_ref_dec_and_test(p)) @@ -249,10 +236,8 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages, /* Init the vmemmap portion */ p = hyp_phys_to_page(phys); - for (i = 0; i < nr_pages; i++) { - p[i].order = 0; + for (i = 0; i < nr_pages; i++) hyp_set_page_refcounted(&p[i]); - } /* Attach the unused pages to the buddy tree */ for (i = reserved_pages; i < nr_pages; i++) diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 85d3b7ae720fb0ae78e79709e9c555ab01531e0d..a06ece14a6d8ef63fefb15684d6fcacf3876b928 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -7,8 +7,17 @@ #include #include #include +#include +#include +#include #include +/* Used by icache_is_vpipt(). */ +unsigned long __icache_flags; + +/* Used by kvm_get_vttbr(). */ +unsigned int kvm_arm_vmid_bits; + /* * Set trap register values based on features in ID_AA64PFR0. */ @@ -183,3 +192,430 @@ void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu) pvm_init_traps_aa64mmfr0(vcpu); pvm_init_traps_aa64mmfr1(vcpu); } + +/* + * Start the VM table handle at the offset defined instead of at 0. + * Mainly for sanity checking and debugging. + */ +#define HANDLE_OFFSET 0x1000 + +static unsigned int vm_handle_to_idx(pkvm_handle_t handle) +{ + return handle - HANDLE_OFFSET; +} + +static pkvm_handle_t idx_to_vm_handle(unsigned int idx) +{ + return idx + HANDLE_OFFSET; +} + +/* + * Spinlock for protecting state related to the VM table. Protects writes + * to 'vm_table' as well as each VM's 'nr_vcpus' and the reference count + * taken on a VM while one of its vCPUs is loaded. + */ +static DEFINE_HYP_SPINLOCK(vm_table_lock); + +/* + * The table of VM entries for protected VMs in hyp. + * Allocated at hyp initialization and setup. + */ +static struct pkvm_hyp_vm **vm_table; + +void pkvm_hyp_vm_table_init(void *tbl) +{ + WARN_ON(vm_table); + vm_table = tbl; } + +/* + * Return the hyp vm structure corresponding to the handle.
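+ * Only valid while 'vm_table_lock' is held: every caller below takes + * the lock around the lookup, which keeps the entry stable against a + * concurrent __pkvm_teardown_vm().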
+ */ +static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle) +{ + unsigned int idx = vm_handle_to_idx(handle); + + if (unlikely(idx >= KVM_MAX_PVMS)) + return NULL; + + return vm_table[idx]; +} + +struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle, + unsigned int vcpu_idx) +{ + struct pkvm_hyp_vcpu *hyp_vcpu = NULL; + struct pkvm_hyp_vm *hyp_vm; + + hyp_spin_lock(&vm_table_lock); + hyp_vm = get_vm_by_handle(handle); + if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx) + goto unlock; + + hyp_vcpu = hyp_vm->vcpus[vcpu_idx]; + hyp_page_ref_inc(hyp_virt_to_page(hyp_vm)); +unlock: + hyp_spin_unlock(&vm_table_lock); + return hyp_vcpu; +} + +void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu) +{ + struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu); + + hyp_spin_lock(&vm_table_lock); + hyp_page_ref_dec(hyp_virt_to_page(hyp_vm)); + hyp_spin_unlock(&vm_table_lock); +} + +static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu) +{ + if (host_vcpu) + hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1); +} + +static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[], + unsigned int nr_vcpus) +{ + int i; + + for (i = 0; i < nr_vcpus; i++) + unpin_host_vcpu(hyp_vcpus[i]->host_vcpu); +} + +static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm, + unsigned int nr_vcpus) +{ + hyp_vm->host_kvm = host_kvm; + hyp_vm->kvm.created_vcpus = nr_vcpus; + hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr; +} + +static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu, + struct pkvm_hyp_vm *hyp_vm, + struct kvm_vcpu *host_vcpu, + unsigned int vcpu_idx) +{ + int ret = 0; + + if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1)) + return -EBUSY; + + if (host_vcpu->vcpu_idx != vcpu_idx) { + ret = -EINVAL; + goto done; + } + + hyp_vcpu->host_vcpu = host_vcpu; + + hyp_vcpu->vcpu.kvm = &hyp_vm->kvm; + hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id); + hyp_vcpu->vcpu.vcpu_idx = vcpu_idx; + + hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu; + hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags); +done: + if (ret) + unpin_host_vcpu(host_vcpu); + return ret; +} + +static int find_free_vm_table_entry(struct kvm *host_kvm) +{ + int i; + + for (i = 0; i < KVM_MAX_PVMS; ++i) { + if (!vm_table[i]) + return i; + } + + return -ENOMEM; +} + +/* + * Allocate a VM table entry and insert a pointer to the new vm. + * + * Return a unique handle to the protected VM on success, + * negative error code on failure. + */ +static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm, + struct pkvm_hyp_vm *hyp_vm) +{ + struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu; + int idx; + + hyp_assert_lock_held(&vm_table_lock); + + /* + * Initializing protected state might have failed, yet a malicious + * host could trigger this function. Thus, ensure that 'vm_table' + * exists. + */ + if (unlikely(!vm_table)) + return -EINVAL; + + idx = find_free_vm_table_entry(host_kvm); + if (idx < 0) + return idx; + + hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx); + + /* VMID 0 is reserved for the host */ + atomic64_set(&mmu->vmid.id, idx + 1); + + mmu->arch = &hyp_vm->kvm.arch; + mmu->pgt = &hyp_vm->pgt; + + vm_table[idx] = hyp_vm; + return hyp_vm->kvm.arch.pkvm.handle; +} + +/* + * Deallocate and remove the VM table entry corresponding to the handle. 
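+ * The memory backing the entry itself is only handed back to the host + * later, via teardown_donated_memory(). 'vm_table_lock' must be held + * here, as asserted below.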
+ */ +static void remove_vm_table_entry(pkvm_handle_t handle) +{ + hyp_assert_lock_held(&vm_table_lock); + vm_table[vm_handle_to_idx(handle)] = NULL; +} + +static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus) +{ + return size_add(sizeof(struct pkvm_hyp_vm), + size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus)); +} + +static void *map_donated_memory_noclear(unsigned long host_va, size_t size) +{ + void *va = (void *)kern_hyp_va(host_va); + + if (!PAGE_ALIGNED(va)) + return NULL; + + if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va), + PAGE_ALIGN(size) >> PAGE_SHIFT)) + return NULL; + + return va; +} + +static void *map_donated_memory(unsigned long host_va, size_t size) +{ + void *va = map_donated_memory_noclear(host_va, size); + + if (va) + memset(va, 0, size); + + return va; +} + +static void __unmap_donated_memory(void *va, size_t size) +{ + WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va), + PAGE_ALIGN(size) >> PAGE_SHIFT)); +} + +static void unmap_donated_memory(void *va, size_t size) +{ + if (!va) + return; + + memset(va, 0, size); + __unmap_donated_memory(va, size); +} + +static void unmap_donated_memory_noclear(void *va, size_t size) +{ + if (!va) + return; + + __unmap_donated_memory(va, size); +} + +/* + * Initialize the hypervisor copy of the protected VM state using the + * memory donated by the host. + * + * Unmaps the donated memory from the host at stage 2. + * + * host_kvm: A pointer to the host's struct kvm. + * vm_hva: The host va of the area being donated for the VM state. + * Must be page aligned. + * pgd_hva: The host va of the area being donated for the stage-2 PGD for + * the VM. Must be page aligned. Its size is implied by the VM's + * VTCR. + * + * Return a unique handle to the protected VM on success, + * negative error code on failure. + */ +int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva, + unsigned long pgd_hva) +{ + struct pkvm_hyp_vm *hyp_vm = NULL; + size_t vm_size, pgd_size; + unsigned int nr_vcpus; + void *pgd = NULL; + int ret; + + ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1); + if (ret) + return ret; + + nr_vcpus = READ_ONCE(host_kvm->created_vcpus); + if (nr_vcpus < 1) { + ret = -EINVAL; + goto err_unpin_kvm; + } + + vm_size = pkvm_get_hyp_vm_size(nr_vcpus); + pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr); + + ret = -ENOMEM; + + hyp_vm = map_donated_memory(vm_hva, vm_size); + if (!hyp_vm) + goto err_remove_mappings; + + pgd = map_donated_memory_noclear(pgd_hva, pgd_size); + if (!pgd) + goto err_remove_mappings; + + init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus); + + hyp_spin_lock(&vm_table_lock); + ret = insert_vm_table_entry(host_kvm, hyp_vm); + if (ret < 0) + goto err_unlock; + + ret = kvm_guest_prepare_stage2(hyp_vm, pgd); + if (ret) + goto err_remove_vm_table_entry; + hyp_spin_unlock(&vm_table_lock); + + return hyp_vm->kvm.arch.pkvm.handle; + +err_remove_vm_table_entry: + remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle); +err_unlock: + hyp_spin_unlock(&vm_table_lock); +err_remove_mappings: + unmap_donated_memory(hyp_vm, vm_size); + unmap_donated_memory(pgd, pgd_size); +err_unpin_kvm: + hyp_unpin_shared_mem(host_kvm, host_kvm + 1); + return ret; +} + +/* + * Initialize the hypervisor copy of the protected vCPU state using the + * memory donated by the host. + * + * handle: The handle for the protected vm. + * host_vcpu: A pointer to the corresponding host vcpu. + * vcpu_hva: The host va of the area being donated for the vcpu state. + * Must be page aligned. 
The size of the area must be equal to + * the page-aligned size of 'struct pkvm_hyp_vcpu'. + * Return 0 on success, negative error code on failure. + */ +int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu, + unsigned long vcpu_hva) +{ + struct pkvm_hyp_vcpu *hyp_vcpu; + struct pkvm_hyp_vm *hyp_vm; + unsigned int idx; + int ret; + + hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu)); + if (!hyp_vcpu) + return -ENOMEM; + + hyp_spin_lock(&vm_table_lock); + + hyp_vm = get_vm_by_handle(handle); + if (!hyp_vm) { + ret = -ENOENT; + goto unlock; + } + + idx = hyp_vm->nr_vcpus; + if (idx >= hyp_vm->kvm.created_vcpus) { + ret = -EINVAL; + goto unlock; + } + + ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx); + if (ret) + goto unlock; + + hyp_vm->vcpus[idx] = hyp_vcpu; + hyp_vm->nr_vcpus++; +unlock: + hyp_spin_unlock(&vm_table_lock); + + if (ret) + unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu)); + + return ret; +} + +static void +teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size) +{ + size = PAGE_ALIGN(size); + memset(addr, 0, size); + + for (void *start = addr; start < addr + size; start += PAGE_SIZE) + push_hyp_memcache(mc, start, hyp_virt_to_phys); + + unmap_donated_memory_noclear(addr, size); +} + +int __pkvm_teardown_vm(pkvm_handle_t handle) +{ + struct kvm_hyp_memcache *mc; + struct pkvm_hyp_vm *hyp_vm; + struct kvm *host_kvm; + unsigned int idx; + size_t vm_size; + int err; + + hyp_spin_lock(&vm_table_lock); + hyp_vm = get_vm_by_handle(handle); + if (!hyp_vm) { + err = -ENOENT; + goto err_unlock; + } + + if (WARN_ON(hyp_page_count(hyp_vm))) { + err = -EBUSY; + goto err_unlock; + } + + host_kvm = hyp_vm->host_kvm; + + /* Ensure the VMID is clean before it can be reallocated */ + __kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu); + remove_vm_table_entry(handle); + hyp_spin_unlock(&vm_table_lock); + + /* Reclaim guest pages (including page-table pages) */ + mc = &host_kvm->arch.pkvm.teardown_mc; + reclaim_guest_pages(hyp_vm, mc); + unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus); + + /* Push the metadata pages to the teardown memcache */ + for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) { + struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx]; + + teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu)); + } + + vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus); + teardown_donated_memory(mc, hyp_vm, vm_size); + hyp_unpin_shared_mem(host_kvm, host_kvm + 1); + return 0; + +err_unlock: + hyp_spin_unlock(&vm_table_lock); + return err; +} diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index e8d4ea2fcfa09039e98b966aefeecabb7d586e2a..110f04627785c8dd0dccebbd2cd674478b255e7b 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -16,6 +16,7 @@ #include #include #include +#include #include unsigned long hyp_nr_cpus; @@ -24,6 +25,7 @@ unsigned long hyp_nr_cpus; (unsigned long)__per_cpu_start) static void *vmemmap_base; +static void *vm_table_base; static void *hyp_pgt_base; static void *host_s2_pgt_base; static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops; @@ -31,16 +33,20 @@ static struct hyp_pool hpool; static int divide_memory_pool(void *virt, unsigned long size) { - unsigned long vstart, vend, nr_pages; + unsigned long nr_pages; hyp_early_alloc_init(virt, size); - hyp_vmemmap_range(__hyp_pa(virt), size, &vstart, &vend); - nr_pages = (vend - vstart) >> PAGE_SHIFT; + nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page)); vmemmap_base = hyp_early_alloc_contig(nr_pages); if 
(!vmemmap_base) return -ENOMEM; + nr_pages = hyp_vm_table_pages(); + vm_table_base = hyp_early_alloc_contig(nr_pages); + if (!vm_table_base) + return -ENOMEM; + nr_pages = hyp_s1_pgtable_pages(); hyp_pgt_base = hyp_early_alloc_contig(nr_pages); if (!hyp_pgt_base) @@ -78,7 +84,7 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size, if (ret) return ret; - ret = hyp_back_vmemmap(phys, size, hyp_virt_to_phys(vmemmap_base)); + ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base)); if (ret) return ret; @@ -138,20 +144,17 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size, } /* - * Map the host's .bss and .rodata sections RO in the hypervisor, but - * transfer the ownership from the host to the hypervisor itself to - * make sure it can't be donated or shared with another entity. + * Map the host sections RO in the hypervisor, but transfer the + * ownership from the host to the hypervisor itself to make sure they + * can't be donated or shared with another entity. * * The ownership transition requires matching changes in the host * stage-2. This will be done later (see finalize_host_mappings()) once * the hyp_vmemmap is addressable. */ prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED); - ret = pkvm_create_mappings(__start_rodata, __end_rodata, prot); - if (ret) - return ret; - - ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, prot); + ret = pkvm_create_mappings(&kvm_vgic_global_state, + &kvm_vgic_global_state + 1, prot); if (ret) return ret; @@ -186,33 +189,20 @@ static void hpool_put_page(void *addr) hyp_put_page(&hpool, addr); } -static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level, - kvm_pte_t *ptep, - enum kvm_pgtable_walk_flags flag, - void * const arg) +static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) { - struct kvm_pgtable_mm_ops *mm_ops = arg; enum kvm_pgtable_prot prot; enum pkvm_page_state state; - kvm_pte_t pte = *ptep; phys_addr_t phys; - if (!kvm_pte_valid(pte)) + if (!kvm_pte_valid(ctx->old)) return 0; - /* - * Fix-up the refcount for the page-table pages as the early allocator - * was unable to access the hyp_vmemmap and so the buddy allocator has - * initialised the refcount to '1'. - */ - mm_ops->get_page(ptep); - if (flag != KVM_PGTABLE_WALK_LEAF) - return 0; - - if (level != (KVM_PGTABLE_MAX_LEVELS - 1)) + if (ctx->level != (KVM_PGTABLE_MAX_LEVELS - 1)) return -EINVAL; - phys = kvm_pte_to_phys(pte); + phys = kvm_pte_to_phys(ctx->old); if (!addr_is_memory(phys)) return -EINVAL; @@ -220,10 +210,10 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level, * Adjust the host stage-2 mappings to match the ownership attributes * configured in the hypervisor stage-1. 
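* For example: a PKVM_PAGE_OWNED page is removed from the host stage-2 * altogether (its owner becomes PKVM_ID_HYP), while a PKVM_PAGE_SHARED_OWNED * page stays mapped for the host with the PKVM_PAGE_SHARED_BORROWED state * annotated.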
*/ - state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte)); + state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old)); switch (state) { case PKVM_PAGE_OWNED: - return host_stage2_set_owner_locked(phys, PAGE_SIZE, pkvm_hyp_id); + return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP); case PKVM_PAGE_SHARED_OWNED: prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED); break; @@ -237,12 +227,25 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level, return host_stage2_idmap_locked(phys, PAGE_SIZE, prot); } -static int finalize_host_mappings(void) +static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) +{ + /* + * Fix-up the refcount for the page-table pages as the early allocator + * was unable to access the hyp_vmemmap and so the buddy allocator has + * initialised the refcount to '1'. + */ + if (kvm_pte_valid(ctx->old)) + ctx->mm_ops->get_page(ctx->ptep); + + return 0; +} + +static int fix_host_ownership(void) { struct kvm_pgtable_walker walker = { - .cb = finalize_host_mappings_walker, - .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, - .arg = pkvm_pgtable.mm_ops, + .cb = fix_host_ownership_walker, + .flags = KVM_PGTABLE_WALK_LEAF, }; int i, ret; @@ -258,6 +261,18 @@ static int finalize_host_mappings(void) return 0; } +static int fix_hyp_pgtable_refcnt(void) +{ + struct kvm_pgtable_walker walker = { + .cb = fix_hyp_pgtable_refcnt_walker, + .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, + .arg = pkvm_pgtable.mm_ops, + }; + + return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits), + &walker); +} + void __noreturn __pkvm_init_finalise(void) { struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data); @@ -287,10 +302,19 @@ void __noreturn __pkvm_init_finalise(void) }; pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops; - ret = finalize_host_mappings(); + ret = fix_host_ownership(); + if (ret) + goto out; + + ret = fix_hyp_pgtable_refcnt(); + if (ret) + goto out; + + ret = hyp_create_pcpu_fixmap(); if (ret) goto out; + pkvm_hyp_vm_table_init(vm_table_base); out: /* * We tail-called to here from handle___pkvm_init() and will not return, diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index cdf8e76b0be141380c95a39e76398a222be75c93..b11cf2c618a6c9a7a52762d5eae98667fa821086 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -49,35 +49,38 @@ #define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2) #define KVM_MAX_OWNER_ID 1 +/* + * Used to indicate a pte for which a 'break-before-make' sequence is in + * progress. 
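+ * Such a pte is invalid, but the non-zero software bit distinguishes + * it from an empty entry, so concurrent walkers can back off instead + * of installing a mapping over it.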
+ */ +#define KVM_INVALID_PTE_LOCKED BIT(10) + struct kvm_pgtable_walk_data { - struct kvm_pgtable *pgt; struct kvm_pgtable_walker *walker; u64 addr; u64 end; }; -#define KVM_PHYS_INVALID (-1ULL) - static bool kvm_phys_is_valid(u64 phys) { return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX)); } -static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level) +static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys) { - u64 granule = kvm_granule_size(level); + u64 granule = kvm_granule_size(ctx->level); - if (!kvm_level_supports_block_mapping(level)) + if (!kvm_level_supports_block_mapping(ctx->level)) return false; - if (granule > (end - addr)) + if (granule > (ctx->end - ctx->addr)) return false; if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule)) return false; - return IS_ALIGNED(addr, granule); + return IS_ALIGNED(ctx->addr, granule); } static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level) @@ -88,7 +91,7 @@ static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level) return (data->addr >> shift) & mask; } -static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr) +static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr) { u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */ u64 mask = BIT(pgt->ia_bits) - 1; @@ -96,11 +99,6 @@ static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr) return (addr & mask) >> shift; } -static u32 kvm_pgd_page_idx(struct kvm_pgtable_walk_data *data) -{ - return __kvm_pgd_page_idx(data->pgt, data->addr); -} - static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level) { struct kvm_pgtable pgt = { @@ -108,7 +106,7 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level) .start_level = start_level, }; - return __kvm_pgd_page_idx(&pgt, -1ULL) + 1; + return kvm_pgd_page_idx(&pgt, -1ULL) + 1; } static bool kvm_pte_table(kvm_pte_t pte, u32 level) @@ -122,16 +120,6 @@ static bool kvm_pte_table(kvm_pte_t pte, u32 level) return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE; } -static kvm_pte_t kvm_phys_to_pte(u64 pa) -{ - kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK; - - if (PAGE_SHIFT == 16) - pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48); - - return pte; -} - static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops) { return mm_ops->phys_to_virt(kvm_pte_to_phys(pte)); @@ -142,16 +130,13 @@ static void kvm_clear_pte(kvm_pte_t *ptep) WRITE_ONCE(*ptep, 0); } -static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp, - struct kvm_pgtable_mm_ops *mm_ops) +static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops) { - kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp)); + kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp)); pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE); pte |= KVM_PTE_VALID; - - WARN_ON(kvm_pte_valid(old)); - smp_store_release(ptep, pte); + return pte; } static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level) @@ -172,36 +157,47 @@ static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id) return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id); } -static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr, - u32 level, kvm_pte_t *ptep, - enum kvm_pgtable_walk_flags flag) +static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, + const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) { struct kvm_pgtable_walker *walker = 
data->walker; - return walker->cb(addr, data->end, level, ptep, flag, walker->arg); + + /* Ensure the appropriate lock is held (e.g. RCU lock for stage-2 MMU) */ + WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx) && !kvm_pgtable_walk_lock_held()); + return walker->cb(ctx, visit); } static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data, - kvm_pte_t *pgtable, u32 level); + struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level); static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data, - kvm_pte_t *ptep, u32 level) + struct kvm_pgtable_mm_ops *mm_ops, + kvm_pteref_t pteref, u32 level) { - int ret = 0; - u64 addr = data->addr; - kvm_pte_t *childp, pte = *ptep; - bool table = kvm_pte_table(pte, level); enum kvm_pgtable_walk_flags flags = data->walker->flags; + kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref); + struct kvm_pgtable_visit_ctx ctx = { + .ptep = ptep, + .old = READ_ONCE(*ptep), + .arg = data->walker->arg, + .mm_ops = mm_ops, + .addr = data->addr, + .end = data->end, + .level = level, + .flags = flags, + }; + int ret = 0; + kvm_pteref_t childp; + bool table = kvm_pte_table(ctx.old, level); - if (table && (flags & KVM_PGTABLE_WALK_TABLE_PRE)) { - ret = kvm_pgtable_visitor_cb(data, addr, level, ptep, - KVM_PGTABLE_WALK_TABLE_PRE); - } + if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) + ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE); - if (!table && (flags & KVM_PGTABLE_WALK_LEAF)) { - ret = kvm_pgtable_visitor_cb(data, addr, level, ptep, - KVM_PGTABLE_WALK_LEAF); - pte = *ptep; - table = kvm_pte_table(pte, level); + if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) { + ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF); + ctx.old = READ_ONCE(*ptep); + table = kvm_pte_table(ctx.old, level); } if (ret) @@ -213,22 +209,20 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data, goto out; } - childp = kvm_pte_follow(pte, data->pgt->mm_ops); - ret = __kvm_pgtable_walk(data, childp, level + 1); + childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops); + ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1); if (ret) goto out; - if (flags & KVM_PGTABLE_WALK_TABLE_POST) { - ret = kvm_pgtable_visitor_cb(data, addr, level, ptep, - KVM_PGTABLE_WALK_TABLE_POST); - } + if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST) + ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST); out: return ret; } static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data, - kvm_pte_t *pgtable, u32 level) + struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level) { u32 idx; int ret = 0; @@ -237,12 +231,12 @@ static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data, return -EINVAL; for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) { - kvm_pte_t *ptep = &pgtable[idx]; + kvm_pteref_t pteref = &pgtable[idx]; if (data->addr >= data->end) break; - ret = __kvm_pgtable_visit(data, ptep, level); + ret = __kvm_pgtable_visit(data, mm_ops, pteref, level); if (ret) break; } @@ -250,11 +244,10 @@ static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data, return ret; } -static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data) +static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data) { u32 idx; int ret = 0; - struct kvm_pgtable *pgt = data->pgt; u64 limit = BIT(pgt->ia_bits); if (data->addr > limit || data->end > limit) @@ -263,10 +256,10 @@ static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data) if (!pgt->pgd) 
return -EINVAL; - for (idx = kvm_pgd_page_idx(data); data->addr < data->end; ++idx) { - kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE]; + for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) { + kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE]; - ret = __kvm_pgtable_walk(data, ptep, pgt->start_level); + ret = __kvm_pgtable_walk(data, pgt->mm_ops, pteref, pgt->start_level); if (ret) break; } @@ -278,13 +271,20 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size, struct kvm_pgtable_walker *walker) { struct kvm_pgtable_walk_data walk_data = { - .pgt = pgt, .addr = ALIGN_DOWN(addr, PAGE_SIZE), .end = PAGE_ALIGN(walk_data.addr + size), .walker = walker, }; + int r; - return _kvm_pgtable_walk(&walk_data); + r = kvm_pgtable_walk_begin(walker); + if (r) + return r; + + r = _kvm_pgtable_walk(pgt, &walk_data); + kvm_pgtable_walk_end(walker); + + return r; } struct leaf_walk_data { @@ -292,13 +292,13 @@ struct leaf_walk_data { u32 level; }; -static int leaf_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, - enum kvm_pgtable_walk_flags flag, void * const arg) +static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) { - struct leaf_walk_data *data = arg; + struct leaf_walk_data *data = ctx->arg; - data->pte = *ptep; - data->level = level; + data->pte = ctx->old; + data->level = ctx->level; return 0; } @@ -329,7 +329,6 @@ int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr, struct hyp_map_data { u64 phys; kvm_pte_t attr; - struct kvm_pgtable_mm_ops *mm_ops; }; static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep) @@ -383,47 +382,49 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte) return prot; } -static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level, - kvm_pte_t *ptep, struct hyp_map_data *data) +static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx, + struct hyp_map_data *data) { - kvm_pte_t new, old = *ptep; - u64 granule = kvm_granule_size(level), phys = data->phys; + kvm_pte_t new; + u64 granule = kvm_granule_size(ctx->level), phys = data->phys; - if (!kvm_block_mapping_supported(addr, end, phys, level)) + if (!kvm_block_mapping_supported(ctx, phys)) return false; data->phys += granule; - new = kvm_init_valid_leaf_pte(phys, data->attr, level); - if (old == new) + new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level); + if (ctx->old == new) return true; - if (!kvm_pte_valid(old)) - data->mm_ops->get_page(ptep); - else if (WARN_ON((old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW)) + if (!kvm_pte_valid(ctx->old)) + ctx->mm_ops->get_page(ctx->ptep); + else if (WARN_ON((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW)) return false; - smp_store_release(ptep, new); + smp_store_release(ctx->ptep, new); return true; } -static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, - enum kvm_pgtable_walk_flags flag, void * const arg) +static int hyp_map_walker(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) { - kvm_pte_t *childp; - struct hyp_map_data *data = arg; - struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; + kvm_pte_t *childp, new; + struct hyp_map_data *data = ctx->arg; + struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; - if (hyp_map_walker_try_leaf(addr, end, level, ptep, arg)) + if (hyp_map_walker_try_leaf(ctx, data)) return 0; - if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1)) + if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1)) return -EINVAL; childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL); 
if (!childp) return -ENOMEM; - kvm_set_table_pte(ptep, childp, mm_ops); - mm_ops->get_page(ptep); + new = kvm_init_table_pte(childp, mm_ops); + mm_ops->get_page(ctx->ptep); + smp_store_release(ctx->ptep, new); + return 0; } @@ -433,7 +434,6 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, int ret; struct hyp_map_data map_data = { .phys = ALIGN_DOWN(phys, PAGE_SIZE), - .mm_ops = pgt->mm_ops, }; struct kvm_pgtable_walker walker = { .cb = hyp_map_walker, @@ -451,44 +451,39 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, return ret; } -struct hyp_unmap_data { - u64 unmapped; - struct kvm_pgtable_mm_ops *mm_ops; -}; - -static int hyp_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, - enum kvm_pgtable_walk_flags flag, void * const arg) +static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) { - kvm_pte_t pte = *ptep, *childp = NULL; - u64 granule = kvm_granule_size(level); - struct hyp_unmap_data *data = arg; - struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; + kvm_pte_t *childp = NULL; + u64 granule = kvm_granule_size(ctx->level); + u64 *unmapped = ctx->arg; + struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; - if (!kvm_pte_valid(pte)) + if (!kvm_pte_valid(ctx->old)) return -EINVAL; - if (kvm_pte_table(pte, level)) { - childp = kvm_pte_follow(pte, mm_ops); + if (kvm_pte_table(ctx->old, ctx->level)) { + childp = kvm_pte_follow(ctx->old, mm_ops); if (mm_ops->page_count(childp) != 1) return 0; - kvm_clear_pte(ptep); + kvm_clear_pte(ctx->ptep); dsb(ishst); - __tlbi_level(vae2is, __TLBI_VADDR(addr, 0), level); + __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level); } else { - if (end - addr < granule) + if (ctx->end - ctx->addr < granule) return -EINVAL; - kvm_clear_pte(ptep); + kvm_clear_pte(ctx->ptep); dsb(ishst); - __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level); - data->unmapped += granule; + __tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level); + *unmapped += granule; } dsb(ish); isb(); - mm_ops->put_page(ptep); + mm_ops->put_page(ctx->ptep); if (childp) mm_ops->put_page(childp); @@ -498,12 +493,10 @@ static int hyp_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) { - struct hyp_unmap_data unmap_data = { - .mm_ops = pgt->mm_ops, - }; + u64 unmapped = 0; struct kvm_pgtable_walker walker = { .cb = hyp_unmap_walker, - .arg = &unmap_data, + .arg = &unmapped, .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, }; @@ -511,7 +504,7 @@ u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) return 0; kvm_pgtable_walk(pgt, addr, size, &walker); - return unmap_data.unmapped; + return unmapped; } int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits, @@ -519,7 +512,7 @@ int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits, { u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits); - pgt->pgd = (kvm_pte_t *)mm_ops->zalloc_page(NULL); + pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL); if (!pgt->pgd) return -ENOMEM; @@ -532,19 +525,18 @@ int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits, return 0; } -static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, - enum kvm_pgtable_walk_flags flag, void * const arg) +static int hyp_free_walker(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) { - struct kvm_pgtable_mm_ops *mm_ops = arg; - kvm_pte_t pte = *ptep; + struct kvm_pgtable_mm_ops *mm_ops 
= ctx->mm_ops; - if (!kvm_pte_valid(pte)) + if (!kvm_pte_valid(ctx->old)) return 0; - mm_ops->put_page(ptep); + mm_ops->put_page(ctx->ptep); - if (kvm_pte_table(pte, level)) - mm_ops->put_page(kvm_pte_follow(pte, mm_ops)); + if (kvm_pte_table(ctx->old, ctx->level)) + mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops)); return 0; } @@ -554,11 +546,10 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt) struct kvm_pgtable_walker walker = { .cb = hyp_free_walker, .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, - .arg = pgt->mm_ops, }; WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker)); - pgt->mm_ops->put_page(pgt->pgd); + pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd)); pgt->pgd = NULL; } @@ -573,8 +564,6 @@ struct stage2_map_data { struct kvm_s2_mmu *mmu; void *memcache; - struct kvm_pgtable_mm_ops *mm_ops; - /* Force mappings to page granularity */ bool force_pte; }; @@ -682,19 +671,92 @@ static bool stage2_pte_is_counted(kvm_pte_t pte) return !!pte; } -static void stage2_put_pte(kvm_pte_t *ptep, struct kvm_s2_mmu *mmu, u64 addr, - u32 level, struct kvm_pgtable_mm_ops *mm_ops) +static bool stage2_pte_is_locked(kvm_pte_t pte) +{ + return !kvm_pte_valid(pte) && (pte & KVM_INVALID_PTE_LOCKED); +} + +static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new) +{ + if (!kvm_pgtable_walk_shared(ctx)) { + WRITE_ONCE(*ctx->ptep, new); + return true; + } + + return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old; +} + +/** + * stage2_try_break_pte() - Invalidates a pte according to the + * 'break-before-make' requirements of the + * architecture. + * + * @ctx: context of the visited pte. + * @mmu: stage-2 mmu + * + * Returns: true if the pte was successfully broken. + * + * If the removed pte was valid, performs the necessary serialization and TLB + * invalidation for the old value. For counted ptes, drops the reference count + * on the containing table page. + */ +static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx, + struct kvm_s2_mmu *mmu) +{ + struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; + + if (stage2_pte_is_locked(ctx->old)) { + /* + * Should never occur if this walker has exclusive access to the + * page tables. + */ + WARN_ON(!kvm_pgtable_walk_shared(ctx)); + return false; + } + + if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED)) + return false; + + /* + * Perform the appropriate TLB invalidation based on the evicted pte + * value (if any). + */ + if (kvm_pte_table(ctx->old, ctx->level)) + kvm_call_hyp(__kvm_tlb_flush_vmid, mmu); + else if (kvm_pte_valid(ctx->old)) + kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level); + + if (stage2_pte_is_counted(ctx->old)) + mm_ops->put_page(ctx->ptep); + + return true; +} + +static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new) +{ + struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; + + WARN_ON(!stage2_pte_is_locked(*ctx->ptep)); + + if (stage2_pte_is_counted(new)) + mm_ops->get_page(ctx->ptep); + + smp_store_release(ctx->ptep, new); +} + +static void stage2_put_pte(const struct kvm_pgtable_visit_ctx *ctx, struct kvm_s2_mmu *mmu, + struct kvm_pgtable_mm_ops *mm_ops) { /* * Clear the existing PTE, and perform break-before-make with * TLB maintenance if it was valid. 
*/ - if (kvm_pte_valid(*ptep)) { - kvm_clear_pte(ptep); - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level); + if (kvm_pte_valid(ctx->old)) { + kvm_clear_pte(ctx->ptep); + kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level); } - mm_ops->put_page(ptep); + mm_ops->put_page(ctx->ptep); } static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte) @@ -708,44 +770,42 @@ static bool stage2_pte_executable(kvm_pte_t pte) return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN); } -static bool stage2_leaf_mapping_allowed(u64 addr, u64 end, u32 level, +static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx, struct stage2_map_data *data) { - if (data->force_pte && (level < (KVM_PGTABLE_MAX_LEVELS - 1))) + if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1))) return false; - return kvm_block_mapping_supported(addr, end, data->phys, level); + return kvm_block_mapping_supported(ctx, data->phys); } -static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level, - kvm_pte_t *ptep, +static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx, struct stage2_map_data *data) { - kvm_pte_t new, old = *ptep; - u64 granule = kvm_granule_size(level), phys = data->phys; + kvm_pte_t new; + u64 granule = kvm_granule_size(ctx->level), phys = data->phys; struct kvm_pgtable *pgt = data->mmu->pgt; - struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; + struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; - if (!stage2_leaf_mapping_allowed(addr, end, level, data)) + if (!stage2_leaf_mapping_allowed(ctx, data)) return -E2BIG; if (kvm_phys_is_valid(phys)) - new = kvm_init_valid_leaf_pte(phys, data->attr, level); + new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level); else new = kvm_init_invalid_leaf_owner(data->owner_id); - if (stage2_pte_is_counted(old)) { - /* - * Skip updating the PTE if we are trying to recreate the exact - * same mapping or only change the access permissions. Instead, - * the vCPU will exit one more time from guest if still needed - * and then go through the path of relaxing permissions. - */ - if (!stage2_pte_needs_update(old, new)) - return -EAGAIN; + /* + * Skip updating the PTE if we are trying to recreate the exact + * same mapping or only change the access permissions. Instead, + * the vCPU will exit one more time from guest if still needed + * and then go through the path of relaxing permissions. 
+ */ + if (!stage2_pte_needs_update(ctx->old, new)) + return -EAGAIN; - stage2_put_pte(ptep, data->mmu, addr, level, mm_ops); - } + if (!stage2_try_break_pte(ctx, data->mmu)) + return -EAGAIN; /* Perform CMOs before installation of the guest stage-2 PTE */ if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new)) @@ -755,56 +815,43 @@ static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level, if (mm_ops->icache_inval_pou && stage2_pte_executable(new)) mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule); - smp_store_release(ptep, new); - if (stage2_pte_is_counted(new)) - mm_ops->get_page(ptep); + stage2_make_pte(ctx, new); + if (kvm_phys_is_valid(phys)) data->phys += granule; return 0; } -static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level, - kvm_pte_t *ptep, +static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx, struct stage2_map_data *data) { - if (data->anchor) - return 0; + struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; + kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops); + int ret; - if (!stage2_leaf_mapping_allowed(addr, end, level, data)) + if (!stage2_leaf_mapping_allowed(ctx, data)) return 0; - data->childp = kvm_pte_follow(*ptep, data->mm_ops); - kvm_clear_pte(ptep); + ret = stage2_map_walker_try_leaf(ctx, data); + if (ret) + return ret; - /* - * Invalidate the whole stage-2, as we may have numerous leaf - * entries below us which would otherwise need invalidating - * individually. - */ - kvm_call_hyp(__kvm_tlb_flush_vmid, data->mmu); - data->anchor = ptep; + mm_ops->free_removed_table(childp, ctx->level); return 0; } -static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, +static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx, struct stage2_map_data *data) { - struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; - kvm_pte_t *childp, pte = *ptep; + struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; + kvm_pte_t *childp, new; int ret; - if (data->anchor) { - if (stage2_pte_is_counted(pte)) - mm_ops->put_page(ptep); - - return 0; - } - - ret = stage2_map_walker_try_leaf(addr, end, level, ptep, data); + ret = stage2_map_walker_try_leaf(ctx, data); if (ret != -E2BIG) return ret; - if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1)) + if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1)) return -EINVAL; if (!data->memcache) @@ -814,99 +861,62 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, if (!childp) return -ENOMEM; + if (!stage2_try_break_pte(ctx, data->mmu)) { + mm_ops->put_page(childp); + return -EAGAIN; + } + /* * If we've run into an existing block mapping then replace it with * a table. Accesses beyond 'end' that fall within the new table * will be mapped lazily. 
*/ - if (stage2_pte_is_counted(pte)) - stage2_put_pte(ptep, data->mmu, addr, level, mm_ops); - - kvm_set_table_pte(ptep, childp, mm_ops); - mm_ops->get_page(ptep); + new = kvm_init_table_pte(childp, mm_ops); + stage2_make_pte(ctx, new); return 0; } -static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level, - kvm_pte_t *ptep, - struct stage2_map_data *data) -{ - struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; - kvm_pte_t *childp; - int ret = 0; - - if (!data->anchor) - return 0; - - if (data->anchor == ptep) { - childp = data->childp; - data->anchor = NULL; - data->childp = NULL; - ret = stage2_map_walk_leaf(addr, end, level, ptep, data); - } else { - childp = kvm_pte_follow(*ptep, mm_ops); - } - - mm_ops->put_page(childp); - mm_ops->put_page(ptep); - - return ret; -} - /* - * This is a little fiddly, as we use all three of the walk flags. The idea - * is that the TABLE_PRE callback runs for table entries on the way down, - * looking for table entries which we could conceivably replace with a - * block entry for this mapping. If it finds one, then it sets the 'anchor' - * field in 'struct stage2_map_data' to point at the table entry, before - * clearing the entry to zero and descending into the now detached table. + * The TABLE_PRE callback runs for table entries on the way down, looking + * for table entries which we could conceivably replace with a block entry + * for this mapping. If it finds one it replaces the entry and calls + * kvm_pgtable_mm_ops::free_removed_table() to tear down the detached table. * - * The behaviour of the LEAF callback then depends on whether or not the - * anchor has been set. If not, then we're not using a block mapping higher - * up the table and we perform the mapping at the existing leaves instead. - * If, on the other hand, the anchor _is_ set, then we drop references to - * all valid leaves so that the pages beneath the anchor can be freed. - * - * Finally, the TABLE_POST callback does nothing if the anchor has not - * been set, but otherwise frees the page-table pages while walking back up - * the page-table, installing the block entry when it revisits the anchor - * pointer and clearing the anchor to NULL. + * Otherwise, the LEAF callback performs the mapping at the existing leaves + * instead. 
*/ -static int stage2_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, - enum kvm_pgtable_walk_flags flag, void * const arg) +static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) { - struct stage2_map_data *data = arg; + struct stage2_map_data *data = ctx->arg; - switch (flag) { + switch (visit) { case KVM_PGTABLE_WALK_TABLE_PRE: - return stage2_map_walk_table_pre(addr, end, level, ptep, data); + return stage2_map_walk_table_pre(ctx, data); case KVM_PGTABLE_WALK_LEAF: - return stage2_map_walk_leaf(addr, end, level, ptep, data); - case KVM_PGTABLE_WALK_TABLE_POST: - return stage2_map_walk_table_post(addr, end, level, ptep, data); + return stage2_map_walk_leaf(ctx, data); + default: + return -EINVAL; } - - return -EINVAL; } int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot, - void *mc) + void *mc, enum kvm_pgtable_walk_flags flags) { int ret; struct stage2_map_data map_data = { .phys = ALIGN_DOWN(phys, PAGE_SIZE), .mmu = pgt->mmu, .memcache = mc, - .mm_ops = pgt->mm_ops, .force_pte = pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot), }; struct kvm_pgtable_walker walker = { .cb = stage2_map_walker, - .flags = KVM_PGTABLE_WALK_TABLE_PRE | - KVM_PGTABLE_WALK_LEAF | - KVM_PGTABLE_WALK_TABLE_POST, + .flags = flags | + KVM_PGTABLE_WALK_TABLE_PRE | + KVM_PGTABLE_WALK_LEAF, .arg = &map_data, }; @@ -930,15 +940,13 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size, .phys = KVM_PHYS_INVALID, .mmu = pgt->mmu, .memcache = mc, - .mm_ops = pgt->mm_ops, .owner_id = owner_id, .force_pte = true, }; struct kvm_pgtable_walker walker = { .cb = stage2_map_walker, .flags = KVM_PGTABLE_WALK_TABLE_PRE | - KVM_PGTABLE_WALK_LEAF | - KVM_PGTABLE_WALK_TABLE_POST, + KVM_PGTABLE_WALK_LEAF, .arg = &map_data, }; @@ -949,30 +957,29 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size, return ret; } -static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, - enum kvm_pgtable_walk_flags flag, - void * const arg) +static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) { - struct kvm_pgtable *pgt = arg; + struct kvm_pgtable *pgt = ctx->arg; struct kvm_s2_mmu *mmu = pgt->mmu; - struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops; - kvm_pte_t pte = *ptep, *childp = NULL; + struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; + kvm_pte_t *childp = NULL; bool need_flush = false; - if (!kvm_pte_valid(pte)) { - if (stage2_pte_is_counted(pte)) { - kvm_clear_pte(ptep); - mm_ops->put_page(ptep); + if (!kvm_pte_valid(ctx->old)) { + if (stage2_pte_is_counted(ctx->old)) { + kvm_clear_pte(ctx->ptep); + mm_ops->put_page(ctx->ptep); } return 0; } - if (kvm_pte_table(pte, level)) { - childp = kvm_pte_follow(pte, mm_ops); + if (kvm_pte_table(ctx->old, ctx->level)) { + childp = kvm_pte_follow(ctx->old, mm_ops); if (mm_ops->page_count(childp) != 1) return 0; - } else if (stage2_pte_cacheable(pgt, pte)) { + } else if (stage2_pte_cacheable(pgt, ctx->old)) { need_flush = !stage2_has_fwb(pgt); } @@ -981,11 +988,11 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, * block entry and rely on the remaining portions being faulted * back lazily. 
*/ - stage2_put_pte(ptep, mmu, addr, level, mm_ops); + stage2_put_pte(ctx, mmu, mm_ops); if (need_flush && mm_ops->dcache_clean_inval_poc) - mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops), - kvm_granule_size(level)); + mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops), + kvm_granule_size(ctx->level)); if (childp) mm_ops->put_page(childp); @@ -1009,21 +1016,19 @@ struct stage2_attr_data { kvm_pte_t attr_clr; kvm_pte_t pte; u32 level; - struct kvm_pgtable_mm_ops *mm_ops; }; -static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, - enum kvm_pgtable_walk_flags flag, - void * const arg) +static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) { - kvm_pte_t pte = *ptep; - struct stage2_attr_data *data = arg; - struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; + kvm_pte_t pte = ctx->old; + struct stage2_attr_data *data = ctx->arg; + struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; - if (!kvm_pte_valid(pte)) + if (!kvm_pte_valid(ctx->old)) return 0; - data->level = level; + data->level = ctx->level; data->pte = pte; pte &= ~data->attr_clr; pte |= data->attr_set; @@ -1039,10 +1044,12 @@ static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, * stage-2 PTE if we are going to add executable permission. */ if (mm_ops->icache_inval_pou && - stage2_pte_executable(pte) && !stage2_pte_executable(*ptep)) + stage2_pte_executable(pte) && !stage2_pte_executable(ctx->old)) mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops), - kvm_granule_size(level)); - WRITE_ONCE(*ptep, pte); + kvm_granule_size(ctx->level)); + + if (!stage2_try_set_pte(ctx, pte)) + return -EAGAIN; } return 0; @@ -1051,19 +1058,18 @@ static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr, u64 size, kvm_pte_t attr_set, kvm_pte_t attr_clr, kvm_pte_t *orig_pte, - u32 *level) + u32 *level, enum kvm_pgtable_walk_flags flags) { int ret; kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI; struct stage2_attr_data data = { .attr_set = attr_set & attr_mask, .attr_clr = attr_clr & attr_mask, - .mm_ops = pgt->mm_ops, }; struct kvm_pgtable_walker walker = { .cb = stage2_attr_walker, .arg = &data, - .flags = KVM_PGTABLE_WALK_LEAF, + .flags = flags | KVM_PGTABLE_WALK_LEAF, }; ret = kvm_pgtable_walk(pgt, addr, size, &walker); @@ -1082,14 +1088,14 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size) { return stage2_update_leaf_attrs(pgt, addr, size, 0, KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W, - NULL, NULL); + NULL, NULL, 0); } kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr) { kvm_pte_t pte = 0; stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0, - &pte, NULL); + &pte, NULL, 0); dsb(ishst); return pte; } @@ -1098,7 +1104,7 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr) { kvm_pte_t pte = 0; stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF, - &pte, NULL); + &pte, NULL, 0); /* * "But where's the TLBI?!", you scream. * "Over in the core code", I sigh. 
@@ -1111,7 +1117,7 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr) bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr) { kvm_pte_t pte = 0; - stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL); + stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL, 0); return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF; } @@ -1134,26 +1140,25 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, if (prot & KVM_PGTABLE_PROT_X) clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN; - ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level); + ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level, + KVM_PGTABLE_WALK_SHARED); if (!ret) kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level); return ret; } -static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, - enum kvm_pgtable_walk_flags flag, - void * const arg) +static int stage2_flush_walker(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) { - struct kvm_pgtable *pgt = arg; + struct kvm_pgtable *pgt = ctx->arg; struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops; - kvm_pte_t pte = *ptep; - if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte)) + if (!kvm_pte_valid(ctx->old) || !stage2_pte_cacheable(pgt, ctx->old)) return 0; if (mm_ops->dcache_clean_inval_poc) - mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops), - kvm_granule_size(level)); + mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops), + kvm_granule_size(ctx->level)); return 0; } @@ -1184,7 +1189,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE; - pgt->pgd = mm_ops->zalloc_pages_exact(pgd_sz); + pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz); if (!pgt->pgd) return -ENOMEM; @@ -1200,20 +1205,27 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, return 0; } -static int stage2_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, - enum kvm_pgtable_walk_flags flag, - void * const arg) +size_t kvm_pgtable_stage2_pgd_size(u64 vtcr) +{ + u32 ia_bits = VTCR_EL2_IPA(vtcr); + u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr); + u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; + + return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE; +} + +static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) { - struct kvm_pgtable_mm_ops *mm_ops = arg; - kvm_pte_t pte = *ptep; + struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; - if (!stage2_pte_is_counted(pte)) + if (!stage2_pte_is_counted(ctx->old)) return 0; - mm_ops->put_page(ptep); + mm_ops->put_page(ctx->ptep); - if (kvm_pte_table(pte, level)) - mm_ops->put_page(kvm_pte_follow(pte, mm_ops)); + if (kvm_pte_table(ctx->old, ctx->level)) + mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops)); return 0; } @@ -1225,11 +1237,33 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) .cb = stage2_free_walker, .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, - .arg = pgt->mm_ops, }; WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker)); pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE; - pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz); + pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz); pgt->pgd = NULL; } + +void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level) +{ + kvm_pteref_t ptep 
= (kvm_pteref_t)pgtable; + struct kvm_pgtable_walker walker = { + .cb = stage2_free_walker, + .flags = KVM_PGTABLE_WALK_LEAF | + KVM_PGTABLE_WALK_TABLE_POST, + }; + struct kvm_pgtable_walk_data data = { + .walker = &walker, + + /* + * At this point the IPA really doesn't matter, as the page + * table being traversed has already been removed from the stage + * 2. Set an appropriate range to cover the entire page table. + */ + .addr = 0, + .end = kvm_granule_size(level), + }; + + WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1)); +} diff --git a/arch/arm64/kvm/hyp/vhe/Makefile b/arch/arm64/kvm/hyp/vhe/Makefile index 96bec0ecf9dd96c0a3f19e6bfedd4b8852b4eedd..3b9e5464b5b39ca07633751f2eea412c03d672f9 100644 --- a/arch/arm64/kvm/hyp/vhe/Makefile +++ b/arch/arm64/kvm/hyp/vhe/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # -# Makefile for Kernel-based Virtual Machine module, HYP/nVHE part +# Makefile for Kernel-based Virtual Machine module, HYP/VHE part # asflags-y := -D__KVM_VHE_HYPERVISOR__ diff --git a/arch/arm64/kvm/irq.h b/arch/arm64/kvm/irq.h deleted file mode 100644 index 0d257de42c10997fbc147b5aa53a84b880f4b182..0000000000000000000000000000000000000000 --- a/arch/arm64/kvm/irq.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * irq.h: in kernel interrupt controller related definitions - * Copyright (c) 2016 Red Hat, Inc. - * - * This header is included by irqchip.c. However, on ARM, interrupt - * controller declarations are located in include/kvm/arm_vgic.h since - * they are mostly shared between arm and arm64. - */ - -#ifndef __IRQ_H -#define __IRQ_H - -#include - -#endif diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 60ee3d9f01f8c198b0dd90a35f32594e1cfff2d1..31d7fa4c7c140513f6b69034cafd5f23a8a88567 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -128,6 +128,25 @@ static void kvm_s2_free_pages_exact(void *virt, size_t size) free_pages_exact(virt, size); } +static struct kvm_pgtable_mm_ops kvm_s2_mm_ops; + +static void stage2_free_removed_table_rcu_cb(struct rcu_head *head) +{ + struct page *page = container_of(head, struct page, rcu_head); + void *pgtable = page_to_virt(page); + u32 level = page_private(page); + + kvm_pgtable_stage2_free_removed(&kvm_s2_mm_ops, pgtable, level); +} + +static void stage2_free_removed_table(void *addr, u32 level) +{ + struct page *page = virt_to_page(addr); + + set_page_private(page, (unsigned long)level); + call_rcu(&page->rcu_head, stage2_free_removed_table_rcu_cb); +} + static void kvm_host_get_page(void *addr) { get_page(virt_to_page(addr)); @@ -640,8 +659,8 @@ static struct kvm_pgtable_mm_ops kvm_user_mm_ops = { static int get_user_mapping_size(struct kvm *kvm, u64 addr) { struct kvm_pgtable pgt = { - .pgd = (kvm_pte_t *)kvm->mm->pgd, - .ia_bits = VA_BITS, + .pgd = (kvm_pteref_t)kvm->mm->pgd, + .ia_bits = vabits_actual, .start_level = (KVM_PGTABLE_MAX_LEVELS - CONFIG_PGTABLE_LEVELS), .mm_ops = &kvm_user_mm_ops, @@ -662,6 +681,7 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = { .zalloc_page = stage2_memcache_zalloc_page, .zalloc_pages_exact = kvm_s2_zalloc_pages_exact, .free_pages_exact = kvm_s2_free_pages_exact, + .free_removed_table = stage2_free_removed_table, .get_page = kvm_host_get_page, .put_page = kvm_s2_put_page, .page_count = kvm_host_page_count, @@ -675,15 +695,42 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = { * kvm_init_stage2_mmu - Initialise a S2 MMU structure * @kvm: The pointer to the KVM structure * @mmu: The pointer to the s2 MMU 
structure + * @type: The machine type of the virtual machine * * Allocates only the stage-2 HW PGD level table(s). * Note we don't need locking here as this is only called when the VM is * created, which can only be done once. */ -int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) +int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type) { + u32 kvm_ipa_limit = get_kvm_ipa_limit(); int cpu, err; struct kvm_pgtable *pgt; + u64 mmfr0, mmfr1; + u32 phys_shift; + + if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) + return -EINVAL; + + phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type); + if (is_protected_kvm_enabled()) { + phys_shift = kvm_ipa_limit; + } else if (phys_shift) { + if (phys_shift > kvm_ipa_limit || + phys_shift < ARM64_MIN_PARANGE_BITS) + return -EINVAL; + } else { + phys_shift = KVM_PHYS_SHIFT; + if (phys_shift > kvm_ipa_limit) { + pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n", + current->comm); + return -EINVAL; + } + } + + mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); + mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift); if (mmu->pgt != NULL) { kvm_err("kvm_arch already initialized?\n"); @@ -807,6 +854,32 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu) } } +static void hyp_mc_free_fn(void *addr, void *unused) +{ + free_page((unsigned long)addr); +} + +static void *hyp_mc_alloc_fn(void *unused) +{ + return (void *)__get_free_page(GFP_KERNEL_ACCOUNT); +} + +void free_hyp_memcache(struct kvm_hyp_memcache *mc) +{ + if (is_protected_kvm_enabled()) + __free_hyp_memcache(mc, hyp_mc_free_fn, + kvm_host_va, NULL); +} + +int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages) +{ + if (!is_protected_kvm_enabled()) + return 0; + + return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn, + kvm_host_pa, NULL); +} + /** * kvm_phys_addr_ioremap - map a device range to guest IPA * @@ -841,7 +914,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, write_lock(&kvm->mmu_lock); ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot, - &cache); + &cache, 0); write_unlock(&kvm->mmu_lock); if (ret) break; @@ -1091,32 +1164,26 @@ static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva) * - mmap_lock protects between a VM faulting a page in and the VMM performing * an mprotect() to add VM_MTE */ -static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn, - unsigned long size) +static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn, + unsigned long size) { unsigned long i, nr_pages = size >> PAGE_SHIFT; - struct page *page; + struct page *page = pfn_to_page(pfn); if (!kvm_has_mte(kvm)) - return 0; - - /* - * pfn_to_online_page() is used to reject ZONE_DEVICE pages - * that may not support tags. 
- */ - page = pfn_to_online_page(pfn); - - if (!page) - return -EFAULT; + return; for (i = 0; i < nr_pages; i++, page++) { - if (!test_bit(PG_mte_tagged, &page->flags)) { + if (try_page_mte_tagging(page)) { mte_clear_page_tags(page_address(page)); - set_bit(PG_mte_tagged, &page->flags); + set_page_mte_tagged(page); } } +} - return 0; +static bool kvm_vma_mte_allowed(struct vm_area_struct *vma) +{ + return vma->vm_flags & VM_MTE_ALLOWED; } static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, @@ -1127,7 +1194,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, bool write_fault, writable, force_pte = false; bool exec_fault; bool device = false; - bool shared; unsigned long mmu_seq; struct kvm *kvm = vcpu->kvm; struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; @@ -1136,7 +1202,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, gfn_t gfn; kvm_pfn_t pfn; bool logging_active = memslot_is_logging(memslot); - bool use_read_lock = false; unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu); unsigned long vma_pagesize, fault_granule; enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R; @@ -1171,14 +1236,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (logging_active) { force_pte = true; vma_shift = PAGE_SHIFT; - use_read_lock = (fault_status == FSC_PERM && write_fault && - fault_granule == PAGE_SIZE); } else { vma_shift = get_vma_page_shift(vma, hva); } - shared = (vma->vm_flags & VM_SHARED); - switch (vma_shift) { #ifndef __PAGETABLE_PMD_FOLDED case PUD_SHIFT: @@ -1239,7 +1300,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, */ smp_rmb(); - pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, + pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, write_fault, &writable, NULL); if (pfn == KVM_PFN_ERR_HWPOISON) { kvm_send_hwpoison_signal(hva, vma_shift); @@ -1271,15 +1332,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (exec_fault && device) return -ENOEXEC; - /* - * To reduce MMU contentions and enhance concurrency during dirty - * logging dirty logging, only acquire read lock for permission - * relaxation. - */ - if (use_read_lock) - read_lock(&kvm->mmu_lock); - else - write_lock(&kvm->mmu_lock); + read_lock(&kvm->mmu_lock); pgt = vcpu->arch.hw_mmu->pgt; if (mmu_invalidate_retry(kvm, mmu_seq)) goto out_unlock; @@ -1298,13 +1351,13 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, } if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) { - /* Check the VMM hasn't introduced a new VM_SHARED VMA */ - if (!shared) - ret = sanitise_mte_tags(kvm, pfn, vma_pagesize); - else + /* Check the VMM hasn't introduced a new disallowed VMA */ + if (kvm_vma_mte_allowed(vma)) { + sanitise_mte_tags(kvm, pfn, vma_pagesize); + } else { ret = -EFAULT; - if (ret) goto out_unlock; + } } if (writable) @@ -1323,15 +1376,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * permissions only if vma_pagesize equals fault_granule. Otherwise, * kvm_pgtable_stage2_map() should be called to change block size. 
*/ - if (fault_status == FSC_PERM && vma_pagesize == fault_granule) { + if (fault_status == FSC_PERM && vma_pagesize == fault_granule) ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot); - } else { - WARN_ONCE(use_read_lock, "Attempted stage-2 map outside of write lock\n"); - + else ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize, __pfn_to_phys(pfn), prot, - memcache); - } + memcache, KVM_PGTABLE_WALK_SHARED); /* Mark the page dirty only if the fault is handled successfully */ if (writable && !ret) { @@ -1340,10 +1390,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, } out_unlock: - if (use_read_lock) - read_unlock(&kvm->mmu_lock); - else - write_unlock(&kvm->mmu_lock); + read_unlock(&kvm->mmu_lock); kvm_set_pfn_accessed(pfn); kvm_release_pfn_clean(pfn); return ret != -EAGAIN ? ret : 0; @@ -1526,15 +1573,18 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { kvm_pfn_t pfn = pte_pfn(range->pte); - int ret; if (!kvm->arch.mmu.pgt) return false; WARN_ON(range->end - range->start != 1); - ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE); - if (ret) + /* + * If the page isn't tagged, defer to user_mem_abort() for sanitising + * the MTE tags. The S2 pte should have been unmapped by + * mmu_notifier_invalidate_range_end(). + */ + if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn))) return false; /* @@ -1549,7 +1599,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) */ kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT, PAGE_SIZE, __pfn_to_phys(pfn), - KVM_PGTABLE_PROT_R, NULL); + KVM_PGTABLE_PROT_R, NULL, 0); return false; } @@ -1618,6 +1668,8 @@ static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = { int kvm_mmu_init(u32 *hyp_va_bits) { int err; + u32 idmap_bits; + u32 kernel_bits; hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start); hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE); @@ -1631,7 +1683,31 @@ int kvm_mmu_init(u32 *hyp_va_bits) */ BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); - *hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET); + /* + * The ID map may be configured to use an extended virtual address + * range. This is only the case if system RAM is out of range for the + * currently configured page size and VA_BITS_MIN, in which case we will + * also need the extended virtual range for the HYP ID map, or we won't + * be able to enable the EL2 MMU. + * + * However, in some cases the ID map may be configured for fewer than + * the number of VA bits used by the regular kernel stage 1. This + * happens when VA_BITS=52 and the kernel image is placed in PA space + * below 48 bits. + * + * At EL2, there is only one TTBR register, and we can't switch between + * translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom + * line: we need to use the extended range with *both* our translation + * tables. + * + * So use the maximum of the idmap VA bits and the regular kernel stage + * 1 VA bits to assure that the hypervisor can both ID map its code page + * and map any kernel memory. 
+ */ + idmap_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET); + kernel_bits = vabits_actual; + *hyp_va_bits = max(idmap_bits, kernel_bits); + kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits); kvm_debug("IDMAP page: %lx\n", hyp_idmap_start); kvm_debug("HYP VA range: %lx:%lx\n", @@ -1740,12 +1816,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, if (!vma) break; - /* - * VM_SHARED mappings are not allowed with MTE to avoid races - * when updating the PG_mte_tagged page flag, see - * sanitise_mte_tags for more details. - */ - if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) { + if (kvm_has_mte(kvm) && !kvm_vma_mte_allowed(vma)) { ret = -EINVAL; break; } diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c index ebecb7c045f4366ebbab3035ec696ce901523728..cf56958b1492a486f7e549c95d1f5ec828349a6b 100644 --- a/arch/arm64/kvm/pkvm.c +++ b/arch/arm64/kvm/pkvm.c @@ -6,6 +6,7 @@ #include #include +#include #include #include @@ -53,7 +54,7 @@ static int __init register_memblock_regions(void) void __init kvm_hyp_reserve(void) { - u64 nr_pages, prev, hyp_mem_pages = 0; + u64 hyp_mem_pages = 0; int ret; if (!is_hyp_mode_available() || is_kernel_in_hyp_mode()) @@ -71,21 +72,8 @@ void __init kvm_hyp_reserve(void) hyp_mem_pages += hyp_s1_pgtable_pages(); hyp_mem_pages += host_s2_pgtable_pages(); - - /* - * The hyp_vmemmap needs to be backed by pages, but these pages - * themselves need to be present in the vmemmap, so compute the number - * of pages needed by looking for a fixed point. - */ - nr_pages = 0; - do { - prev = nr_pages; - nr_pages = hyp_mem_pages + prev; - nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE, - PAGE_SIZE); - nr_pages += __hyp_pgtable_max_pages(nr_pages); - } while (nr_pages != prev); - hyp_mem_pages += nr_pages; + hyp_mem_pages += hyp_vm_table_pages(); + hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE); /* * Try to allocate a PMD-aligned region to reduce TLB pressure once @@ -107,3 +95,121 @@ void __init kvm_hyp_reserve(void) kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20, hyp_mem_base); } + +/* + * Allocates and donates memory for hypervisor VM structs at EL2. + * + * Allocates space for the VM state, which includes the hyp vm as well as + * the hyp vcpus. + * + * Stores an opaque handler in the kvm struct for future reference. + * + * Return 0 on success, negative error code on failure. + */ +static int __pkvm_create_hyp_vm(struct kvm *host_kvm) +{ + size_t pgd_sz, hyp_vm_sz, hyp_vcpu_sz; + struct kvm_vcpu *host_vcpu; + pkvm_handle_t handle; + void *pgd, *hyp_vm; + unsigned long idx; + int ret; + + if (host_kvm->created_vcpus < 1) + return -EINVAL; + + pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.vtcr); + + /* + * The PGD pages will be reclaimed using a hyp_memcache which implies + * page granularity. So, use alloc_pages_exact() to get individual + * refcounts. + */ + pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT); + if (!pgd) + return -ENOMEM; + + /* Allocate memory to donate to hyp for vm and vcpu pointers. */ + hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE, + size_mul(sizeof(void *), + host_kvm->created_vcpus))); + hyp_vm = alloc_pages_exact(hyp_vm_sz, GFP_KERNEL_ACCOUNT); + if (!hyp_vm) { + ret = -ENOMEM; + goto free_pgd; + } + + /* Donate the VM memory to hyp and let hyp initialize it. 
*/ + ret = kvm_call_hyp_nvhe(__pkvm_init_vm, host_kvm, hyp_vm, pgd); + if (ret < 0) + goto free_vm; + + handle = ret; + + host_kvm->arch.pkvm.handle = handle; + + /* Donate memory for the vcpus at hyp and initialize it. */ + hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE); + kvm_for_each_vcpu(idx, host_vcpu, host_kvm) { + void *hyp_vcpu; + + /* Indexing of the vcpus to be sequential starting at 0. */ + if (WARN_ON(host_vcpu->vcpu_idx != idx)) { + ret = -EINVAL; + goto destroy_vm; + } + + hyp_vcpu = alloc_pages_exact(hyp_vcpu_sz, GFP_KERNEL_ACCOUNT); + if (!hyp_vcpu) { + ret = -ENOMEM; + goto destroy_vm; + } + + ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu, + hyp_vcpu); + if (ret) { + free_pages_exact(hyp_vcpu, hyp_vcpu_sz); + goto destroy_vm; + } + } + + return 0; + +destroy_vm: + pkvm_destroy_hyp_vm(host_kvm); + return ret; +free_vm: + free_pages_exact(hyp_vm, hyp_vm_sz); +free_pgd: + free_pages_exact(pgd, pgd_sz); + return ret; +} + +int pkvm_create_hyp_vm(struct kvm *host_kvm) +{ + int ret = 0; + + mutex_lock(&host_kvm->lock); + if (!host_kvm->arch.pkvm.handle) + ret = __pkvm_create_hyp_vm(host_kvm); + mutex_unlock(&host_kvm->lock); + + return ret; +} + +void pkvm_destroy_hyp_vm(struct kvm *host_kvm) +{ + if (host_kvm->arch.pkvm.handle) { + WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm, + host_kvm->arch.pkvm.handle)); + } + + host_kvm->arch.pkvm.handle = 0; + free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc); +} + +int pkvm_init_host_vm(struct kvm *host_kvm) +{ + mutex_init(&host_kvm->lock); + return 0; +} diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 0003c7d37533a87ea5be34414b812996cf094fc8..24908400e190616f317b9e6725b4605c12d1c8a4 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -15,16 +15,25 @@ #include #include +#define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0) + DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available); static LIST_HEAD(arm_pmus); static DEFINE_MUTEX(arm_pmus_lock); -static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx); -static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx); -static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc); +static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc); +static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc); + +static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc) +{ + return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]); +} -#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1 +static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx) +{ + return &vcpu->arch.pmu.pmc[cnt_idx]; +} static u32 kvm_pmu_event_mask(struct kvm *kvm) { @@ -47,113 +56,46 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm) } /** - * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter - * @vcpu: The vcpu pointer - * @select_idx: The counter index + * kvm_pmc_is_64bit - determine if counter is 64bit + * @pmc: counter context */ -static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx) +static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc) { - return (select_idx == ARMV8_PMU_CYCLE_IDX && - __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC); + return (pmc->idx == ARMV8_PMU_CYCLE_IDX || + kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc))); } -static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc) +static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc) { - struct kvm_pmu *pmu; - struct kvm_vcpu_arch *vcpu_arch; + u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0); - pmc -= 
pmc->idx; - pmu = container_of(pmc, struct kvm_pmu, pmc[0]); - vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu); - return container_of(vcpu_arch, struct kvm_vcpu, arch); + return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) || + (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC)); } -/** - * kvm_pmu_pmc_is_chained - determine if the pmc is chained - * @pmc: The PMU counter pointer - */ -static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc) +static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc) { - struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); - - return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); + return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX && + !kvm_pmc_has_64bit_overflow(pmc)); } -/** - * kvm_pmu_idx_is_high_counter - determine if select_idx is a high/low counter - * @select_idx: The counter index - */ -static bool kvm_pmu_idx_is_high_counter(u64 select_idx) -{ - return select_idx & 0x1; -} - -/** - * kvm_pmu_get_canonical_pmc - obtain the canonical pmc - * @pmc: The PMU counter pointer - * - * When a pair of PMCs are chained together we use the low counter (canonical) - * to hold the underlying perf event. - */ -static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc) -{ - if (kvm_pmu_pmc_is_chained(pmc) && - kvm_pmu_idx_is_high_counter(pmc->idx)) - return pmc - 1; - - return pmc; -} -static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc) +static u32 counter_index_to_reg(u64 idx) { - if (kvm_pmu_idx_is_high_counter(pmc->idx)) - return pmc - 1; - else - return pmc + 1; + return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx; } -/** - * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain - * @vcpu: The vcpu pointer - * @select_idx: The counter index - */ -static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx) +static u32 counter_index_to_evtreg(u64 idx) { - u64 eventsel, reg; - - select_idx |= 0x1; - - if (select_idx == ARMV8_PMU_CYCLE_IDX) - return false; - - reg = PMEVTYPER0_EL0 + select_idx; - eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm); - - return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN; + return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx; } -/** - * kvm_pmu_get_pair_counter_value - get PMU counter value - * @vcpu: The vcpu pointer - * @pmc: The PMU counter pointer - */ -static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu, - struct kvm_pmc *pmc) +static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc) { - u64 counter, counter_high, reg, enabled, running; - - if (kvm_pmu_pmc_is_chained(pmc)) { - pmc = kvm_pmu_get_canonical_pmc(pmc); - reg = PMEVCNTR0_EL0 + pmc->idx; + struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); + u64 counter, reg, enabled, running; - counter = __vcpu_sys_reg(vcpu, reg); - counter_high = __vcpu_sys_reg(vcpu, reg + 1); - - counter = lower_32_bits(counter) | (counter_high << 32); - } else { - reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX) - ? 
PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx; - counter = __vcpu_sys_reg(vcpu, reg); - } + reg = counter_index_to_reg(pmc->idx); + counter = __vcpu_sys_reg(vcpu, reg); /* * The real counter value is equal to the value of counter register plus @@ -163,6 +105,9 @@ static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu, counter += perf_event_read_value(pmc->perf_event, &enabled, &running); + if (!kvm_pmc_is_64bit(pmc)) + counter = lower_32_bits(counter); + return counter; } @@ -173,22 +118,37 @@ static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu, */ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx) { - u64 counter; - struct kvm_pmu *pmu = &vcpu->arch.pmu; - struct kvm_pmc *pmc = &pmu->pmc[select_idx]; - if (!kvm_vcpu_has_pmu(vcpu)) return 0; - counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); + return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx)); +} - if (kvm_pmu_pmc_is_chained(pmc) && - kvm_pmu_idx_is_high_counter(select_idx)) - counter = upper_32_bits(counter); - else if (select_idx != ARMV8_PMU_CYCLE_IDX) - counter = lower_32_bits(counter); +static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force) +{ + struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); + u64 reg; - return counter; + kvm_pmu_release_perf_event(pmc); + + reg = counter_index_to_reg(pmc->idx); + + if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX && + !force) { + /* + * Even with PMUv3p5, AArch32 cannot write to the top + * 32bit of the counters. The only possible course of + * action is to use PMCR.P, which will reset them to + * 0 (the only use of the 'force' parameter). + */ + val = __vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32); + val |= lower_32_bits(val); + } + + __vcpu_sys_reg(vcpu, reg) = val; + + /* Recreate the perf event to reflect the updated sample_period */ + kvm_pmu_create_perf_event(pmc); } /** @@ -199,17 +159,10 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx) */ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) { - u64 reg; - if (!kvm_vcpu_has_pmu(vcpu)) return; - reg = (select_idx == ARMV8_PMU_CYCLE_IDX) - ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx; - __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx); - - /* Recreate the perf event to reflect the updated sample_period */ - kvm_pmu_create_perf_event(vcpu, select_idx); + kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false); } /** @@ -218,7 +171,6 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) */ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc) { - pmc = kvm_pmu_get_canonical_pmc(pmc); if (pmc->perf_event) { perf_event_disable(pmc->perf_event); perf_event_release_kernel(pmc->perf_event); @@ -232,29 +184,20 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc) * * If this counter has been configured to monitor some event, release it here. 
*/ -static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc) +static void kvm_pmu_stop_counter(struct kvm_pmc *pmc) { - u64 counter, reg, val; + struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); + u64 reg, val; - pmc = kvm_pmu_get_canonical_pmc(pmc); if (!pmc->perf_event) return; - counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); + val = kvm_pmu_get_pmc_value(pmc); - if (pmc->idx == ARMV8_PMU_CYCLE_IDX) { - reg = PMCCNTR_EL0; - val = counter; - } else { - reg = PMEVCNTR0_EL0 + pmc->idx; - val = lower_32_bits(counter); - } + reg = counter_index_to_reg(pmc->idx); __vcpu_sys_reg(vcpu, reg) = val; - if (kvm_pmu_pmc_is_chained(pmc)) - __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter); - kvm_pmu_release_perf_event(pmc); } @@ -280,13 +223,10 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) { unsigned long mask = kvm_pmu_valid_counter_mask(vcpu); - struct kvm_pmu *pmu = &vcpu->arch.pmu; int i; for_each_set_bit(i, &mask, 32) - kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]); - - bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS); + kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i)); } /** @@ -297,10 +237,9 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) { int i; - struct kvm_pmu *pmu = &vcpu->arch.pmu; for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) - kvm_pmu_release_perf_event(&pmu->pmc[i]); + kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i)); irq_work_sync(&vcpu->arch.pmu.overflow_work); } @@ -325,9 +264,6 @@ u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) { int i; - struct kvm_pmu *pmu = &vcpu->arch.pmu; - struct kvm_pmc *pmc; - if (!kvm_vcpu_has_pmu(vcpu)) return; @@ -335,17 +271,16 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) return; for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { + struct kvm_pmc *pmc; + if (!(val & BIT(i))) continue; - pmc = &pmu->pmc[i]; - - /* A change in the enable state may affect the chain state */ - kvm_pmu_update_pmc_chained(vcpu, i); - kvm_pmu_create_perf_event(vcpu, i); + pmc = kvm_vcpu_idx_to_pmc(vcpu, i); - /* At this point, pmc must be the canonical */ - if (pmc->perf_event) { + if (!pmc->perf_event) { + kvm_pmu_create_perf_event(pmc); + } else { perf_event_enable(pmc->perf_event); if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE) kvm_debug("fail to enable perf event\n"); @@ -363,23 +298,18 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) { int i; - struct kvm_pmu *pmu = &vcpu->arch.pmu; - struct kvm_pmc *pmc; if (!kvm_vcpu_has_pmu(vcpu) || !val) return; for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { + struct kvm_pmc *pmc; + if (!(val & BIT(i))) continue; - pmc = &pmu->pmc[i]; - - /* A change in the enable state may affect the chain state */ - kvm_pmu_update_pmc_chained(vcpu, i); - kvm_pmu_create_perf_event(vcpu, i); + pmc = kvm_vcpu_idx_to_pmc(vcpu, i); - /* At this point, pmc must be the canonical */ if (pmc->perf_event) perf_event_disable(pmc->perf_event); } @@ -476,14 +406,69 @@ void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work) { struct kvm_vcpu *vcpu; - struct kvm_pmu *pmu; - - pmu = container_of(work, struct kvm_pmu, overflow_work); - vcpu = kvm_pmc_to_vcpu(pmu->pmc); + vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work); kvm_vcpu_kick(vcpu); } +/* + * Perform an 
increment on any of the counters described in @mask, + * generating the overflow if required, and propagate it as a chained + * event if possible. + */ +static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu, + unsigned long mask, u32 event) +{ + int i; + + if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) + return; + + /* Weed out disabled counters */ + mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); + + for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) { + struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i); + u64 type, reg; + + /* Filter on event type */ + type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i)); + type &= kvm_pmu_event_mask(vcpu->kvm); + if (type != event) + continue; + + /* Increment this counter */ + reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1; + if (!kvm_pmc_is_64bit(pmc)) + reg = lower_32_bits(reg); + __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg; + + /* No overflow? move on */ + if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg)) + continue; + + /* Mark overflow */ + __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i); + + if (kvm_pmu_counter_can_chain(pmc)) + kvm_pmu_counter_increment(vcpu, BIT(i + 1), + ARMV8_PMUV3_PERFCTR_CHAIN); + } +} + +/* Compute the sample period for a given counter value */ +static u64 compute_period(struct kvm_pmc *pmc, u64 counter) +{ + u64 val; + + if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc)) + val = (-counter) & GENMASK(63, 0); + else + val = (-counter) & GENMASK(31, 0); + + return val; +} + /** * When the perf event overflows, set the overflow status and inform the vcpu. */ @@ -503,10 +488,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event, * Reset the sample period to the architectural limit, * i.e. the point where the counter overflows. */ - period = -(local64_read(&perf_event->count)); - - if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx)) - period &= GENMASK(31, 0); + period = compute_period(pmc, local64_read(&perf_event->count)); local64_set(&perf_event->hw.period_left, 0); perf_event->attr.sample_period = period; @@ -514,6 +496,10 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event, __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx); + if (kvm_pmu_counter_can_chain(pmc)) + kvm_pmu_counter_increment(vcpu, BIT(idx + 1), + ARMV8_PMUV3_PERFCTR_CHAIN); + if (kvm_pmu_overflow_status(vcpu)) { kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); @@ -533,50 +519,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event, */ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) { - struct kvm_pmu *pmu = &vcpu->arch.pmu; - int i; - - if (!kvm_vcpu_has_pmu(vcpu)) - return; - - if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) - return; - - /* Weed out disabled counters */ - val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); - - for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) { - u64 type, reg; - - if (!(val & BIT(i))) - continue; - - /* PMSWINC only applies to ... SW_INC! 
*/ - type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i); - type &= kvm_pmu_event_mask(vcpu->kvm); - if (type != ARMV8_PMUV3_PERFCTR_SW_INCR) - continue; - - /* increment this even SW_INC counter */ - reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; - reg = lower_32_bits(reg); - __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg; - - if (reg) /* no overflow on the low part */ - continue; - - if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) { - /* increment the high counter */ - reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1; - reg = lower_32_bits(reg); - __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg; - if (!reg) /* mark overflow on the high counter */ - __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1); - } else { - /* mark overflow on low counter */ - __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i); - } - } + kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR); } /** @@ -591,6 +534,12 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) if (!kvm_vcpu_has_pmu(vcpu)) return; + /* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */ + if (!kvm_pmu_is_3p5(vcpu)) + val &= ~ARMV8_PMU_PMCR_LP; + + __vcpu_sys_reg(vcpu, PMCR_EL0) = val; + if (val & ARMV8_PMU_PMCR_E) { kvm_pmu_enable_counter_mask(vcpu, __vcpu_sys_reg(vcpu, PMCNTENSET_EL0)); @@ -606,49 +555,44 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) unsigned long mask = kvm_pmu_valid_counter_mask(vcpu); mask &= ~BIT(ARMV8_PMU_CYCLE_IDX); for_each_set_bit(i, &mask, 32) - kvm_pmu_set_counter_value(vcpu, i, 0); + kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true); } } -static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx) +static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc) { + struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) && - (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx)); + (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx)); } /** * kvm_pmu_create_perf_event - create a perf event for a counter - * @vcpu: The vcpu pointer - * @select_idx: The number of selected counter + * @pmc: Counter context */ -static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx) +static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc) { + struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu; - struct kvm_pmu *pmu = &vcpu->arch.pmu; - struct kvm_pmc *pmc; struct perf_event *event; struct perf_event_attr attr; - u64 eventsel, counter, reg, data; + u64 eventsel, reg, data; - /* - * For chained counters the event type and filtering attributes are - * obtained from the low/even counter. We also use this counter to - * determine if the event is enabled/disabled. - */ - pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]); - - reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX) - ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx; + reg = counter_index_to_evtreg(pmc->idx); data = __vcpu_sys_reg(vcpu, reg); - kvm_pmu_stop_counter(vcpu, pmc); + kvm_pmu_stop_counter(pmc); if (pmc->idx == ARMV8_PMU_CYCLE_IDX) eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES; else eventsel = data & kvm_pmu_event_mask(vcpu->kvm); - /* Software increment event doesn't need to be backed by a perf event */ - if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR) + /* + * Neither SW increment nor chained events need to be backed + * by a perf event. 
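+ * The CHAIN event itself only counts overflows of the adjacent
+ * even-numbered counter, which KVM already emulates from its
+ * overflow paths, roughly:
+ *
+ *	if (kvm_pmu_counter_can_chain(pmc))
+ *		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
+ *					  ARMV8_PMUV3_PERFCTR_CHAIN);
+ *
+ * so no host perf event is needed to back it.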
+ */ + if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR || + eventsel == ARMV8_PMUV3_PERFCTR_CHAIN) return; /* @@ -663,37 +607,25 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx) attr.type = arm_pmu->pmu.type; attr.size = sizeof(attr); attr.pinned = 1; - attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx); + attr.disabled = !kvm_pmu_counter_is_enabled(pmc); attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0; attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0; attr.exclude_hv = 1; /* Don't count EL2 events */ attr.exclude_host = 1; /* Don't count host events */ attr.config = eventsel; - counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); - - if (kvm_pmu_pmc_is_chained(pmc)) { - /** - * The initial sample period (overflow count) of an event. For - * chained counters we only support overflow interrupts on the - * high counter. - */ - attr.sample_period = (-counter) & GENMASK(63, 0); - attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED; + /* + * If counting with a 64bit counter, advertise it to the perf + * code, carefully dealing with the initial sample period + * which also depends on the overflow. + */ + if (kvm_pmc_is_64bit(pmc)) + attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT; - event = perf_event_create_kernel_counter(&attr, -1, current, - kvm_pmu_perf_overflow, - pmc + 1); - } else { - /* The initial sample period (overflow count) of an event. */ - if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx)) - attr.sample_period = (-counter) & GENMASK(63, 0); - else - attr.sample_period = (-counter) & GENMASK(31, 0); + attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc)); - event = perf_event_create_kernel_counter(&attr, -1, current, + event = perf_event_create_kernel_counter(&attr, -1, current, kvm_pmu_perf_overflow, pmc); - } if (IS_ERR(event)) { pr_err_once("kvm: pmu event creation failed %ld\n", @@ -704,41 +636,6 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx) pmc->perf_event = event; } -/** - * kvm_pmu_update_pmc_chained - update chained bitmap - * @vcpu: The vcpu pointer - * @select_idx: The number of selected counter - * - * Update the chained bitmap based on the event type written in the - * typer register and the enable state of the odd register. 
- */ -static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx) -{ - struct kvm_pmu *pmu = &vcpu->arch.pmu; - struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc; - bool new_state, old_state; - - old_state = kvm_pmu_pmc_is_chained(pmc); - new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) && - kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1); - - if (old_state == new_state) - return; - - canonical_pmc = kvm_pmu_get_canonical_pmc(pmc); - kvm_pmu_stop_counter(vcpu, canonical_pmc); - if (new_state) { - /* - * During promotion from !chained to chained we must ensure - * the adjacent counter is stopped and its event destroyed - */ - kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc)); - set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); - return; - } - clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); -} - /** * kvm_pmu_set_counter_event_type - set selected counter to monitor some event * @vcpu: The vcpu pointer @@ -752,6 +649,7 @@ static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx) void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) { + struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx); u64 reg, mask; if (!kvm_vcpu_has_pmu(vcpu)) @@ -761,20 +659,19 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, mask &= ~ARMV8_PMU_EVTYPE_EVENT; mask |= kvm_pmu_event_mask(vcpu->kvm); - reg = (select_idx == ARMV8_PMU_CYCLE_IDX) - ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx; + reg = counter_index_to_evtreg(pmc->idx); __vcpu_sys_reg(vcpu, reg) = data & mask; - kvm_pmu_update_pmc_chained(vcpu, select_idx); - kvm_pmu_create_perf_event(vcpu, select_idx); + kvm_pmu_create_perf_event(pmc); } void kvm_host_pmu_init(struct arm_pmu *pmu) { struct arm_pmu_entry *entry; - if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) + if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI || + pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) return; mutex_lock(&arm_pmus_lock); @@ -827,7 +724,7 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void) if (event->pmu) { pmu = to_arm_pmu(event->pmu); - if (pmu->pmuver == 0 || + if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) pmu = NULL; } @@ -849,6 +746,8 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) if (!pmceid1) { val = read_sysreg(pmceid0_el0); + /* always support CHAIN */ + val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN); base = 0; } else { val = read_sysreg(pmceid1_el0); @@ -1150,3 +1049,14 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) return -ENXIO; } + +u8 kvm_arm_pmu_get_pmuver_limit(void) +{ + u64 tmp; + + tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); + tmp = cpuid_feature_cap_perfmon_field(tmp, + ID_AA64DFR0_EL1_PMUVer_SHIFT, + ID_AA64DFR0_EL1_PMUVer_V3P5); + return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp); +} diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 5ae18472205a9f34b73eb087b1ba383edee46ad0..e0267f672b8abec1e72bdf80c7d1abb37518ad33 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -395,32 +395,3 @@ int kvm_set_ipa_limit(void) return 0; } - -int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) -{ - u64 mmfr0, mmfr1; - u32 phys_shift; - - if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) - return -EINVAL; - - phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type); - if (phys_shift) { - if (phys_shift > kvm_ipa_limit || - phys_shift < ARM64_MIN_PARANGE_BITS) - return -EINVAL; - } else { - phys_shift 
= KVM_PHYS_SHIFT;
-		if (phys_shift > kvm_ipa_limit) {
-			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
-				     current->comm);
-			return -EINVAL;
-		}
-	}
-
-	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
-	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
-
-	return 0;
-}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 608e4f25161d3c162feb13bc49abcbbea81fc6b1..d5ee52d6bf7326f9254298631ef2fc0824c0e176 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -639,22 +639,18 @@ static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 
 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
-	u64 pmcr, val;
+	u64 pmcr;
 
 	/* No PMU available, PMCR_EL0 may UNDEF... */
 	if (!kvm_arm_support_pmu_v3())
 		return;
 
-	pmcr = read_sysreg(pmcr_el0);
-	/*
-	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
-	 * except PMCR.E resetting to zero.
-	 */
-	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
-	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+	/* Only preserve PMCR_EL0.N, and reset the rest to 0 */
+	pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
 	if (!kvm_supports_32bit_el0())
-		val |= ARMV8_PMU_PMCR_LC;
-	__vcpu_sys_reg(vcpu, r->reg) = val;
+		pmcr |= ARMV8_PMU_PMCR_LC;
+
+	__vcpu_sys_reg(vcpu, r->reg) = pmcr;
 }
 
 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
@@ -697,13 +693,15 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		return false;
 
 	if (p->is_write) {
-		/* Only update writeable bits of PMCR */
+		/*
+		 * Only update writeable bits of PMCR (continuing into
+		 * kvm_pmu_handle_pmcr() as well)
+		 */
 		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
 		val &= ~ARMV8_PMU_PMCR_MASK;
 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
 		if (!kvm_supports_32bit_el0())
 			val |= ARMV8_PMU_PMCR_LC;
-		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 		kvm_pmu_handle_pmcr(vcpu, val);
 		kvm_vcpu_pmu_restore_guest(vcpu);
 	} else {
@@ -1062,6 +1060,40 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_has_pmu(vcpu))
+		return vcpu->kvm->arch.dfr0_pmuver.imp;
+
+	return vcpu->kvm->arch.dfr0_pmuver.unimp;
+}
+
+static u8 perfmon_to_pmuver(u8 perfmon)
+{
+	switch (perfmon) {
+	case ID_DFR0_EL1_PerfMon_PMUv3:
+		return ID_AA64DFR0_EL1_PMUVer_IMP;
+	case ID_DFR0_EL1_PerfMon_IMPDEF:
+		return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
+	default:
+		/* Anything ARMv8.1+ and NI have the same value. For now. */
+		return perfmon;
+	}
+}
+
+static u8 pmuver_to_perfmon(u8 pmuver)
+{
+	switch (pmuver) {
+	case ID_AA64DFR0_EL1_PMUVer_IMP:
+		return ID_DFR0_EL1_PerfMon_PMUv3;
+	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
+		return ID_DFR0_EL1_PerfMon_IMPDEF;
+	default:
+		/* Anything ARMv8.1+ and NI have the same value. For now. */
+		return pmuver;
+	}
+}
+
 /* Read a sanitised cpufeature ID register by sys_reg_desc */
 static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
 {
@@ -1111,18 +1143,17 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
 		/* Limit debug to ARMv8.0 */
 		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
 		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
-		/* Limit guests to PMUv3 for ARMv8.4 */
-		val = cpuid_feature_cap_perfmon_field(val,
-						      ID_AA64DFR0_EL1_PMUVer_SHIFT,
-						      kvm_vcpu_has_pmu(vcpu) ?
ID_AA64DFR0_EL1_PMUVer_V3P4 : 0); + /* Set PMUver to the required version */ + val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), + vcpu_pmuver(vcpu)); /* Hide SPE from guests */ val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer); break; case SYS_ID_DFR0_EL1: - /* Limit guests to PMUv3 for ARMv8.4 */ - val = cpuid_feature_cap_perfmon_field(val, - ID_DFR0_EL1_PerfMon_SHIFT, - kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_EL1_PerfMon_PMUv3p4 : 0); + val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), + pmuver_to_perfmon(vcpu_pmuver(vcpu))); break; } @@ -1222,6 +1253,85 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, return 0; } +static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd, + u64 val) +{ + u8 pmuver, host_pmuver; + bool valid_pmu; + + host_pmuver = kvm_arm_pmu_get_pmuver_limit(); + + /* + * Allow AA64DFR0_EL1.PMUver to be set from userspace as long + * as it doesn't promise more than what the HW gives us. We + * allow an IMPDEF PMU though, only if no PMU is supported + * (KVM backward compatibility handling). + */ + pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val); + if ((pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && pmuver > host_pmuver)) + return -EINVAL; + + valid_pmu = (pmuver != 0 && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF); + + /* Make sure view register and PMU support do match */ + if (kvm_vcpu_has_pmu(vcpu) != valid_pmu) + return -EINVAL; + + /* We can only differ with PMUver, and anything else is an error */ + val ^= read_id_reg(vcpu, rd); + val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer); + if (val) + return -EINVAL; + + if (valid_pmu) + vcpu->kvm->arch.dfr0_pmuver.imp = pmuver; + else + vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver; + + return 0; +} + +static int set_id_dfr0_el1(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd, + u64 val) +{ + u8 perfmon, host_perfmon; + bool valid_pmu; + + host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit()); + + /* + * Allow DFR0_EL1.PerfMon to be set from userspace as long as + * it doesn't promise more than what the HW gives us on the + * AArch64 side (as everything is emulated with that), and + * that this is a PMUv3. 
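+ *
+ * As a worked example of the checks below (using the architected
+ * PerfMon encodings): with the host capped at PMUv3p4, setting
+ * PerfMon to PMUv3p1 is accepted, PMUv3p5 is rejected (beyond the
+ * host limit), and PMUv2 is rejected (not a PMUv3). IMPDEF is only
+ * accepted when the vcpu has no PMU, since it makes valid_pmu false.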
+ */ + perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), val); + if ((perfmon != ID_DFR0_EL1_PerfMon_IMPDEF && perfmon > host_perfmon) || + (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)) + return -EINVAL; + + valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_EL1_PerfMon_IMPDEF); + + /* Make sure view register and PMU support do match */ + if (kvm_vcpu_has_pmu(vcpu) != valid_pmu) + return -EINVAL; + + /* We can only differ with PerfMon, and anything else is an error */ + val ^= read_id_reg(vcpu, rd); + val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon); + if (val) + return -EINVAL; + + if (valid_pmu) + vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon); + else + vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon); + + return 0; +} + /* * cpufeature ID register user accessors * @@ -1443,7 +1553,9 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* CRm=1 */ AA32_ID_SANITISED(ID_PFR0_EL1), AA32_ID_SANITISED(ID_PFR1_EL1), - AA32_ID_SANITISED(ID_DFR0_EL1), + { SYS_DESC(SYS_ID_DFR0_EL1), .access = access_id_reg, + .get_user = get_id_reg, .set_user = set_id_dfr0_el1, + .visibility = aa32_id_visibility, }, ID_HIDDEN(ID_AFR0_EL1), AA32_ID_SANITISED(ID_MMFR0_EL1), AA32_ID_SANITISED(ID_MMFR1_EL1), @@ -1483,7 +1595,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_UNALLOCATED(4,7), /* CRm=5 */ - ID_SANITISED(ID_AA64DFR0_EL1), + { SYS_DESC(SYS_ID_AA64DFR0_EL1), .access = access_id_reg, + .get_user = get_id_reg, .set_user = set_id_aa64dfr0_el1, }, ID_SANITISED(ID_AA64DFR1_EL1), ID_UNALLOCATED(5,2), ID_UNALLOCATED(5,3), diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 733b53055f9760fe01d22c3d28006c3a7cefae65..94a666dd1443d1310aadf35a1267e26d04709eca 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -2743,6 +2743,7 @@ static int vgic_its_has_attr(struct kvm_device *dev, static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr) { const struct vgic_its_abi *abi = vgic_its_get_abi(its); + struct vgic_dist *dist = &kvm->arch.vgic; int ret = 0; if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */ @@ -2762,7 +2763,9 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr) vgic_its_reset(kvm, its); break; case KVM_DEV_ARM_ITS_SAVE_TABLES: + dist->save_its_tables_in_progress = true; ret = abi->save_tables(its); + dist->save_its_tables_in_progress = false; break; case KVM_DEV_ARM_ITS_RESTORE_TABLES: ret = abi->restore_tables(its); @@ -2775,6 +2778,23 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr) return ret; } +/* + * kvm_arch_allow_write_without_running_vcpu - allow writing guest memory + * without the running VCPU when dirty ring is enabled. + * + * The running VCPU is required to track dirty guest pages when dirty ring + * is enabled. Otherwise, the backup bitmap should be used to track the + * dirty guest pages. When vgic/its tables are being saved, the backup + * bitmap is used to track the dirty guest pages due to the missed running + * VCPU in the period. 
+ */ +bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + + return dist->save_its_tables_in_progress; +} + static int vgic_its_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index 24913271e898c121a9083610144ec640804f02c0..8dd5a8fe64b4f6688889ace395aedc83a735ae9f 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -21,9 +21,12 @@ void copy_highpage(struct page *to, struct page *from) copy_page(kto, kfrom); - if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) { - set_bit(PG_mte_tagged, &to->flags); + if (system_supports_mte() && page_mte_tagged(from)) { + page_kasan_tag_reset(to); + /* It's a new page, shouldn't have been tagged yet */ + WARN_ON_ONCE(!try_page_mte_tagging(to)); mte_copy_page_tags(kto, kfrom); + set_page_mte_tagged(to); } } EXPORT_SYMBOL(copy_highpage); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 3eb2825d08cffc7335fd64e643fbed105541a424..596f46dabe4ef2f16f5d10b05e5f9983878961bb 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -943,6 +943,8 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, void tag_clear_highpage(struct page *page) { + /* Newly allocated page, shouldn't have been tagged yet */ + WARN_ON_ONCE(!try_page_mte_tagging(page)); mte_zero_clear_page_tags(page_address(page)); - set_bit(PG_mte_tagged, &page->flags); + set_page_mte_tagged(page); } diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index cd8d96e1fa1aec693cb6b3929aee6408ffdff3b4..35e9a468d13e6ac68093c7516350815df5b009b5 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -559,24 +559,3 @@ bool __init arch_hugetlb_valid_size(unsigned long size) { return __hugetlb_valid_size(size); } - -pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) -{ - if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_2645198) && - cpus_have_const_cap(ARM64_WORKAROUND_2645198)) { - /* - * Break-before-make (BBM) is required for all user space mappings - * when the permission changes from executable to non-executable - * in cases where cpu is affected with errata #2645198. - */ - if (pte_user_exec(READ_ONCE(*ptep))) - return huge_ptep_clear_flush(vma, addr, ptep); - } - return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); -} - -void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, - pte_t old_pte, pte_t pte) -{ - set_huge_pte_at(vma->vm_mm, addr, ptep, pte); -} diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 2368e4daa23d3e6b82ee1750e038d800f27acc51..14c87e8d69d8391c995616a749c665cabf4aaddf 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -814,53 +814,6 @@ void __init paging_init(void) create_idmap(); } -/* - * Check whether a kernel address is valid (derived from arch/x86/). 
- */ -int kern_addr_valid(unsigned long addr) -{ - pgd_t *pgdp; - p4d_t *p4dp; - pud_t *pudp, pud; - pmd_t *pmdp, pmd; - pte_t *ptep, pte; - - addr = arch_kasan_reset_tag(addr); - if ((((long)addr) >> VA_BITS) != -1UL) - return 0; - - pgdp = pgd_offset_k(addr); - if (pgd_none(READ_ONCE(*pgdp))) - return 0; - - p4dp = p4d_offset(pgdp, addr); - if (p4d_none(READ_ONCE(*p4dp))) - return 0; - - pudp = pud_offset(p4dp, addr); - pud = READ_ONCE(*pudp); - if (pud_none(pud)) - return 0; - - if (pud_sect(pud)) - return pfn_valid(pud_pfn(pud)); - - pmdp = pmd_offset(pudp, addr); - pmd = READ_ONCE(*pmdp); - if (pmd_none(pmd)) - return 0; - - if (pmd_sect(pmd)) - return pfn_valid(pmd_pfn(pmd)); - - ptep = pte_offset_kernel(pmdp, addr); - pte = READ_ONCE(*ptep); - if (pte_none(pte)) - return 0; - - return pfn_valid(pte_pfn(pte)); -} - #ifdef CONFIG_MEMORY_HOTPLUG static void free_hotplug_page_range(struct page *page, size_t size, struct vmem_altmap *altmap) @@ -1184,53 +1137,28 @@ static void free_empty_tables(unsigned long addr, unsigned long end, } #endif +void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node, + unsigned long addr, unsigned long next) +{ + pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL)); +} + +int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node, + unsigned long addr, unsigned long next) +{ + vmemmap_verify((pte_t *)pmdp, node, addr, next); + return 1; +} + int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { - unsigned long addr = start; - unsigned long next; - pgd_t *pgdp; - p4d_t *p4dp; - pud_t *pudp; - pmd_t *pmdp; - WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES)) return vmemmap_populate_basepages(start, end, node, altmap); - - do { - next = pmd_addr_end(addr, end); - - pgdp = vmemmap_pgd_populate(addr, node); - if (!pgdp) - return -ENOMEM; - - p4dp = vmemmap_p4d_populate(pgdp, addr, node); - if (!p4dp) - return -ENOMEM; - - pudp = vmemmap_pud_populate(p4dp, addr, node); - if (!pudp) - return -ENOMEM; - - pmdp = pmd_offset(pudp, addr); - if (pmd_none(READ_ONCE(*pmdp))) { - void *p = NULL; - - p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap); - if (!p) { - if (vmemmap_populate_basepages(addr, next, node, altmap)) - return -ENOMEM; - continue; - } - - pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL)); - } else - vmemmap_verify((pte_t *)pmdp, node, addr, next); - } while (addr = next, addr != end); - - return 0; + else + return vmemmap_populate_hugepages(start, end, node, altmap); } #ifdef CONFIG_MEMORY_HOTPLUG @@ -1702,24 +1630,3 @@ static int __init prevent_bootmem_remove_init(void) } early_initcall(prevent_bootmem_remove_init); #endif - -pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) -{ - if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_2645198) && - cpus_have_const_cap(ARM64_WORKAROUND_2645198)) { - /* - * Break-before-make (BBM) is required for all user space mappings - * when the permission changes from executable to non-executable - * in cases where cpu is affected with errata #2645198. 
- */ - if (pte_user_exec(READ_ONCE(*ptep))) - return ptep_clear_flush(vma, addr, ptep); - } - return ptep_get_and_clear(vma->vm_mm, addr, ptep); -} - -void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, - pte_t old_pte, pte_t pte) -{ - set_pte_at(vma->vm_mm, addr, ptep, pte); -} diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c index bed803d8e15856b56468fcbc6b271f82b4466261..cd508ba80ab1ba889fdc29647297c6707418e531 100644 --- a/arch/arm64/mm/mteswap.c +++ b/arch/arm64/mm/mteswap.c @@ -24,7 +24,7 @@ int mte_save_tags(struct page *page) { void *tag_storage, *ret; - if (!test_bit(PG_mte_tagged, &page->flags)) + if (!page_mte_tagged(page)) return 0; tag_storage = mte_allocate_tag_storage(); @@ -46,21 +46,17 @@ int mte_save_tags(struct page *page) return 0; } -bool mte_restore_tags(swp_entry_t entry, struct page *page) +void mte_restore_tags(swp_entry_t entry, struct page *page) { void *tags = xa_load(&mte_pages, entry.val); if (!tags) - return false; + return; - /* - * Test PG_mte_tagged again in case it was racing with another - * set_pte_at(). - */ - if (!test_and_set_bit(PG_mte_tagged, &page->flags)) + if (try_page_mte_tagging(page)) { mte_restore_page_tags(page_address(page), tags); - - return true; + set_page_mte_tagged(page); + } } void mte_invalidate_tags(int type, pgoff_t offset) diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 5922178d7a064c1c98af43ad97d72fa4a6b8d79b..79dd201c59d8b32e0652476d6961027b90bcd42c 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -202,8 +202,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable) /* * This function is used to determine if a linear map page has been marked as - * not-valid. Walk the page table and check the PTE_VALID bit. This is based - * on kern_addr_valid(), which almost does what we need. + * not-valid. Walk the page table and check the PTE_VALID bit. * * Because this is only called on the kernel linear map, p?d_sect() implies * p?d_present(). When debug_pagealloc is enabled, sections mappings are diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index dfeb2c51e2573dc07d58be19e58cb89d568051ab..a86ee376920a08ddfdb7f116336a90ed13e88629 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -71,7 +71,6 @@ WORKAROUND_2038923 WORKAROUND_2064142 WORKAROUND_2077057 WORKAROUND_2457168 -WORKAROUND_2645198 WORKAROUND_2658417 WORKAROUND_TRBE_OVERWRITE_FILL_MODE WORKAROUND_TSB_FLUSH_FAILURE diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index adee6ab36862e6a1a2a92ff68bfd93f57d0fd14c..dba02da6fa344ca5dbeeab2bd106d06fd3332136 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -9,6 +9,7 @@ config CSKY select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS + select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_INLINE_READ_LOCK if !PREEMPTION select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION @@ -93,7 +94,6 @@ config CSKY select HAVE_PERF_USER_STACK_DUMP select HAVE_DMA_CONTIGUOUS select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_RSEQ select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select MAY_HAVE_SPARSE_IRQ @@ -269,7 +269,7 @@ menuconfig HAVE_TCM bool "Tightly-Coupled/Sram Memory" depends on !COMPILE_TEST help - The implementation are not only used by TCM (Tightly-Coupled Meory) + The implementation are not only used by TCM (Tightly-Coupled Memory) but also used by sram on SOC bus. 
It follow existed linux tcm software interface, so that old tcm application codes could be re-used directly. diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index c3d9b92cbe61c81d030eda9cfe1d968fe10d4050..77bc6caff2d23875e0474940db012ad0fb9d5738 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h @@ -249,9 +249,6 @@ extern void paging_init(void); void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *pte); -/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ -#define kern_addr_valid(addr) (1) - #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ remap_pfn_range(vma, vaddr, pfn, size, prot) diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h index 63ad71fab30d7bbe5af9a42b7581e3c486554a41..ea75d72dea86966232dd4b04ec4fa4ea419ef784 100644 --- a/arch/csky/include/asm/processor.h +++ b/arch/csky/include/asm/processor.h @@ -84,4 +84,6 @@ unsigned long __get_wchan(struct task_struct *p); #define cpu_relax() barrier() +register unsigned long current_stack_pointer __asm__("sp"); + #endif /* __ASM_CSKY_PROCESSOR_H */ diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S index 547b4cd1b24b4d67d34d2b2c72c027e981049f82..c68cdcc76d60e4ace1a16b1d932147420619fcbd 100644 --- a/arch/csky/kernel/entry.S +++ b/arch/csky/kernel/entry.S @@ -54,7 +54,7 @@ ENTRY(csky_systemcall) lrw r9, __NR_syscalls cmphs syscallid, r9 /* Check nr of syscall */ - bt 1f + bt ret_from_exception lrw r9, sys_call_table ixw r9, syscallid @@ -80,11 +80,6 @@ ENTRY(csky_systemcall) jsr syscallid #endif stw a0, (sp, LSAVE_A0) /* Save return value */ -1: -#ifdef CONFIG_DEBUG_RSEQ - mov a0, sp - jbsr rseq_syscall -#endif jmpi ret_from_exception csky_syscall_trace: @@ -113,10 +108,6 @@ csky_syscall_trace: stw a0, (sp, LSAVE_A0) /* Save return value */ 1: -#ifdef CONFIG_DEBUG_RSEQ - mov a0, sp - jbsr rseq_syscall -#endif mov a0, sp /* right now, sp --> pt_regs */ jbsr syscall_trace_exit br ret_from_exception diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c index b7b3685283d763ca9fb9bb1ae1c7358f1f895630..10da0fefd4319171452b3926fd172da53613ff68 100644 --- a/arch/csky/kernel/signal.c +++ b/arch/csky/kernel/signal.c @@ -179,8 +179,6 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) sigset_t *oldset = sigmask_to_save(); int ret; - rseq_signal_deliver(ksig, regs); - /* Are we from a system call? 
*/ if (in_syscall(regs)) { /* Avoid additional syscall restarting via ret_from_exception */ diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c index 9f78f5d2151172460d3effa09358b9a814a903df..27ecd63e321bba5e8d62f837e40e73e596441856 100644 --- a/arch/csky/kernel/stacktrace.c +++ b/arch/csky/kernel/stacktrace.c @@ -23,10 +23,9 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, sp = user_stack_pointer(regs); pc = instruction_pointer(regs); } else if (task == NULL || task == current) { - const register unsigned long current_sp __asm__ ("sp"); const register unsigned long current_fp __asm__ ("r8"); fp = current_fp; - sp = current_sp; + sp = current_stack_pointer; pc = (unsigned long)walk_stackframe; } else { /* task blocked in __switch_to */ @@ -68,8 +67,7 @@ static void notrace walk_stackframe(struct task_struct *task, sp = user_stack_pointer(regs); pc = instruction_pointer(regs); } else if (task == NULL || task == current) { - const register unsigned long current_sp __asm__ ("sp"); - sp = current_sp; + sp = current_stack_pointer; pc = (unsigned long)walk_stackframe; } else { /* task blocked in __switch_to */ diff --git a/arch/hexagon/include/asm/page.h b/arch/hexagon/include/asm/page.h index 7cbf719c578ec455fa3243d7193718dd2462f150..d7d4f9fca32795ef0d921c93c7bda4d4fe732e75 100644 --- a/arch/hexagon/include/asm/page.h +++ b/arch/hexagon/include/asm/page.h @@ -131,13 +131,6 @@ static inline void clear_page(void *page) #define page_to_virt(page) __va(page_to_phys(page)) -/* - * For port to Hexagon Virtual Machine, MAYBE we check for attempts - * to reference reserved HVM space, but in any case, the VM will be - * protected. - */ -#define kern_addr_valid(addr) (1) - #include #include /* XXX Todo: implement assembly-optimized version of getorder. */ diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index c6e06cdc738f0bb0696c8451a64ec8c64f7835d2..d7e4a24e8644cdecb03d96228871e8e294a2004e 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -63,6 +63,7 @@ config IA64 select NUMA if !FLATMEM select PCI_MSI_ARCH_FALLBACKS if PCI_MSI select ZONE_DMA32 + select FUNCTION_ALIGNMENT_32B default y help The Itanium Processor Family is Intel's 64-bit successor to diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 56c4bb276b6eda0e19c91fca6b2fd216be0e7467..d553ab7022fe4088459996580bbc0f41de05f2bd 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -23,7 +23,7 @@ KBUILD_AFLAGS_KERNEL := -mconstant-gp EXTRA := cflags-y := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f12-f15,f32-f127 \ - -falign-functions=32 -frename-registers -fno-optimize-sibling-calls + -frename-registers -fno-optimize-sibling-calls KBUILD_CFLAGS_KERNEL := -mconstant-gp GAS_STATUS = $(shell $(srctree)/arch/ia64/scripts/check-gas "$(CC)" "$(OBJDUMP)") diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 6925e28ae61d192ff6bfa102c9427daf5f78ce49..01517a5e677891b2a0fbaf0f454248c31ddc0f23 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -181,22 +181,6 @@ ia64_phys_addr_valid (unsigned long addr) return (addr & (local_cpu_data->unimpl_pa_mask)) == 0; } -/* - * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel - * memory. For the return value to be meaningful, ADDR must be >= - * PAGE_OFFSET. 
This operation can be relatively expensive (e.g., - * require a hash-, or multi-level tree-lookup or something of that - * sort) but it guarantees to return TRUE only if accessing the page - * at that address does not cause an error. Note that there may be - * addresses for which kern_addr_valid() returns FALSE even though an - * access would not cause an error (e.g., this is typically true for - * memory mapped I/O regions. - * - * XXX Need to implement this for IA-64. - */ -#define kern_addr_valid(addr) (1) - - /* * Now come the defines and routines to manage and access the three-level * page table. diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index f993cb36c06266de996d6b5e15787c326a7d25aa..380d2f3966c9862570c5d7707022bac67b81e865 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -91,21 +91,6 @@ int prepare_hugepage_range(struct file *file, return 0; } -struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write) -{ - struct page *page; - pte_t *ptep; - - if (REGION_NUMBER(addr) != RGN_HPAGE) - return ERR_PTR(-EINVAL); - - ptep = huge_pte_offset(mm, addr, HPAGE_SIZE); - if (!ptep || pte_none(*ptep)) - return NULL; - page = pte_page(*ptep); - page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT); - return page; -} int pmd_huge(pmd_t pmd) { return 0; diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 386adde2feffb31f024d4014311baf519dca03bd..9cc8b84f7eb03d856900f2fb42f412d25ec5b0d5 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -53,10 +53,12 @@ config LOONGARCH select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT + select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP select ARCH_WANT_LD_ORPHAN_WARN select ARCH_WANTS_NO_INSTR select BUILDTIME_TABLE_SORT select COMMON_CLK + select CPU_PM select EFI select GENERIC_CLOCKEVENTS select GENERIC_CMOS_UPDATE @@ -85,11 +87,18 @@ config LOONGARCH select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_ASM_MODVERSIONS select HAVE_CONTEXT_TRACKING_USER + select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_STACKOVERFLOW select HAVE_DMA_CONTIGUOUS + select HAVE_DYNAMIC_FTRACE + select HAVE_DYNAMIC_FTRACE_WITH_ARGS + select HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_EBPF_JIT select HAVE_EXIT_THREAD select HAVE_FAST_GUP + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER select HAVE_GENERIC_VDSO select HAVE_IOREMAP_PROT select HAVE_IRQ_EXIT_ON_IRQ_STACK @@ -103,6 +112,7 @@ config LOONGARCH select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RSEQ select HAVE_SETUP_PER_CPU_AREA if NUMA + select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select HAVE_TIF_NOHZ select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP @@ -112,6 +122,8 @@ config LOONGARCH select MODULES_USE_ELF_RELA if MODULES select NEED_PER_CPU_EMBED_FIRST_CHUNK select NEED_PER_CPU_PAGE_FIRST_CHUNK + select OF + select OF_EARLY_FLATTREE select PCI select PCI_DOMAINS_GENERIC select PCI_ECAM if ACPI @@ -122,6 +134,8 @@ config LOONGARCH select RTC_LIB select SMP select SPARSE_IRQ + select SYSCTL_ARCH_UNALIGN_ALLOW + select SYSCTL_ARCH_UNALIGN_NO_WARN select SYSCTL_EXCEPTION_TRACE select SWIOTLB select TRACE_IRQFLAGS_SUPPORT @@ -488,6 +502,7 @@ config ARCH_FLATMEM_ENABLE config ARCH_SPARSEMEM_ENABLE def_bool y + select SPARSEMEM_VMEMMAP_ENABLE help Say Y to support efficient handling of sparse physical memory, for architectures which are either NUMA (Non-Uniform Memory Access) @@ -514,6 +529,13 @@ config 
ARCH_MMAP_RND_BITS_MAX menu "Power management options" +config ARCH_SUSPEND_POSSIBLE + def_bool y + +config ARCH_HIBERNATION_POSSIBLE + def_bool y + +source "kernel/power/Kconfig" source "drivers/acpi/Kconfig" endmenu diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index 01b57b7263225d195fddf7cfc4d47a84967d6901..4402387d27551a3601c7a5ea49b6bf42f1377376 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -25,6 +25,11 @@ endif 32bit-emul = elf32loongarch 64bit-emul = elf64loongarch +ifdef CONFIG_DYNAMIC_FTRACE +KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY +CC_FLAGS_FTRACE := -fpatchable-function-entry=2 +endif + ifdef CONFIG_64BIT tool-archpref = $(64bit-tool-archpref) UTS_MACHINE := loongarch64 @@ -104,6 +109,9 @@ endif libs-y += arch/loongarch/lib/ libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a +# suspend and hibernation support +drivers-$(CONFIG_PM) += arch/loongarch/power/ + ifeq ($(KBUILD_EXTMOD),) prepare: vdso_prepare vdso_prepare: prepare0 diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 3540e9c0a631066081726c44c3cbebc22618771e..eb84cae642e5874cb8c09f3b06d9b8044bf6c423 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -34,12 +34,13 @@ CONFIG_SYSFS_DEPRECATED=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y -CONFIG_USERFAULTFD=y +CONFIG_KALLSYMS_ALL=y CONFIG_PERF_EVENTS=y -# CONFIG_COMPAT_BRK is not set CONFIG_LOONGARCH=y CONFIG_64BIT=y CONFIG_MACH_LOONGSON64=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_HZ_250=y CONFIG_DMI=y CONFIG_EFI=y CONFIG_SMP=y @@ -47,14 +48,14 @@ CONFIG_HOTPLUG_CPU=y CONFIG_NR_CPUS=64 CONFIG_NUMA=y CONFIG_KEXEC=y -CONFIG_PAGE_SIZE_16KB=y -CONFIG_HZ_250=y +CONFIG_SUSPEND=y +CONFIG_HIBERNATION=y CONFIG_ACPI=y CONFIG_ACPI_SPCR_TABLE=y -CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_TAD=y CONFIG_ACPI_DOCK=y CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_PCI_SLOT=y CONFIG_ACPI_HOTPLUG_MEMORY=y CONFIG_EFI_ZBOOT=y @@ -73,17 +74,19 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_BINFMT_MISC=m -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_KSM=y -CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_ZPOOL=y CONFIG_ZSWAP=y CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y -CONFIG_ZPOOL=y CONFIG_ZBUD=y CONFIG_Z3FOLD=y CONFIG_ZSMALLOC=m +# CONFIG_COMPAT_BRK is not set +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_USERFAULTFD=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -118,7 +121,6 @@ CONFIG_NETFILTER=y CONFIG_BRIDGE_NETFILTER=m CONFIG_NETFILTER_NETLINK_LOG=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_LOG_NETDEV=m CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m @@ -416,6 +418,7 @@ CONFIG_SCSI_VIRTIO=m CONFIG_ATA=y CONFIG_SATA_AHCI=y CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_AHCI_DWC=y CONFIG_PATA_ATIIXP=y CONFIG_PATA_PCMCIA=m CONFIG_MD=y @@ -469,13 +472,11 @@ CONFIG_VIRTIO_NET=m # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_VENDOR_ATHEROS is not set CONFIG_BNX2=y -# CONFIG_NET_VENDOR_BROCADE is not set # CONFIG_NET_VENDOR_CAVIUM is not set CONFIG_CHELSIO_T1=m CONFIG_CHELSIO_T1_1G=y CONFIG_CHELSIO_T3=m CONFIG_CHELSIO_T4=m -# CONFIG_NET_VENDOR_CIRRUS is not set # CONFIG_NET_VENDOR_CISCO is not set # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set @@ -496,6 +497,7 @@ CONFIG_IXGBE=y # 
CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set # CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_BROCADE is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RDC is not set CONFIG_8139CP=m @@ -505,9 +507,9 @@ CONFIG_R8169=y # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SILAN is not set # CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SMSC is not set CONFIG_STMMAC_ETH=y # CONFIG_NET_VENDOR_SUN is not set @@ -588,6 +590,7 @@ CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_NONSTANDARD=y CONFIG_PRINTER=m CONFIG_VIRTIO_CONSOLE=y @@ -602,6 +605,11 @@ CONFIG_I2C_GPIO=y CONFIG_SPI=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_LOONGSON=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_RESTART=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_POWER_RESET_SYSCON_POWEROFF=y +CONFIG_SYSCON_REBOOT_MODE=y CONFIG_SENSORS_LM75=m CONFIG_SENSORS_LM93=m CONFIG_SENSORS_W83795=m @@ -609,16 +617,16 @@ CONFIG_SENSORS_W83627HF=m CONFIG_RC_CORE=m CONFIG_LIRC=y CONFIG_RC_DECODERS=y +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m CONFIG_IR_NEC_DECODER=m CONFIG_IR_RC5_DECODER=m CONFIG_IR_RC6_DECODER=m -CONFIG_IR_JVC_DECODER=m -CONFIG_IR_SONY_DECODER=m CONFIG_IR_SANYO_DECODER=m CONFIG_IR_SHARP_DECODER=m -CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_SONY_DECODER=m CONFIG_IR_XMP_DECODER=m -CONFIG_IR_IMON_DECODER=m CONFIG_MEDIA_SUPPORT=m CONFIG_MEDIA_USB_SUPPORT=y CONFIG_USB_VIDEO_CLASS=m @@ -638,6 +646,7 @@ CONFIG_DRM_VIRTIO_GPU=m CONFIG_FB=y CONFIG_FB_EFI=y CONFIG_FB_RADEON=y +CONFIG_LCD_CLASS_DEVICE=y CONFIG_LCD_PLATFORM=m # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y @@ -647,7 +656,6 @@ CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_SEQUENCER=m CONFIG_SND_SEQ_DUMMY=m -# CONFIG_SND_ISA is not set CONFIG_SND_BT87X=m CONFIG_SND_BT87X_OVERCLOCK=y CONFIG_SND_HDA_INTEL=y @@ -818,10 +826,6 @@ CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_CHACHA20POLY1305=m -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_VMAC=m -CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST5=m @@ -831,6 +835,9 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_DEFLATE=m CONFIG_CRYPTO_LZO=m CONFIG_CRYPTO_842=m @@ -844,6 +851,7 @@ CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_PRINTK_TIME=y CONFIG_STRIP_ASM_SYMS=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_PREEMPT is not set diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h index 825c2519b9d1f7c0eedabf118f20375b4493604e..4198753aa1d0f1782a1cb17f2a4280881aa3d0c2 100644 --- a/arch/loongarch/include/asm/acpi.h +++ b/arch/loongarch/include/asm/acpi.h @@ -35,4 +35,14 @@ extern struct list_head acpi_wakeup_device_list; #define ACPI_TABLE_UPGRADE_MAX_PHYS ARCH_LOW_ADDRESS_LIMIT +extern int loongarch_acpi_suspend(void); +extern int (*acpi_suspend_lowlevel)(void); +extern void loongarch_suspend_enter(void); + +static inline unsigned long acpi_get_wakeup_address(void) +{ + extern void loongarch_wakeup_start(void); + return (unsigned 
long)loongarch_wakeup_start;
+}
+
 #endif /* _ASM_LOONGARCH_ACPI_H */
diff --git a/arch/loongarch/include/asm/alternative-asm.h b/arch/loongarch/include/asm/alternative-asm.h
new file mode 100644
index 0000000000000000000000000000000000000000..ff3d10ac393f2048ce3e9dddda57ecd2b217a3df
--- /dev/null
+++ b/arch/loongarch/include/asm/alternative-asm.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ALTERNATIVE_ASM_H
+#define _ASM_ALTERNATIVE_ASM_H
+
+#ifdef __ASSEMBLY__
+
+#include
+
+/*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+ * enough information for the alternatives patching code to patch an
+ * instruction. See apply_alternatives().
+ */
+.macro altinstruction_entry orig alt feature orig_len alt_len
+	.long \orig - .
+	.long \alt - .
+	.short \feature
+	.byte \orig_len
+	.byte \alt_len
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr. ".fill" directive takes care of proper instruction padding
+ * in case @newinstr is longer than @oldinstr.
+ */
+.macro ALTERNATIVE oldinstr, newinstr, feature
+140 :
+	\oldinstr
+141 :
+	.fill - (((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)) / 4, 4, 0x03400000
+142 :
+
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 140b, 143f, \feature, 142b-140b, 144f-143f
+	.popsection
+
+	.subsection 1
+143 :
+	\newinstr
+144 :
+	.previous
+.endm
+
+#define old_len		(141b-140b)
+#define new_len1	(144f-143f)
+#define new_len2	(145f-144f)
+
+#define alt_max_short(a, b)	((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
+
+/*
+ * Same as ALTERNATIVE macro above but for two alternatives. If CPU
+ * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
+ * @feature2, it replaces @oldinstr with @newinstr2.
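+ *
+ * A usage sketch (the instruction strings and feature bits are
+ * placeholders, not names defined by this patch):
+ *
+ *	ALTERNATIVE_2 "nop", "insn_for_feat1", FEAT_1, "insn_for_feat2", FEAT_2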
+ */ +.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 +140 : + \oldinstr +141 : + .fill - ((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \ + (alt_max_short(new_len1, new_len2) - (old_len)) / 4, 4, 0x03400000 +142 : + + .pushsection .altinstructions, "a" + altinstruction_entry 140b, 143f, \feature1, 142b-140b, 144f-143f, 142b-141b + altinstruction_entry 140b, 144f, \feature2, 142b-140b, 145f-144f, 142b-141b + .popsection + + .subsection 1 +143 : + \newinstr1 +144 : + \newinstr2 +145 : + .previous +.endm + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_ALTERNATIVE_ASM_H */ diff --git a/arch/loongarch/include/asm/alternative.h b/arch/loongarch/include/asm/alternative.h new file mode 100644 index 0000000000000000000000000000000000000000..cee7b29785ab250a0a9e5994037b0b0b3c794c1f --- /dev/null +++ b/arch/loongarch/include/asm/alternative.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ALTERNATIVE_H +#define _ASM_ALTERNATIVE_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +struct alt_instr { + s32 instr_offset; /* offset to original instruction */ + s32 replace_offset; /* offset to replacement instruction */ + u16 feature; /* feature bit set for replacement */ + u8 instrlen; /* length of original instruction */ + u8 replacementlen; /* length of new instruction */ +} __packed; + +/* + * Debug flag that can be tested to see whether alternative + * instructions were patched in already: + */ +extern int alternatives_patched; +extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; + +extern void alternative_instructions(void); +extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); + +#define b_replacement(num) "664"#num +#define e_replacement(num) "665"#num + +#define alt_end_marker "663" +#define alt_slen "662b-661b" +#define alt_total_slen alt_end_marker"b-661b" +#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f" + +#define __OLDINSTR(oldinstr, num) \ + "661:\n\t" oldinstr "\n662:\n" \ + ".fill -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \ + "((" alt_rlen(num) ")-(" alt_slen ")) / 4, 4, 0x03400000\n" + +#define OLDINSTR(oldinstr, num) \ + __OLDINSTR(oldinstr, num) \ + alt_end_marker ":\n" + +#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))" + +/* + * Pad the second replacement alternative with additional NOPs if it is + * additionally longer than the first replacement alternative. 
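+ *
+ * Worked example: with a single 4-byte @oldinstr and a longer
+ * replacement of two instructions (8 bytes), the .fill below emits
+ * (8 - 4) / 4 = 1 padding word of 0x03400000, the same NOP encoding
+ * this series defines as INSN_NOP.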
+ */ +#define OLDINSTR_2(oldinstr, num1, num2) \ + "661:\n\t" oldinstr "\n662:\n" \ + ".fill -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \ + "(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) / 4, " \ + "4, 0x03400000\n" \ + alt_end_marker ":\n" + +#define ALTINSTR_ENTRY(feature, num) \ + " .long 661b - .\n" /* label */ \ + " .long " b_replacement(num)"f - .\n" /* new instruction */ \ + " .short " __stringify(feature) "\n" /* feature bit */ \ + " .byte " alt_total_slen "\n" /* source len */ \ + " .byte " alt_rlen(num) "\n" /* replacement len */ + +#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \ + b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t" + +/* alternative assembly primitive: */ +#define ALTERNATIVE(oldinstr, newinstr, feature) \ + OLDINSTR(oldinstr, 1) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(feature, 1) \ + ".popsection\n" \ + ".subsection 1\n" \ + ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ + ".previous\n" + +#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ + OLDINSTR_2(oldinstr, 1, 2) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(feature1, 1) \ + ALTINSTR_ENTRY(feature2, 2) \ + ".popsection\n" \ + ".subsection 1\n" \ + ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ + ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ + ".previous\n" + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * length of oldinstr must be longer or equal the length of newinstr + * It can be padded with nops as needed. + * + * For non barrier like inlines please define new variants + * without volatile and memory clobber. 
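+ *
+ * A usage sketch for the wrappers defined below (the feature bit
+ * name is a placeholder):
+ *
+ *	alternative("nop", "new_insn", CPU_FEATURE_FOO);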
+ */ +#define alternative(oldinstr, newinstr, feature) \ + (asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")) + +#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \ + (asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory")) + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_ALTERNATIVE_H */ diff --git a/arch/loongarch/include/asm/asm-extable.h b/arch/loongarch/include/asm/asm-extable.h new file mode 100644 index 0000000000000000000000000000000000000000..df05005f2b80a22bbb8cb205335e1f0e7d15031e --- /dev/null +++ b/arch/loongarch/include/asm/asm-extable.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_ASM_EXTABLE_H +#define __ASM_ASM_EXTABLE_H + +#define EX_TYPE_NONE 0 +#define EX_TYPE_FIXUP 1 +#define EX_TYPE_UACCESS_ERR_ZERO 2 +#define EX_TYPE_BPF 3 + +#ifdef __ASSEMBLY__ + +#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ + .pushsection __ex_table, "a"; \ + .balign 4; \ + .long ((insn) - .); \ + .long ((fixup) - .); \ + .short (type); \ + .short (data); \ + .popsection; + + .macro _asm_extable, insn, fixup + __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0) + .endm + +#else /* __ASSEMBLY__ */ + +#include +#include +#include + +#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ + ".pushsection __ex_table, \"a\"\n" \ + ".balign 4\n" \ + ".long ((" insn ") - .)\n" \ + ".long ((" fixup ") - .)\n" \ + ".short (" type ")\n" \ + ".short (" data ")\n" \ + ".popsection\n" + +#define _ASM_EXTABLE(insn, fixup) \ + __ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0") + +#define EX_DATA_REG_ERR_SHIFT 0 +#define EX_DATA_REG_ERR GENMASK(4, 0) +#define EX_DATA_REG_ZERO_SHIFT 5 +#define EX_DATA_REG_ZERO GENMASK(9, 5) + +#define EX_DATA_REG(reg, gpr) \ + "((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")" + +#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \ + __DEFINE_ASM_GPR_NUMS \ + __ASM_EXTABLE_RAW(#insn, #fixup, \ + __stringify(EX_TYPE_UACCESS_ERR_ZERO), \ + "(" \ + EX_DATA_REG(ERR, err) " | " \ + EX_DATA_REG(ZERO, zero) \ + ")") + +#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_ASM_EXTABLE_H */ diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h index ed0910e8b856b8804c2d1027f44e74be8e23f620..0051b526ac6d31307643dc317b6310a72ba53217 100644 --- a/arch/loongarch/include/asm/bootinfo.h +++ b/arch/loongarch/include/asm/bootinfo.h @@ -32,6 +32,7 @@ struct loongson_system_configuration { int cores_per_node; int cores_per_package; unsigned long cores_io_master; + unsigned long suspend_addr; const char *cpuname; }; diff --git a/arch/loongarch/include/asm/bugs.h b/arch/loongarch/include/asm/bugs.h new file mode 100644 index 0000000000000000000000000000000000000000..98396535163b34d1468384914a893df8be627707 --- /dev/null +++ b/arch/loongarch/include/asm/bugs.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This is included by init/main.c to check for architecture-dependent bugs. 
+ * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_BUGS_H +#define _ASM_BUGS_H + +#include +#include + +extern void check_bugs(void); + +#endif /* _ASM_BUGS_H */ diff --git a/arch/loongarch/include/asm/efi.h b/arch/loongarch/include/asm/efi.h index 97f16e60c6ff2f2e76aa29f36847a5ca4b6e35b9..091897d40b0375758b4822ae1a95719013314709 100644 --- a/arch/loongarch/include/asm/efi.h +++ b/arch/loongarch/include/asm/efi.h @@ -9,6 +9,7 @@ void __init efi_init(void); void __init efi_runtime_init(void); +void __init *efi_fdt_pointer(void); void efifb_setup_from_dmi(struct screen_info *si, const char *opt); #define ARCH_EFI_IRQ_FLAGS_MASK 0x00000004 /* Bit 2: CSR.CRMD.IE */ diff --git a/arch/loongarch/include/asm/extable.h b/arch/loongarch/include/asm/extable.h new file mode 100644 index 0000000000000000000000000000000000000000..5abf29f1bc9196be6aef78ed57243b21da6415fd --- /dev/null +++ b/arch/loongarch/include/asm/extable.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_EXTABLE_H +#define _ASM_LOONGARCH_EXTABLE_H + +/* + * The exception table consists of pairs of relative offsets: the first + * is the relative offset to an instruction that is allowed to fault, + * and the second is the relative offset at which the program should + * continue. No registers are modified, so it is entirely up to the + * continuation code to figure out what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry { + int insn, fixup; + short type, data; +}; + +#define ARCH_HAS_RELATIVE_EXTABLE + +#define swap_ex_entry_fixup(a, b, tmp, delta) \ +do { \ + (a)->fixup = (b)->fixup + (delta); \ + (b)->fixup = (tmp).fixup - (delta); \ + (a)->type = (b)->type; \ + (b)->type = (tmp).type; \ + (a)->data = (b)->data; \ + (b)->data = (tmp).data; \ +} while (0) + +#ifdef CONFIG_BPF_JIT +bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs); +#else +static inline +bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs) +{ + return false; +} +#endif /* !CONFIG_BPF_JIT */ + +bool fixup_exception(struct pt_regs *regs); + +#endif diff --git a/arch/loongarch/include/asm/ftrace.h b/arch/loongarch/include/asm/ftrace.h new file mode 100644 index 0000000000000000000000000000000000000000..90f9d3399b2aafe62a495e93f0735898d4b28715 --- /dev/null +++ b/arch/loongarch/include/asm/ftrace.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_LOONGARCH_FTRACE_H +#define _ASM_LOONGARCH_FTRACE_H + +#define FTRACE_PLT_IDX 0 +#define FTRACE_REGS_PLT_IDX 1 +#define NR_FTRACE_PLTS 2 + +#define GRAPH_FAKE_OFFSET (sizeof(struct pt_regs) - offsetof(struct pt_regs, regs[1])) + +#ifdef CONFIG_FUNCTION_TRACER + +#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ + +#ifndef __ASSEMBLY__ + +#ifndef CONFIG_DYNAMIC_FTRACE + +#define mcount _mcount +extern void _mcount(void); +extern void prepare_ftrace_return(unsigned long self_addr, unsigned long callsite_sp, unsigned long old); + +#else + +struct dyn_ftrace; +struct dyn_arch_ftrace { }; + +#define ARCH_SUPPORTS_FTRACE_OPS 1 +#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR + +#define ftrace_init_nop ftrace_init_nop +int ftrace_init_nop(struct module *mod, struct dyn_ftrace 
*rec); + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr; +} + +void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent); + +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS +struct ftrace_ops; + +struct ftrace_regs { + struct pt_regs regs; +}; + +static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs) +{ + return &fregs->regs; +} + +#define ftrace_graph_func ftrace_graph_func +void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct ftrace_regs *fregs); +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_FUNCTION_TRACER */ + +#endif /* _ASM_LOONGARCH_FTRACE_H */ diff --git a/arch/loongarch/include/asm/futex.h b/arch/loongarch/include/asm/futex.h index feb6658c84ff8b04e18a684ee9cddca18bfb1c33..042ca4448e4d371c0885308a85a8c1089340eaaf 100644 --- a/arch/loongarch/include/asm/futex.h +++ b/arch/loongarch/include/asm/futex.h @@ -7,6 +7,7 @@ #include #include +#include #include #include @@ -18,18 +19,11 @@ "2: sc.w $t0, %2 \n" \ " beqz $t0, 1b \n" \ "3: \n" \ - " .section .fixup,\"ax\" \n" \ - "4: li.w %0, %6 \n" \ - " b 3b \n" \ - " .previous \n" \ - " .section __ex_table,\"a\" \n" \ - " "__UA_ADDR "\t1b, 4b \n" \ - " "__UA_ADDR "\t2b, 4b \n" \ - " .previous \n" \ + _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \ + _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \ : "=r" (ret), "=&r" (oldval), \ "=ZC" (*uaddr) \ - : "0" (0), "ZC" (*uaddr), "Jr" (oparg), \ - "i" (-EFAULT) \ + : "0" (0), "ZC" (*uaddr), "Jr" (oparg) \ : "memory", "t0"); \ } @@ -86,17 +80,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv " beqz $t0, 1b \n" "3: \n" __WEAK_LLSC_MB - " .section .fixup,\"ax\" \n" - "4: li.d %0, %6 \n" - " b 3b \n" - " .previous \n" - " .section __ex_table,\"a\" \n" - " "__UA_ADDR "\t1b, 4b \n" - " "__UA_ADDR "\t2b, 4b \n" - " .previous \n" + _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) + _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) : "+r" (ret), "=&r" (val), "=ZC" (*uaddr) - : "ZC" (*uaddr), "Jr" (oldval), "Jr" (newval), - "i" (-EFAULT) + : "ZC" (*uaddr), "Jr" (oldval), "Jr" (newval) : "memory", "t0"); *uval = val; diff --git a/arch/loongarch/include/asm/gpr-num.h b/arch/loongarch/include/asm/gpr-num.h new file mode 100644 index 0000000000000000000000000000000000000000..e0941af20c7e77b380b15696e6aecf7d629ee18b --- /dev/null +++ b/arch/loongarch/include/asm/gpr-num.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_GPR_NUM_H +#define __ASM_GPR_NUM_H + +#ifdef __ASSEMBLY__ + + .equ .L__gpr_num_zero, 0 + .irp num,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + .equ .L__gpr_num_$r\num, \num + .endr + +#else /* __ASSEMBLY__ */ + +#define __DEFINE_ASM_GPR_NUMS \ +" .equ .L__gpr_num_zero, 0\n" \ +" .irp num,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \ +" .equ .L__gpr_num_$r\\num, \\num\n" \ +" .endr\n" \ + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_GPR_NUM_H */ diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index fce1843ceebb36a30e67fd01554bca37932dd383..c00e1512d4fa33e49bf4b6c4627460938510de75 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -8,14 +8,17 @@ #include #include +#define INSN_NOP 0x03400000 #define INSN_BREAK 0x002a0000 #define ADDR_IMMMASK_LU52ID 0xFFF0000000000000 #define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000 +#define ADDR_IMMMASK_LU12IW 
0x00000000FFFFF000 #define ADDR_IMMMASK_ADDU16ID 0x00000000FFFF0000 #define ADDR_IMMSHIFT_LU52ID 52 #define ADDR_IMMSHIFT_LU32ID 32 +#define ADDR_IMMSHIFT_LU12IW 12 #define ADDR_IMMSHIFT_ADDU16ID 16 #define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN) @@ -28,6 +31,7 @@ enum reg0i26_op { enum reg1i20_op { lu12iw_op = 0x0a, lu32id_op = 0x0b, + pcaddi_op = 0x0c, pcaddu12i_op = 0x0e, pcaddu18i_op = 0x0f, }; @@ -35,6 +39,8 @@ enum reg1i20_op { enum reg1i21_op { beqz_op = 0x10, bnez_op = 0x11, + bceqz_op = 0x12, /* bits[9:8] = 0x00 */ + bcnez_op = 0x12, /* bits[9:8] = 0x01 */ }; enum reg2_op { @@ -76,6 +82,10 @@ enum reg2i12_op { ldbu_op = 0xa8, ldhu_op = 0xa9, ldwu_op = 0xaa, + flds_op = 0xac, + fsts_op = 0xad, + fldd_op = 0xae, + fstd_op = 0xaf, }; enum reg2i14_op { @@ -146,6 +156,10 @@ enum reg3_op { ldxbu_op = 0x7040, ldxhu_op = 0x7048, ldxwu_op = 0x7050, + fldxs_op = 0x7060, + fldxd_op = 0x7068, + fstxs_op = 0x7070, + fstxd_op = 0x7078, amswapw_op = 0x70c0, amswapd_op = 0x70c1, amaddw_op = 0x70c2, @@ -307,6 +321,12 @@ static inline bool is_imm_negative(unsigned long val, unsigned int bit) return val & (1UL << (bit - 1)); } +static inline bool is_pc_ins(union loongarch_instruction *ip) +{ + return ip->reg1i20_format.opcode >= pcaddi_op && + ip->reg1i20_format.opcode <= pcaddu18i_op; +} + static inline bool is_branch_ins(union loongarch_instruction *ip) { return ip->reg1i21_format.opcode >= beqz_op && @@ -331,6 +351,18 @@ static inline bool is_stack_alloc_ins(union loongarch_instruction *ip) is_imm12_negative(ip->reg2i12_format.immediate); } +int larch_insn_read(void *addr, u32 *insnp); +int larch_insn_write(void *addr, u32 insn); +int larch_insn_patch_text(void *addr, u32 insn); + +u32 larch_insn_gen_nop(void); +u32 larch_insn_gen_b(unsigned long pc, unsigned long dest); +u32 larch_insn_gen_bl(unsigned long pc, unsigned long dest); + +u32 larch_insn_gen_or(enum loongarch_gpr rd, enum loongarch_gpr rj, enum loongarch_gpr rk); +u32 larch_insn_gen_move(enum loongarch_gpr rd, enum loongarch_gpr rj); + +u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm); u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm); u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm); u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest); @@ -345,6 +377,14 @@ static inline bool unsigned_imm_check(unsigned long val, unsigned int bit) return val < (1UL << bit); } +static inline unsigned long sign_extend(unsigned long val, unsigned int idx) +{ + if (!is_imm_negative(val, idx + 1)) + return ((1UL << idx) - 1) & val; + else + return ~((1UL << idx) - 1) | val; +} + #define DEF_EMIT_REG0I26_FORMAT(NAME, OP) \ static inline void emit_##NAME(union loongarch_instruction *insn, \ int offset) \ @@ -566,4 +606,10 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \ DEF_EMIT_REG3SA2_FORMAT(alsld, alsld_op) +struct pt_regs; + +void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned int *pc); +unsigned long unaligned_read(void __user *addr, void *value, unsigned long n, bool sign); +unsigned long unaligned_write(void __user *addr, unsigned long value, unsigned long n); + #endif /* _ASM_INST_H */ diff --git a/arch/loongarch/include/asm/loongson.h b/arch/loongarch/include/asm/loongson.h index 00db93edae1ba3f6c37ad0db0ac01b3b06ef06b8..12494cffffd1741e8e187c9ac3bed69764b598f6 100644 --- a/arch/loongarch/include/asm/loongson.h +++ b/arch/loongarch/include/asm/loongson.h @@ 
-136,4 +136,7 @@ typedef enum { #define ls7a_writel(val, addr) *(volatile unsigned int *)TO_UNCACHE(addr) = (val) #define ls7a_writeq(val, addr) *(volatile unsigned long *)TO_UNCACHE(addr) = (val) +void enable_gpe_wakeup(void); +void enable_pci_wakeup(void); + #endif /* __ASM_LOONGSON_H */ diff --git a/arch/loongarch/include/asm/module.h b/arch/loongarch/include/asm/module.h index b29b19a46f4270553c03ce85d1a5d59be78b9d26..12a0f1e66916d0c9b6cab0438349228aca3cabfa 100644 --- a/arch/loongarch/include/asm/module.h +++ b/arch/loongarch/include/asm/module.h @@ -11,7 +11,7 @@ #define RELA_STACK_DEPTH 16 struct mod_section { - Elf_Shdr *shdr; + int shndx; int num_entries; int max_entries; }; @@ -20,6 +20,9 @@ struct mod_arch_specific { struct mod_section got; struct mod_section plt; struct mod_section plt_idx; + + /* For CONFIG_DYNAMIC_FTRACE */ + struct plt_entry *ftrace_trampolines; }; struct got_entry { @@ -37,8 +40,8 @@ struct plt_idx_entry { Elf_Addr symbol_addr; }; -Elf_Addr module_emit_got_entry(struct module *mod, Elf_Addr val); -Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Addr val); +Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val); +Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val); static inline struct got_entry emit_got_entry(Elf_Addr val) { @@ -49,7 +52,7 @@ static inline struct plt_entry emit_plt_entry(unsigned long val) { u32 lu12iw, lu32id, lu52id, jirl; - lu12iw = (lu12iw_op << 25 | (((val >> 12) & 0xfffff) << 5) | LOONGARCH_GPR_T1); + lu12iw = larch_insn_gen_lu12iw(LOONGARCH_GPR_T1, ADDR_IMM(val, LU12IW)); lu32id = larch_insn_gen_lu32id(LOONGARCH_GPR_T1, ADDR_IMM(val, LU32ID)); lu52id = larch_insn_gen_lu52id(LOONGARCH_GPR_T1, LOONGARCH_GPR_T1, ADDR_IMM(val, LU52ID)); jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, 0, (val & 0xfff)); @@ -62,10 +65,10 @@ static inline struct plt_idx_entry emit_plt_idx_entry(unsigned long val) return (struct plt_idx_entry) { val }; } -static inline int get_plt_idx(unsigned long val, const struct mod_section *sec) +static inline int get_plt_idx(unsigned long val, Elf_Shdr *sechdrs, const struct mod_section *sec) { int i; - struct plt_idx_entry *plt_idx = (struct plt_idx_entry *)sec->shdr->sh_addr; + struct plt_idx_entry *plt_idx = (struct plt_idx_entry *)sechdrs[sec->shndx].sh_addr; for (i = 0; i < sec->num_entries; i++) { if (plt_idx[i].symbol_addr == val) @@ -76,11 +79,12 @@ static inline int get_plt_idx(unsigned long val, const struct mod_section *sec) } static inline struct plt_entry *get_plt_entry(unsigned long val, - const struct mod_section *sec_plt, - const struct mod_section *sec_plt_idx) + Elf_Shdr *sechdrs, + const struct mod_section *sec_plt, + const struct mod_section *sec_plt_idx) { - int plt_idx = get_plt_idx(val, sec_plt_idx); - struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr; + int plt_idx = get_plt_idx(val, sechdrs, sec_plt_idx); + struct plt_entry *plt = (struct plt_entry *)sechdrs[sec_plt->shndx].sh_addr; if (plt_idx < 0) return NULL; @@ -89,10 +93,11 @@ static inline struct plt_entry *get_plt_entry(unsigned long val, } static inline struct got_entry *get_got_entry(Elf_Addr val, + Elf_Shdr *sechdrs, const struct mod_section *sec) { - struct got_entry *got = (struct got_entry *)sec->shdr->sh_addr; int i; + struct got_entry *got = (struct got_entry *)sechdrs[sec->shndx].sh_addr; for (i = 0; i < sec->num_entries; i++) if (got[i].symbol_addr == val) diff --git a/arch/loongarch/include/asm/module.lds.h 
b/arch/loongarch/include/asm/module.lds.h index a3d1bc0fcc72e99dcab35aa623f8956fdbec41f3..438f09d4ccf41d6032f07b7ebcb3799ff7bf4c22 100644 --- a/arch/loongarch/include/asm/module.lds.h +++ b/arch/loongarch/include/asm/module.lds.h @@ -5,4 +5,5 @@ SECTIONS { .got : { BYTE(0) } .plt : { BYTE(0) } .plt.idx : { BYTE(0) } + .ftrace_trampoline : { BYTE(0) } } diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h index 4bfeb3c9c9acc5602e231d6774933423f5076ab9..af1d1e4a696595bb4cf2d7f38247eac7318aadbe 100644 --- a/arch/loongarch/include/asm/pgalloc.h +++ b/arch/loongarch/include/asm/pgalloc.h @@ -42,15 +42,6 @@ static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) extern void pagetable_init(void); -/* - * Initialize a new pmd table with invalid pointers. - */ -extern void pmd_init(unsigned long page, unsigned long pagetable); - -/* - * Initialize a new pgd / pmd table with invalid pointers. - */ -extern void pgd_init(unsigned long page); extern pgd_t *pgd_alloc(struct mm_struct *mm); #define __pte_free_tlb(tlb, pte, address) \ @@ -76,7 +67,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) } pmd = (pmd_t *)page_address(pg); - pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table); + pmd_init(pmd); return pmd; } @@ -92,7 +83,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) pud = (pud_t *) __get_free_page(GFP_KERNEL); if (pud) - pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table); + pud_init(pud); return pud; } diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index 79d5bfd913e0fb4843b2fc96a109ceb3e7bb6087..7a34e900d8c18a610af721426904438b5eb8c013 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -11,6 +11,7 @@ #include #include +#include #include #if CONFIG_PGTABLE_LEVELS == 2 @@ -59,6 +60,7 @@ #include #include #include +#include struct mm_struct; struct vm_area_struct; @@ -86,7 +88,10 @@ extern unsigned long zero_page_mask; #define VMALLOC_START MODULES_END #define VMALLOC_END \ (vm_map_base + \ - min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE) + min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE) + +#define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK)) +#define VMEMMAP_END ((unsigned long)vmemmap + VMEMMAP_SIZE - 1) #define pte_ERROR(e) \ pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) @@ -237,11 +242,11 @@ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pm #define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot)) /* - * Initialize a new pgd / pmd table with invalid pointers. + * Initialize a new pgd / pud / pmd table with invalid pointers. 
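 */

Illustrative sketch (not part of the patch): what "initialize with invalid
pointers" means for these helpers - every slot of a freshly allocated table is
pointed at the shared invalid lower-level table, so a walk through an unmapped
range lands on a safe sentinel instead of junk. The entry count and names below
are made up for illustration; the patch's real pgd_init()/pud_init()/pmd_init()
take only the table address.

	#include <stdio.h>

	#define ENTRIES_PER_TABLE 512	/* illustrative; depends on page size */

	static void pmd_init_sketch(unsigned long *table, unsigned long invalid_pte)
	{
		unsigned int i;

		for (i = 0; i < ENTRIES_PER_TABLE; i++)
			table[i] = invalid_pte;	/* point at the sentinel table */
	}

	int main(void)
	{
		static unsigned long table[ENTRIES_PER_TABLE];

		pmd_init_sketch(table, 0xdeadUL);
		printf("entry 0 = %#lx\n", table[0]);
		return 0;
	}

/*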
*/ -extern void pgd_init(unsigned long page); -extern void pud_init(unsigned long page, unsigned long pagetable); -extern void pmd_init(unsigned long page, unsigned long pagetable); +extern void pgd_init(void *addr); +extern void pud_init(void *addr); +extern void pmd_init(void *addr); /* * Non-present pages: high 40 bits are offset, next 8 bits type, @@ -425,8 +430,6 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, __update_tlb(vma, address, (pte_t *)pmdp); } -#define kern_addr_valid(addr) (1) - static inline unsigned long pmd_pfn(pmd_t pmd) { return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT; diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h index ca373f8e3c4db29a42fccdcca72a816135ea286c..72ead58039f3e159ae13c7e066c4ee9793aa2208 100644 --- a/arch/loongarch/include/asm/setup.h +++ b/arch/loongarch/include/asm/setup.h @@ -13,6 +13,7 @@ extern unsigned long eentry; extern unsigned long tlbrentry; +extern char init_command_line[COMMAND_LINE_SIZE]; extern void tlb_init(int cpu); extern void cpu_cache_init(void); extern void cache_error_setup(void); diff --git a/arch/loongarch/include/asm/sparsemem.h b/arch/loongarch/include/asm/sparsemem.h index 3d18cdf1b06939fe755b1882294af0273a7bce18..8d4af6aff8a8f1e149f59b4a9b34b80bf7197db2 100644 --- a/arch/loongarch/include/asm/sparsemem.h +++ b/arch/loongarch/include/asm/sparsemem.h @@ -11,8 +11,16 @@ #define SECTION_SIZE_BITS 29 /* 2^29 = Largest Huge Page Size */ #define MAX_PHYSMEM_BITS 48 +#ifdef CONFIG_SPARSEMEM_VMEMMAP +#define VMEMMAP_SIZE (sizeof(struct page) * (1UL << (cpu_pabits + 1 - PAGE_SHIFT))) +#endif + #endif /* CONFIG_SPARSEMEM */ +#ifndef VMEMMAP_SIZE +#define VMEMMAP_SIZE 0 /* 1, For FLATMEM; 2, For SPARSEMEM without VMEMMAP. */ +#endif + #ifdef CONFIG_MEMORY_HOTPLUG int memory_add_physaddr_to_nid(u64 addr); #define memory_add_physaddr_to_nid memory_add_physaddr_to_nid diff --git a/arch/loongarch/include/asm/stackprotector.h b/arch/loongarch/include/asm/stackprotector.h new file mode 100644 index 0000000000000000000000000000000000000000..a1a965751a7b98108d50da9689d99bce223993cb --- /dev/null +++ b/arch/loongarch/include/asm/stackprotector.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * GCC stack protector support. + * + * Stack protector works by putting predefined pattern at the start of + * the stack frame and verifying that it hasn't been overwritten when + * returning from the function. The pattern is called stack canary and + * on LoongArch gcc expects it to be defined by a global variable called + * "__stack_chk_guard". + */ + +#ifndef _ASM_STACKPROTECTOR_H +#define _ASM_STACKPROTECTOR_H + +#include +#include + +extern unsigned long __stack_chk_guard; + +/* + * Initialize the stackprotector canary value. + * + * NOTE: this must only be called from functions that never return, + * and it must always be inlined. + */ +static __always_inline void boot_init_stack_canary(void) +{ + unsigned long canary; + + /* Try to get a semi random initial value. 
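 */

Illustrative sketch (not part of the patch): the guard set up here is checked
by compiler-emitted code, not by hand-written kernel code. A user-space model
of what the compiler effectively inserts around a vulnerable function; all
names below are stand-ins, not kernel symbols.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static unsigned long guard = 0x2f8e3a91c4d5b607UL; /* __stack_chk_guard stand-in */

	static void copy_checked(const char *src)
	{
		unsigned long canary = guard;	/* prologue: stash the guard */
		char buf[16];

		strncpy(buf, src, sizeof(buf) - 1);
		buf[sizeof(buf) - 1] = '\0';

		if (canary != guard) {		/* epilogue: verify before return */
			fputs("stack smashing detected\n", stderr);
			abort();
		}
	}

	int main(void)
	{
		copy_checked("hello");
		puts("canary intact");
		return 0;
	}

/*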
*/ + get_random_bytes(&canary, sizeof(canary)); + canary ^= LINUX_VERSION_CODE; + + current->stack_canary = canary; + __stack_chk_guard = current->stack_canary; +} + +#endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h index b07e60ded9577c8d4b128d88b0f08c7230385214..7b29cc9c70aa617049a3790f8cff9632ff100a66 100644 --- a/arch/loongarch/include/asm/string.h +++ b/arch/loongarch/include/asm/string.h @@ -5,8 +5,13 @@ #ifndef _ASM_STRING_H #define _ASM_STRING_H +#define __HAVE_ARCH_MEMSET extern void *memset(void *__s, int __c, size_t __count); + +#define __HAVE_ARCH_MEMCPY extern void *memcpy(void *__to, __const__ void *__from, size_t __n); + +#define __HAVE_ARCH_MEMMOVE extern void *memmove(void *__dest, __const__ void *__src, size_t __n); #endif /* _ASM_STRING_H */ diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h index b7dd9f19a5a9c04eb6c3a92c312bb3d2eeec8023..1a3354ca056e9af11e15bfe80ffce5ad97c9e640 100644 --- a/arch/loongarch/include/asm/thread_info.h +++ b/arch/loongarch/include/asm/thread_info.h @@ -38,7 +38,7 @@ struct thread_info { #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ - .flags = 0, \ + .flags = _TIF_FIXADE, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ } diff --git a/arch/loongarch/include/asm/time.h b/arch/loongarch/include/asm/time.h index 2eae219301d0a963cbb53747569067b53f93286b..037a2d1b8ff4cc8ca1ccc3e98f4cbe8c8ed2cedb 100644 --- a/arch/loongarch/include/asm/time.h +++ b/arch/loongarch/include/asm/time.h @@ -12,6 +12,7 @@ extern u64 cpu_clock_freq; extern u64 const_clock_freq; +extern void save_counter(void); extern void sync_counter(void); static inline unsigned int calc_const_freq(void) diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h index a8ae2af4025ab30f5f165c30f09f408f9377f69a..255899d4a7c36a73ad9802b6f71c4bf75a0ae9f6 100644 --- a/arch/loongarch/include/asm/uaccess.h +++ b/arch/loongarch/include/asm/uaccess.h @@ -15,7 +15,8 @@ #include #include #include -#include +#include +#include #include extern u64 __ua_limit; @@ -160,16 +161,9 @@ do { \ __asm__ __volatile__( \ "1: " insn " %1, %2 \n" \ "2: \n" \ - " .section .fixup,\"ax\" \n" \ - "3: li.w %0, %3 \n" \ - " move %1, $zero \n" \ - " b 2b \n" \ - " .previous \n" \ - " .section __ex_table,\"a\" \n" \ - " "__UA_ADDR "\t1b, 3b \n" \ - " .previous \n" \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1) \ : "+r" (__gu_err), "=r" (__gu_tmp) \ - : "m" (__m(ptr)), "i" (-EFAULT)); \ + : "m" (__m(ptr))); \ \ (val) = (__typeof__(*(ptr))) __gu_tmp; \ } @@ -192,15 +186,9 @@ do { \ __asm__ __volatile__( \ "1: " insn " %z2, %1 # __put_user_asm\n" \ "2: \n" \ - " .section .fixup,\"ax\" \n" \ - "3: li.w %0, %3 \n" \ - " b 2b \n" \ - " .previous \n" \ - " .section __ex_table,\"a\" \n" \ - " " __UA_ADDR " 1b, 3b \n" \ - " .previous \n" \ + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0) \ : "+r" (__pu_err), "=m" (__m(ptr)) \ - : "Jr" (__pu_val), "i" (-EFAULT)); \ + : "Jr" (__pu_val)); \ } #define __get_kernel_nofault(dst, src, type, err_label) \ diff --git a/arch/loongarch/include/asm/unwind.h b/arch/loongarch/include/asm/unwind.h index 6af4718bdf015fbd40ecbab9fc8619a5f6660add..f2b52b9ea93d2862d69e9e7f1075f6e1bcfee619 100644 --- a/arch/loongarch/include/asm/unwind.h +++ b/arch/loongarch/include/asm/unwind.h @@ -20,7 +20,8 @@ struct unwind_state { char type; /* UNWINDER_XXX */ struct stack_info stack_info; struct task_struct *task; - bool first, error; + bool first, error, 
is_ftrace; + int graph_idx; unsigned long sp, pc, ra; }; diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index 42be564278fa1948167ca660672ccd839353ecb9..fcaa024a685ec6df4a983daa59dbf2f967b038af 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -7,13 +7,27 @@ extra-y := vmlinux.lds obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \ traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \ - elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o + elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \ + alternative.o unaligned.o obj-$(CONFIG_ACPI) += acpi.o obj-$(CONFIG_EFI) += efi.o obj-$(CONFIG_CPU_HAS_FPU) += fpu.o +ifdef CONFIG_FUNCTION_TRACER + ifndef CONFIG_DYNAMIC_FTRACE + obj-y += mcount.o ftrace.o + CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) + else + obj-y += mcount_dyn.o ftrace_dyn.o + CFLAGS_REMOVE_ftrace_dyn.o = $(CC_FLAGS_FTRACE) + endif + CFLAGS_REMOVE_inst.o = $(CC_FLAGS_FTRACE) + CFLAGS_REMOVE_time.o = $(CC_FLAGS_FTRACE) + CFLAGS_REMOVE_perf_event.o = $(CC_FLAGS_FTRACE) +endif + obj-$(CONFIG_MODULES) += module.o module-sections.o obj-$(CONFIG_STACKTRACE) += stacktrace.o diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 8319cc40900908fb2c88ae28ef094afaf9464641..98f431157e4c1c1815fc08b2a6df06a2c432c363 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -139,20 +140,26 @@ static void __init acpi_process_madt(void) loongson_sysconf.nr_cpus = num_processors; } +#ifndef CONFIG_SUSPEND +int (*acpi_suspend_lowlevel)(void); +#else +int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend; +#endif + void __init acpi_boot_table_init(void) { /* * If acpi_disabled, bail out */ if (acpi_disabled) - return; + goto fdt_earlycon; /* * Initialize the ACPI boot-time table parser. */ if (acpi_table_init()) { disable_acpi(); - return; + goto fdt_earlycon; } loongson_sysconf.boot_cpu_id = read_csr_cpuid(); @@ -164,6 +171,12 @@ void __init acpi_boot_table_init(void) /* Do not enable ACPI SPCR console by default */ acpi_parse_spcr(earlycon_acpi_spcr_enable, false); + + return; + +fdt_earlycon: + if (earlycon_acpi_spcr_enable) + early_init_dt_scan_chosen_stdout(); } #ifdef CONFIG_ACPI_NUMA diff --git a/arch/loongarch/kernel/alternative.c b/arch/loongarch/kernel/alternative.c new file mode 100644 index 0000000000000000000000000000000000000000..c5aebeac960b6e19908550185a49363c54f0768e --- /dev/null +++ b/arch/loongarch/kernel/alternative.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include + +int __read_mostly alternatives_patched; + +EXPORT_SYMBOL_GPL(alternatives_patched); + +#define MAX_PATCH_SIZE (((u8)(-1)) / LOONGARCH_INSN_SIZE) + +static int __initdata_or_module debug_alternative; + +static int __init debug_alt(char *str) +{ + debug_alternative = 1; + return 1; +} +__setup("debug-alternative", debug_alt); + +#define DPRINTK(fmt, args...) \ +do { \ + if (debug_alternative) \ + printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args); \ +} while (0) + +#define DUMP_WORDS(buf, count, fmt, args...) 
\
+do { \
+	if (unlikely(debug_alternative)) { \
+		int _j; \
+		union loongarch_instruction *_buf = buf; \
+ \
+		if (!(count)) \
+			break; \
+ \
+		printk(KERN_DEBUG fmt, ##args); \
+		for (_j = 0; _j < count - 1; _j++) \
+			printk(KERN_CONT "<%08x> ", _buf[_j].word); \
+		printk(KERN_CONT "<%08x>\n", _buf[_j].word); \
+	} \
+} while (0)
+
+/* Use this to add nops to a buffer, then text_poke the whole buffer. */
+static void __init_or_module add_nops(union loongarch_instruction *insn, int count)
+{
+	while (count--) {
+		insn->word = INSN_NOP;
+		insn++;
+	}
+}
+
+/* Is the jump addr in local .altinstructions */
+static inline bool in_alt_jump(unsigned long jump, void *start, void *end)
+{
+	return jump >= (unsigned long)start && jump < (unsigned long)end;
+}
+
+static void __init_or_module recompute_jump(union loongarch_instruction *buf,
+	union loongarch_instruction *dest, union loongarch_instruction *src,
+	void *start, void *end)
+{
+	unsigned int si, si_l, si_h;
+	unsigned long cur_pc, jump_addr, pc;
+	long offset;
+
+	cur_pc = (unsigned long)src;
+	pc = (unsigned long)dest;
+
+	si_l = src->reg0i26_format.immediate_l;
+	si_h = src->reg0i26_format.immediate_h;
+	switch (src->reg0i26_format.opcode) {
+	case b_op:
+	case bl_op:
+		jump_addr = cur_pc + sign_extend((si_h << 16 | si_l) << 2, 27);
+		if (in_alt_jump(jump_addr, start, end))
+			return;
+		offset = jump_addr - pc;
+		BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
+		offset >>= 2;
+		buf->reg0i26_format.immediate_h = offset >> 16;
+		buf->reg0i26_format.immediate_l = offset;
+		return;
+	}
+
+	si_l = src->reg1i21_format.immediate_l;
+	si_h = src->reg1i21_format.immediate_h;
+	switch (src->reg1i21_format.opcode) {
+	case bceqz_op: /* bceqz_op = bcnez_op */
+		BUG_ON(buf->reg1i21_format.rj & BIT(4));
+		fallthrough;
+	case beqz_op:
+	case bnez_op:
+		jump_addr = cur_pc + sign_extend((si_h << 16 | si_l) << 2, 22);
+		if (in_alt_jump(jump_addr, start, end))
+			return;
+		offset = jump_addr - pc;
+		BUG_ON(offset < -SZ_4M || offset >= SZ_4M);
+		offset >>= 2;
+		buf->reg1i21_format.immediate_h = offset >> 16;
+		buf->reg1i21_format.immediate_l = offset;
+		return;
+	}
+
+	si = src->reg2i16_format.immediate;
+	switch (src->reg2i16_format.opcode) {
+	case beq_op:
+	case bne_op:
+	case blt_op:
+	case bge_op:
+	case bltu_op:
+	case bgeu_op:
+		jump_addr = cur_pc + sign_extend(si << 2, 17);
+		if (in_alt_jump(jump_addr, start, end))
+			return;
+		offset = jump_addr - pc;
+		BUG_ON(offset < -SZ_128K || offset >= SZ_128K);
+		offset >>= 2;
+		buf->reg2i16_format.immediate = offset;
+		return;
+	}
+}
+
+static int __init_or_module copy_alt_insns(union loongarch_instruction *buf,
+	union loongarch_instruction *dest, union loongarch_instruction *src, int nr)
+{
+	int i;
+
+	for (i = 0; i < nr; i++) {
+		buf[i].word = src[i].word;
+
+		if (is_pc_ins(&src[i])) {
+			pr_err("pcrel instructions are not supported at present!\n");
+			return -EINVAL;
+		}
+
+		if (is_branch_ins(&src[i]) &&
+		    src[i].reg2i16_format.opcode != jirl_op) {
+			recompute_jump(&buf[i], &dest[i], &src[i], src, src + nr);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * text_poke_early - Update instructions on a live kernel at boot time
+ *
+ * When you use this code to patch more than one byte of an instruction
+ * you need to make sure that other CPUs cannot execute this code in parallel.
+ * Also no thread must be currently preempted in the middle of these
+ * instructions. And on the local CPU you need to be protected against NMI or
+ * MCE handlers seeing an inconsistent instruction while you patch.
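 */

A worked instance (illustrative, not part of the patch) of the b/bl
re-encoding performed by recompute_jump() above. The field split (16-bit
immediate_l, 10-bit immediate_h, word-granular offsets, +/-128M range) follows
the reg0i26_format layout used in this patch; the addresses are fabricated.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t src_pc = 0x9000000002000000ULL; /* insn's original address */
		uint64_t dst_pc = 0x9000000002100000ULL; /* where it is patched in */
		int64_t  si     = 0x2000;                /* decoded byte offset */
		uint64_t target = src_pc + si;           /* absolute branch target */
		int64_t  off    = (int64_t)(target - dst_pc);

		if (off < -(1LL << 27) || off >= (1LL << 27)) /* +/-128M b/bl range */
			return 1;

		off >>= 2;                               /* encoded in words */
		printf("immediate_l = 0x%04llx, immediate_h = 0x%03llx\n",
		       (unsigned long long)(off & 0xffff),
		       (unsigned long long)((off >> 16) & 0x3ff));
		return 0;
	}

/*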
+ */ +static void *__init_or_module text_poke_early(union loongarch_instruction *insn, + union loongarch_instruction *buf, unsigned int nr) +{ + int i; + unsigned long flags; + + local_irq_save(flags); + + for (i = 0; i < nr; i++) + insn[i].word = buf[i].word; + + local_irq_restore(flags); + + wbflush(); + flush_icache_range((unsigned long)insn, (unsigned long)(insn + nr)); + + return insn; +} + +/* + * Replace instructions with better alternatives for this CPU type. This runs + * before SMP is initialized to avoid SMP problems with self modifying code. + * This implies that asymmetric systems where APs have less capabilities than + * the boot processor are not handled. Tough. Make sure you disable such + * features by hand. + */ +void __init_or_module apply_alternatives(struct alt_instr *start, struct alt_instr *end) +{ + struct alt_instr *a; + unsigned int nr_instr, nr_repl, nr_insnbuf; + union loongarch_instruction *instr, *replacement; + union loongarch_instruction insnbuf[MAX_PATCH_SIZE]; + + DPRINTK("alt table %px, -> %px", start, end); + /* + * The scan order should be from start to end. A later scanned + * alternative code can overwrite previously scanned alternative code. + * Some kernel functions (e.g. memcpy, memset, etc) use this order to + * patch code. + * + * So be careful if you want to change the scan order to any other + * order. + */ + for (a = start; a < end; a++) { + nr_insnbuf = 0; + + instr = (void *)&a->instr_offset + a->instr_offset; + replacement = (void *)&a->replace_offset + a->replace_offset; + + BUG_ON(a->instrlen > sizeof(insnbuf)); + BUG_ON(a->instrlen & 0x3); + BUG_ON(a->replacementlen & 0x3); + + nr_instr = a->instrlen / LOONGARCH_INSN_SIZE; + nr_repl = a->replacementlen / LOONGARCH_INSN_SIZE; + + if (!cpu_has(a->feature)) { + DPRINTK("feat not exist: %d, old: (%px len: %d), repl: (%px, len: %d)", + a->feature, instr, a->instrlen, + replacement, a->replacementlen); + + continue; + } + + DPRINTK("feat: %d, old: (%px len: %d), repl: (%px, len: %d)", + a->feature, instr, a->instrlen, + replacement, a->replacementlen); + + DUMP_WORDS(instr, nr_instr, "%px: old_insn: ", instr); + DUMP_WORDS(replacement, nr_repl, "%px: rpl_insn: ", replacement); + + copy_alt_insns(insnbuf, instr, replacement, nr_repl); + nr_insnbuf = nr_repl; + + if (nr_instr > nr_repl) { + add_nops(insnbuf + nr_repl, nr_instr - nr_repl); + nr_insnbuf += nr_instr - nr_repl; + } + DUMP_WORDS(insnbuf, nr_insnbuf, "%px: final_insn: ", instr); + + text_poke_early(instr, insnbuf, nr_insnbuf); + } +} + +void __init alternative_instructions(void) +{ + apply_alternatives(__alt_instructions, __alt_instructions_end); + + alternatives_patched = 1; +} diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index bdd88eda9513f62d7b1245eafcaaeed1b4a3f2a4..4bdb203fc66e145405667a6fca09e57b786c1b8b 100644 --- a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -68,6 +68,9 @@ void output_task_defines(void) OFFSET(TASK_FLAGS, task_struct, flags); OFFSET(TASK_MM, task_struct, mm); OFFSET(TASK_PID, task_struct, pid); +#if defined(CONFIG_STACKPROTECTOR) + OFFSET(TASK_STACK_CANARY, task_struct, stack_canary); +#endif DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct)); BLANK(); } @@ -257,3 +260,15 @@ void output_smpboot_defines(void) BLANK(); } #endif + +#ifdef CONFIG_HIBERNATION +void output_pbe_defines(void) +{ + COMMENT(" Linux struct pbe offsets. 
"); + OFFSET(PBE_ADDRESS, pbe, address); + OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address); + OFFSET(PBE_NEXT, pbe, next); + DEFINE(PBE_SIZE, sizeof(struct pbe)); + BLANK(); +} +#endif diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index d75ce73e8ff82b77a3491aeb15a772740084b085..3d448fef3af454ce46dc5b52e2cbcfa56d0be11d 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -28,16 +28,29 @@ static unsigned long efi_nr_tables; static unsigned long efi_config_table; static unsigned long __initdata boot_memmap = EFI_INVALID_TABLE_ADDR; +static unsigned long __initdata fdt_pointer = EFI_INVALID_TABLE_ADDR; static efi_system_table_t *efi_systab; static efi_config_table_type_t arch_tables[] __initdata = { {LINUX_EFI_BOOT_MEMMAP_GUID, &boot_memmap, "MEMMAP" }, + {DEVICE_TREE_GUID, &fdt_pointer, "FDTPTR" }, {}, }; +void __init *efi_fdt_pointer(void) +{ + if (!efi_systab) + return NULL; + + if (fdt_pointer == EFI_INVALID_TABLE_ADDR) + return NULL; + + return early_memremap_ro(fdt_pointer, SZ_64K); +} + void __init efi_runtime_init(void) { - if (!efi_enabled(EFI_BOOT)) + if (!efi_enabled(EFI_BOOT) || !efi_systab->runtime) return; if (efi_runtime_disabled()) { diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 6d56a463b091c0f6069380565ea6e0883e7571a6..6b3bfb0092e60b34946490415ff7cd2a51287886 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -11,6 +11,7 @@ #include #include #include +#include u64 efi_system_table; struct loongson_system_configuration loongson_sysconf; @@ -27,6 +28,7 @@ void __init init_environ(void) clear_bit(EFI_BOOT, &efi.flags); strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE); + strscpy(init_command_line, cmdline, COMMAND_LINE_SIZE); early_memunmap(cmdline, COMMAND_LINE_SIZE); efi_system_table = fw_arg2; diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index 576b3370a296da0e0db3b710df2f682e6b628a12..ccde94140c896d84e746b02fc16b9ba51d9ee6fa 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -8,6 +8,7 @@ */ #include #include +#include #include #include #include @@ -21,9 +22,7 @@ .macro EX insn, reg, src, offs .ex\@: \insn \reg, \src, \offs - .section __ex_table,"a" - PTR .ex\@, fault - .previous + _asm_extable .ex\@, fault .endm .macro sc_save_fp base diff --git a/arch/loongarch/kernel/ftrace.c b/arch/loongarch/kernel/ftrace.c new file mode 100644 index 0000000000000000000000000000000000000000..8c3ec1bc7aad39743b5451dd5b9b0c7807494220 --- /dev/null +++ b/arch/loongarch/kernel/ftrace.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + +/* + * As `call _mcount` follows LoongArch psABI, ra-saved operation and + * stack operation can be found before this insn. 
+ */ + +static int ftrace_get_parent_ra_addr(unsigned long insn_addr, int *ra_off) +{ + int limit = 32; + union loongarch_instruction *insn; + + insn = (union loongarch_instruction *)insn_addr; + + do { + insn--; + limit--; + + if (is_ra_save_ins(insn)) + *ra_off = -((1 << 12) - insn->reg2i12_format.immediate); + + } while (!is_stack_alloc_ins(insn) && limit); + + if (!limit) + return -EINVAL; + + return 0; +} + +void prepare_ftrace_return(unsigned long self_addr, + unsigned long callsite_sp, unsigned long old) +{ + int ra_off; + unsigned long return_hooker = (unsigned long)&return_to_handler; + + if (unlikely(ftrace_graph_is_dead())) + return; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + if (ftrace_get_parent_ra_addr(self_addr, &ra_off)) + goto out; + + if (!function_graph_enter(old, self_addr, 0, NULL)) + *(unsigned long *)(callsite_sp + ra_off) = return_hooker; + + return; + +out: + ftrace_graph_stop(); + WARN_ON(1); +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c new file mode 100644 index 0000000000000000000000000000000000000000..0f07591cab3096b6cb620cecc56ec508c7dddc31 --- /dev/null +++ b/arch/loongarch/kernel/ftrace_dyn.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on arch/arm64/kernel/ftrace.c + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ + +#include +#include + +#include +#include + +static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, bool validate) +{ + u32 replaced; + + if (validate) { + if (larch_insn_read((void *)pc, &replaced)) + return -EFAULT; + + if (replaced != old) + return -EINVAL; + } + + if (larch_insn_patch_text((void *)pc, new)) + return -EPERM; + + return 0; +} + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + +#ifdef CONFIG_MODULES +static inline int __get_mod(struct module **mod, unsigned long addr) +{ + preempt_disable(); + *mod = __module_text_address(addr); + preempt_enable(); + + if (WARN_ON(!(*mod))) + return -EINVAL; + + return 0; +} + +static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr) +{ + struct plt_entry *plt = mod->arch.ftrace_trampolines; + + if (addr == FTRACE_ADDR) + return &plt[FTRACE_PLT_IDX]; + if (addr == FTRACE_REGS_ADDR && + IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) + return &plt[FTRACE_REGS_PLT_IDX]; + + return NULL; +} + +static unsigned long get_plt_addr(struct module *mod, unsigned long addr) +{ + struct plt_entry *plt; + + plt = get_ftrace_plt(mod, addr); + if (!plt) { + pr_err("ftrace: no module PLT for %ps\n", (void *)addr); + return -EINVAL; + } + + return (unsigned long)plt; +} +#endif + +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr) +{ + u32 old, new; + unsigned long pc; + long offset __maybe_unused; + + pc = rec->ip + LOONGARCH_INSN_SIZE; + +#ifdef CONFIG_MODULES + offset = (long)pc - (long)addr; + + if (offset < -SZ_128M || offset >= SZ_128M) { + int ret; + struct module *mod; + + ret = __get_mod(&mod, pc); + if (ret) + return ret; + + addr = get_plt_addr(mod, addr); + + old_addr = get_plt_addr(mod, old_addr); + } +#endif + + new = larch_insn_gen_bl(pc, addr); + old = larch_insn_gen_bl(pc, old_addr); + + return ftrace_modify_code(pc, old, new, true); +} + +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + u32 new; + unsigned long pc; + + pc = (unsigned long)&ftrace_call; + new = larch_insn_gen_bl(pc, (unsigned long)func); + + return 
ftrace_modify_code(pc, 0, new, false); +} + +/* + * The compiler has inserted 2 NOPs before the regular function prologue. + * T series registers are available and safe because of LoongArch's psABI. + * + * At runtime, we can replace nop with bl to enable ftrace call and replace bl + * with nop to disable ftrace call. The bl requires us to save the original RA + * value, so it saves RA at t0 here. + * + * Details are: + * + * | Compiled | Disabled | Enabled | + * +------------+------------------------+------------------------+ + * | nop | move t0, ra | move t0, ra | + * | nop | nop | bl ftrace_caller | + * | func_body | func_body | func_body | + * + * The RA value will be recovered by ftrace_regs_entry, and restored into RA + * before returning to the regular function prologue. When a function is not + * being traced, the "move t0, ra" is not harmful. + */ + +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) +{ + u32 old, new; + unsigned long pc; + + pc = rec->ip; + old = larch_insn_gen_nop(); + new = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA); + + return ftrace_modify_code(pc, old, new, true); +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + u32 old, new; + unsigned long pc; + long offset __maybe_unused; + + pc = rec->ip + LOONGARCH_INSN_SIZE; + +#ifdef CONFIG_MODULES + offset = (long)pc - (long)addr; + + if (offset < -SZ_128M || offset >= SZ_128M) { + int ret; + struct module *mod; + + ret = __get_mod(&mod, pc); + if (ret) + return ret; + + addr = get_plt_addr(mod, addr); + } +#endif + + old = larch_insn_gen_nop(); + new = larch_insn_gen_bl(pc, addr); + + return ftrace_modify_code(pc, old, new, true); +} + +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) +{ + u32 old, new; + unsigned long pc; + long offset __maybe_unused; + + pc = rec->ip + LOONGARCH_INSN_SIZE; + +#ifdef CONFIG_MODULES + offset = (long)pc - (long)addr; + + if (offset < -SZ_128M || offset >= SZ_128M) { + int ret; + struct module *mod; + + ret = __get_mod(&mod, pc); + if (ret) + return ret; + + addr = get_plt_addr(mod, addr); + } +#endif + + new = larch_insn_gen_nop(); + old = larch_insn_gen_bl(pc, addr); + + return ftrace_modify_code(pc, old, new, true); +} + +void arch_ftrace_update_code(int command) +{ + command |= FTRACE_MAY_SLEEP; + ftrace_modify_all_code(command); +} + +int __init ftrace_dyn_arch_init(void) +{ + return 0; +} + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent) +{ + unsigned long old; + unsigned long return_hooker = (unsigned long)&return_to_handler; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + old = *parent; + + if (!function_graph_enter(old, self_addr, 0, parent)) + *parent = return_hooker; +} + +#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS +void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct ftrace_regs *fregs) +{ + struct pt_regs *regs = &fregs->regs; + unsigned long *parent = (unsigned long *)®s->regs[1]; + + prepare_ftrace_return(ip, (unsigned long *)parent); +} +#else +static int ftrace_modify_graph_caller(bool enable) +{ + u32 branch, nop; + unsigned long pc, func; + extern void ftrace_graph_call(void); + + pc = (unsigned long)&ftrace_graph_call; + func = (unsigned long)&ftrace_graph_caller; + + nop = larch_insn_gen_nop(); + branch = larch_insn_gen_b(pc, func); + + if (enable) + return ftrace_modify_code(pc, nop, branch, true); + else + return ftrace_modify_code(pc, branch, 
nop, true); +} + +int ftrace_enable_ftrace_graph_caller(void) +{ + return ftrace_modify_graph_caller(true); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + return ftrace_modify_graph_caller(false); +} +#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */ +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c index b1df0ec34bd1b35e0bb522b873bb08d8af5dbcab..512579d79b221ccb632cbff1c3140233e3a5dda0 100644 --- a/arch/loongarch/kernel/inst.c +++ b/arch/loongarch/kernel/inst.c @@ -2,8 +2,135 @@ /* * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#include +#include + +#include #include +static DEFINE_RAW_SPINLOCK(patch_lock); + +int larch_insn_read(void *addr, u32 *insnp) +{ + int ret; + u32 val; + + ret = copy_from_kernel_nofault(&val, addr, LOONGARCH_INSN_SIZE); + if (!ret) + *insnp = val; + + return ret; +} + +int larch_insn_write(void *addr, u32 insn) +{ + int ret; + unsigned long flags = 0; + + raw_spin_lock_irqsave(&patch_lock, flags); + ret = copy_to_kernel_nofault(addr, &insn, LOONGARCH_INSN_SIZE); + raw_spin_unlock_irqrestore(&patch_lock, flags); + + return ret; +} + +int larch_insn_patch_text(void *addr, u32 insn) +{ + int ret; + u32 *tp = addr; + + if ((unsigned long)tp & 3) + return -EINVAL; + + ret = larch_insn_write(tp, insn); + if (!ret) + flush_icache_range((unsigned long)tp, + (unsigned long)tp + LOONGARCH_INSN_SIZE); + + return ret; +} + +u32 larch_insn_gen_nop(void) +{ + return INSN_NOP; +} + +u32 larch_insn_gen_b(unsigned long pc, unsigned long dest) +{ + long offset = dest - pc; + unsigned int immediate_l, immediate_h; + union loongarch_instruction insn; + + if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) { + pr_warn("The generated b instruction is out of range.\n"); + return INSN_BREAK; + } + + offset >>= 2; + + immediate_l = offset & 0xffff; + offset >>= 16; + immediate_h = offset & 0x3ff; + + insn.reg0i26_format.opcode = b_op; + insn.reg0i26_format.immediate_l = immediate_l; + insn.reg0i26_format.immediate_h = immediate_h; + + return insn.word; +} + +u32 larch_insn_gen_bl(unsigned long pc, unsigned long dest) +{ + long offset = dest - pc; + unsigned int immediate_l, immediate_h; + union loongarch_instruction insn; + + if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) { + pr_warn("The generated bl instruction is out of range.\n"); + return INSN_BREAK; + } + + offset >>= 2; + + immediate_l = offset & 0xffff; + offset >>= 16; + immediate_h = offset & 0x3ff; + + insn.reg0i26_format.opcode = bl_op; + insn.reg0i26_format.immediate_l = immediate_l; + insn.reg0i26_format.immediate_h = immediate_h; + + return insn.word; +} + +u32 larch_insn_gen_or(enum loongarch_gpr rd, enum loongarch_gpr rj, enum loongarch_gpr rk) +{ + union loongarch_instruction insn; + + insn.reg3_format.opcode = or_op; + insn.reg3_format.rd = rd; + insn.reg3_format.rj = rj; + insn.reg3_format.rk = rk; + + return insn.word; +} + +u32 larch_insn_gen_move(enum loongarch_gpr rd, enum loongarch_gpr rj) +{ + return larch_insn_gen_or(rd, rj, 0); +} + +u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm) +{ + union loongarch_instruction insn; + + insn.reg1i20_format.opcode = lu12iw_op; + insn.reg1i20_format.rd = rd; + insn.reg1i20_format.immediate = imm; + + return insn.word; +} + u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm) { union loongarch_instruction insn; diff --git a/arch/loongarch/kernel/mcount.S b/arch/loongarch/kernel/mcount.S new file mode 100644 index 
0000000000000000000000000000000000000000..8cdc1563cd33f246bf324137116a55e37a2e84f8
--- /dev/null
+++ b/arch/loongarch/kernel/mcount.S
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * LoongArch specific _mcount support
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+
+#include
+#include
+#include
+#include
+
+	.text
+
+#define MCOUNT_S0_OFFSET	(0)
+#define MCOUNT_RA_OFFSET	(SZREG)
+#define MCOUNT_STACK_SIZE	(2 * SZREG)
+
+	.macro MCOUNT_SAVE_REGS
+	PTR_ADDI	sp, sp, -MCOUNT_STACK_SIZE
+	PTR_S	s0, sp, MCOUNT_S0_OFFSET
+	PTR_S	ra, sp, MCOUNT_RA_OFFSET
+	move	s0, a0
+	.endm
+
+	.macro MCOUNT_RESTORE_REGS
+	move	a0, s0
+	PTR_L	ra, sp, MCOUNT_RA_OFFSET
+	PTR_L	s0, sp, MCOUNT_S0_OFFSET
+	PTR_ADDI	sp, sp, MCOUNT_STACK_SIZE
+	.endm
+
+SYM_FUNC_START(_mcount)
+	la.pcrel	t1, ftrace_stub
+	la.pcrel	t2, ftrace_trace_function	/* Prepare t2 for (1) */
+	PTR_L	t2, t2, 0
+	beq	t1, t2, fgraph_trace
+
+	MCOUNT_SAVE_REGS
+
+	move	a0, ra		/* arg0: self return address */
+	move	a1, s0		/* arg1: parent's return address */
+	jirl	ra, t2, 0	/* (1) call *ftrace_trace_function */
+
+	MCOUNT_RESTORE_REGS
+
+fgraph_trace:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	la.pcrel	t1, ftrace_stub
+	la.pcrel	t3, ftrace_graph_return
+	PTR_L	t3, t3, 0
+	bne	t1, t3, ftrace_graph_caller
+	la.pcrel	t1, ftrace_graph_entry_stub
+	la.pcrel	t3, ftrace_graph_entry
+	PTR_L	t3, t3, 0
+	bne	t1, t3, ftrace_graph_caller
+#endif
+
+SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
+	jr	ra
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_INNER_LABEL(ftrace_graph_func, SYM_L_GLOBAL)
+	bl	ftrace_stub
+#endif
+SYM_FUNC_END(_mcount)
+EXPORT_SYMBOL(_mcount)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_FUNC_START(ftrace_graph_caller)
+	MCOUNT_SAVE_REGS
+
+	PTR_ADDI	a0, ra, -4			/* arg0: Callsite self return addr */
+	PTR_ADDI	a1, sp, MCOUNT_STACK_SIZE	/* arg1: Callsite sp */
+	move	a2, s0					/* arg2: Callsite parent ra */
+	bl	prepare_ftrace_return
+
+	MCOUNT_RESTORE_REGS
+	jr	ra
+SYM_FUNC_END(ftrace_graph_caller)
+
+SYM_FUNC_START(return_to_handler)
+	PTR_ADDI	sp, sp, -2 * SZREG
+	PTR_S	a0, sp, 0
+	PTR_S	a1, sp, SZREG
+
+	bl	ftrace_return_to_handler
+
+	/* Restore the real parent address: a0 -> ra */
+	move	ra, a0
+
+	PTR_L	a0, sp, 0
+	PTR_L	a1, sp, SZREG
+	PTR_ADDI	sp, sp, 2 * SZREG
+	jr	ra
+SYM_FUNC_END(return_to_handler)
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/loongarch/kernel/mcount_dyn.S b/arch/loongarch/kernel/mcount_dyn.S
new file mode 100644
index 0000000000000000000000000000000000000000..bbabf06244c2670fecd2132e05d8564c21b12946
--- /dev/null
+++ b/arch/loongarch/kernel/mcount_dyn.S
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+
+#include
+#include
+#include
+#include
+
+	.text
+/*
+ * Due to -fpatchable-function-entry=2, the compiler has inserted 2 NOPs
+ * before the regular C function prologue. When the PC arrives here, the
+ * last 2 instructions are:
+ *	move		t0, ra
+ *	bl		callsite	(for modules, callsite is a trampoline)
+ *
+ * A module's trampoline is as follows:
+ *	lu12i.w		t1, callsite[31:12]
+ *	lu32i.d		t1, callsite[51:32]
+ *	lu52i.d		t1, t1, callsite[63:52]
+ *	jirl		zero, t1, callsite[11:0] >> 2
+ *
+ * See arch/loongarch/kernel/ftrace_dyn.c for details. Note that the T series
+ * regs are available and safe here because each C function follows the
+ * LoongArch psABI as well.
+ */
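A runnable toy of the two patch states described above (illustrative, not
part of the patch). The nop word comes from INSN_NOP in this patch; the
"move t0, ra" constant and the bl field layout mirror larch_insn_gen_or()
and larch_insn_gen_bl() above, but treat the exact encodings here as
assumptions rather than reference values.

	#include <stdint.h>
	#include <stdio.h>

	#define INSN_NOP	0x03400000u	/* andi $zero, $zero, 0 */

	int main(void)
	{
		/* the two slots emitted by -fpatchable-function-entry=2 */
		uint32_t entry[2] = { INSN_NOP, INSN_NOP };
		int64_t off = 0x2000 >> 2;	/* illustrative word offset to the tracer */

		/* ftrace_init_nop(): slot 0 becomes "move t0, ra" */
		entry[0] = 0x0015002cu;		/* or $t0, $ra, $zero (assumed encoding) */

		/* ftrace_make_call(): slot 1 becomes "bl <tracer>" */
		entry[1] = 0x54000000u | ((off & 0xffff) << 10) | ((off >> 16) & 0x3ff);

		printf("enabled entry: %08x %08x\n", entry[0], entry[1]);
		return 0;
	}
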
+
+	.macro ftrace_regs_entry allregs=0
+	PTR_ADDI	sp, sp, -PT_SIZE
+	PTR_S	t0, sp, PT_R1		/* Save parent ra at PT_R1(RA) */
+	PTR_S	a0, sp, PT_R4
+	PTR_S	a1, sp, PT_R5
+	PTR_S	a2, sp, PT_R6
+	PTR_S	a3, sp, PT_R7
+	PTR_S	a4, sp, PT_R8
+	PTR_S	a5, sp, PT_R9
+	PTR_S	a6, sp, PT_R10
+	PTR_S	a7, sp, PT_R11
+	PTR_S	fp, sp, PT_R22
+	.if \allregs
+	PTR_S	tp, sp, PT_R2
+	PTR_S	t0, sp, PT_R12
+	PTR_S	t1, sp, PT_R13
+	PTR_S	t2, sp, PT_R14
+	PTR_S	t3, sp, PT_R15
+	PTR_S	t4, sp, PT_R16
+	PTR_S	t5, sp, PT_R17
+	PTR_S	t6, sp, PT_R18
+	PTR_S	t7, sp, PT_R19
+	PTR_S	t8, sp, PT_R20
+	PTR_S	u0, sp, PT_R21
+	PTR_S	s0, sp, PT_R23
+	PTR_S	s1, sp, PT_R24
+	PTR_S	s2, sp, PT_R25
+	PTR_S	s3, sp, PT_R26
+	PTR_S	s4, sp, PT_R27
+	PTR_S	s5, sp, PT_R28
+	PTR_S	s6, sp, PT_R29
+	PTR_S	s7, sp, PT_R30
+	PTR_S	s8, sp, PT_R31
+	/* Clear it for later use as a flag sometimes. */
+	PTR_S	zero, sp, PT_R0
+	.endif
+	PTR_S	ra, sp, PT_ERA		/* Save trace function ra at PT_ERA */
+	PTR_ADDI	t8, sp, PT_SIZE
+	PTR_S	t8, sp, PT_R3
+	.endm
+
+SYM_FUNC_START(ftrace_stub)
+	jr	ra
+SYM_FUNC_END(ftrace_stub)
+
+SYM_CODE_START(ftrace_common)
+	PTR_ADDI	a0, ra, -8	/* arg0: ip */
+	move	a1, t0			/* arg1: parent_ip */
+	la.pcrel	t1, function_trace_op
+	PTR_L	a2, t1, 0		/* arg2: op */
+	move	a3, sp			/* arg3: regs */
+
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
+	bl	ftrace_stub
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
+	nop				/* b ftrace_graph_caller */
+#endif
+
+/*
+ * As we don't use S series regs in this assembly code and all calls are to
+ * C functions, which save S series regs themselves, there is no need to
+ * restore S series regs. The T series is available and safe at the
+ * callsite, so there is no need to restore the T series regs.
+ */
+ftrace_common_return:
+	PTR_L	ra, sp, PT_R1
+	PTR_L	a0, sp, PT_R4
+	PTR_L	a1, sp, PT_R5
+	PTR_L	a2, sp, PT_R6
+	PTR_L	a3, sp, PT_R7
+	PTR_L	a4, sp, PT_R8
+	PTR_L	a5, sp, PT_R9
+	PTR_L	a6, sp, PT_R10
+	PTR_L	a7, sp, PT_R11
+	PTR_L	fp, sp, PT_R22
+	PTR_L	t0, sp, PT_ERA
+	PTR_ADDI	sp, sp, PT_SIZE
+	jr	t0
+SYM_CODE_END(ftrace_common)
+
+SYM_CODE_START(ftrace_caller)
+	ftrace_regs_entry allregs=0
+	b	ftrace_common
+SYM_CODE_END(ftrace_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+SYM_CODE_START(ftrace_regs_caller)
+	ftrace_regs_entry allregs=1
+	b	ftrace_common
+SYM_CODE_END(ftrace_regs_caller)
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_CODE_START(ftrace_graph_caller)
+	PTR_L	a0, sp, PT_ERA
+	PTR_ADDI	a0, a0, -8	/* arg0: self_addr */
+	PTR_ADDI	a1, sp, PT_R1	/* arg1: parent */
+	bl	prepare_ftrace_return
+	b	ftrace_common_return
+SYM_CODE_END(ftrace_graph_caller)
+
+SYM_CODE_START(return_to_handler)
+	/* Save return value regs */
+	PTR_ADDI	sp, sp, -2 * SZREG
+	PTR_S	a0, sp, 0
+	PTR_S	a1, sp, SZREG
+
+	move	a0, zero
+	bl	ftrace_return_to_handler
+	move	ra, a0
+
+	/* Restore return value regs */
+	PTR_L	a0, sp, 0
+	PTR_L	a1, sp, SZREG
+	PTR_ADDI	sp, sp, 2 * SZREG
+
+	jr	ra
+SYM_CODE_END(return_to_handler)
+#endif
diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c
index d296a70b758fd60a5963d1298bbd1189e645c93c..d4dbcda1c4b0a42a7d7de52fe9f457a22d4940cb 100644
--- a/arch/loongarch/kernel/module-sections.c
+++ b/arch/loongarch/kernel/module-sections.c
@@ -6,18 +6,19 @@
 #include
 #include
 #include
+#include
 
-Elf_Addr module_emit_got_entry(struct module *mod, Elf_Addr val)
+Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
 {
 	struct mod_section *got_sec = &mod->arch.got;
 	int i = 
got_sec->num_entries; - struct got_entry *got = get_got_entry(val, got_sec); + struct got_entry *got = get_got_entry(val, sechdrs, got_sec); if (got) return (Elf_Addr)got; /* There is no GOT entry for val yet, create a new one. */ - got = (struct got_entry *)got_sec->shdr->sh_addr; + got = (struct got_entry *)sechdrs[got_sec->shndx].sh_addr; got[i] = emit_got_entry(val); got_sec->num_entries++; @@ -33,12 +34,12 @@ Elf_Addr module_emit_got_entry(struct module *mod, Elf_Addr val) return (Elf_Addr)&got[i]; } -Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Addr val) +Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val) { int nr; struct mod_section *plt_sec = &mod->arch.plt; struct mod_section *plt_idx_sec = &mod->arch.plt_idx; - struct plt_entry *plt = get_plt_entry(val, plt_sec, plt_idx_sec); + struct plt_entry *plt = get_plt_entry(val, sechdrs, plt_sec, plt_idx_sec); struct plt_idx_entry *plt_idx; if (plt) @@ -47,9 +48,9 @@ Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Addr val) nr = plt_sec->num_entries; /* There is no duplicate entry, create a new one */ - plt = (struct plt_entry *)plt_sec->shdr->sh_addr; + plt = (struct plt_entry *)sechdrs[plt_sec->shndx].sh_addr; plt[nr] = emit_plt_entry(val); - plt_idx = (struct plt_idx_entry *)plt_idx_sec->shdr->sh_addr; + plt_idx = (struct plt_idx_entry *)sechdrs[plt_idx_sec->shndx].sh_addr; plt_idx[nr] = emit_plt_idx_entry(val); plt_sec->num_entries++; @@ -103,28 +104,31 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, struct module *mod) { unsigned int i, num_plts = 0, num_gots = 0; + Elf_Shdr *got_sec, *plt_sec, *plt_idx_sec, *tramp = NULL; /* * Find the empty .plt sections. */ for (i = 0; i < ehdr->e_shnum; i++) { if (!strcmp(secstrings + sechdrs[i].sh_name, ".got")) - mod->arch.got.shdr = sechdrs + i; + mod->arch.got.shndx = i; else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt")) - mod->arch.plt.shdr = sechdrs + i; + mod->arch.plt.shndx = i; else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt.idx")) - mod->arch.plt_idx.shdr = sechdrs + i; + mod->arch.plt_idx.shndx = i; + else if (!strcmp(secstrings + sechdrs[i].sh_name, ".ftrace_trampoline")) + tramp = sechdrs + i; } - if (!mod->arch.got.shdr) { + if (!mod->arch.got.shndx) { pr_err("%s: module GOT section(s) missing\n", mod->name); return -ENOEXEC; } - if (!mod->arch.plt.shdr) { + if (!mod->arch.plt.shndx) { pr_err("%s: module PLT section(s) missing\n", mod->name); return -ENOEXEC; } - if (!mod->arch.plt_idx.shdr) { + if (!mod->arch.plt_idx.shndx) { pr_err("%s: module PLT.IDX section(s) missing\n", mod->name); return -ENOEXEC; } @@ -145,26 +149,36 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, count_max_entries(relas, num_rela, &num_plts, &num_gots); } - mod->arch.got.shdr->sh_type = SHT_NOBITS; - mod->arch.got.shdr->sh_flags = SHF_ALLOC; - mod->arch.got.shdr->sh_addralign = L1_CACHE_BYTES; - mod->arch.got.shdr->sh_size = (num_gots + 1) * sizeof(struct got_entry); + got_sec = sechdrs + mod->arch.got.shndx; + got_sec->sh_type = SHT_NOBITS; + got_sec->sh_flags = SHF_ALLOC; + got_sec->sh_addralign = L1_CACHE_BYTES; + got_sec->sh_size = (num_gots + 1) * sizeof(struct got_entry); mod->arch.got.num_entries = 0; mod->arch.got.max_entries = num_gots; - mod->arch.plt.shdr->sh_type = SHT_NOBITS; - mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC; - mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES; - mod->arch.plt.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_entry); + plt_sec = 
sechdrs + mod->arch.plt.shndx; + plt_sec->sh_type = SHT_NOBITS; + plt_sec->sh_flags = SHF_EXECINSTR | SHF_ALLOC; + plt_sec->sh_addralign = L1_CACHE_BYTES; + plt_sec->sh_size = (num_plts + 1) * sizeof(struct plt_entry); mod->arch.plt.num_entries = 0; mod->arch.plt.max_entries = num_plts; - mod->arch.plt_idx.shdr->sh_type = SHT_NOBITS; - mod->arch.plt_idx.shdr->sh_flags = SHF_ALLOC; - mod->arch.plt_idx.shdr->sh_addralign = L1_CACHE_BYTES; - mod->arch.plt_idx.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_idx_entry); + plt_idx_sec = sechdrs + mod->arch.plt_idx.shndx; + plt_idx_sec->sh_type = SHT_NOBITS; + plt_idx_sec->sh_flags = SHF_ALLOC; + plt_idx_sec->sh_addralign = L1_CACHE_BYTES; + plt_idx_sec->sh_size = (num_plts + 1) * sizeof(struct plt_idx_entry); mod->arch.plt_idx.num_entries = 0; mod->arch.plt_idx.max_entries = num_plts; + if (tramp) { + tramp->sh_type = SHT_NOBITS; + tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC; + tramp->sh_addralign = __alignof__(struct plt_entry); + tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry); + } + return 0; } diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c index 097595b2fc14bce0fc0ee059a855e71c7233eca0..b8b86088b2dd2dbda770bfe30e6954b592d6c4ea 100644 --- a/arch/loongarch/kernel/module.c +++ b/arch/loongarch/kernel/module.c @@ -15,8 +15,11 @@ #include #include #include +#include #include #include +#include +#include static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top) { @@ -98,16 +101,17 @@ static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Add return 0; } -static int apply_r_larch_sop_push_plt_pcrel(struct module *mod, u32 *location, Elf_Addr v, +static int apply_r_larch_sop_push_plt_pcrel(struct module *mod, + Elf_Shdr *sechdrs, u32 *location, Elf_Addr v, s64 *rela_stack, size_t *rela_stack_top, unsigned int type) { ptrdiff_t offset = (void *)v - (void *)location; if (offset >= SZ_128M) - v = module_emit_plt_entry(mod, v); + v = module_emit_plt_entry(mod, sechdrs, v); if (offset < -SZ_128M) - v = module_emit_plt_entry(mod, v); + v = module_emit_plt_entry(mod, sechdrs, v); return apply_r_larch_sop_push_pcrel(mod, location, v, rela_stack, rela_stack_top, type); } @@ -271,17 +275,18 @@ static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v, } } -static int apply_r_larch_b26(struct module *mod, u32 *location, Elf_Addr v, +static int apply_r_larch_b26(struct module *mod, + Elf_Shdr *sechdrs, u32 *location, Elf_Addr v, s64 *rela_stack, size_t *rela_stack_top, unsigned int type) { ptrdiff_t offset = (void *)v - (void *)location; union loongarch_instruction *insn = (union loongarch_instruction *)location; if (offset >= SZ_128M) - v = module_emit_plt_entry(mod, v); + v = module_emit_plt_entry(mod, sechdrs, v); if (offset < -SZ_128M) - v = module_emit_plt_entry(mod, v); + v = module_emit_plt_entry(mod, sechdrs, v); offset = (void *)v - (void *)location; @@ -338,10 +343,11 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v, return 0; } -static int apply_r_larch_got_pc(struct module *mod, u32 *location, Elf_Addr v, +static int apply_r_larch_got_pc(struct module *mod, + Elf_Shdr *sechdrs, u32 *location, Elf_Addr v, s64 *rela_stack, size_t *rela_stack_top, unsigned int type) { - Elf_Addr got = module_emit_got_entry(mod, v); + Elf_Addr got = module_emit_got_entry(mod, sechdrs, v); if (!got) return -EINVAL; @@ -386,13 +392,10 @@ static reloc_rela_handler reloc_rela_handlers[] = { [R_LARCH_SOP_PUSH_PCREL] = 
apply_r_larch_sop_push_pcrel, [R_LARCH_SOP_PUSH_ABSOLUTE] = apply_r_larch_sop_push_absolute, [R_LARCH_SOP_PUSH_DUP] = apply_r_larch_sop_push_dup, - [R_LARCH_SOP_PUSH_PLT_PCREL] = apply_r_larch_sop_push_plt_pcrel, [R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE] = apply_r_larch_sop, [R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field, [R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub, - [R_LARCH_B26] = apply_r_larch_b26, [R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12] = apply_r_larch_pcala, - [R_LARCH_GOT_PC_HI20...R_LARCH_GOT_PC_LO12] = apply_r_larch_got_pc, }; int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, @@ -443,7 +446,22 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, sym->st_value, rel[i].r_addend, (u64)location); v = sym->st_value + rel[i].r_addend; - err = handler(mod, location, v, rela_stack, &rela_stack_top, type); + switch (type) { + case R_LARCH_B26: + err = apply_r_larch_b26(mod, sechdrs, location, + v, rela_stack, &rela_stack_top, type); + break; + case R_LARCH_GOT_PC_HI20...R_LARCH_GOT_PC_LO12: + err = apply_r_larch_got_pc(mod, sechdrs, location, + v, rela_stack, &rela_stack_top, type); + break; + case R_LARCH_SOP_PUSH_PLT_PCREL: + err = apply_r_larch_sop_push_plt_pcrel(mod, sechdrs, location, + v, rela_stack, &rela_stack_top, type); + break; + default: + err = handler(mod, location, v, rela_stack, &rela_stack_top, type); + } if (err) return err; } @@ -456,3 +474,36 @@ void *module_alloc(unsigned long size) return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0)); } + +static void module_init_ftrace_plt(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, struct module *mod) +{ +#ifdef CONFIG_DYNAMIC_FTRACE + struct plt_entry *ftrace_plts; + + ftrace_plts = (void *)sechdrs->sh_addr; + + ftrace_plts[FTRACE_PLT_IDX] = emit_plt_entry(FTRACE_ADDR); + + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) + ftrace_plts[FTRACE_REGS_PLT_IDX] = emit_plt_entry(FTRACE_REGS_ADDR); + + mod->arch.ftrace_trampolines = ftrace_plts; +#endif +} + +int module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, struct module *mod) +{ + const Elf_Shdr *s, *se; + const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { + if (!strcmp(".altinstructions", secstrs + s->sh_name)) + apply_alternatives((void *)s->sh_addr, (void *)s->sh_addr + s->sh_size); + if (!strcmp(".ftrace_trampoline", secstrs + s->sh_name)) + module_init_ftrace_plt(hdr, s, mod); + } + + return 0; +} diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c index a13f92593cfdad89294abe8d37582b40c2c12954..708665895b47d314df2f13c13db75c021f7b198a 100644 --- a/arch/loongarch/kernel/numa.c +++ b/arch/loongarch/kernel/numa.c @@ -78,7 +78,7 @@ void __init pcpu_populate_pte(unsigned long addr) new = memblock_alloc(PAGE_SIZE, PAGE_SIZE); pgd_populate(&init_mm, pgd, new); #ifndef __PAGETABLE_PUD_FOLDED - pud_init((unsigned long)new, (unsigned long)invalid_pmd_table); + pud_init(new); #endif } @@ -89,7 +89,7 @@ void __init pcpu_populate_pte(unsigned long addr) new = memblock_alloc(PAGE_SIZE, PAGE_SIZE); pud_populate(&init_mm, pud, new); #ifndef __PAGETABLE_PMD_FOLDED - pmd_init((unsigned long)new, (unsigned long)invalid_pte_table); + pmd_init(new); #endif } @@ -388,6 +388,21 @@ static void __init numa_default_distance(void) } } +/* + * fake_numa_init() - For Non-ACPI systems + * Return: 0 on success, -errno on 
failure. + */ +static int __init fake_numa_init(void) +{ + phys_addr_t start = memblock_start_of_DRAM(); + phys_addr_t end = memblock_end_of_DRAM() - 1; + + node_set(0, numa_nodes_parsed); + pr_info("Faking a node at [mem %pap-%pap]\n", &start, &end); + + return numa_add_memblk(0, start, end + 1); +} + int __init init_numa_memory(void) { int i; @@ -404,7 +419,7 @@ int __init init_numa_memory(void) memset(&numa_meminfo, 0, sizeof(numa_meminfo)); /* Parse SRAT and SLIT if provided by firmware. */ - ret = acpi_numa_init(); + ret = acpi_disabled ? fake_numa_init() : acpi_numa_init(); if (ret < 0) return ret; diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c index d61c9f465b9520842a3b004cdf07cafaf3a08f24..c583b1ef1f44cefaada16d0d9c450b2019515ec9 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -47,6 +47,12 @@ #include #include +#ifdef CONFIG_STACKPROTECTOR +#include +unsigned long __stack_chk_guard __read_mostly; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + /* * Idle related variables and functions */ diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c index 8c82021eb2f447d867560a77b3f5e6c276804bf4..1ef8c63835351ba7b04eb4838464927c1ac1c32c 100644 --- a/arch/loongarch/kernel/reset.c +++ b/arch/loongarch/kernel/reset.c @@ -15,6 +15,7 @@ #include #include #include +#include void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); @@ -41,6 +42,10 @@ void machine_power_off(void) #ifdef CONFIG_SMP preempt_disable(); smp_send_stop(); +#endif +#ifdef CONFIG_PM + if (!acpi_disabled) + enable_pci_wakeup(); #endif do_kernel_power_off(); #ifdef CONFIG_EFI diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index ae436def7ee98792d875317607fe6fe45ea33e84..4344502c0b31780677a84cd5bb117004336df712 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -28,10 +28,16 @@ #include #include #include +#include +#include +#include +#include #include #include +#include #include +#include #include #include #include @@ -67,6 +73,7 @@ static const char dmi_empty_string[] = " "; * * These are initialized so they are in the .data section */ +char init_command_line[COMMAND_LINE_SIZE] __initdata; static int num_standard_resources; static struct resource *standard_resources; @@ -80,6 +87,11 @@ const char *get_system_type(void) return "generic-loongson-machine"; } +void __init check_bugs(void) +{ + alternative_instructions(); +} + static const char *dmi_string_parse(const struct dmi_header *dm, u8 s) { const u8 *bp = ((u8 *) dm) + dm->length; @@ -246,6 +258,58 @@ static void __init arch_parse_crashkernel(void) #endif } +static void __init fdt_setup(void) +{ +#ifdef CONFIG_OF_EARLY_FLATTREE + void *fdt_pointer; + + /* ACPI-based systems do not require parsing fdt */ + if (acpi_os_get_root_pointer()) + return; + + /* Look for a device tree configuration table entry */ + fdt_pointer = efi_fdt_pointer(); + if (!fdt_pointer || fdt_check_header(fdt_pointer)) + return; + + early_init_dt_scan(fdt_pointer); + early_init_fdt_reserve_self(); + + max_low_pfn = PFN_PHYS(memblock_end_of_DRAM()); +#endif +} + +static void __init bootcmdline_init(char **cmdline_p) +{ + /* + * If CONFIG_CMDLINE_FORCE is enabled then initializing the command line + * is trivial - we simply use the built-in command line unconditionally & + * unmodified. 
+ */ + if (IS_ENABLED(CONFIG_CMDLINE_FORCE)) { + strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); + goto out; + } + +#ifdef CONFIG_OF_FLATTREE + /* + * If CONFIG_CMDLINE_BOOTLOADER is enabled and we are on an FDT-based system, + * the boot_command_line will be overwritten by early_init_dt_scan_chosen(). + * So we need to append init_command_line (the original copy of boot_command_line) + * to boot_command_line. + */ + if (initial_boot_params) { + if (boot_command_line[0]) + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + + strlcat(boot_command_line, init_command_line, COMMAND_LINE_SIZE); + } +#endif + +out: + *cmdline_p = boot_command_line; +} + void __init platform_init(void) { arch_reserve_vmcore(); @@ -258,6 +322,7 @@ void __init platform_init(void) acpi_gbl_use_default_register_widths = false; acpi_boot_table_init(); #endif + unflatten_and_copy_device_tree(); #ifdef CONFIG_NUMA init_numa_memory(); @@ -290,6 +355,8 @@ static void __init arch_mem_init(char **cmdline_p) check_kernel_sections_mem(); + early_init_fdt_scan_reserved_mem(); + /* * In order to reduce the possibility of kernel panic when failed to * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate @@ -304,6 +371,10 @@ static void __init arch_mem_init(char **cmdline_p) dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); + /* Reserve for hibernation. */ + register_nosave_region(PFN_DOWN(__pa_symbol(&__nosave_begin)), + PFN_UP(__pa_symbol(&__nosave_end))); + memblock_dump_all(); early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn)); @@ -363,6 +434,81 @@ static void __init resource_init(void) #endif } +static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, + resource_size_t hw_start, resource_size_t size) +{ + int ret = 0; + unsigned long vaddr; + struct logic_pio_hwaddr *range; + + range = kzalloc(sizeof(*range), GFP_ATOMIC); + if (!range) + return -ENOMEM; + + range->fwnode = fwnode; + range->size = size = round_up(size, PAGE_SIZE); + range->hw_start = hw_start; + range->flags = LOGIC_PIO_CPU_MMIO; + + ret = logic_pio_register_range(range); + if (ret) { + kfree(range); + return ret; + } + + /* Legacy ISA must be placed at the start of PCI_IOBASE */ + if (range->io_start != 0) { + logic_pio_unregister_range(range); + kfree(range); + return -EINVAL; + } + + vaddr = (unsigned long)(PCI_IOBASE + range->io_start); + ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL)); + + return 0; +} + +static __init int arch_reserve_pio_range(void) +{ + struct device_node *np; + + for_each_node_by_name(np, "isa") { + struct of_range range; + struct of_range_parser parser; + + pr_info("ISA Bridge: %pOF\n", np); + + if (of_range_parser_init(&parser, np)) { + pr_info("Failed to parse resources.\n"); + of_node_put(np); + break; + } + + for_each_of_range(&parser, &range) { + switch (range.flags & IORESOURCE_TYPE_BITS) { + case IORESOURCE_IO: + pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n", + range.cpu_addr, + range.cpu_addr + range.size - 1, + range.bus_addr); + if (add_legacy_isa_io(&np->fwnode, range.cpu_addr, range.size)) + pr_warn("Failed to reserve legacy IO in Logic PIO\n"); + break; + case IORESOURCE_MEM: + pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx\n", + range.cpu_addr, + range.cpu_addr + range.size - 1, + range.bus_addr); + break; + } + } + } + + return 0; +} +arch_initcall(arch_reserve_pio_range); + static int __init reserve_memblock_reserved_regions(void) { u64 i, j; @@ -415,12 +561,13 @@ static void __init prefill_possible_map(void) void __init setup_arch(char 
**cmdline_p) { cpu_probe(); - *cmdline_p = boot_command_line; init_environ(); efi_init(); + fdt_setup(); memblock_init(); pagetable_init(); + bootcmdline_init(cmdline_p); parse_early_param(); reserve_initrd_mem(); diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 14508d429ffa32b4bc6951d2d1d1ad352cd540e8..8c6e227cb29df47b8d143dbf9766229a785813b9 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -180,8 +181,42 @@ irqreturn_t loongson_ipi_interrupt(int irq, void *dev) return IRQ_HANDLED; } +static void __init fdt_smp_setup(void) +{ +#ifdef CONFIG_OF + unsigned int cpu, cpuid; + struct device_node *node = NULL; + + for_each_of_cpu_node(node) { + if (!of_device_is_available(node)) + continue; + + cpuid = of_get_cpu_hwid(node, 0); + if (cpuid >= nr_cpu_ids) + continue; + + if (cpuid == loongson_sysconf.boot_cpu_id) { + cpu = 0; + numa_add_cpu(cpu); + } else { + cpu = cpumask_next_zero(-1, cpu_present_mask); + } + + num_processors++; + set_cpu_possible(cpu, true); + set_cpu_present(cpu, true); + __cpu_number_map[cpuid] = cpu; + __cpu_logical_map[cpu] = cpuid; + } + + loongson_sysconf.nr_cpus = num_processors; +#endif +} + void __init loongson_smp_setup(void) { + fdt_smp_setup(); + cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package; cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package; diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S index 202a163cb32f6c1a0a8db030f28410aa526912fb..31dd8199b2453508f746db2ba40f997464f4c207 100644 --- a/arch/loongarch/kernel/switch.S +++ b/arch/loongarch/kernel/switch.S @@ -23,6 +23,11 @@ SYM_FUNC_START(__switch_to) stptr.d ra, a0, THREAD_REG01 stptr.d a3, a0, THREAD_SCHED_RA stptr.d a4, a0, THREAD_SCHED_CFA +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) + la t7, __stack_chk_guard + LONG_L t8, a1, TASK_STACK_CANARY + LONG_S t8, t7, 0 +#endif move tp, a2 cpu_restore_nonscratch a1 diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index 786735dcc8d678ecc6022ca8e8060a0bd4a4153a..a6576dea590c0b26644ce3eebd392b17a1477490 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -115,12 +115,17 @@ static unsigned long __init get_loops_per_jiffy(void) return lpj; } -static long init_timeval; +static long init_offset __nosavedata; + +void save_counter(void) +{ + init_offset = drdtime(); +} void sync_counter(void) { /* Ensure counter begins at 0 */ - csr_write64(-init_timeval, LOONGARCH_CSR_CNTC); + csr_write64(init_offset, LOONGARCH_CSR_CNTC); } static int get_timer_irq(void) @@ -219,7 +224,7 @@ void __init time_init(void) else const_clock_freq = calc_const_freq(); - init_timeval = drdtime() - csr_read64(LOONGARCH_CSR_CNTC); + init_offset = -(drdtime() - csr_read64(LOONGARCH_CSR_CNTC)); constant_clockevent_init(); constant_clocksource_init(); diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c index 1a4dce84ebc60f5208e19893d8d4b943b47c5504..7ea62faeeadb561362b0228d4937bedcb7466e51 100644 --- a/arch/loongarch/kernel/traps.c +++ b/arch/loongarch/kernel/traps.c @@ -368,13 +368,40 @@ asmlinkage void noinstr do_ade(struct pt_regs *regs) irqentry_exit(regs, state); } +/* sysctl hooks */ +int unaligned_enabled __read_mostly = 1; /* Enabled by default */ +int no_unaligned_warning __read_mostly = 1; /* Only 1 warning by default */ + asmlinkage void noinstr do_ale(struct pt_regs *regs) { + 
unsigned int *pc; irqentry_state_t state = irqentry_enter(regs); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr); + + /* + * Did we catch a fault trying to load an instruction? + */ + if (regs->csr_badvaddr == regs->csr_era) + goto sigbus; + if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) + goto sigbus; + if (!unaligned_enabled) + goto sigbus; + if (!no_unaligned_warning) + show_registers(regs); + + pc = (unsigned int *)exception_era(regs); + + emulate_load_store_insn(regs, (void __user *)regs->csr_badvaddr, pc); + + goto out; + +sigbus: die_if_kernel("Kernel ale access", regs); force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr); +out: irqentry_exit(regs, state); } diff --git a/arch/loongarch/kernel/unaligned.c b/arch/loongarch/kernel/unaligned.c new file mode 100644 index 0000000000000000000000000000000000000000..bdff825d29ef4dd252430cbd0f5e923fb102cb5f --- /dev/null +++ b/arch/loongarch/kernel/unaligned.c @@ -0,0 +1,499 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Handle unaligned accesses by emulation. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2014 Imagination Technologies Ltd. + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "access-helper.h" + +#ifdef CONFIG_DEBUG_FS +static u32 unaligned_instructions_user; +static u32 unaligned_instructions_kernel; +#endif + +static inline unsigned long read_fpr(unsigned int idx) +{ +#define READ_FPR(idx, __value) \ + __asm__ __volatile__("movfr2gr.d %0, $f"#idx"\n\t" : "=r"(__value)); + + unsigned long __value; + + switch (idx) { + case 0: + READ_FPR(0, __value); + break; + case 1: + READ_FPR(1, __value); + break; + case 2: + READ_FPR(2, __value); + break; + case 3: + READ_FPR(3, __value); + break; + case 4: + READ_FPR(4, __value); + break; + case 5: + READ_FPR(5, __value); + break; + case 6: + READ_FPR(6, __value); + break; + case 7: + READ_FPR(7, __value); + break; + case 8: + READ_FPR(8, __value); + break; + case 9: + READ_FPR(9, __value); + break; + case 10: + READ_FPR(10, __value); + break; + case 11: + READ_FPR(11, __value); + break; + case 12: + READ_FPR(12, __value); + break; + case 13: + READ_FPR(13, __value); + break; + case 14: + READ_FPR(14, __value); + break; + case 15: + READ_FPR(15, __value); + break; + case 16: + READ_FPR(16, __value); + break; + case 17: + READ_FPR(17, __value); + break; + case 18: + READ_FPR(18, __value); + break; + case 19: + READ_FPR(19, __value); + break; + case 20: + READ_FPR(20, __value); + break; + case 21: + READ_FPR(21, __value); + break; + case 22: + READ_FPR(22, __value); + break; + case 23: + READ_FPR(23, __value); + break; + case 24: + READ_FPR(24, __value); + break; + case 25: + READ_FPR(25, __value); + break; + case 26: + READ_FPR(26, __value); + break; + case 27: + READ_FPR(27, __value); + break; + case 28: + READ_FPR(28, __value); + break; + case 29: + READ_FPR(29, __value); + break; + case 30: + READ_FPR(30, __value); + break; + case 31: + READ_FPR(31, __value); + break; + default: + panic("unexpected idx '%d'", idx); + } +#undef READ_FPR + return __value; +} + +static inline void write_fpr(unsigned int idx, unsigned long value) +{ +#define WRITE_FPR(idx, value) \ + __asm__ __volatile__("movgr2fr.d $f"#idx", %0\n\t" :: "r"(value)); + + switch (idx) { + case 0: + WRITE_FPR(0, value); + break; + case 1: + 
WRITE_FPR(1, value); + break; + case 2: + WRITE_FPR(2, value); + break; + case 3: + WRITE_FPR(3, value); + break; + case 4: + WRITE_FPR(4, value); + break; + case 5: + WRITE_FPR(5, value); + break; + case 6: + WRITE_FPR(6, value); + break; + case 7: + WRITE_FPR(7, value); + break; + case 8: + WRITE_FPR(8, value); + break; + case 9: + WRITE_FPR(9, value); + break; + case 10: + WRITE_FPR(10, value); + break; + case 11: + WRITE_FPR(11, value); + break; + case 12: + WRITE_FPR(12, value); + break; + case 13: + WRITE_FPR(13, value); + break; + case 14: + WRITE_FPR(14, value); + break; + case 15: + WRITE_FPR(15, value); + break; + case 16: + WRITE_FPR(16, value); + break; + case 17: + WRITE_FPR(17, value); + break; + case 18: + WRITE_FPR(18, value); + break; + case 19: + WRITE_FPR(19, value); + break; + case 20: + WRITE_FPR(20, value); + break; + case 21: + WRITE_FPR(21, value); + break; + case 22: + WRITE_FPR(22, value); + break; + case 23: + WRITE_FPR(23, value); + break; + case 24: + WRITE_FPR(24, value); + break; + case 25: + WRITE_FPR(25, value); + break; + case 26: + WRITE_FPR(26, value); + break; + case 27: + WRITE_FPR(27, value); + break; + case 28: + WRITE_FPR(28, value); + break; + case 29: + WRITE_FPR(29, value); + break; + case 30: + WRITE_FPR(30, value); + break; + case 31: + WRITE_FPR(31, value); + break; + default: + panic("unexpected idx '%d'", idx); + } +#undef WRITE_FPR +} + +void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned int *pc) +{ + bool fp = false; + bool sign, write; + bool user = user_mode(regs); + unsigned int res, size = 0; + unsigned long value = 0; + union loongarch_instruction insn; + + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); + + __get_inst(&insn.word, pc, user); + + switch (insn.reg2i12_format.opcode) { + case ldh_op: + size = 2; + sign = true; + write = false; + break; + case ldhu_op: + size = 2; + sign = false; + write = false; + break; + case sth_op: + size = 2; + sign = true; + write = true; + break; + case ldw_op: + size = 4; + sign = true; + write = false; + break; + case ldwu_op: + size = 4; + sign = false; + write = false; + break; + case stw_op: + size = 4; + sign = true; + write = true; + break; + case ldd_op: + size = 8; + sign = true; + write = false; + break; + case std_op: + size = 8; + sign = true; + write = true; + break; + case flds_op: + size = 4; + fp = true; + sign = true; + write = false; + break; + case fsts_op: + size = 4; + fp = true; + sign = true; + write = true; + break; + case fldd_op: + size = 8; + fp = true; + sign = true; + write = false; + break; + case fstd_op: + size = 8; + fp = true; + sign = true; + write = true; + break; + } + + switch (insn.reg2i14_format.opcode) { + case ldptrw_op: + size = 4; + sign = true; + write = false; + break; + case stptrw_op: + size = 4; + sign = true; + write = true; + break; + case ldptrd_op: + size = 8; + sign = true; + write = false; + break; + case stptrd_op: + size = 8; + sign = true; + write = true; + break; + } + + switch (insn.reg3_format.opcode) { + case ldxh_op: + size = 2; + sign = true; + write = false; + break; + case ldxhu_op: + size = 2; + sign = false; + write = false; + break; + case stxh_op: + size = 2; + sign = true; + write = true; + break; + case ldxw_op: + size = 4; + sign = true; + write = false; + break; + case ldxwu_op: + size = 4; + sign = false; + write = false; + break; + case stxw_op: + size = 4; + sign = true; + write = true; + break; + case ldxd_op: + size = 8; + sign = true; + write = false; + break; + case stxd_op: + 
size = 8; + sign = true; + write = true; + break; + case fldxs_op: + size = 4; + fp = true; + sign = true; + write = false; + break; + case fstxs_op: + size = 4; + fp = true; + sign = true; + write = true; + break; + case fldxd_op: + size = 8; + fp = true; + sign = true; + write = false; + break; + case fstxd_op: + size = 8; + fp = true; + sign = true; + write = true; + break; + } + + if (!size) + goto sigbus; + if (user && !access_ok(addr, size)) + goto sigbus; + + if (!write) { + res = unaligned_read(addr, &value, size, sign); + if (res) + goto fault; + + /* Rd is the same field in all formats */ + if (!fp) + regs->regs[insn.reg3_format.rd] = value; + else { + if (is_fpu_owner()) + write_fpr(insn.reg3_format.rd, value); + else + set_fpr64(&current->thread.fpu.fpr[insn.reg3_format.rd], 0, value); + } + } else { + /* Rd is the same field in all formats */ + if (!fp) + value = regs->regs[insn.reg3_format.rd]; + else { + if (is_fpu_owner()) + value = read_fpr(insn.reg3_format.rd); + else + value = get_fpr64(&current->thread.fpu.fpr[insn.reg3_format.rd], 0); + } + + res = unaligned_write(addr, value, size); + if (res) + goto fault; + } + +#ifdef CONFIG_DEBUG_FS + if (user) + unaligned_instructions_user++; + else + unaligned_instructions_kernel++; +#endif + + compute_return_era(regs); + + return; + +fault: + /* Did we have an exception handler installed? */ + if (fixup_exception(regs)) + return; + + die_if_kernel("Unhandled kernel unaligned access", regs); + force_sig(SIGSEGV); + + return; + +sigbus: + die_if_kernel("Unhandled kernel unaligned access", regs); + force_sig(SIGBUS); + + return; +} + +#ifdef CONFIG_DEBUG_FS +static int __init debugfs_unaligned(void) +{ + struct dentry *d; + + d = debugfs_create_dir("loongarch", NULL); + if (!d) + return -ENOMEM; + + debugfs_create_u32("unaligned_instructions_user", + S_IRUGO, d, &unaligned_instructions_user); + debugfs_create_u32("unaligned_instructions_kernel", + S_IRUGO, d, &unaligned_instructions_kernel); + + return 0; +} +arch_initcall(debugfs_unaligned); +#endif diff --git a/arch/loongarch/kernel/unwind_guess.c b/arch/loongarch/kernel/unwind_guess.c index 5afa6064d73e45b4b99524072861fdbc0fc38974..e2d2e4f3001f490a76762ad336fbcf696c4bbfe3 100644 --- a/arch/loongarch/kernel/unwind_guess.c +++ b/arch/loongarch/kernel/unwind_guess.c @@ -3,6 +3,7 @@ * Copyright (C) 2022 Loongson Technology Corporation Limited */ #include +#include #include @@ -53,7 +54,8 @@ bool unwind_next_frame(struct unwind_state *state) state->sp < info->end; state->sp += sizeof(unsigned long)) { addr = *(unsigned long *)(state->sp); - + state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx, + addr, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET)); if (__kernel_text_address(addr)) return true; } diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c index 4571c3c87cd4c0dc8da5515d82fecd8da1b7397e..0f8d1451ebb848d8a88f8a5c9405903a8d4c93c0 100644 --- a/arch/loongarch/kernel/unwind_prologue.c +++ b/arch/loongarch/kernel/unwind_prologue.c @@ -2,12 +2,23 @@ /* * Copyright (C) 2022 Loongson Technology Corporation Limited */ +#include #include #include #include #include +static inline void unwind_state_fixup(struct unwind_state *state) +{ +#ifdef CONFIG_DYNAMIC_FTRACE + static unsigned long ftrace = (unsigned long)ftrace_call + 4; + + if (state->pc == ftrace) + state->is_ftrace = true; +#endif +} + unsigned long unwind_get_return_address(struct unwind_state *state) { @@ -32,6 +43,8 @@ static bool unwind_by_guess(struct unwind_state *state) 
state->sp < info->end; state->sp += sizeof(unsigned long)) { addr = *(unsigned long *)(state->sp); + state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx, + addr, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET)); if (__kernel_text_address(addr)) return true; } @@ -41,15 +54,30 @@ static bool unwind_by_guess(struct unwind_state *state) static bool unwind_by_prologue(struct unwind_state *state) { - struct stack_info *info = &state->stack_info; - union loongarch_instruction *ip, *ip_end; long frame_ra = -1; unsigned long frame_size = 0; unsigned long size, offset, pc = state->pc; + struct pt_regs *regs; + struct stack_info *info = &state->stack_info; + union loongarch_instruction *ip, *ip_end; if (state->sp >= info->end || state->sp < info->begin) return false; + if (state->is_ftrace) { + /* + * When we meet ftrace_regs_entry, reset the first flag as if tracing were + * just starting. Prologue analysis will stop soon because the PC is at the entry. + */ + regs = (struct pt_regs *)state->sp; + state->first = true; + state->is_ftrace = false; + state->pc = regs->csr_era; + state->ra = regs->regs[1]; + state->sp = regs->regs[3]; + return true; + } + if (!kallsyms_lookup_size_offset(pc, &size, &offset)) return false; @@ -95,7 +123,7 @@ static bool unwind_by_prologue(struct unwind_state *state) state->pc = *(unsigned long *)(state->sp + frame_ra); state->sp = state->sp + frame_size; - return !!__kernel_text_address(state->pc); + goto out; first: state->first = false; @@ -104,7 +132,9 @@ first: state->pc = state->ra; - return !!__kernel_text_address(state->ra); +out: + unwind_state_fixup(state); + return !!__kernel_text_address(state->pc); } void unwind_start(struct unwind_state *state, struct task_struct *task, @@ -147,8 +177,11 @@ bool unwind_next_frame(struct unwind_state *state) break; case UNWINDER_PROLOGUE: - if (unwind_by_prologue(state)) + if (unwind_by_prologue(state)) { + state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx, + state->pc, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET)); return true; + } if (info->type == STACK_TYPE_IRQ && info->end == state->sp) { @@ -158,10 +191,11 @@ bool unwind_next_frame(struct unwind_state *state) if (user_mode(regs) || !__kernel_text_address(pc)) return false; - state->pc = pc; - state->sp = regs->regs[3]; - state->ra = regs->regs[1]; state->first = true; + state->ra = regs->regs[1]; + state->sp = regs->regs[3]; + state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx, + pc, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET)); get_stack_info(state->sp, state->task, info); return true; diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S index b3309a5e695b23ceed7f156331805015d00255ff..733b16e8d55ddb55d3142cf7ccd02287c158c85f 100644 --- a/arch/loongarch/kernel/vmlinux.lds.S +++ b/arch/loongarch/kernel/vmlinux.lds.S @@ -4,6 +4,7 @@ #include #define PAGE_SIZE _PAGE_SIZE +#define RO_EXCEPTION_TABLE_ALIGN 4 /* * Put .bss..swapper_pg_dir as the first thing in .bss. This will @@ -53,7 +54,17 @@ SECTIONS . = ALIGN(PECOFF_SEGMENT_ALIGN); _etext = .; - EXCEPTION_TABLE(16) + /* + * struct alt_inst entries. From the header (alternative.h): + * "Alternative instructions for different CPU types or capabilities" + * Think locking instructions on spinlocks. + */ + . 
= ALIGN(4); + .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { + __alt_instructions = .; + *(.altinstructions) + __alt_instructions_end = .; + } .got : ALIGN(16) { *(.got) } .plt : ALIGN(16) { *(.plt) } diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile index e36635fccb6955706929f93f46e584b60a63f1a1..40bde632900fc823fc3f7653ce1b3b3435ff93f5 100644 --- a/arch/loongarch/lib/Makefile +++ b/arch/loongarch/lib/Makefile @@ -3,4 +3,5 @@ # Makefile for LoongArch-specific library files. # -lib-y += delay.o clear_user.o copy_user.o dump_tlb.o +lib-y += delay.o memset.o memcpy.o memmove.o \ + clear_user.o copy_user.o dump_tlb.o unaligned.o diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S index 16ba2b8dd68ad9f601030c9e9c3ac40f202ca118..2dc48e61a2c8cc2d08a70049143ba7677d7f6599 100644 --- a/arch/loongarch/lib/clear_user.S +++ b/arch/loongarch/lib/clear_user.S @@ -3,30 +3,37 @@ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#include #include #include +#include +#include #include #include -.macro fixup_ex from, to, offset, fix -.if \fix - .section .fixup, "ax" -\to: addi.d a0, a1, \offset +.irp to, 0, 1, 2, 3, 4, 5, 6, 7 +.L_fixup_handle_\to\(): + addi.d a0, a1, (\to) * (-8) jr ra - .previous -.endif - .section __ex_table, "a" - PTR \from\()b, \to\()b - .previous -.endm +.endr + +SYM_FUNC_START(__clear_user) + /* + * Some CPUs support hardware unaligned access + */ + ALTERNATIVE "b __clear_user_generic", \ + "b __clear_user_fast", CPU_FEATURE_UAL +SYM_FUNC_END(__clear_user) + +EXPORT_SYMBOL(__clear_user) /* - * unsigned long __clear_user(void *addr, size_t size) + * unsigned long __clear_user_generic(void *addr, size_t size) * * a0: addr * a1: size */ -SYM_FUNC_START(__clear_user) +SYM_FUNC_START(__clear_user_generic) beqz a1, 2f 1: st.b zero, a0, 0 @@ -37,7 +44,55 @@ SYM_FUNC_START(__clear_user) 2: move a0, a1 jr ra - fixup_ex 1, 3, 0, 1 -SYM_FUNC_END(__clear_user) + _asm_extable 1b, .L_fixup_handle_0 +SYM_FUNC_END(__clear_user_generic) -EXPORT_SYMBOL(__clear_user) +/* + * unsigned long __clear_user_fast(void *addr, unsigned long size) + * + * a0: addr + * a1: size + */ +SYM_FUNC_START(__clear_user_fast) + beqz a1, 10f + + ori a2, zero, 64 + blt a1, a2, 9f + + /* set 64 bytes at a time */ +1: st.d zero, a0, 0 +2: st.d zero, a0, 8 +3: st.d zero, a0, 16 +4: st.d zero, a0, 24 +5: st.d zero, a0, 32 +6: st.d zero, a0, 40 +7: st.d zero, a0, 48 +8: st.d zero, a0, 56 + + addi.d a0, a0, 64 + addi.d a1, a1, -64 + bge a1, a2, 1b + + beqz a1, 10f + + /* set the remaining bytes */ +9: st.b zero, a0, 0 + addi.d a0, a0, 1 + addi.d a1, a1, -1 + bgt a1, zero, 9b + + /* return */ +10: move a0, a1 + jr ra + + /* fixup and ex_table */ + _asm_extable 1b, .L_fixup_handle_0 + _asm_extable 2b, .L_fixup_handle_1 + _asm_extable 3b, .L_fixup_handle_2 + _asm_extable 4b, .L_fixup_handle_3 + _asm_extable 5b, .L_fixup_handle_4 + _asm_extable 6b, .L_fixup_handle_5 + _asm_extable 7b, .L_fixup_handle_6 + _asm_extable 8b, .L_fixup_handle_7 + _asm_extable 9b, .L_fixup_handle_0 +SYM_FUNC_END(__clear_user_fast) diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S index 97d20327a69eeb1b91ec64d990583c314730a5f8..55ac6020a1ad116bc10c792be61d595b14ec6bc7 100644 --- a/arch/loongarch/lib/copy_user.S +++ b/arch/loongarch/lib/copy_user.S @@ -3,31 +3,38 @@ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#include #include #include +#include +#include #include #include -.macro fixup_ex from, to, offset, fix -.if \fix - 
.section .fixup, "ax" -\to: addi.d a0, a2, \offset +.irp to, 0, 1, 2, 3, 4, 5, 6, 7 +.L_fixup_handle_\to\(): + addi.d a0, a2, (\to) * (-8) jr ra - .previous -.endif - .section __ex_table, "a" - PTR \from\()b, \to\()b - .previous -.endm +.endr + +SYM_FUNC_START(__copy_user) + /* + * Some CPUs support hardware unaligned access + */ + ALTERNATIVE "b __copy_user_generic", \ + "b __copy_user_fast", CPU_FEATURE_UAL +SYM_FUNC_END(__copy_user) + +EXPORT_SYMBOL(__copy_user) /* - * unsigned long __copy_user(void *to, const void *from, size_t n) + * unsigned long __copy_user_generic(void *to, const void *from, size_t n) * * a0: to * a1: from * a2: n */ -SYM_FUNC_START(__copy_user) +SYM_FUNC_START(__copy_user_generic) beqz a2, 3f 1: ld.b t0, a1, 0 @@ -40,8 +47,77 @@ SYM_FUNC_START(__copy_user) 3: move a0, a2 jr ra - fixup_ex 1, 4, 0, 1 - fixup_ex 2, 4, 0, 0 -SYM_FUNC_END(__copy_user) + _asm_extable 1b, .L_fixup_handle_0 + _asm_extable 2b, .L_fixup_handle_0 +SYM_FUNC_END(__copy_user_generic) -EXPORT_SYMBOL(__copy_user) +/* + * unsigned long __copy_user_fast(void *to, const void *from, unsigned long n) + * + * a0: to + * a1: from + * a2: n + */ +SYM_FUNC_START(__copy_user_fast) + beqz a2, 19f + + ori a3, zero, 64 + blt a2, a3, 17f + + /* copy 64 bytes at a time */ +1: ld.d t0, a1, 0 +2: ld.d t1, a1, 8 +3: ld.d t2, a1, 16 +4: ld.d t3, a1, 24 +5: ld.d t4, a1, 32 +6: ld.d t5, a1, 40 +7: ld.d t6, a1, 48 +8: ld.d t7, a1, 56 +9: st.d t0, a0, 0 +10: st.d t1, a0, 8 +11: st.d t2, a0, 16 +12: st.d t3, a0, 24 +13: st.d t4, a0, 32 +14: st.d t5, a0, 40 +15: st.d t6, a0, 48 +16: st.d t7, a0, 56 + + addi.d a0, a0, 64 + addi.d a1, a1, 64 + addi.d a2, a2, -64 + bge a2, a3, 1b + + beqz a2, 19f + + /* copy the remaining bytes */ +17: ld.b t0, a1, 0 +18: st.b t0, a0, 0 + addi.d a0, a0, 1 + addi.d a1, a1, 1 + addi.d a2, a2, -1 + bgt a2, zero, 17b + + /* return */ +19: move a0, a2 + jr ra + + /* fixup and ex_table */ + _asm_extable 1b, .L_fixup_handle_0 + _asm_extable 2b, .L_fixup_handle_1 + _asm_extable 3b, .L_fixup_handle_2 + _asm_extable 4b, .L_fixup_handle_3 + _asm_extable 5b, .L_fixup_handle_4 + _asm_extable 6b, .L_fixup_handle_5 + _asm_extable 7b, .L_fixup_handle_6 + _asm_extable 8b, .L_fixup_handle_7 + _asm_extable 9b, .L_fixup_handle_0 + _asm_extable 10b, .L_fixup_handle_1 + _asm_extable 11b, .L_fixup_handle_2 + _asm_extable 12b, .L_fixup_handle_3 + _asm_extable 13b, .L_fixup_handle_4 + _asm_extable 14b, .L_fixup_handle_5 + _asm_extable 15b, .L_fixup_handle_6 + _asm_extable 16b, .L_fixup_handle_7 + _asm_extable 17b, .L_fixup_handle_0 + _asm_extable 18b, .L_fixup_handle_0 +SYM_FUNC_END(__copy_user_fast) diff --git a/arch/loongarch/lib/memcpy.S b/arch/loongarch/lib/memcpy.S new file mode 100644 index 0000000000000000000000000000000000000000..7c07d595ee89aca1428431b18727f33332576b06 --- /dev/null +++ b/arch/loongarch/lib/memcpy.S @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include + +SYM_FUNC_START(memcpy) + /* + * Some CPUs support hardware unaligned access + */ + ALTERNATIVE "b __memcpy_generic", \ + "b __memcpy_fast", CPU_FEATURE_UAL +SYM_FUNC_END(memcpy) + +EXPORT_SYMBOL(memcpy) + +/* + * void *__memcpy_generic(void *dst, const void *src, size_t n) + * + * a0: dst + * a1: src + * a2: n + */ +SYM_FUNC_START(__memcpy_generic) + move a3, a0 + beqz a2, 2f + +1: ld.b t0, a1, 0 + st.b t0, a0, 0 + addi.d a0, a0, 1 + addi.d a1, a1, 1 + addi.d a2, a2, -1 + bgt a2, zero, 1b + +2: move 
a0, a3 + jr ra +SYM_FUNC_END(__memcpy_generic) + +/* + * void *__memcpy_fast(void *dst, const void *src, size_t n) + * + * a0: dst + * a1: src + * a2: n + */ +SYM_FUNC_START(__memcpy_fast) + move a3, a0 + beqz a2, 3f + + ori a4, zero, 64 + blt a2, a4, 2f + + /* copy 64 bytes at a time */ +1: ld.d t0, a1, 0 + ld.d t1, a1, 8 + ld.d t2, a1, 16 + ld.d t3, a1, 24 + ld.d t4, a1, 32 + ld.d t5, a1, 40 + ld.d t6, a1, 48 + ld.d t7, a1, 56 + st.d t0, a0, 0 + st.d t1, a0, 8 + st.d t2, a0, 16 + st.d t3, a0, 24 + st.d t4, a0, 32 + st.d t5, a0, 40 + st.d t6, a0, 48 + st.d t7, a0, 56 + + addi.d a0, a0, 64 + addi.d a1, a1, 64 + addi.d a2, a2, -64 + bge a2, a4, 1b + + beqz a2, 3f + + /* copy the remaining bytes */ +2: ld.b t0, a1, 0 + st.b t0, a0, 0 + addi.d a0, a0, 1 + addi.d a1, a1, 1 + addi.d a2, a2, -1 + bgt a2, zero, 2b + + /* return */ +3: move a0, a3 + jr ra +SYM_FUNC_END(__memcpy_fast) diff --git a/arch/loongarch/lib/memmove.S b/arch/loongarch/lib/memmove.S new file mode 100644 index 0000000000000000000000000000000000000000..6ffdb46da78fdfee56ce06f73fb16e250d61e663 --- /dev/null +++ b/arch/loongarch/lib/memmove.S @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include + +SYM_FUNC_START(memmove) + blt a0, a1, 1f /* dst < src, memcpy */ + blt a1, a0, 3f /* src < dst, rmemcpy */ + jr ra /* dst == src, return */ + + /* if (src - dst) < 64, copy 1 byte at a time */ +1: ori a3, zero, 64 + sub.d t0, a1, a0 + blt t0, a3, 2f + b memcpy +2: b __memcpy_generic + + /* if (dst - src) < 64, copy 1 byte at a time */ +3: ori a3, zero, 64 + sub.d t0, a0, a1 + blt t0, a3, 4f + b rmemcpy +4: b __rmemcpy_generic +SYM_FUNC_END(memmove) + +EXPORT_SYMBOL(memmove) + +SYM_FUNC_START(rmemcpy) + /* + * Some CPUs support hardware unaligned access + */ + ALTERNATIVE "b __rmemcpy_generic", \ + "b __rmemcpy_fast", CPU_FEATURE_UAL +SYM_FUNC_END(rmemcpy) + +/* + * void *__rmemcpy_generic(void *dst, const void *src, size_t n) + * + * a0: dst + * a1: src + * a2: n + */ +SYM_FUNC_START(__rmemcpy_generic) + move a3, a0 + beqz a2, 2f + + add.d a0, a0, a2 + add.d a1, a1, a2 + +1: ld.b t0, a1, -1 + st.b t0, a0, -1 + addi.d a0, a0, -1 + addi.d a1, a1, -1 + addi.d a2, a2, -1 + bgt a2, zero, 1b + +2: move a0, a3 + jr ra +SYM_FUNC_END(__rmemcpy_generic) + +/* + * void *__rmemcpy_fast(void *dst, const void *src, size_t n) + * + * a0: dst + * a1: src + * a2: n + */ +SYM_FUNC_START(__rmemcpy_fast) + move a3, a0 + beqz a2, 3f + + add.d a0, a0, a2 + add.d a1, a1, a2 + + ori a4, zero, 64 + blt a2, a4, 2f + + /* copy 64 bytes at a time */ +1: ld.d t0, a1, -8 + ld.d t1, a1, -16 + ld.d t2, a1, -24 + ld.d t3, a1, -32 + ld.d t4, a1, -40 + ld.d t5, a1, -48 + ld.d t6, a1, -56 + ld.d t7, a1, -64 + st.d t0, a0, -8 + st.d t1, a0, -16 + st.d t2, a0, -24 + st.d t3, a0, -32 + st.d t4, a0, -40 + st.d t5, a0, -48 + st.d t6, a0, -56 + st.d t7, a0, -64 + + addi.d a0, a0, -64 + addi.d a1, a1, -64 + addi.d a2, a2, -64 + bge a2, a4, 1b + + beqz a2, 3f + + /* copy the remaining bytes */ +2: ld.b t0, a1, -1 + st.b t0, a0, -1 + addi.d a0, a0, -1 + addi.d a1, a1, -1 + addi.d a2, a2, -1 + bgt a2, zero, 2b + + /* return */ +3: move a0, a3 + jr ra +SYM_FUNC_END(__rmemcpy_fast) diff --git a/arch/loongarch/lib/memset.S b/arch/loongarch/lib/memset.S new file mode 100644 index 0000000000000000000000000000000000000000..e7cb4ea3747d7ce045ad51406604fb9035da991f --- /dev/null +++ b/arch/loongarch/lib/memset.S @@ -0,0 +1,91 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include + +.macro fill_to_64 r0 + bstrins.d \r0, \r0, 15, 8 + bstrins.d \r0, \r0, 31, 16 + bstrins.d \r0, \r0, 63, 32 +.endm + +SYM_FUNC_START(memset) + /* + * Some CPUs support hardware unaligned access + */ + ALTERNATIVE "b __memset_generic", \ + "b __memset_fast", CPU_FEATURE_UAL +SYM_FUNC_END(memset) + +EXPORT_SYMBOL(memset) + +/* + * void *__memset_generic(void *s, int c, size_t n) + * + * a0: s + * a1: c + * a2: n + */ +SYM_FUNC_START(__memset_generic) + move a3, a0 + beqz a2, 2f + +1: st.b a1, a0, 0 + addi.d a0, a0, 1 + addi.d a2, a2, -1 + bgt a2, zero, 1b + +2: move a0, a3 + jr ra +SYM_FUNC_END(__memset_generic) + +/* + * void *__memset_fast(void *s, int c, size_t n) + * + * a0: s + * a1: c + * a2: n + */ +SYM_FUNC_START(__memset_fast) + move a3, a0 + beqz a2, 3f + + ori a4, zero, 64 + blt a2, a4, 2f + + /* fill a1 to 64 bits */ + fill_to_64 a1 + + /* set 64 bytes at a time */ +1: st.d a1, a0, 0 + st.d a1, a0, 8 + st.d a1, a0, 16 + st.d a1, a0, 24 + st.d a1, a0, 32 + st.d a1, a0, 40 + st.d a1, a0, 48 + st.d a1, a0, 56 + + addi.d a0, a0, 64 + addi.d a2, a2, -64 + bge a2, a4, 1b + + beqz a2, 3f + + /* set the remaining bytes */ +2: st.b a1, a0, 0 + addi.d a0, a0, 1 + addi.d a2, a2, -1 + bgt a2, zero, 2b + + /* return */ +3: move a0, a3 + jr ra +SYM_FUNC_END(__memset_fast) diff --git a/arch/loongarch/lib/unaligned.S b/arch/loongarch/lib/unaligned.S new file mode 100644 index 0000000000000000000000000000000000000000..9177fd638f072c9b1cec989de31d9a13360e16cb --- /dev/null +++ b/arch/loongarch/lib/unaligned.S @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include + +#include +#include +#include +#include +#include +#include + +.L_fixup_handle_unaligned: + li.w a0, -EFAULT + jr ra + +/* + * unsigned long unaligned_read(void *addr, void *value, unsigned long n, bool sign) + * + * a0: addr + * a1: value + * a2: n + * a3: sign + */ +SYM_FUNC_START(unaligned_read) + beqz a2, 5f + + li.w t2, 0 + addi.d t0, a2, -1 + slli.d t1, t0, 3 + add.d a0, a0, t0 + + beqz a3, 2f +1: ld.b t3, a0, 0 + b 3f + +2: ld.bu t3, a0, 0 +3: sll.d t3, t3, t1 + or t2, t2, t3 + addi.d t1, t1, -8 + addi.d a0, a0, -1 + addi.d a2, a2, -1 + bgtz a2, 2b +4: st.d t2, a1, 0 + + move a0, a2 + jr ra + +5: li.w a0, -EFAULT + jr ra + + _asm_extable 1b, .L_fixup_handle_unaligned + _asm_extable 2b, .L_fixup_handle_unaligned + _asm_extable 4b, .L_fixup_handle_unaligned +SYM_FUNC_END(unaligned_read) + +/* + * unsigned long unaligned_write(void *addr, unsigned long value, unsigned long n) + * + * a0: addr + * a1: value + * a2: n + */ +SYM_FUNC_START(unaligned_write) + beqz a2, 3f + + li.w t0, 0 +1: srl.d t1, a1, t0 +2: st.b t1, a0, 0 + addi.d t0, t0, 8 + addi.d a2, a2, -1 + addi.d a0, a0, 1 + bgtz a2, 1b + + move a0, a2 + jr ra + +3: li.w a0, -EFAULT + jr ra + + _asm_extable 2b, .L_fixup_handle_unaligned +SYM_FUNC_END(unaligned_write) diff --git a/arch/loongarch/mm/extable.c b/arch/loongarch/mm/extable.c index bc20988f2b87cc9af9b93e39ececf2c874df9970..9ab69872dcffea2c6e34a568e4f23aec9041a624 100644 --- a/arch/loongarch/mm/extable.c +++ b/arch/loongarch/mm/extable.c @@ -2,21 +2,62 @@ /* * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#include #include -#include -#include #include +#include +#include + +static inline unsigned long +get_ex_fixup(const struct 
exception_table_entry *ex) +{ + return ((unsigned long)&ex->fixup + ex->fixup); +} + +static inline void regs_set_gpr(struct pt_regs *regs, + unsigned int offset, unsigned long val) +{ + if (offset && offset <= MAX_REG_OFFSET) + *(unsigned long *)((unsigned long)regs + offset) = val; +} + +static bool ex_handler_fixup(const struct exception_table_entry *ex, + struct pt_regs *regs) +{ + regs->csr_era = get_ex_fixup(ex); + + return true; +} + +static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex, + struct pt_regs *regs) +{ + int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data); + int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data); + + regs_set_gpr(regs, reg_err * sizeof(unsigned long), -EFAULT); + regs_set_gpr(regs, reg_zero * sizeof(unsigned long), 0); + regs->csr_era = get_ex_fixup(ex); + + return true; +} -int fixup_exception(struct pt_regs *regs) +bool fixup_exception(struct pt_regs *regs) { - const struct exception_table_entry *fixup; + const struct exception_table_entry *ex; - fixup = search_exception_tables(exception_era(regs)); - if (fixup) { - regs->csr_era = fixup->fixup; + ex = search_exception_tables(exception_era(regs)); + if (!ex) + return false; - return 1; + switch (ex->type) { + case EX_TYPE_FIXUP: + return ex_handler_fixup(ex, regs); + case EX_TYPE_UACCESS_ERR_ZERO: + return ex_handler_uaccess_err_zero(ex, regs); + case EX_TYPE_BPF: + return ex_handler_bpf(ex, regs); } - return 0; + BUG(); } diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index 080061793c859d58d6875207a20961bd08191b68..e018aed345866010c0822c82bfc0c5459ef2e41e 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include @@ -152,6 +152,45 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); #endif #endif +#ifdef CONFIG_SPARSEMEM_VMEMMAP +void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node, + unsigned long addr, unsigned long next) +{ + pmd_t entry; + + entry = pfn_pmd(virt_to_pfn(p), PAGE_KERNEL); + pmd_val(entry) |= _PAGE_HUGE | _PAGE_HGLOBAL; + set_pmd_at(&init_mm, addr, pmd, entry); +} + +int __meminit vmemmap_check_pmd(pmd_t *pmd, int node, + unsigned long addr, unsigned long next) +{ + int huge = pmd_val(*pmd) & _PAGE_HUGE; + + if (huge) + vmemmap_verify((pte_t *)pmd, node, addr, next); + + return huge; +} + +int __meminit vmemmap_populate(unsigned long start, unsigned long end, + int node, struct vmem_altmap *altmap) +{ +#if CONFIG_PGTABLE_LEVELS == 2 + return vmemmap_populate_basepages(start, end, node, NULL); +#else + return vmemmap_populate_hugepages(start, end, node, NULL); +#endif +} + +#ifdef CONFIG_MEMORY_HOTPLUG +void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap) +{ +} +#endif +#endif + static pte_t *fixmap_pte(unsigned long addr) { pgd_t *pgd; @@ -168,7 +207,7 @@ static pte_t *fixmap_pte(unsigned long addr) new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); pgd_populate(&init_mm, pgd, new); #ifndef __PAGETABLE_PUD_FOLDED - pud_init((unsigned long)new, (unsigned long)invalid_pmd_table); + pud_init(new); #endif } @@ -179,7 +218,7 @@ static pte_t *fixmap_pte(unsigned long addr) new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); pud_populate(&init_mm, pud, new); #ifndef __PAGETABLE_PMD_FOLDED - pmd_init((unsigned long)new, (unsigned long)invalid_pte_table); + pmd_init(new); #endif } diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c index 
ee179ccd3e3f22e16bb13a7c5b2933db57bb2289..36a6dc0148aef2d6ad0c3020e8d255010c258815 100644 --- a/arch/loongarch/mm/pgtable.c +++ b/arch/loongarch/mm/pgtable.c @@ -16,7 +16,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) ret = (pgd_t *) __get_free_page(GFP_KERNEL); if (ret) { init = pgd_offset(&init_mm, 0UL); - pgd_init((unsigned long)ret); + pgd_init(ret); memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); } @@ -25,7 +25,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) } EXPORT_SYMBOL_GPL(pgd_alloc); -void pgd_init(unsigned long page) +void pgd_init(void *addr) { unsigned long *p, *end; unsigned long entry; @@ -38,7 +38,7 @@ void pgd_init(unsigned long page) entry = (unsigned long)invalid_pte_table; #endif - p = (unsigned long *) page; + p = (unsigned long *)addr; end = p + PTRS_PER_PGD; do { @@ -56,11 +56,12 @@ void pgd_init(unsigned long page) EXPORT_SYMBOL_GPL(pgd_init); #ifndef __PAGETABLE_PMD_FOLDED -void pmd_init(unsigned long addr, unsigned long pagetable) +void pmd_init(void *addr) { unsigned long *p, *end; + unsigned long pagetable = (unsigned long)invalid_pte_table; - p = (unsigned long *) addr; + p = (unsigned long *)addr; end = p + PTRS_PER_PMD; do { @@ -79,9 +80,10 @@ EXPORT_SYMBOL_GPL(pmd_init); #endif #ifndef __PAGETABLE_PUD_FOLDED -void pud_init(unsigned long addr, unsigned long pagetable) +void pud_init(void *addr) { unsigned long *p, *end; + unsigned long pagetable = (unsigned long)invalid_pmd_table; p = (unsigned long *)addr; end = p + PTRS_PER_PUD; @@ -98,6 +100,7 @@ void pud_init(unsigned long addr, unsigned long pagetable) p[-1] = pagetable; } while (p != end); } +EXPORT_SYMBOL_GPL(pud_init); #endif pmd_t mk_pmd(struct page *page, pgprot_t prot) @@ -119,12 +122,12 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, void __init pagetable_init(void) { /* Initialize the entire pgd. 
*/ - pgd_init((unsigned long)swapper_pg_dir); - pgd_init((unsigned long)invalid_pg_dir); + pgd_init(swapper_pg_dir); + pgd_init(invalid_pg_dir); #ifndef __PAGETABLE_PUD_FOLDED - pud_init((unsigned long)invalid_pud_table, (unsigned long)invalid_pmd_table); + pud_init(invalid_pud_table); #endif #ifndef __PAGETABLE_PMD_FOLDED - pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table); + pmd_init(invalid_pmd_table); #endif } diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index bdcd0c7719a9eedd3145e4d4cbb8a07508806d11..c4b1947ebf768fb222e23fdd9a93796c83690dad 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -387,6 +387,65 @@ static bool is_signed_bpf_cond(u8 cond) cond == BPF_JSGE || cond == BPF_JSLE; } +#define BPF_FIXUP_REG_MASK GENMASK(31, 27) +#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0) + +bool ex_handler_bpf(const struct exception_table_entry *ex, + struct pt_regs *regs) +{ + int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup); + off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup); + + regs->regs[dst_reg] = 0; + regs->csr_era = (unsigned long)&ex->fixup - offset; + + return true; +} + +/* For accesses to BTF pointers, add an entry to the exception table */ +static int add_exception_handler(const struct bpf_insn *insn, + struct jit_ctx *ctx, + int dst_reg) +{ + unsigned long pc; + off_t offset; + struct exception_table_entry *ex; + + if (!ctx->image || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM) + return 0; + + if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries)) + return -EINVAL; + + ex = &ctx->prog->aux->extable[ctx->num_exentries]; + pc = (unsigned long)&ctx->image[ctx->idx - 1]; + + offset = pc - (long)&ex->insn; + if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN)) + return -ERANGE; + + ex->insn = offset; + + /* + * Since the extable follows the program, the fixup offset is always + * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value + * to keep things simple, and put the destination register in the upper + * bits. We don't need to worry about buildtime or runtime sort + * modifying the upper bits because the table is already sorted, and + * isn't part of the main exception table. 
+ */ + offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE); + if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset)) + return -ERANGE; + + ex->type = EX_TYPE_BPF; + ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg); + + ctx->num_exentries++; + + return 0; +} + static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass) { u8 tm = -1; @@ -816,6 +875,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext case BPF_LDX | BPF_MEM | BPF_H: case BPF_LDX | BPF_MEM | BPF_W: case BPF_LDX | BPF_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEM | BPF_H: + case BPF_LDX | BPF_PROBE_MEM | BPF_B: switch (BPF_SIZE(code)) { case BPF_B: if (is_signed_imm12(off)) { @@ -854,6 +917,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext } break; } + + ret = add_exception_handler(insn, ctx, dst); + if (ret) + return ret; break; /* *(size *)(dst + off) = imm */ @@ -1018,6 +1085,9 @@ static int validate_code(struct jit_ctx *ctx) return -1; } + if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries)) + return -1; + return 0; } @@ -1025,7 +1095,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { bool tmp_blinded = false, extra_pass = false; u8 *image_ptr; - int image_size; + int image_size, prog_size, extable_size; struct jit_ctx ctx; struct jit_data *jit_data; struct bpf_binary_header *header; @@ -1066,7 +1136,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) image_ptr = jit_data->image; header = jit_data->header; extra_pass = true; - image_size = sizeof(u32) * ctx.idx; + prog_size = sizeof(u32) * ctx.idx; goto skip_init_ctx; } @@ -1088,12 +1158,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) ctx.epilogue_offset = ctx.idx; build_epilogue(&ctx); + extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry); + /* Now we know the actual image size. * As each LoongArch instruction is 32 bits long, * we translate the number of JITed instructions into * the size required to store the JITed code. */ - image_size = sizeof(u32) * ctx.idx; + prog_size = sizeof(u32) * ctx.idx; + image_size = prog_size + extable_size; /* Now we know the size of the structure to make */ header = bpf_jit_binary_alloc(image_size, &image_ptr, sizeof(u32), jit_fill_hole); @@ -1104,9 +1177,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) /* 2. 
Now, the actual pass to generate final JIT code */ ctx.image = (union loongarch_instruction *)image_ptr; + if (extable_size) + prog->aux->extable = (void *)image_ptr + prog_size; skip_init_ctx: ctx.idx = 0; + ctx.num_exentries = 0; build_prologue(&ctx); if (build_body(&ctx, extra_pass)) { @@ -1125,7 +1201,7 @@ skip_init_ctx: /* And we're done */ if (bpf_jit_enable > 1) - bpf_jit_dump(prog->len, image_size, 2, ctx.image); + bpf_jit_dump(prog->len, prog_size, 2, ctx.image); /* Update the icache */ flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx)); @@ -1147,7 +1223,7 @@ skip_init_ctx: jit_data->header = header; } prog->jited = 1; - prog->jited_len = image_size; + prog->jited_len = prog_size; prog->bpf_func = (void *)ctx.image; if (!prog->is_func || extra_pass) { diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h index e665ddb0aeb85580d583098c88fd6995e1353da1..ca708024fdd3e61f6859b76c125d2bda64989d21 100644 --- a/arch/loongarch/net/bpf_jit.h +++ b/arch/loongarch/net/bpf_jit.h @@ -4,6 +4,7 @@ * * Copyright (C) 2022 Loongson Technology Corporation Limited */ +#include #include #include #include @@ -15,6 +16,7 @@ struct jit_ctx { unsigned int flags; unsigned int epilogue_offset; u32 *offset; + int num_exentries; union loongarch_instruction *image; u32 stack_size; }; diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c index 8235ec92b41fe2b35b66848447a94698d3aaeb04..365f7de771cbb9e6c72d1d8d0133af476510c2a0 100644 --- a/arch/loongarch/pci/acpi.c +++ b/arch/loongarch/pci/acpi.c @@ -26,9 +26,12 @@ void pcibios_add_bus(struct pci_bus *bus) int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { - struct pci_config_window *cfg = bridge->bus->sysdata; - struct acpi_device *adev = to_acpi_device(cfg->parent); + struct acpi_device *adev = NULL; struct device *bus_dev = &bridge->bus->dev; + struct pci_config_window *cfg = bridge->bus->sysdata; + + if (!acpi_disabled) + adev = to_acpi_device(cfg->parent); ACPI_COMPANION_SET(&bridge->dev, adev); set_dev_node(bus_dev, pa_to_nid(cfg->res.start)); diff --git a/arch/loongarch/power/Makefile b/arch/loongarch/power/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..58151d003e40ea17e82c02c74fafe16fa39bbf3b --- /dev/null +++ b/arch/loongarch/power/Makefile @@ -0,0 +1,4 @@ +obj-y += platform.o + +obj-$(CONFIG_SUSPEND) += suspend.o suspend_asm.o +obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate_asm.o diff --git a/arch/loongarch/power/hibernate.c b/arch/loongarch/power/hibernate.c new file mode 100644 index 0000000000000000000000000000000000000000..1e0590542f987cd59c0bad22f938b51ad0e4f2a5 --- /dev/null +++ b/arch/loongarch/power/hibernate.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +static u32 saved_crmd; +static u32 saved_prmd; +static u32 saved_euen; +static u32 saved_ecfg; +static u64 saved_pcpu_base; +struct pt_regs saved_regs; + +void save_processor_state(void) +{ + saved_crmd = csr_read32(LOONGARCH_CSR_CRMD); + saved_prmd = csr_read32(LOONGARCH_CSR_PRMD); + saved_euen = csr_read32(LOONGARCH_CSR_EUEN); + saved_ecfg = csr_read32(LOONGARCH_CSR_ECFG); + saved_pcpu_base = csr_read64(PERCPU_BASE_KS); + + if (is_fpu_owner()) + save_fp(current); +} + +void restore_processor_state(void) +{ + csr_write32(saved_crmd, LOONGARCH_CSR_CRMD); + csr_write32(saved_prmd, LOONGARCH_CSR_PRMD); + csr_write32(saved_euen, LOONGARCH_CSR_EUEN); + csr_write32(saved_ecfg, LOONGARCH_CSR_ECFG); + csr_write64(saved_pcpu_base, 
PERCPU_BASE_KS); + + if (is_fpu_owner()) + restore_fp(current); +} + +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); + unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end)); + + return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); +} + +extern int swsusp_asm_suspend(void); + +int swsusp_arch_suspend(void) +{ + enable_pci_wakeup(); + return swsusp_asm_suspend(); +} + +extern int swsusp_asm_resume(void); + +int swsusp_arch_resume(void) +{ + /* Avoid TLB mismatch during and after kernel resume */ + local_flush_tlb_all(); + return swsusp_asm_resume(); +} diff --git a/arch/loongarch/power/hibernate_asm.S b/arch/loongarch/power/hibernate_asm.S new file mode 100644 index 0000000000000000000000000000000000000000..3c747c08d65dc687baf8f26237ee26312f73b5fd --- /dev/null +++ b/arch/loongarch/power/hibernate_asm.S @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Hibernation support specific for LoongArch + * + * Author: Huacai Chen + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include +#include +#include +#include + +.text +SYM_FUNC_START(swsusp_asm_suspend) + la.pcrel t0, saved_regs + PTR_S ra, t0, PT_R1 + PTR_S tp, t0, PT_R2 + PTR_S sp, t0, PT_R3 + PTR_S u0, t0, PT_R21 + PTR_S fp, t0, PT_R22 + PTR_S s0, t0, PT_R23 + PTR_S s1, t0, PT_R24 + PTR_S s2, t0, PT_R25 + PTR_S s3, t0, PT_R26 + PTR_S s4, t0, PT_R27 + PTR_S s5, t0, PT_R28 + PTR_S s6, t0, PT_R29 + PTR_S s7, t0, PT_R30 + PTR_S s8, t0, PT_R31 + b swsusp_save +SYM_FUNC_END(swsusp_asm_suspend) + +SYM_FUNC_START(swsusp_asm_resume) + la.pcrel t0, restore_pblist + PTR_L t0, t0, 0 +0: + PTR_L t1, t0, PBE_ADDRESS /* source */ + PTR_L t2, t0, PBE_ORIG_ADDRESS /* destination */ + PTR_LI t3, _PAGE_SIZE + PTR_ADD t3, t3, t1 +1: + REG_L t8, t1, 0 + REG_S t8, t2, 0 + PTR_ADDI t1, t1, SZREG + PTR_ADDI t2, t2, SZREG + bne t1, t3, 1b + PTR_L t0, t0, PBE_NEXT + bnez t0, 0b + la.pcrel t0, saved_regs + PTR_L ra, t0, PT_R1 + PTR_L tp, t0, PT_R2 + PTR_L sp, t0, PT_R3 + PTR_L u0, t0, PT_R21 + PTR_L fp, t0, PT_R22 + PTR_L s0, t0, PT_R23 + PTR_L s1, t0, PT_R24 + PTR_L s2, t0, PT_R25 + PTR_L s3, t0, PT_R26 + PTR_L s4, t0, PT_R27 + PTR_L s5, t0, PT_R28 + PTR_L s6, t0, PT_R29 + PTR_L s7, t0, PT_R30 + PTR_L s8, t0, PT_R31 + PTR_LI a0, 0x0 + jirl zero, ra, 0 +SYM_FUNC_END(swsusp_asm_resume) diff --git a/arch/loongarch/power/platform.c b/arch/loongarch/power/platform.c new file mode 100644 index 0000000000000000000000000000000000000000..3ea8e07aa225f959ad54138a8079bb6a4648eb33 --- /dev/null +++ b/arch/loongarch/power/platform.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: Huacai Chen + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include +#include + +#include +#include + +void enable_gpe_wakeup(void) +{ + if (acpi_disabled) + return; + + if (acpi_gbl_reduced_hardware) + return; + + acpi_enable_all_wakeup_gpes(); +} + +void enable_pci_wakeup(void) +{ + if (acpi_disabled) + return; + + if (acpi_gbl_reduced_hardware) + return; + + acpi_write_bit_register(ACPI_BITREG_PCIEXP_WAKE_STATUS, 1); + + if (acpi_gbl_FADT.flags & ACPI_FADT_PCI_EXPRESS_WAKE) + acpi_write_bit_register(ACPI_BITREG_PCIEXP_WAKE_DISABLE, 0); +} + +static int __init loongson3_acpi_suspend_init(void) +{ +#ifdef CONFIG_ACPI + acpi_status status; + uint64_t suspend_addr = 0; + + if (acpi_disabled || acpi_gbl_reduced_hardware) + return 0; + + acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); + status = acpi_evaluate_integer(NULL, "\\SADR", NULL, 
&suspend_addr);
+	if (ACPI_FAILURE(status) || !suspend_addr) {
+		pr_err("ACPI S3 is not supported!\n");
+		return -1;
+	}
+	loongson_sysconf.suspend_addr = (u64)phys_to_virt(PHYSADDR(suspend_addr));
+#endif
+	return 0;
+}
+
+device_initcall(loongson3_acpi_suspend_init);
diff --git a/arch/loongarch/power/suspend.c b/arch/loongarch/power/suspend.c
new file mode 100644
index 0000000000000000000000000000000000000000..5e19733e5e05f4397ad288d2acfd0e4c51284d65
--- /dev/null
+++ b/arch/loongarch/power/suspend.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Loongson-specific suspend support
+ *
+ * Author: Huacai Chen
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+u64 loongarch_suspend_addr;
+
+struct saved_registers {
+	u32 ecfg;
+	u32 euen;
+	u64 pgd;
+	u64 kpgd;
+	u32 pwctl0;
+	u32 pwctl1;
+};
+static struct saved_registers saved_regs;
+
+static void arch_common_suspend(void)
+{
+	save_counter();
+	saved_regs.pgd = csr_read64(LOONGARCH_CSR_PGDL);
+	saved_regs.kpgd = csr_read64(LOONGARCH_CSR_PGDH);
+	saved_regs.pwctl0 = csr_read32(LOONGARCH_CSR_PWCTL0);
+	saved_regs.pwctl1 = csr_read32(LOONGARCH_CSR_PWCTL1);
+	saved_regs.ecfg = csr_read32(LOONGARCH_CSR_ECFG);
+	saved_regs.euen = csr_read32(LOONGARCH_CSR_EUEN);
+
+	loongarch_suspend_addr = loongson_sysconf.suspend_addr;
+}
+
+static void arch_common_resume(void)
+{
+	sync_counter();
+	local_flush_tlb_all();
+	csr_write64(per_cpu_offset(0), PERCPU_BASE_KS);
+	csr_write64(eentry, LOONGARCH_CSR_EENTRY);
+	csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
+	csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
+
+	csr_write64(saved_regs.pgd, LOONGARCH_CSR_PGDL);
+	csr_write64(saved_regs.kpgd, LOONGARCH_CSR_PGDH);
+	csr_write32(saved_regs.pwctl0, LOONGARCH_CSR_PWCTL0);
+	csr_write32(saved_regs.pwctl1, LOONGARCH_CSR_PWCTL1);
+	csr_write32(saved_regs.ecfg, LOONGARCH_CSR_ECFG);
+	csr_write32(saved_regs.euen, LOONGARCH_CSR_EUEN);
+}
+
+int loongarch_acpi_suspend(void)
+{
+	enable_gpe_wakeup();
+	enable_pci_wakeup();
+
+	arch_common_suspend();
+
+	/* processor specific suspend */
+	loongarch_suspend_enter();
+
+	arch_common_resume();
+
+	return 0;
+}
diff --git a/arch/loongarch/power/suspend_asm.S b/arch/loongarch/power/suspend_asm.S
new file mode 100644
index 0000000000000000000000000000000000000000..eb2675642f9f455ff2b7aa635ccc699980e205a9
--- /dev/null
+++ b/arch/loongarch/power/suspend_asm.S
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Sleep helper for Loongson-3 sleep mode.
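+ * (loongarch_suspend_enter() below saves the kernel context, calls the
+ * BIOS STR routine at loongarch_suspend_addr, and execution resumes at
+ * loongarch_wakeup_start after wakeup)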
+ * + * Author: Huacai Chen + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include + +/* preparatory stuff */ +.macro SETUP_SLEEP + addi.d sp, sp, -PT_SIZE + st.d $r1, sp, PT_R1 + st.d $r2, sp, PT_R2 + st.d $r3, sp, PT_R3 + st.d $r4, sp, PT_R4 + st.d $r21, sp, PT_R21 + st.d $r22, sp, PT_R22 + st.d $r23, sp, PT_R23 + st.d $r24, sp, PT_R24 + st.d $r25, sp, PT_R25 + st.d $r26, sp, PT_R26 + st.d $r27, sp, PT_R27 + st.d $r28, sp, PT_R28 + st.d $r29, sp, PT_R29 + st.d $r30, sp, PT_R30 + st.d $r31, sp, PT_R31 + + la.pcrel t0, acpi_saved_sp + st.d sp, t0, 0 +.endm + +.macro SETUP_WAKEUP + ld.d $r1, sp, PT_R1 + ld.d $r2, sp, PT_R2 + ld.d $r3, sp, PT_R3 + ld.d $r4, sp, PT_R4 + ld.d $r21, sp, PT_R21 + ld.d $r22, sp, PT_R22 + ld.d $r23, sp, PT_R23 + ld.d $r24, sp, PT_R24 + ld.d $r25, sp, PT_R25 + ld.d $r26, sp, PT_R26 + ld.d $r27, sp, PT_R27 + ld.d $r28, sp, PT_R28 + ld.d $r29, sp, PT_R29 + ld.d $r30, sp, PT_R30 + ld.d $r31, sp, PT_R31 +.endm + + .text + .align 12 + +/* Sleep/wakeup code for Loongson-3 */ +SYM_FUNC_START(loongarch_suspend_enter) + SETUP_SLEEP + bl __flush_cache_all + + /* Pass RA and SP to BIOS */ + addi.d a1, sp, 0 + la.pcrel a0, loongarch_wakeup_start + la.pcrel t0, loongarch_suspend_addr + ld.d t0, t0, 0 + jirl a0, t0, 0 /* Call BIOS's STR sleep routine */ + + /* + * This is where we return upon wakeup. + * Reload all of the registers and return. + */ +SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL) + li.d t0, CSR_DMW0_INIT # UC, PLV0 + csrwr t0, LOONGARCH_CSR_DMWIN0 + li.d t0, CSR_DMW1_INIT # CA, PLV0 + csrwr t0, LOONGARCH_CSR_DMWIN1 + + la.abs t0, 0f + jr t0 +0: + la.pcrel t0, acpi_saved_sp + ld.d sp, t0, 0 + SETUP_WAKEUP + addi.d sp, sp, PT_SIZE + jr ra +SYM_FUNC_END(loongarch_suspend_enter) diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h index 9b4e2fe2ac821671ee5615801cbe791fbee4a8f1..b93c41fe2067864e5f9f3656f28a4ec5fc1aef6f 100644 --- a/arch/m68k/include/asm/pgtable_mm.h +++ b/arch/m68k/include/asm/pgtable_mm.h @@ -145,8 +145,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, #endif /* !__ASSEMBLY__ */ -#define kern_addr_valid(addr) (1) - /* MMU-specific headers */ #ifdef CONFIG_SUN3 diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h index bce5ca56c3883dea12147eff894e99b968d964fb..fed58da3a6b65c5adc216135b2f3a37bfaeb10b8 100644 --- a/arch/m68k/include/asm/pgtable_no.h +++ b/arch/m68k/include/asm/pgtable_no.h @@ -20,7 +20,6 @@ #define pgd_none(pgd) (0) #define pgd_bad(pgd) (0) #define pgd_clear(pgdp) -#define kern_addr_valid(addr) (1) #define pmd_offset(a, b) ((void *)0) #define PAGE_NONE __pgprot(0) diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c index cb6def585851e6c2fb9b3055239eecf612b377a4..37fb663559b47497c9ae67e4817ae970fcd7fb1e 100644 --- a/arch/m68k/kernel/setup_no.c +++ b/arch/m68k/kernel/setup_no.c @@ -90,8 +90,7 @@ void __init setup_arch(char **cmdline_p) config_BSP(&command_line[0], sizeof(command_line)); #if defined(CONFIG_BOOTPARAM) - strncpy(&command_line[0], CONFIG_BOOTPARAM_STRING, sizeof(command_line)); - command_line[sizeof(command_line) - 1] = 0; + strscpy(&command_line[0], CONFIG_BOOTPARAM_STRING, sizeof(command_line)); #endif /* CONFIG_BOOTPARAM */ process_uboot_commandline(&command_line[0], sizeof(command_line)); diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index 
ba348e997dbb4a26602b9277692523b9142c3ca5..42f5988e998b8f3be833a6d7db533860edef0cf5 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -416,9 +416,6 @@ extern unsigned long iopa(unsigned long addr); #define IOMAP_NOCACHE_NONSER 2 #define IOMAP_NO_COPYBACK 3 -/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ -#define kern_addr_valid(addr) (1) - void do_page_fault(struct pt_regs *regs, unsigned long address, unsigned long error_code); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index b26b77673c2cc307882a1b029ac1f0d16ded1df1..15cb692b0a0976f615dc1a65ee48cb2112f1a5d2 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -46,7 +46,7 @@ config MIPS select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL - select GUP_GET_PTE_LOW_HIGH if CPU_MIPS32 && PHYS_ADDR_T_64BIT + select GUP_GET_PXX_LOW_HIGH if CPU_MIPS32 && PHYS_ADDR_T_64BIT select HAVE_ARCH_COMPILER_H select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_KGDB if MIPS_FP_SUPPORT diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h index 25854abc95f8a2cdfd9d78e57309e7fea6bc6f76..72e775bf31e6008b9392e5bfc5921ec553259049 100644 --- a/arch/mips/include/asm/octeon/cvmx.h +++ b/arch/mips/include/asm/octeon/cvmx.h @@ -154,13 +154,13 @@ static inline uint64_t cvmx_build_bits(uint64_t high_bit, /** * Convert a memory pointer (void*) into a hardware compatible - * memory address (uint64_t). Octeon hardware widgets don't + * memory address (phys_addr_t). Octeon hardware widgets don't * understand logical addresses. * * @ptr: C style memory pointer * Returns Hardware physical address */ -static inline uint64_t cvmx_ptr_to_phys(void *ptr) +static inline phys_addr_t cvmx_ptr_to_phys(void *ptr) { if (sizeof(void *) == 8) { /* diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 796035784c73389c1e10fcfb3b14e3d0403bf206..f72e737dda214fd1dcbbe70835e05c515a2f1b05 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -33,7 +33,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, /* * Initialize a new pmd table with invalid pointers. */ -extern void pmd_init(unsigned long page, unsigned long pagetable); +extern void pmd_init(void *addr); #ifndef __PAGETABLE_PMD_FOLDED @@ -44,9 +44,9 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) #endif /* - * Initialize a new pgd / pmd table with invalid pointers. + * Initialize a new pgd table with invalid pointers. 
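+ * (pmd_init() and pud_init() now derive the invalid-table pointer
+ * internally, so callers pass only the new table's address)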
*/ -extern void pgd_init(unsigned long page); +extern void pgd_init(void *addr); extern pgd_t *pgd_alloc(struct mm_struct *mm); static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) @@ -77,7 +77,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) } pmd = (pmd_t *)page_address(pg); - pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table); + pmd_init(pmd); return pmd; } @@ -93,7 +93,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_TABLE_ORDER); if (pud) - pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table); + pud_init(pud); return pud; } diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index 436c29d698fa3f52543f2bf8f4c0c1f68ae4c60a..c6310192b654dd53e56046b485fa0801e4b57826 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -313,11 +313,11 @@ static inline pmd_t *pud_pgtable(pud_t pud) #endif /* - * Initialize a new pgd / pmd table with invalid pointers. + * Initialize a new pgd / pud / pmd table with invalid pointers. */ -extern void pgd_init(unsigned long page); -extern void pud_init(unsigned long page, unsigned long pagetable); -extern void pmd_init(unsigned long page, unsigned long pagetable); +extern void pgd_init(void *addr); +extern void pud_init(void *addr); +extern void pmd_init(void *addr); /* * Non-present pages: high 40 bits are offset, next 8 bits type, diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 4678627673dfea0ef7369382cd8021229ebc52d4..a68c0b01d8cdcefb80eadf29eebabc15b55900c6 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -550,8 +550,6 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, __update_tlb(vma, address, pte); } -#define kern_addr_valid(addr) (1) - /* * Allow physical addresses to be fixed up to help 36-bit peripherals. */ diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 74cd64a24d059af6a13627dc1a884b653f5f2b90..e8c08988ed377dabebc0dbf89d343d77aced3a1d 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -122,8 +122,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache, if (!cache) return NULL; new_pmd = kvm_mmu_memory_cache_alloc(cache); - pmd_init((unsigned long)new_pmd, - (unsigned long)invalid_pte_table); + pmd_init(new_pmd); pud_populate(NULL, pud, new_pmd); } pmd = pmd_offset(pud, addr); diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c index 61891af250197bd721a377045bf80ec7296799ee..f57fb69472f847fc0b2eb19ed54a00abca02596b 100644 --- a/arch/mips/mm/pgtable-32.c +++ b/arch/mips/mm/pgtable-32.c @@ -13,9 +13,9 @@ #include #include -void pgd_init(unsigned long page) +void pgd_init(void *addr) { - unsigned long *p = (unsigned long *) page; + unsigned long *p = (unsigned long *)addr; int i; for (i = 0; i < USER_PTRS_PER_PGD; i+=8) { @@ -61,9 +61,8 @@ void __init pagetable_init(void) #endif /* Initialize the entire pgd. 
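 * (this covers both the user half and the kernel half of swapper_pg_dir)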
*/ - pgd_init((unsigned long)swapper_pg_dir); - pgd_init((unsigned long)swapper_pg_dir - + sizeof(pgd_t) * USER_PTRS_PER_PGD); + pgd_init(swapper_pg_dir); + pgd_init(&swapper_pg_dir[USER_PTRS_PER_PGD]); pgd_base = swapper_pg_dir; diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c index 7536f7804c440cc6c8b1779d0b49dd2e715e66cc..b4386a0e2ef8718a1300fc38b2c92ecea0c3cc5a 100644 --- a/arch/mips/mm/pgtable-64.c +++ b/arch/mips/mm/pgtable-64.c @@ -13,7 +13,7 @@ #include #include -void pgd_init(unsigned long page) +void pgd_init(void *addr) { unsigned long *p, *end; unsigned long entry; @@ -26,7 +26,7 @@ void pgd_init(unsigned long page) entry = (unsigned long)invalid_pte_table; #endif - p = (unsigned long *) page; + p = (unsigned long *) addr; end = p + PTRS_PER_PGD; do { @@ -43,11 +43,12 @@ void pgd_init(unsigned long page) } #ifndef __PAGETABLE_PMD_FOLDED -void pmd_init(unsigned long addr, unsigned long pagetable) +void pmd_init(void *addr) { unsigned long *p, *end; + unsigned long pagetable = (unsigned long)invalid_pte_table; - p = (unsigned long *) addr; + p = (unsigned long *)addr; end = p + PTRS_PER_PMD; do { @@ -66,9 +67,10 @@ EXPORT_SYMBOL_GPL(pmd_init); #endif #ifndef __PAGETABLE_PUD_FOLDED -void pud_init(unsigned long addr, unsigned long pagetable) +void pud_init(void *addr) { unsigned long *p, *end; + unsigned long pagetable = (unsigned long)invalid_pmd_table; p = (unsigned long *)addr; end = p + PTRS_PER_PUD; @@ -108,12 +110,12 @@ void __init pagetable_init(void) pgd_t *pgd_base; /* Initialize the entire pgd. */ - pgd_init((unsigned long)swapper_pg_dir); + pgd_init(swapper_pg_dir); #ifndef __PAGETABLE_PUD_FOLDED - pud_init((unsigned long)invalid_pud_table, (unsigned long)invalid_pmd_table); + pud_init(invalid_pud_table); #endif #ifndef __PAGETABLE_PMD_FOLDED - pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table); + pmd_init(invalid_pmd_table); #endif pgd_base = swapper_pg_dir; /* diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c index 3b7590660a04b19244c32d935af67d895dac85bc..b13314be5d0e571b0a34351ea19d8394f0119b0e 100644 --- a/arch/mips/mm/pgtable.c +++ b/arch/mips/mm/pgtable.c @@ -15,7 +15,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER); if (ret) { init = pgd_offset(&init_mm, 0UL); - pgd_init((unsigned long)ret); + pgd_init(ret); memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); } diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h index 3c4ae74d57985f3e71209b3da3ff6e8fc8b8d235..ecd1657bb2cede78b1381b5061101953b8aa7b3d 100644 --- a/arch/nios2/include/asm/pgalloc.h +++ b/arch/nios2/include/asm/pgalloc.h @@ -26,11 +26,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, set_pmd(pmd, __pmd((unsigned long)page_address(pte))); } -/* - * Initialize a new pmd table with invalid pointers. 
- */ -extern void pmd_init(unsigned long page, unsigned long pagetable); - extern pgd_t *pgd_alloc(struct mm_struct *mm); #define __pte_free_tlb(tlb, pte, addr) \ diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h index b3d45e815295f8a833954efe381a7f565de14f53..ab793bc517f5cc8ff115f3659c28ce54d713333d 100644 --- a/arch/nios2/include/asm/pgtable.h +++ b/arch/nios2/include/asm/pgtable.h @@ -249,8 +249,6 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) -#define kern_addr_valid(addr) (1) - extern void __init paging_init(void); extern void __init mmu_init(void); diff --git a/arch/nios2/include/asm/processor.h b/arch/nios2/include/asm/processor.h index 8916d93d5c2d0ba37dae84bf47979c3a21131838..eb44130364a9a16e4d6e32a64f1e99e8bff4d501 100644 --- a/arch/nios2/include/asm/processor.h +++ b/arch/nios2/include/asm/processor.h @@ -50,9 +50,6 @@ struct thread_struct { unsigned long kpsr; }; -#define INIT_MMAP \ - { &init_mm, (0), (0), __pgprot(0x0), VM_READ | VM_WRITE | VM_EXEC } - # define INIT_THREAD { \ .kregs = NULL, \ .ksp = 0, \ diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h index dcae8aea132fdc7d5d7853c7b3c083c9f2725928..6477c17b3062d83c88ddc7765a497d07f763ec07 100644 --- a/arch/openrisc/include/asm/pgtable.h +++ b/arch/openrisc/include/asm/pgtable.h @@ -395,8 +395,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#define kern_addr_valid(addr) (1) - typedef pte_t *pte_addr_t; #endif /* __ASSEMBLY__ */ diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index fcbcf9a96c1112d39de10868cab3ac544a5b161e..40793bef8429fc74b680ae54da3f4174f074f706 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -37,7 +37,7 @@ int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info, int pdc_system_map_find_addrs(struct pdc_system_map_addr_info *pdc_addr_info, long mod_index, long addr_index); int pdc_model_info(struct pdc_model *model); -int pdc_model_sysmodel(char *name); +int pdc_model_sysmodel(unsigned int os_id, char *name); int pdc_model_cpuid(unsigned long *cpu_id); int pdc_model_versions(unsigned long *versions, int id); int pdc_model_capabilities(unsigned long *capabilities); diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index ecd028854469856e0493e191e10b8738cf4fefed..ea357430aafeb42733fe3b01bfa158328eb16e51 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -23,21 +23,6 @@ #include #include -/* - * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel - * memory. For the return value to be meaningful, ADDR must be >= - * PAGE_OFFSET. This operation can be relatively expensive (e.g., - * require a hash-, or multi-level tree-lookup or something of that - * sort) but it guarantees to return TRUE only if accessing the page - * at that address does not cause an error. Note that there may be - * addresses for which kern_addr_valid() returns FALSE even though an - * access would not cause an error (e.g., this is typically true for - * memory mapped I/O regions. - * - * XXX Need to implement this for parisc. - */ -#define kern_addr_valid(addr) (1) - /* This is for the serialization of PxTLB broadcasts. 
At least on the N class
 * systems, only one PxTLB inter processor broadcast can be active at any one time on the Merced bus. */
@@ -166,8 +151,8 @@ extern void __update_cache(pte_t pte);
 /* This calculates the number of initial pages we need for the initial
 * page tables */
-#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
-# define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
+#if (KERNEL_INITIAL_ORDER) >= (PLD_SHIFT + BITS_PER_PTE)
+# define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PLD_SHIFT - BITS_PER_PTE))
 #else
 # define PT_INITIAL (1) /* all initial PTEs fit into one page */
 #endif
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index 22133a6a506efba842e7286deed19c4fe9c1e6ce..68c44f99bc931bf73f3d6937cf7be082e71a0d7f 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -49,6 +49,19 @@
 #define MADV_DONTFORK 10 /* don't inherit across fork */
 #define MADV_DOFORK 11 /* do inherit across fork */
+#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
+#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
+
+#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
+
+#define MADV_DONTDUMP 16 /* Explicitly exclude from the core dump,
+ overrides the coredump filter bits */
+#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */
+
+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+
 #define MADV_COLD 20 /* deactivate these pages */
 #define MADV_PAGEOUT 21 /* reclaim these pages */
@@ -57,27 +70,13 @@
 #define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */
-#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
-#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
-
-#define MADV_HUGEPAGE 67 /* Worth backing with hugepages */
-#define MADV_NOHUGEPAGE 68 /* Not worth backing with hugepages */
-
-#define MADV_DONTDUMP 69 /* Explicity exclude from the core dump,
- overrides the coredump filter bits */
-#define MADV_DODUMP 70 /* Clear the MADV_NODUMP flag */
-
-#define MADV_WIPEONFORK 71 /* Zero memory on fork, child only */
-#define MADV_KEEPONFORK 72 /* Undo MADV_WIPEONFORK */
-
-#define MADV_COLLAPSE 73 /* Synchronous hugepage collapse */
+#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
 #define MADV_HWPOISON 100 /* poison a page for testing */
 #define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */
 /* compatibility flags */
 #define MAP_FILE 0
-#define MAP_VARIABLE 0
 #define PKEY_DISABLE_ACCESS 0x1
 #define PKEY_DISABLE_WRITE 0x2
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 6a7e315bcc2e5b8215dd3d8769a5aa736d03c099..4dfe1f49c5c8b2ffd884b0b806afb7b7b77bb89f 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -74,8 +74,8 @@ static DEFINE_SPINLOCK(pdc_lock);
 #endif
-extern unsigned long pdc_result[NUM_PDC_RESULT];
-extern unsigned long pdc_result2[NUM_PDC_RESULT];
+unsigned long pdc_result[NUM_PDC_RESULT] __aligned(8);
+unsigned long pdc_result2[NUM_PDC_RESULT] __aligned(8);
 #ifdef CONFIG_64BIT
 #define WIDE_FIRMWARE 0x1
@@ -527,14 +527,14 @@ int pdc_model_info(struct pdc_model *model)
 * Using OS_ID_HPUX will return the equivalent of the 'modelname' command
 * on HP/UX.
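 * Using OS_ID_MPEXL returns the MPE/iX model name instead (see the two
 * callers in collect_boot_cpu_data()).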
*/ -int pdc_model_sysmodel(char *name) +int pdc_model_sysmodel(unsigned int os_id, char *name) { int retval; unsigned long flags; spin_lock_irqsave(&pdc_lock, flags); retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_SYSMODEL, __pa(pdc_result), - OS_ID_HPUX, __pa(name)); + os_id, __pa(name)); convert_to_wide(pdc_result); if (retval == PDC_OK) { @@ -1288,9 +1288,8 @@ void pdc_io_reset_devices(void) #endif /* defined(BOOTLOADER) */ -/* locked by pdc_console_lock */ -static int __attribute__((aligned(8))) iodc_retbuf[32]; -static char __attribute__((aligned(64))) iodc_dbuf[4096]; +/* locked by pdc_lock */ +static char iodc_dbuf[4096] __page_aligned_bss; /** * pdc_iodc_print - Console print using IODC. @@ -1307,6 +1306,9 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) unsigned int i; unsigned long flags; + count = min_t(unsigned int, count, sizeof(iodc_dbuf)); + + spin_lock_irqsave(&pdc_lock, flags); for (i = 0; i < count;) { switch(str[i]) { case '\n': @@ -1322,12 +1324,11 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) } print: - spin_lock_irqsave(&pdc_lock, flags); - real32_call(PAGE0->mem_cons.iodc_io, - (unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT, - PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers), - __pa(iodc_retbuf), 0, __pa(iodc_dbuf), i, 0); - spin_unlock_irqrestore(&pdc_lock, flags); + real32_call(PAGE0->mem_cons.iodc_io, + (unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT, + PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers), + __pa(pdc_result), 0, __pa(iodc_dbuf), i, 0); + spin_unlock_irqrestore(&pdc_lock, flags); return i; } @@ -1354,10 +1355,11 @@ int pdc_iodc_getc(void) real32_call(PAGE0->mem_kbd.iodc_io, (unsigned long)PAGE0->mem_kbd.hpa, ENTRY_IO_CIN, PAGE0->mem_kbd.spa, __pa(PAGE0->mem_kbd.dp.layers), - __pa(iodc_retbuf), 0, __pa(iodc_dbuf), 1, 0); + __pa(pdc_result), 0, __pa(iodc_dbuf), 1, 0); ch = *iodc_dbuf; - status = *iodc_retbuf; + /* like convert_to_wide() but for first return value only: */ + status = *(int *)&pdc_result; spin_unlock_irqrestore(&pdc_lock, flags); if (status == 0) diff --git a/arch/parisc/kernel/kgdb.c b/arch/parisc/kernel/kgdb.c index ab7620f695be1143ff27b3111922ae974581f71c..b16fa9bac5f44ccc27d2ba7d0bef2aa4090e98fb 100644 --- a/arch/parisc/kernel/kgdb.c +++ b/arch/parisc/kernel/kgdb.c @@ -208,23 +208,3 @@ int kgdb_arch_handle_exception(int trap, int signo, } return -1; } - -/* KGDB console driver which uses PDC to read chars from keyboard */ - -static void kgdb_pdc_write_char(u8 chr) -{ - /* no need to print char. kgdb will do it. 
*/ -} - -static struct kgdb_io kgdb_pdc_io_ops = { - .name = "kgdb_pdc", - .read_char = pdc_iodc_getc, - .write_char = kgdb_pdc_write_char, -}; - -static int __init kgdb_pdc_init(void) -{ - kgdb_register_io_module(&kgdb_pdc_io_ops); - return 0; -} -early_initcall(kgdb_pdc_init); diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c index 7d0989f523d031bf74888e4ab96ed22b291ea457..cf3bf82323746864e5d76b87051bf804b1f7e08f 100644 --- a/arch/parisc/kernel/pdc_cons.c +++ b/arch/parisc/kernel/pdc_cons.c @@ -12,37 +12,27 @@ #include /* for PAGE0 */ #include /* for iodc_call() proto and friends */ -static DEFINE_SPINLOCK(pdc_console_lock); - static void pdc_console_write(struct console *co, const char *s, unsigned count) { int i = 0; - unsigned long flags; - spin_lock_irqsave(&pdc_console_lock, flags); do { i += pdc_iodc_print(s + i, count - i); } while (i < count); - spin_unlock_irqrestore(&pdc_console_lock, flags); } #ifdef CONFIG_KGDB static int kgdb_pdc_read_char(void) { - int c; - unsigned long flags; - - spin_lock_irqsave(&pdc_console_lock, flags); - c = pdc_iodc_getc(); - spin_unlock_irqrestore(&pdc_console_lock, flags); + int c = pdc_iodc_getc(); return (c <= 0) ? NO_POLL_CHAR : c; } static void kgdb_pdc_write_char(u8 chr) { - if (PAGE0->mem_cons.cl_class != CL_DUPLEX) - pdc_console_write(NULL, &chr, 1); + /* no need to print char as it's shown on standard console */ + /* pdc_iodc_print(&chr, 1); */ } static struct kgdb_io kgdb_pdc_io_ops = { diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c index e391b175f5eceed27086eaa51d9f1d3fad1b3aad..80943a00e2459425c9988562b3d684960317917f 100644 --- a/arch/parisc/kernel/pdt.c +++ b/arch/parisc/kernel/pdt.c @@ -18,8 +18,7 @@ #include #include #include -#include -#include +#include #include #include @@ -232,7 +231,7 @@ void __init pdc_pdt_init(void) /* mark memory page bad */ memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE); - num_poisoned_pages_inc(); + num_poisoned_pages_inc(addr >> PAGE_SHIFT); } } diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index dddaaa6e7a825d445735b0b63fafe9cce5f6f20f..ba07e760d3c762f1251c3d23b865ef9875723ea1 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -272,10 +272,15 @@ void __init collect_boot_cpu_data(void) printk(KERN_INFO "capabilities 0x%lx\n", boot_cpu_data.pdc.capabilities); - if (pdc_model_sysmodel(boot_cpu_data.pdc.sys_model_name) == PDC_OK) - printk(KERN_INFO "model %s\n", + if (pdc_model_sysmodel(OS_ID_HPUX, boot_cpu_data.pdc.sys_model_name) == PDC_OK) + pr_info("HP-UX model name: %s\n", boot_cpu_data.pdc.sys_model_name); + serial_no[0] = 0; + if (pdc_model_sysmodel(OS_ID_MPEXL, serial_no) == PDC_OK && + serial_no[0]) + pr_info("MPE/iX model name: %s\n", serial_no); + dump_stack_set_arch_desc("%s", boot_cpu_data.pdc.sys_model_name); boot_cpu_data.hversion = boot_cpu_data.pdc.model.hversion; diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S index 2b16d8d6598f1d6015795a4455a24a4c039d0108..4dc12c4c098096a5bbff866f81eb6d08c0b6276f 100644 --- a/arch/parisc/kernel/real2.S +++ b/arch/parisc/kernel/real2.S @@ -15,28 +15,15 @@ #include - - .section .bss - - .export pdc_result - .export pdc_result2 - .align 8 -pdc_result: - .block ASM_PDC_RESULT_SIZE -pdc_result2: - .block ASM_PDC_RESULT_SIZE - .export real_stack - .export real32_stack .export real64_stack - .align 64 + __PAGE_ALIGNED_BSS real_stack: -real32_stack: real64_stack: .block 8192 #define N_SAVED_REGS 9 - + .section .bss save_cr_space: 
.block REG_SZ * N_SAVED_REGS save_cr_end: diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 375f38d6e1a4de1b3d73d330facb9c484ba84427..0797db617962b629eb5a5cff00bfccfc1d68c727 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -50,15 +50,15 @@ void __init setup_cmdline(char **cmdline_p) extern unsigned int boot_args[]; char *p; - /* Collect stuff passed in from the boot loader */ + *cmdline_p = command_line; /* boot_args[0] is free-mem start, boot_args[1] is ptr to command line */ - if (boot_args[0] < 64) { - /* called from hpux boot loader */ - boot_command_line[0] = '\0'; - } else { - strscpy(boot_command_line, (char *)__va(boot_args[1]), - COMMAND_LINE_SIZE); + if (boot_args[0] < 64) + return; /* return if called from hpux boot loader */ + + /* Collect stuff passed in from the boot loader */ + strscpy(boot_command_line, (char *)__va(boot_args[1]), + COMMAND_LINE_SIZE); /* autodetect console type (if not done by palo yet) */ p = boot_command_line; @@ -75,16 +75,14 @@ void __init setup_cmdline(char **cmdline_p) strlcat(p, " earlycon=pdc", COMMAND_LINE_SIZE); #ifdef CONFIG_BLK_DEV_INITRD - if (boot_args[2] != 0) /* did palo pass us a ramdisk? */ - { - initrd_start = (unsigned long)__va(boot_args[2]); - initrd_end = (unsigned long)__va(boot_args[3]); - } -#endif + /* did palo pass us a ramdisk? */ + if (boot_args[2] != 0) { + initrd_start = (unsigned long)__va(boot_args[2]); + initrd_end = (unsigned long)__va(boot_args[3]); } +#endif strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE); - *cmdline_p = command_line; } #ifdef CONFIG_PA11 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 848b0702005d6178bceeed876a5ee3ce24b8d295..09a34b07f02e646db4e836eb8bb8cddbacbec099 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -465,3 +465,31 @@ asmlinkage long parisc_inotify_init1(int flags) flags = FIX_O_NONBLOCK(flags); return sys_inotify_init1(flags); } + +/* + * madvise() wrapper + * + * Up to kernel v6.1 parisc has different values than all other + * platforms for the MADV_xxx flags listed below. + * To keep binary compatibility with existing userspace programs + * translate the former values to the new values. 
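+ * For example, an old binary calling madvise(addr, len, 65) (the former
+ * MADV_MERGEABLE) now reaches sys_madvise() with the generic value 12.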
+ * + * XXX: Remove this wrapper in year 2025 (or later) + */ + +asmlinkage notrace long parisc_madvise(unsigned long start, size_t len_in, int behavior) +{ + switch (behavior) { + case 65: behavior = MADV_MERGEABLE; break; + case 66: behavior = MADV_UNMERGEABLE; break; + case 67: behavior = MADV_HUGEPAGE; break; + case 68: behavior = MADV_NOHUGEPAGE; break; + case 69: behavior = MADV_DONTDUMP; break; + case 70: behavior = MADV_DODUMP; break; + case 71: behavior = MADV_WIPEONFORK; break; + case 72: behavior = MADV_KEEPONFORK; break; + case 73: behavior = MADV_COLLAPSE; break; + } + + return sys_madvise(start, len_in, behavior); +} diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index 8a99c998da9bb7dc2fcb03c0845155242a172bae..0e42fceb2d5e2d574b6ab4e69f69bf891ab035c0 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -131,7 +131,7 @@ 116 common sysinfo sys_sysinfo compat_sys_sysinfo 117 common shutdown sys_shutdown 118 common fsync sys_fsync -119 common madvise sys_madvise +119 common madvise parisc_madvise 120 common clone sys_clone_wrapper 121 common setdomainname sys_setdomainname 122 common sendfile sys_sendfile compat_sys_sendfile diff --git a/arch/parisc/kernel/vdso32/Makefile b/arch/parisc/kernel/vdso32/Makefile index 85b1c6d261d128586796db698cd8ab73ab4a40a0..4459a48d2303350b7ca8ec12e315c7a201193863 100644 --- a/arch/parisc/kernel/vdso32/Makefile +++ b/arch/parisc/kernel/vdso32/Makefile @@ -26,7 +26,7 @@ $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so FORCE # Force dependency (incbin is bad) # link rule for the .so file, .lds has to be first -$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) +$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE $(call if_changed,vdso32ld) # assembly rules for the .S files @@ -38,7 +38,7 @@ $(obj-cvdso32): %.o: %.c FORCE # actual build commands quiet_cmd_vdso32ld = VDSO32L $@ - cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@ + cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@ quiet_cmd_vdso32as = VDSO32A $@ cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< quiet_cmd_vdso32cc = VDSO32C $@ diff --git a/arch/parisc/kernel/vdso64/Makefile b/arch/parisc/kernel/vdso64/Makefile index a30f5ec5eb4bfab62e192779784548305b9146b6..f3d6045793f4c88c1ab395f494d6334f4edb6b7b 100644 --- a/arch/parisc/kernel/vdso64/Makefile +++ b/arch/parisc/kernel/vdso64/Makefile @@ -26,7 +26,7 @@ $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so FORCE # Force dependency (incbin is bad) # link rule for the .so file, .lds has to be first -$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) +$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) FORCE $(call if_changed,vdso64ld) # assembly rules for the .S files @@ -35,7 +35,7 @@ $(obj-vdso64): %.o: %.S FORCE # actual build commands quiet_cmd_vdso64ld = VDSO64L $@ - cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ + cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@ quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 2ca5418457ed2102b56aef6d742530c1df1c7a63..b8c4ac56bddc52468dee8553e9586c238b722a22 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -1,6 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 source "arch/powerpc/platforms/Kconfig.cputype" +config CC_HAS_ELFV2 + def_bool PPC64 && $(cc-option, -mabi=elfv2) + config 
32BIT bool default y if PPC32 @@ -96,7 +99,7 @@ config LOCKDEP_SUPPORT config GENERIC_LOCKBREAK bool default y - depends on SMP && PREEMPTION + depends on SMP && PREEMPTION && !PPC_QUEUED_SPINLOCKS config GENERIC_HWEIGHT bool @@ -155,7 +158,6 @@ config PPC select ARCH_USE_CMPXCHG_LOCKREF if PPC64 select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS - select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WANT_IRQS_OFF_ACTIVATE_MM @@ -239,6 +241,8 @@ config PPC select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S) select HAVE_OPTPROBES + select HAVE_OBJTOOL if PPC32 || MPROFILE_KERNEL + select HAVE_OBJTOOL_MCOUNT if HAVE_OBJTOOL select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS_NMI if PPC64 select HAVE_PERF_REGS @@ -294,6 +298,9 @@ config PPC_BARRIER_NOSPEC default y depends on PPC_BOOK3S_64 || PPC_E500 +config PPC_HAS_LBARX_LHARX + bool + config EARLY_PRINTK bool default y @@ -529,6 +536,15 @@ config HOTPLUG_CPU Say N if you are unsure. +config INTERRUPT_SANITIZE_REGISTERS + bool "Clear gprs on interrupt arrival" + depends on PPC64 && ARCH_HAS_SYSCALL_WRAPPER + default PPC_BOOK3E_64 || PPC_PSERIES || PPC_POWERNV + help + Reduce the influence of user register state on interrupt handlers and + syscalls through clearing user state from registers before handling + the exception. + config PPC_QUEUED_SPINLOCKS bool "Queued spinlocks" if EXPERT depends on SMP @@ -583,6 +599,24 @@ config KEXEC_FILE config ARCH_HAS_KEXEC_PURGATORY def_bool KEXEC_FILE +config PPC64_BIG_ENDIAN_ELF_ABI_V2 + bool "Build big-endian kernel using ELF ABI V2 (EXPERIMENTAL)" + depends on PPC64 && CPU_BIG_ENDIAN + depends on CC_HAS_ELFV2 + depends on LD_IS_BFD && LD_VERSION >= 22400 + default n + help + This builds the kernel image using the "Power Architecture 64-Bit ELF + V2 ABI Specification", which has a reduced stack overhead and faster + function calls. This internal kernel ABI option does not affect + userspace compatibility. + + The V2 ABI is standard for 64-bit little-endian, but for big-endian + it is less well tested by kernel and toolchain. However some distros + build userspace this way, and it can produce a functioning kernel. + + This requires GCC and binutils 2.24 or newer. + config RELOCATABLE bool "Build a relocatable kernel" depends on PPC64 || (FLATMEM && (44x || PPC_85xx)) @@ -1012,19 +1046,6 @@ config PPC_SECVAR_SYSFS read/write operations on these variables. Say Y if you have secure boot enabled and want to expose variables to userspace. -config PPC_RTAS_FILTER - bool "Enable filtering of RTAS syscalls" - default y - depends on PPC_RTAS - help - The RTAS syscall API has security issues that could be used to - compromise system integrity. This option enforces restrictions on the - RTAS calls and arguments passed by userspace programs to mitigate - these issues. - - Say Y unless you know what you are doing and the filter is causing - problems for you. 
- endmenu config ISA_DMA_API diff --git a/arch/powerpc/boot/dts/fsl/t1024qds.dts b/arch/powerpc/boot/dts/fsl/t1024qds.dts index d6858b7cd93fb5d56b056a9aa3e320277d673d51..9ea7942f914e14ee8258cb84306a9f485054a608 100644 --- a/arch/powerpc/boot/dts/fsl/t1024qds.dts +++ b/arch/powerpc/boot/dts/fsl/t1024qds.dts @@ -151,7 +151,7 @@ }; i2c@118000 { - pca9547@77 { + i2c-mux@77 { compatible = "nxp,pca9547"; reg = <0x77>; #address-cells = <1>; diff --git a/arch/powerpc/boot/dts/fsl/t1024rdb.dts b/arch/powerpc/boot/dts/fsl/t1024rdb.dts index dbcd31cc35dcd2bb4522d562b8474a215fad9710..270aaf631f2ab0fa969316142e09bf05cb06075b 100644 --- a/arch/powerpc/boot/dts/fsl/t1024rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t1024rdb.dts @@ -165,7 +165,7 @@ }; i2c@118100 { - pca9546@77 { + i2c-mux@77 { compatible = "nxp,pca9546"; reg = <0x77>; #address-cells = <1>; diff --git a/arch/powerpc/boot/dts/fsl/t104xqds.dtsi b/arch/powerpc/boot/dts/fsl/t104xqds.dtsi index 615479732252404f43438e786d21f5e274173eb2..1c329f076f64f2dc47ec9498dd8ce645baa08848 100644 --- a/arch/powerpc/boot/dts/fsl/t104xqds.dtsi +++ b/arch/powerpc/boot/dts/fsl/t104xqds.dtsi @@ -268,7 +268,7 @@ }; i2c@118000 { - pca9547@77 { + i2c-mux@77 { compatible = "nxp,pca9547"; reg = <0x77>; }; diff --git a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi index bfe1ed5be337492ee6146dc1b36ac4f746fb812a..fc7bec5dcb90fc2ddecd93ded4ec3ea90275103c 100644 --- a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi +++ b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi @@ -128,7 +128,7 @@ }; i2c@118100 { - pca9546@77 { + i2c-mux@77 { compatible = "nxp,pca9546"; reg = <0x77>; #address-cells = <1>; diff --git a/arch/powerpc/boot/dts/fsl/t208xqds.dtsi b/arch/powerpc/boot/dts/fsl/t208xqds.dtsi index db4139999b28ebf48ceb5b46a8e177430e974bae..962c99941645b965b060f797892731f2b88aebca 100644 --- a/arch/powerpc/boot/dts/fsl/t208xqds.dtsi +++ b/arch/powerpc/boot/dts/fsl/t208xqds.dtsi @@ -135,7 +135,7 @@ }; i2c@118000 { - pca9547@77 { + i2c-mux@77 { compatible = "nxp,pca9547"; reg = <0x77>; #address-cells = <1>; diff --git a/arch/powerpc/boot/dts/fsl/t208xrdb.dtsi b/arch/powerpc/boot/dts/fsl/t208xrdb.dtsi index ff87e67c70da4a292e353b57b824c5319ba3f5f1..ecc3e8c7394cad20ec5b3b9e93f7b171101867f9 100644 --- a/arch/powerpc/boot/dts/fsl/t208xrdb.dtsi +++ b/arch/powerpc/boot/dts/fsl/t208xrdb.dtsi @@ -138,7 +138,7 @@ }; i2c@118100 { - pca9546@77 { + i2c-mux@77 { compatible = "nxp,pca9546"; reg = <0x77>; }; diff --git a/arch/powerpc/boot/dts/microwatt.dts b/arch/powerpc/boot/dts/microwatt.dts index b69db1d275cd60f1db0950b578e6508a5d4b8d56..269e930b3b0b14a2bf6a76261062294c1d003c72 100644 --- a/arch/powerpc/boot/dts/microwatt.dts +++ b/arch/powerpc/boot/dts/microwatt.dts @@ -21,6 +21,14 @@ reg = <0x00000000 0x00000000 0x00000000 0x10000000>; }; + clocks { + sys_clk: litex_sys_clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <100000000>; + }; + }; + cpus { #size-cells = <0x00>; #address-cells = <0x01>; @@ -141,6 +149,20 @@ litex,slot-size = <0x800>; interrupts = <0x11 0x1>; }; + + mmc@8040000 { + compatible = "litex,mmc"; + reg = <0x8042800 0x800 + 0x8041000 0x800 + 0x8040800 0x800 + 0x8042000 0x800 + 0x8041800 0x800>; + reg-names = "phy", "core", "reader", "writer", "irq"; + bus-width = <4>; + interrupts = <0x13 1>; + cap-sd-highspeed; + clocks = <&sys_clk>; + }; }; chosen { diff --git a/arch/powerpc/boot/dts/turris1x.dts b/arch/powerpc/boot/dts/turris1x.dts index 045af668e92847844c2431ef6532da430289adcf..e9cda34a140e0fb282b02f22d1b3b81d4d5376f1 100644 
--- a/arch/powerpc/boot/dts/turris1x.dts +++ b/arch/powerpc/boot/dts/turris1x.dts @@ -69,6 +69,20 @@ interrupt-parent = <&gpio>; interrupts = <12 IRQ_TYPE_LEVEL_LOW>, /* GPIO12 - ALERT pin */ <13 IRQ_TYPE_LEVEL_LOW>; /* GPIO13 - CRIT pin */ + #address-cells = <1>; + #size-cells = <0>; + + /* Local temperature sensor (SA56004ED internal) */ + channel@0 { + reg = <0>; + label = "board"; + }; + + /* Remote temperature sensor (D+/D- connected to P2020 CPU Temperature Diode) */ + channel@1 { + reg = <1>; + label = "cpu"; + }; }; /* DDR3 SPD/EEPROM */ diff --git a/arch/powerpc/boot/dts/warp.dts b/arch/powerpc/boot/dts/warp.dts index b4f32740870e0c4d511ea574a6b23b2b0159d65a..aa62d08e97c2f12da5d80f3332f94bf57a48eef6 100644 --- a/arch/powerpc/boot/dts/warp.dts +++ b/arch/powerpc/boot/dts/warp.dts @@ -258,14 +258,12 @@ }; power-leds { - compatible = "gpio-leds"; + compatible = "warp-power-leds"; green { gpios = <&GPIO1 0 0>; - default-state = "keep"; }; red { gpios = <&GPIO1 1 0>; - default-state = "keep"; }; }; diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper index 5bdd4dd20bbb18e5d360b7ad12b14e0c12494e3b..af04cea82b941f8174e5e9a5c7fb01e4b9ac55b9 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper @@ -215,6 +215,11 @@ ld_version() }' } +ld_is_lld() +{ + ${CROSS}ld -V 2>&1 | grep -q LLD +} + # Do not include PT_INTERP segment when linking pie. Non-pie linking # just ignores this option. LD_VERSION=$(${CROSS}ld --version | ld_version) @@ -223,6 +228,14 @@ if [ "$LD_VERSION" -ge "$LD_NO_DL_MIN_VERSION" ] ; then nodl="--no-dynamic-linker" fi +# suppress some warnings in recent ld versions +nowarn="-z noexecstack" +if ! ld_is_lld; then + if [ "$LD_VERSION" -ge "$(echo 2.39 | ld_version)" ]; then + nowarn="$nowarn --no-warn-rwx-segments" + fi +fi + platformo=$object/"$platform".o lds=$object/zImage.lds ext=strip @@ -504,7 +517,7 @@ if [ "$platform" != "miboot" ]; then text_start="-Ttext $link_address" fi #link everything - ${CROSS}ld -m $format -T $lds $text_start $pie $nodl $rodynamic $notext -o "$ofile" $map \ + ${CROSS}ld -m $format -T $lds $text_start $pie $nodl $nowarn $rodynamic $notext -o "$ofile" $map \ $platformo $tmp $object/wrapper.a rm $tmp fi @@ -581,7 +594,7 @@ ps3) # reached, then enter the system reset vector of the partially decompressed # image. No warning is issued. 
rm -f "$odir"/{otheros,otheros-too-big}.bld - size=$(${CROSS}nm --no-sort --radix=d "$ofile" | egrep ' _end$' | cut -d' ' -f1) + size=$(${CROSS}nm --no-sort --radix=d "$ofile" | grep -E ' _end$' | cut -d' ' -f1) bld="otheros.bld" if [ $size -gt $((0x1000000)) ]; then bld="otheros-too-big.bld" diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 115d40be5481f0dc6f034b6104ef67e7fe21cf45..1102582779599bf11e224a3d95f58f8a4ffc978a 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -911,7 +911,6 @@ CONFIG_USB_IDMOUSE=m CONFIG_USB_FTDI_ELAN=m CONFIG_USB_APPLEDISPLAY=m CONFIG_USB_SISUSBVGA=m -CONFIG_USB_SISUSBVGA_CON=y CONFIG_USB_LD=m CONFIG_USB_TRANCEVIBRATOR=m CONFIG_USB_IOWARRIOR=m diff --git a/arch/powerpc/include/asm/asm.h b/arch/powerpc/include/asm/asm.h new file mode 100644 index 0000000000000000000000000000000000000000..86f46b604e9a68e967005d1b1d8400ba0ee8f709 --- /dev/null +++ b/arch/powerpc/include/asm/asm.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_ASM_H +#define _ASM_POWERPC_ASM_H + +#define _ASM_PTR " .long " + +#endif /* _ASM_POWERPC_ASM_H */ diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h index ba1743c52b56d4bd6b9a94631a117dfd784fb699..4be572908124383b6292c88d9c3e0c5e58cb38d8 100644 --- a/arch/powerpc/include/asm/book3s/32/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h @@ -2,6 +2,8 @@ #ifndef _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H #define _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H +#include + #define MMU_NO_CONTEXT (0) /* * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx @@ -74,6 +76,13 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma, { flush_tlb_page(vma, vmaddr); } + +static inline void local_flush_tlb_page_psize(struct mm_struct *mm, + unsigned long vmaddr, int psize) +{ + BUILD_BUG(); +} + static inline void local_flush_tlb_mm(struct mm_struct *mm) { flush_tlb_mm(mm); diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index c436d84226540f60d8e6947454d5eb290d0f4ac9..cb4c67bf45d7129f20fbea56e4cf6681d27c785e 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -401,35 +401,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH #define pmdp_clear_flush_young pmdp_test_and_clear_young -static inline int __pte_write(pte_t pte) -{ - return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE)); -} - -#ifdef CONFIG_NUMA_BALANCING -#define pte_savedwrite pte_savedwrite -static inline bool pte_savedwrite(pte_t pte) -{ - /* - * Saved write ptes are prot none ptes that doesn't have - * privileged bit sit. We mark prot none as one which has - * present and pviliged bit set and RWX cleared. To mark - * protnone which used to have _PAGE_WRITE set we clear - * the privileged bit. 
- */ - return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED)); -} -#else -#define pte_savedwrite pte_savedwrite -static inline bool pte_savedwrite(pte_t pte) -{ - return false; -} -#endif - static inline int pte_write(pte_t pte) { - return __pte_write(pte) || pte_savedwrite(pte); + return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE)); } static inline int pte_read(pte_t pte) @@ -441,24 +415,16 @@ static inline int pte_read(pte_t pte) static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - if (__pte_write(*ptep)) + if (pte_write(*ptep)) pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0); - else if (unlikely(pte_savedwrite(*ptep))) - pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0); } #define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - /* - * We should not find protnone for hugetlb, but this complete the - * interface. - */ - if (__pte_write(*ptep)) + if (pte_write(*ptep)) pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1); - else if (unlikely(pte_savedwrite(*ptep))) - pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1); } #define __HAVE_ARCH_PTEP_GET_AND_CLEAR @@ -535,36 +501,6 @@ static inline int pte_protnone(pte_t pte) return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE | _PAGE_RWX)) == cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE); } - -#define pte_mk_savedwrite pte_mk_savedwrite -static inline pte_t pte_mk_savedwrite(pte_t pte) -{ - /* - * Used by Autonuma subsystem to preserve the write bit - * while marking the pte PROT_NONE. Only allow this - * on PROT_NONE pte - */ - VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) != - cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED)); - return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED)); -} - -#define pte_clear_savedwrite pte_clear_savedwrite -static inline pte_t pte_clear_savedwrite(pte_t pte) -{ - /* - * Used by KSM subsystem to make a protnone pte readonly. 
- */ - VM_BUG_ON(!pte_protnone(pte)); - return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED)); -} -#else -#define pte_clear_savedwrite pte_clear_savedwrite -static inline pte_t pte_clear_savedwrite(pte_t pte) -{ - VM_WARN_ON(1); - return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE)); -} #endif /* CONFIG_NUMA_BALANCING */ static inline bool pte_hw_valid(pte_t pte) @@ -641,8 +577,6 @@ static inline unsigned long pte_pfn(pte_t pte) /* Generic modifiers for PTE bits */ static inline pte_t pte_wrprotect(pte_t pte) { - if (unlikely(pte_savedwrite(pte))) - return pte_clear_savedwrite(pte); return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE)); } @@ -1139,8 +1073,6 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd) #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) -#define pmd_mk_savedwrite(pmd) pte_pmd(pte_mk_savedwrite(pmd_pte(pmd))) -#define pmd_clear_savedwrite(pmd) pte_pmd(pte_clear_savedwrite(pmd_pte(pmd))) #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY #define pmd_soft_dirty(pmd) pte_soft_dirty(pmd_pte(pmd)) @@ -1162,8 +1094,6 @@ static inline int pmd_protnone(pmd_t pmd) #endif /* CONFIG_NUMA_BALANCING */ #define pmd_write(pmd) pte_write(pmd_pte(pmd)) -#define __pmd_write(pmd) __pte_write(pmd_pte(pmd)) -#define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd)) #define pmd_access_permitted pmd_access_permitted static inline bool pmd_access_permitted(pmd_t pmd, bool write) @@ -1241,10 +1171,8 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) { - if (__pmd_write((*pmdp))) + if (pmd_write(*pmdp)) pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0); - else if (unlikely(pmd_savedwrite(*pmdp))) - pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED); } /* diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h index 751921f6db461b3bc18a80a1683c400796d9c3fc..146287d9580f491fea0ef89d20eb1f9afcebed13 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h @@ -65,56 +65,6 @@ extern void flush_hash_range(unsigned long number, int local); extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr, pmd_t *pmdp, unsigned int psize, int ssize, unsigned long flags); -static inline void hash__local_flush_tlb_mm(struct mm_struct *mm) -{ -} - -static inline void hash__flush_tlb_mm(struct mm_struct *mm) -{ -} - -static inline void hash__local_flush_all_mm(struct mm_struct *mm) -{ - /* - * There's no Page Walk Cache for hash, so what is needed is - * the same as flush_tlb_mm(), which doesn't really make sense - * with hash. So the only thing we could do is flush the - * entire LPID! Punt for now, as it's not being used. - */ - WARN_ON_ONCE(1); -} - -static inline void hash__flush_all_mm(struct mm_struct *mm) -{ - /* - * There's no Page Walk Cache for hash, so what is needed is - * the same as flush_tlb_mm(), which doesn't really make sense - * with hash. So the only thing we could do is flush the - * entire LPID! Punt for now, as it's not being used. 
- */ - WARN_ON_ONCE(1); -} - -static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma, - unsigned long vmaddr) -{ -} - -static inline void hash__flush_tlb_page(struct vm_area_struct *vma, - unsigned long vmaddr) -{ -} - -static inline void hash__flush_tlb_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) -{ -} - -static inline void hash__flush_tlb_kernel_range(unsigned long start, - unsigned long end) -{ -} - struct mmu_gather; extern void hash__tlb_flush(struct mmu_gather *tlb); diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h index 67655cd6054563bbdcbd84396921a5f05ff419d4..dd39313242b4d964e9badbaca4a167e5ec56f4f3 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h @@ -47,8 +47,7 @@ static inline void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (radix_enabled()) - return radix__flush_pmd_tlb_range(vma, start, end); - return hash__flush_tlb_range(vma, start, end); + radix__flush_pmd_tlb_range(vma, start, end); } #define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE @@ -57,81 +56,65 @@ static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long end) { if (radix_enabled()) - return radix__flush_hugetlb_tlb_range(vma, start, end); - return hash__flush_tlb_range(vma, start, end); + radix__flush_hugetlb_tlb_range(vma, start, end); } static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (radix_enabled()) - return radix__flush_tlb_range(vma, start, end); - return hash__flush_tlb_range(vma, start, end); + radix__flush_tlb_range(vma, start, end); } static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) { if (radix_enabled()) - return radix__flush_tlb_kernel_range(start, end); - return hash__flush_tlb_kernel_range(start, end); + radix__flush_tlb_kernel_range(start, end); } static inline void local_flush_tlb_mm(struct mm_struct *mm) { if (radix_enabled()) - return radix__local_flush_tlb_mm(mm); - return hash__local_flush_tlb_mm(mm); + radix__local_flush_tlb_mm(mm); } static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { if (radix_enabled()) - return radix__local_flush_tlb_page(vma, vmaddr); - return hash__local_flush_tlb_page(vma, vmaddr); + radix__local_flush_tlb_page(vma, vmaddr); } -static inline void local_flush_all_mm(struct mm_struct *mm) +static inline void local_flush_tlb_page_psize(struct mm_struct *mm, + unsigned long vmaddr, int psize) { if (radix_enabled()) - return radix__local_flush_all_mm(mm); - return hash__local_flush_all_mm(mm); + radix__local_flush_tlb_page_psize(mm, vmaddr, psize); } static inline void tlb_flush(struct mmu_gather *tlb) { if (radix_enabled()) - return radix__tlb_flush(tlb); - return hash__tlb_flush(tlb); + radix__tlb_flush(tlb); } #ifdef CONFIG_SMP static inline void flush_tlb_mm(struct mm_struct *mm) { if (radix_enabled()) - return radix__flush_tlb_mm(mm); - return hash__flush_tlb_mm(mm); + radix__flush_tlb_mm(mm); } static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { if (radix_enabled()) - return radix__flush_tlb_page(vma, vmaddr); - return hash__flush_tlb_page(vma, vmaddr); -} - -static inline void flush_all_mm(struct mm_struct *mm) -{ - if (radix_enabled()) - return radix__flush_all_mm(mm); - return hash__flush_all_mm(mm); + radix__flush_tlb_page(vma, vmaddr); } #else #define 
flush_tlb_mm(mm) local_flush_tlb_mm(mm) #define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr) -#define flush_all_mm(mm) local_flush_all_mm(mm) #endif /* CONFIG_SMP */ #define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index 61a4736355c244448104080e144631a3cb8839b2..ef42adb44aa3ffe7624313cf9fbe576a3c1ba3d2 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -99,7 +99,8 @@ __label__ __label_warn_on; \ \ WARN_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags), __label_warn_on); \ - unreachable(); \ + barrier_before_unreachable(); \ + __builtin_unreachable(); \ \ __label_warn_on: \ break; \ diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h index 05f246c0e36eb3855fd713b69f5681dee0637416..d0ea0571e79ab51b169637058916ae380542d445 100644 --- a/arch/powerpc/include/asm/cmpxchg.h +++ b/arch/powerpc/include/asm/cmpxchg.h @@ -77,10 +77,76 @@ u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new) \ * the previous value stored there. */ +#ifndef CONFIG_PPC_HAS_LBARX_LHARX XCHG_GEN(u8, _local, "memory"); XCHG_GEN(u8, _relaxed, "cc"); XCHG_GEN(u16, _local, "memory"); XCHG_GEN(u16, _relaxed, "cc"); +#else +static __always_inline unsigned long +__xchg_u8_local(volatile void *p, unsigned long val) +{ + unsigned long prev; + + __asm__ __volatile__( +"1: lbarx %0,0,%2 # __xchg_u8_local\n" +" stbcx. %3,0,%2 \n" +" bne- 1b" + : "=&r" (prev), "+m" (*(volatile unsigned char *)p) + : "r" (p), "r" (val) + : "cc", "memory"); + + return prev; +} + +static __always_inline unsigned long +__xchg_u8_relaxed(u8 *p, unsigned long val) +{ + unsigned long prev; + + __asm__ __volatile__( +"1: lbarx %0,0,%2 # __xchg_u8_relaxed\n" +" stbcx. %3,0,%2\n" +" bne- 1b" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (val) + : "cc"); + + return prev; +} + +static __always_inline unsigned long +__xchg_u16_local(volatile void *p, unsigned long val) +{ + unsigned long prev; + + __asm__ __volatile__( +"1: lharx %0,0,%2 # __xchg_u16_local\n" +" sthcx. %3,0,%2\n" +" bne- 1b" + : "=&r" (prev), "+m" (*(volatile unsigned short *)p) + : "r" (p), "r" (val) + : "cc", "memory"); + + return prev; +} + +static __always_inline unsigned long +__xchg_u16_relaxed(u16 *p, unsigned long val) +{ + unsigned long prev; + + __asm__ __volatile__( +"1: lharx %0,0,%2 # __xchg_u16_relaxed\n" +" sthcx. %3,0,%2\n" +" bne- 1b" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (val) + : "cc"); + + return prev; +} +#endif static __always_inline unsigned long __xchg_u32_local(volatile void *p, unsigned long val) @@ -198,11 +264,12 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size) (__typeof__(*(ptr))) __xchg_relaxed((ptr), \ (unsigned long)_x_, sizeof(*(ptr))); \ }) + /* * Compare and exchange - if *p == old, set it to new, * and return the old value of *p. 
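 * When CONFIG_PPC_HAS_LBARX_LHARX is set, the u8/u16 variants below are
 * open-coded with native lbarx/lharx sequences instead of the word-based
 * CMPXCHG_GEN fallbacks.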
*/ - +#ifndef CONFIG_PPC_HAS_LBARX_LHARX CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory"); CMPXCHG_GEN(u8, _local, , , "memory"); CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory"); @@ -211,6 +278,168 @@ CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory"); CMPXCHG_GEN(u16, _local, , , "memory"); CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory"); CMPXCHG_GEN(u16, _relaxed, , , "cc"); +#else +static __always_inline unsigned long +__cmpxchg_u8(volatile unsigned char *p, unsigned long old, unsigned long new) +{ + unsigned int prev; + + __asm__ __volatile__ ( + PPC_ATOMIC_ENTRY_BARRIER +"1: lbarx %0,0,%2 # __cmpxchg_u8\n" +" cmpw 0,%0,%3\n" +" bne- 2f\n" +" stbcx. %4,0,%2\n" +" bne- 1b" + PPC_ATOMIC_EXIT_BARRIER + "\n\ +2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc", "memory"); + + return prev; +} + +static __always_inline unsigned long +__cmpxchg_u8_local(volatile unsigned char *p, unsigned long old, + unsigned long new) +{ + unsigned int prev; + + __asm__ __volatile__ ( +"1: lbarx %0,0,%2 # __cmpxchg_u8_local\n" +" cmpw 0,%0,%3\n" +" bne- 2f\n" +" stbcx. %4,0,%2\n" +" bne- 1b\n" +"2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc", "memory"); + + return prev; +} + +static __always_inline unsigned long +__cmpxchg_u8_relaxed(u8 *p, unsigned long old, unsigned long new) +{ + unsigned long prev; + + __asm__ __volatile__ ( +"1: lbarx %0,0,%2 # __cmpxchg_u8_relaxed\n" +" cmpw 0,%0,%3\n" +" bne- 2f\n" +" stbcx. %4,0,%2\n" +" bne- 1b\n" +"2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc"); + + return prev; +} + +static __always_inline unsigned long +__cmpxchg_u8_acquire(u8 *p, unsigned long old, unsigned long new) +{ + unsigned long prev; + + __asm__ __volatile__ ( +"1: lbarx %0,0,%2 # __cmpxchg_u8_acquire\n" +" cmpw 0,%0,%3\n" +" bne- 2f\n" +" stbcx. %4,0,%2\n" +" bne- 1b\n" + PPC_ACQUIRE_BARRIER +"2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc", "memory"); + + return prev; +} + +static __always_inline unsigned long +__cmpxchg_u16(volatile unsigned short *p, unsigned long old, unsigned long new) +{ + unsigned int prev; + + __asm__ __volatile__ ( + PPC_ATOMIC_ENTRY_BARRIER +"1: lharx %0,0,%2 # __cmpxchg_u16\n" +" cmpw 0,%0,%3\n" +" bne- 2f\n" +" sthcx. %4,0,%2\n" +" bne- 1b\n" + PPC_ATOMIC_EXIT_BARRIER +"2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc", "memory"); + + return prev; +} + +static __always_inline unsigned long +__cmpxchg_u16_local(volatile unsigned short *p, unsigned long old, + unsigned long new) +{ + unsigned int prev; + + __asm__ __volatile__ ( +"1: lharx %0,0,%2 # __cmpxchg_u16_local\n" +" cmpw 0,%0,%3\n" +" bne- 2f\n" +" sthcx. %4,0,%2\n" +" bne- 1b" +"2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc", "memory"); + + return prev; +} + +static __always_inline unsigned long +__cmpxchg_u16_relaxed(u16 *p, unsigned long old, unsigned long new) +{ + unsigned long prev; + + __asm__ __volatile__ ( +"1: lharx %0,0,%2 # __cmpxchg_u16_relaxed\n" +" cmpw 0,%0,%3\n" +" bne- 2f\n" +" sthcx. %4,0,%2\n" +" bne- 1b\n" +"2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc"); + + return prev; +} + +static __always_inline unsigned long +__cmpxchg_u16_acquire(u16 *p, unsigned long old, unsigned long new) +{ + unsigned long prev; + + __asm__ __volatile__ ( +"1: lharx %0,0,%2 # __cmpxchg_u16_acquire\n" +" cmpw 0,%0,%3\n" +" bne- 2f\n" +" sthcx. 
%4,0,%2\n" +" bne- 1b\n" + PPC_ACQUIRE_BARRIER +"2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc", "memory"); + + return prev; +} +#endif static __always_inline unsigned long __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index 1c6316ec4b74f9bdc9722bfd03d9e1cbf39be6cf..3f881548fb61c12fcd04a9d8a5cf518aeb6d706b 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -22,8 +22,6 @@ #define BRANCH_SET_LINK 0x1 #define BRANCH_ABSOLUTE 0x2 -DECLARE_STATIC_KEY_FALSE(init_mem_is_free); - /* * Powerpc branch instruction is : * diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 431ae234302227905f42d1c99745ef47ef356023..4961fb38e4385f3d5dc2c497d4acdfdeb0b9e1ae 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -21,23 +21,8 @@ #include #include -typedef u64 __nocast cputime_t; -typedef u64 __nocast cputime64_t; - -#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new) - #ifdef __KERNEL__ -/* - * Convert cputime <-> microseconds - */ -extern u64 __cputime_usec_factor; - -static inline unsigned long cputime_to_usecs(const cputime_t ct) -{ - return mulhdu((__force u64) ct, __cputime_usec_factor); -} - -#define cputime_to_nsecs(cputime) tb_to_ns((__force u64)cputime) +#define cputime_to_nsecs(cputime) tb_to_ns(cputime) /* * PPC64 uses PACA which is task independent for storing accounting data while diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h index 86a14736c76c36fa7e04de7e5a2525b73f8958b1..51c744608f37a8cc09c3514a8b7f6547a7718b39 100644 --- a/arch/powerpc/include/asm/debug.h +++ b/arch/powerpc/include/asm/debug.h @@ -46,6 +46,8 @@ static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } #endif void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk); +void suspend_breakpoints(void); +void restore_breakpoints(void); bool ppc_breakpoint_available(void); #ifdef CONFIG_PPC_ADV_DEBUG_REGS extern void do_send_trap(struct pt_regs *regs, unsigned long address, diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index 259b9dd5fe1c5fc73bed12dafe2c6e4f80a1f03f..91c049d51d0e1088ffdafdb348fe214ce620d384 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -10,6 +10,13 @@ #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR +/* Ignore unused weak functions which will have larger offsets */ +#ifdef CONFIG_MPROFILE_KERNEL +#define FTRACE_MCOUNT_MAX_OFFSET 12 +#elif defined(CONFIG_PPC32) +#define FTRACE_MCOUNT_MAX_OFFSET 8 +#endif + #ifndef __ASSEMBLY__ extern void _mcount(void); @@ -84,17 +91,6 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, * those. 
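[Editor's aside — illustrative sketch, not part of the patch: with the ELFv1 variant removed, the single remaining arch_syscall_match_sym_name() in the hunk below matches unprefixed and ppc32_/ppc64_-prefixed symbols against their sys_ names. A self-contained demo of that shape, with hypothetical symbol names:]

	#include <stdio.h>
	#include <string.h>

	/* Same shape as the matcher kept below: "ppc64_foo" matches "sys_foo". */
	static int match_sym_name(const char *sym, const char *name)
	{
		return !strcmp(sym, name) ||
		       (!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
	}

	int main(void)
	{
		printf("%d\n", match_sym_name("ppc64_personality", "sys_personality")); /* 1 */
		printf("%d\n", match_sym_name("foo_read", "sys_read"));                 /* 0 */
		return 0;
	}
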
*/ #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME -#ifdef CONFIG_PPC64_ELF_ABI_V1 -static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) -{ - /* We need to skip past the initial dot, and the __se_sys alias */ - return !strcmp(sym + 1, name) || - (!strncmp(sym, ".__se_sys", 9) && !strcmp(sym + 6, name)) || - (!strncmp(sym, ".ppc_", 5) && !strcmp(sym + 5, name + 4)) || - (!strncmp(sym, ".ppc32_", 7) && !strcmp(sym + 7, name + 4)) || - (!strncmp(sym, ".ppc64_", 7) && !strcmp(sym + 7, name + 4)); -} -#else static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) { return !strcmp(sym, name) || @@ -103,7 +99,6 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) || (!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4)); } -#endif /* CONFIG_PPC64_ELF_ABI_V1 */ #endif /* CONFIG_FTRACE_SYSCALLS */ #if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 8abae463f6c12882fc85978d837011d96425d363..95fd7f9485d553f9f9cb188dca103103ee281b16 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -79,7 +79,7 @@ #define H_NOT_ENOUGH_RESOURCES -44 #define H_R_STATE -45 #define H_RESCINDED -46 -#define H_P1 -54 +#define H_ABORTED -54 #define H_P2 -55 #define H_P3 -56 #define H_P4 -57 @@ -100,7 +100,6 @@ #define H_COP_HW -74 #define H_STATE -75 #define H_IN_USE -77 -#define H_ABORTED -78 #define H_UNSUPPORTED_FLAG_START -256 #define H_UNSUPPORTED_FLAG_END -511 #define H_MULTI_THREADS_ACTIVE -9005 diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h index 1a6c1ce17735a5f3a80b6f3254e9f082d72f14c4..47d46712928ac6d766cf400621664d642ee8bf82 100644 --- a/arch/powerpc/include/asm/irqflags.h +++ b/arch/powerpc/include/asm/irqflags.h @@ -11,64 +11,6 @@ */ #include -#else -#ifdef CONFIG_TRACE_IRQFLAGS -#ifdef CONFIG_IRQSOFF_TRACER -/* - * Since the ftrace irqsoff latency trace checks CALLER_ADDR1, - * which is the stack frame here, we need to force a stack frame - * in case we came from user space. - */ -#define TRACE_WITH_FRAME_BUFFER(func) \ - mflr r0; \ - stdu r1, -STACK_FRAME_OVERHEAD(r1); \ - std r0, 16(r1); \ - stdu r1, -STACK_FRAME_OVERHEAD(r1); \ - bl func; \ - ld r1, 0(r1); \ - ld r1, 0(r1); -#else -#define TRACE_WITH_FRAME_BUFFER(func) \ - bl func; -#endif - -/* - * These are calls to C code, so the caller must be prepared for volatiles to - * be clobbered. - */ -#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on) -#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off) - -/* - * This is used by assembly code to soft-disable interrupts first and - * reconcile irq state. - * - * NB: This may call C code, so the caller must be prepared for volatiles to - * be clobbered. - */ -#define RECONCILE_IRQ_STATE(__rA, __rB) \ - lbz __rA,PACAIRQSOFTMASK(r13); \ - lbz __rB,PACAIRQHAPPENED(r13); \ - andi. 
__rA,__rA,IRQS_DISABLED; \ - li __rA,IRQS_DISABLED; \ - ori __rB,__rB,PACA_IRQ_HARD_DIS; \ - stb __rB,PACAIRQHAPPENED(r13); \ - bne 44f; \ - stb __rA,PACAIRQSOFTMASK(r13); \ - TRACE_DISABLE_INTS; \ -44: - -#else -#define TRACE_ENABLE_INTS -#define TRACE_DISABLE_INTS - -#define RECONCILE_IRQ_STATE(__rA, __rB) \ - lbz __rA,PACAIRQHAPPENED(r13); \ - li __rB,IRQS_DISABLED; \ - ori __rA,__rA,PACA_IRQ_HARD_DIS; \ - stb __rB,PACAIRQSOFTMASK(r13); \ - stb __rA,PACAIRQHAPPENED(r13) -#endif #endif #endif diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index c8882d9b86c27c5118c1b09ff6f8fc15bed32dfb..a36797938620f016907875913c2a14c1f098426e 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -105,7 +105,7 @@ struct kvmppc_host_state { void __iomem *xive_tima_virt; u32 saved_xirr; u64 dabr; - u64 host_mmcr[10]; /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER, MMCR3, SIER2/3 */ + u64 host_mmcr[7]; /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */ u32 host_pmc[8]; u64 host_purr; u64 host_spurr; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index bfacf12784dde98be592ac7a4091a22acea92fe3..eae9619b61903c95e7526928df406282fce34586 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -1014,6 +1014,18 @@ static inline void kvmppc_fix_ee_before_entry(void) #endif } +static inline void kvmppc_fix_ee_after_exit(void) +{ +#ifdef CONFIG_PPC64 + /* Only need to enable IRQs by hard enabling them after this */ + local_paca->irq_happened = PACA_IRQ_HARD_DIS; + irq_soft_mask_set(IRQS_ALL_DISABLED); +#endif + + trace_hardirqs_off(); +} + + static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb) { ulong ea; diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h index b71b9582e7546864fcef9947eed506cb5e47ca8f..b88d1d2cf304470f1ce102201e7e1588060f4011 100644 --- a/arch/powerpc/include/asm/linkage.h +++ b/arch/powerpc/include/asm/linkage.h @@ -4,6 +4,9 @@ #include +#define __ALIGN .align 2 +#define __ALIGN_STR ".align 2" + #ifdef CONFIG_PPC64_ELF_ABI_V1 #define cond_syscall(x) \ asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \ diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index c1ea270bb8486d881c7dcbfe4c7fce0d58ac6751..57f5017111f476d3a43de48f40ed2fb92cc1dde4 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -151,8 +151,8 @@ static inline void mm_context_remove_copro(struct mm_struct *mm) * nMMU and/or PSL need to be cleaned up. * * Both the 'copros' and 'active_cpus' counts are looked at in - * flush_all_mm() to determine the scope (local/global) of the - * TLBIs, so we need to flush first before decrementing + * radix__flush_all_mm() to determine the scope (local/global) + * of the TLBIs, so we need to flush first before decrementing * 'copros'. If this API is used by several callers for the * same context, it can lead to over-flushing. It's hopefully * not common enough to be a problem. @@ -164,7 +164,7 @@ static inline void mm_context_remove_copro(struct mm_struct *mm) * in-between. 
*/ if (radix_enabled()) { - flush_all_mm(mm); + radix__flush_all_mm(mm); c = atomic_dec_if_positive(&mm->context.copros); /* Detect imbalance between add and remove */ diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 0d40b33184ebe9a7c1f3372c3dbaaa78aa1b4e9a..70edad44dff6ff4a47269c172faecb5526ec2718 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -256,14 +256,20 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p num = number_of_cells_per_pte(pmd, new, huge); - for (i = 0; i < num; i++, entry++, new += SZ_4K) - *entry = new; + for (i = 0; i < num; i += PAGE_SIZE / SZ_4K, new += PAGE_SIZE) { + *entry++ = new; + if (IS_ENABLED(CONFIG_PPC_16K_PAGES) && num != 1) { + *entry++ = new; + *entry++ = new; + *entry++ = new; + } + } return old; } #ifdef CONFIG_PPC_16K_PAGES -#define __HAVE_ARCH_PTEP_GET +#define ptep_get ptep_get static inline pte_t ptep_get(pte_t *ptep) { pte_basic_t val = READ_ONCE(ptep->pte); diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index d9067dfc531ccdd3f739a7fb29407a38a355f5d9..69c3a050a3d8557eab88ef832e11dc718e528f5c 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -183,7 +183,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, * cases, and 32-bit non-hash with 32-bit PTEs. */ #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES) - ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte); + ptep->pte3 = ptep->pte2 = ptep->pte1 = ptep->pte = pte_val(pte); #else *ptep = pte; #endif diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h index bdaf34ad41ea6a36d4d56d837901f867cbeded75..9a2cf83ea4f19ce881698b887e9874de4684254f 100644 --- a/arch/powerpc/include/asm/nohash/tlbflush.h +++ b/arch/powerpc/include/asm/nohash/tlbflush.h @@ -45,6 +45,12 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned lon asm volatile ("tlbie %0; sync" : : "r" (vmaddr) : "memory"); } +static inline void local_flush_tlb_page_psize(struct mm_struct *mm, + unsigned long vmaddr, int psize) +{ + asm volatile ("tlbie %0; sync" : : "r" (vmaddr) : "memory"); +} + static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) { start &= PAGE_MASK; @@ -58,6 +64,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); extern void local_flush_tlb_mm(struct mm_struct *mm); extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); +void local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, int psize); extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, int tsize, int ind); diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 283f40d05a4d7ee725a3d4bf4c77ee9482ca8feb..9972626ddaf68e3c5d8d2dbd36d8fa8ff7dfd6cd 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -81,13 +81,6 @@ void poking_init(void); extern unsigned long ioremap_bot; extern const pgprot_t protection_map[16]; -/* - * kern_addr_valid is intended to indicate whether an address is a valid - * kernel address. Most 32-bit archs define it as always true (like this) - * but most 64-bit archs actually perform a test. 
What should we do here? - */ -#define kern_addr_valid(addr) (1) - #ifndef CONFIG_TRANSPARENT_HUGEPAGE #define pmd_large(pmd) 0 #endif diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 753a2757bcd4f3c78927a0b5ded949d6c5d3b19a..d2f44612f4b0270cb6a30a21e3f6a21e6700f51c 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -74,6 +74,25 @@ #define SAVE_GPR(n, base) SAVE_GPRS(n, n, base) #define REST_GPR(n, base) REST_GPRS(n, n, base) +/* macros for handling user register sanitisation */ +#ifdef CONFIG_INTERRUPT_SANITIZE_REGISTERS +#define SANITIZE_SYSCALL_GPRS() ZEROIZE_GPR(0); \ + ZEROIZE_GPRS(5, 12); \ + ZEROIZE_NVGPRS() +#define SANITIZE_GPR(n) ZEROIZE_GPR(n) +#define SANITIZE_GPRS(start, end) ZEROIZE_GPRS(start, end) +#define SANITIZE_NVGPRS() ZEROIZE_NVGPRS() +#define SANITIZE_RESTORE_NVGPRS() REST_NVGPRS(r1) +#define HANDLER_RESTORE_NVGPRS() +#else +#define SANITIZE_SYSCALL_GPRS() +#define SANITIZE_GPR(n) +#define SANITIZE_GPRS(start, end) +#define SANITIZE_NVGPRS() +#define SANITIZE_RESTORE_NVGPRS() +#define HANDLER_RESTORE_NVGPRS() REST_NVGPRS(r1) +#endif /* CONFIG_INTERRUPT_SANITIZE_REGISTERS */ + #define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base) #define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base) #define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 631802999d5988f1aa7bf2f993a794563454ca06..e96c9b8c2a60bdac9fb2a833c48bf241a21fa4c7 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -374,9 +374,18 @@ static inline unsigned long __pack_fe01(unsigned int fpmode) #endif -/* Check that a certain kernel stack pointer is valid in task_struct p */ -int validate_sp(unsigned long sp, struct task_struct *p, - unsigned long nbytes); +/* + * Check that a certain kernel stack pointer is a valid (minimum sized) + * stack frame in task_struct p. + */ +int validate_sp(unsigned long sp, struct task_struct *p); + +/* + * validate the stack frame of a particular minimum size, used for when we are + * looking at a certain object in the stack beyond the minimum. + */ +int validate_sp_size(unsigned long sp, struct task_struct *p, + unsigned long nbytes); /* * Prefetch macros. diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 2e82820fbd64cf7cc10c135d89e302fc06394965..c0107d8ddd8c827e8f9e9aafc78840d312e3164c 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -85,6 +85,7 @@ struct of_drc_info { extern int of_read_drc_info_cell(struct property **prop, const __be32 **curval, struct of_drc_info *data); +extern unsigned int boot_cpu_node_count; /* * There are two methods for telling firmware what our capabilities are. diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index 8a0d8fb3532863c1b3fb590fe5f203a776ad7306..d503dbd7856cb86441dc0467e6567f5b5ab434c5 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -425,10 +425,6 @@ static inline void *ps3_system_bus_get_drvdata( return dev_get_drvdata(&dev->core); } -/* These two need global scope for get_arch_dma_ops(). 
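[Editor's aside — illustrative sketch, not part of the patch: the processor.h hunk above splits stack-pointer validation into validate_sp(), which checks for a minimum-sized frame, and validate_sp_size(), which checks an arbitrary object size. One way the two could relate, assuming a hypothetical stack_region_contains() helper; the real process.c implementation differs in detail:]

	int validate_sp_size(unsigned long sp, struct task_struct *p,
			     unsigned long nbytes)
	{
		/* hypothetical helper: is [sp, sp + nbytes) within p's stack? */
		return stack_region_contains(p, sp, nbytes);
	}

	int validate_sp(unsigned long sp, struct task_struct *p)
	{
		/* a valid frame is at least the ABI minimum frame size */
		return validate_sp_size(sp, p, STACK_FRAME_MIN_SIZE);
	}
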
*/ - -extern struct bus_type ps3_system_bus_type; - /* system manager */ struct ps3_sys_manager_ops { diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h index 714a35f0d425b960ae5e15a667fcf615a4ef3525..73c22c579a79168036c5398005a2c503d35dc540 100644 --- a/arch/powerpc/include/asm/pte-walk.h +++ b/arch/powerpc/include/asm/pte-walk.h @@ -60,29 +60,4 @@ static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr) return pa; } -/* - * This is what we should always use. Any other lockless page table lookup needs - * careful audit against THP split. - */ -static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea, - bool *is_thp, unsigned *hshift) -{ - pte_t *pte; - - VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__); - VM_WARN(pgdir != current->mm->pgd, - "%s lock less page table lookup called on wrong mm\n", __func__); - pte = __find_linux_pte(pgdir, ea, is_thp, hshift); - -#if defined(CONFIG_DEBUG_VM) && \ - !(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)) - /* - * We should not find huge page if these configs are not enabled. - */ - if (hshift) - WARN_ON(*hshift); -#endif - return pte; -} - #endif /* _ASM_POWERPC_PTE_WALK_H */ diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 2efec6d87049ea76f8bacd00f2797208a45fec66..0eb90a0133466690b6286b53589a4e480af15494 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -97,8 +97,6 @@ struct pt_regs #endif -#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)) - // Always displays as "REGS" in memory dumps #ifdef CONFIG_CPU_BIG_ENDIAN #define STACK_FRAME_REGS_MARKER ASM_CONST(0x52454753) @@ -120,16 +118,27 @@ struct pt_regs #define USER_REDZONE_SIZE 512 #define KERNEL_REDZONE_SIZE 288 -#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */ #define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */ -#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \ - STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE) -#define STACK_FRAME_MARKER 12 #ifdef CONFIG_PPC64_ELF_ABI_V2 #define STACK_FRAME_MIN_SIZE 32 +#define STACK_USER_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_MIN_SIZE + 16) +#define STACK_INT_FRAME_REGS (STACK_FRAME_MIN_SIZE + 16) +#define STACK_INT_FRAME_MARKER STACK_FRAME_MIN_SIZE +#define STACK_SWITCH_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_MIN_SIZE + 16) +#define STACK_SWITCH_FRAME_REGS (STACK_FRAME_MIN_SIZE + 16) #else -#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD +/* + * The ELFv1 ABI specifies 48 bytes plus a minimum 64 byte parameter save + * area. This parameter area is not used by calls to C from interrupt entry, + * so the second from last one of those is used for the frame marker. + */ +#define STACK_FRAME_MIN_SIZE 112 +#define STACK_USER_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_MIN_SIZE) +#define STACK_INT_FRAME_REGS STACK_FRAME_MIN_SIZE +#define STACK_INT_FRAME_MARKER (STACK_FRAME_MIN_SIZE - 16) +#define STACK_SWITCH_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_MIN_SIZE) +#define STACK_SWITCH_FRAME_REGS STACK_FRAME_MIN_SIZE #endif /* Size of dummy stack frame allocated when calling signal handler. 
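[Editor's aside — illustrative sketch, not part of the patch: plugging in the ELFv2 values defined above gives STACK_INT_FRAME_REGS = 32 + 16 = 48 and STACK_INT_FRAME_MARKER = 32, so an interrupt frame addresses its pt_regs and "REGS" marker as below; offsets are hard-coded for clarity only:]

	static inline struct pt_regs *frame_regs_demo(unsigned long sp)
	{
		return (struct pt_regs *)(sp + 48);	/* STACK_INT_FRAME_REGS */
	}

	static inline unsigned long *frame_marker_demo(unsigned long sp)
	{
		return (unsigned long *)(sp + 32);	/* STACK_INT_FRAME_MARKER */
	}
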
*/ @@ -140,17 +149,22 @@ struct pt_regs #define USER_REDZONE_SIZE 0 #define KERNEL_REDZONE_SIZE 0 -#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */ +#define STACK_FRAME_MIN_SIZE 16 #define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */ -#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD) -#define STACK_FRAME_MARKER 2 -#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD +#define STACK_USER_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_MIN_SIZE) +#define STACK_INT_FRAME_REGS STACK_FRAME_MIN_SIZE +#define STACK_INT_FRAME_MARKER (STACK_FRAME_MIN_SIZE - 8) +#define STACK_SWITCH_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_MIN_SIZE) +#define STACK_SWITCH_FRAME_REGS STACK_FRAME_MIN_SIZE /* Size of stack frame allocated when calling signal handler. */ #define __SIGNAL_FRAMESIZE 64 #endif /* __powerpc64__ */ +#define STACK_INT_FRAME_SIZE (KERNEL_REDZONE_SIZE + STACK_USER_INT_FRAME_SIZE) +#define STACK_INT_FRAME_MARKER_LONGS (STACK_INT_FRAME_MARKER/sizeof(long)) + #ifndef __ASSEMBLY__ #include diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h index b676c4fb90fd71706941ea1438792eafca8db229..28a53fb69b381a65e0eee111b75427d5e51b20b3 100644 --- a/arch/powerpc/include/asm/qspinlock.h +++ b/arch/powerpc/include/asm/qspinlock.h @@ -2,83 +2,173 @@ #ifndef _ASM_POWERPC_QSPINLOCK_H #define _ASM_POWERPC_QSPINLOCK_H -#include +#include +#include #include -#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */ +#ifdef CONFIG_PPC64 +/* + * Use the EH=1 hint for accesses that result in the lock being acquired. + * The hardware is supposed to optimise this pattern by holding the lock + * cacheline longer, and releasing when a store to the same memory (the + * unlock) is performed. + */ +#define _Q_SPIN_EH_HINT 1 +#else +#define _Q_SPIN_EH_HINT 0 +#endif -#ifdef CONFIG_PARAVIRT_SPINLOCKS -extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); -extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); -extern void __pv_queued_spin_unlock(struct qspinlock *lock); +/* + * The trylock itself may steal. This makes trylocks slightly stronger, and + * makes locks slightly more efficient when stealing. + * + * This is compile-time, so if true then there may always be stealers, so the + * nosteal paths become unused. + */ +#define _Q_SPIN_TRY_LOCK_STEAL 1 -static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) -{ - if (!is_shared_processor()) - native_queued_spin_lock_slowpath(lock, val); - else - __pv_queued_spin_lock_slowpath(lock, val); -} +/* + * Put a speculation barrier after testing the lock/node and finding it + * busy. Try to prevent pointless speculation in slow paths. + * + * Slows down the lockstorm microbenchmark with no stealing, where locking + * is purely FIFO through the queue. May have more benefit in real workload + * where speculating into the wrong place could have a greater cost. + */ +#define _Q_SPIN_SPEC_BARRIER 0 -#define queued_spin_unlock queued_spin_unlock -static inline void queued_spin_unlock(struct qspinlock *lock) -{ - if (!is_shared_processor()) - smp_store_release(&lock->locked, 0); - else - __pv_queued_spin_unlock(lock); -} +#ifdef CONFIG_PPC64 +/* + * Execute a miso instruction after passing the MCS lock ownership to the + * queue head. Miso is intended to make stores visible to other CPUs sooner. + * + * This seems to make the lockstorm microbenchmark nospin test go slightly + * faster on POWER10, but disable for now. 
+ */ +#define _Q_SPIN_MISO 0 +#else +#define _Q_SPIN_MISO 0 +#endif +#ifdef CONFIG_PPC64 +/* + * This executes miso after an unlock of the lock word, having ownership + * pass to the next CPU sooner. This will slow the uncontended path to some + * degree. Not evidence it helps yet. + */ +#define _Q_SPIN_MISO_UNLOCK 0 #else -extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +#define _Q_SPIN_MISO_UNLOCK 0 #endif -static __always_inline void queued_spin_lock(struct qspinlock *lock) +/* + * Seems to slow down lockstorm microbenchmark, suspect queue node just + * has to become shared again right afterwards when its waiter spins on + * the lock field. + */ +#define _Q_SPIN_PREFETCH_NEXT 0 + +static __always_inline int queued_spin_is_locked(struct qspinlock *lock) { - u32 val = 0; + return READ_ONCE(lock->val); +} - if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL))) - return; +static __always_inline int queued_spin_value_unlocked(struct qspinlock lock) +{ + return !lock.val; +} - queued_spin_lock_slowpath(lock, val); +static __always_inline int queued_spin_is_contended(struct qspinlock *lock) +{ + return !!(READ_ONCE(lock->val) & _Q_TAIL_CPU_MASK); } -#define queued_spin_lock queued_spin_lock -#ifdef CONFIG_PARAVIRT_SPINLOCKS -#define SPIN_THRESHOLD (1<<15) /* not tuned */ +static __always_inline u32 queued_spin_encode_locked_val(void) +{ + /* XXX: make this use lock value in paca like simple spinlocks? */ + return _Q_LOCKED_VAL | (smp_processor_id() << _Q_OWNER_CPU_OFFSET); +} -static __always_inline void pv_wait(u8 *ptr, u8 val) +static __always_inline int __queued_spin_trylock_nosteal(struct qspinlock *lock) { - if (*ptr != val) - return; - yield_to_any(); - /* - * We could pass in a CPU here if waiting in the queue and yield to - * the previous CPU in the queue. - */ + u32 new = queued_spin_encode_locked_val(); + u32 prev; + + /* Trylock succeeds only when unlocked and no queued nodes */ + asm volatile( +"1: lwarx %0,0,%1,%3 # __queued_spin_trylock_nosteal \n" +" cmpwi 0,%0,0 \n" +" bne- 2f \n" +" stwcx. %2,0,%1 \n" +" bne- 1b \n" +"\t" PPC_ACQUIRE_BARRIER " \n" +"2: \n" + : "=&r" (prev) + : "r" (&lock->val), "r" (new), + "i" (_Q_SPIN_EH_HINT) + : "cr0", "memory"); + + return likely(prev == 0); } -static __always_inline void pv_kick(int cpu) +static __always_inline int __queued_spin_trylock_steal(struct qspinlock *lock) { - prod_cpu(cpu); + u32 new = queued_spin_encode_locked_val(); + u32 prev, tmp; + + /* Trylock may get ahead of queued nodes if it finds unlocked */ + asm volatile( +"1: lwarx %0,0,%2,%5 # __queued_spin_trylock_steal \n" +" andc. %1,%0,%4 \n" +" bne- 2f \n" +" and %1,%0,%4 \n" +" or %1,%1,%3 \n" +" stwcx. 
%1,0,%2 \n" +" bne- 1b \n" +"\t" PPC_ACQUIRE_BARRIER " \n" +"2: \n" + : "=&r" (prev), "=&r" (tmp) + : "r" (&lock->val), "r" (new), "r" (_Q_TAIL_CPU_MASK), + "i" (_Q_SPIN_EH_HINT) + : "cr0", "memory"); + + return likely(!(prev & ~_Q_TAIL_CPU_MASK)); } -extern void __pv_init_lock_hash(void); +static __always_inline int queued_spin_trylock(struct qspinlock *lock) +{ + if (!_Q_SPIN_TRY_LOCK_STEAL) + return __queued_spin_trylock_nosteal(lock); + else + return __queued_spin_trylock_steal(lock); +} -static inline void pv_spinlocks_init(void) +void queued_spin_lock_slowpath(struct qspinlock *lock); + +static __always_inline void queued_spin_lock(struct qspinlock *lock) { - __pv_init_lock_hash(); + if (!queued_spin_trylock(lock)) + queued_spin_lock_slowpath(lock); } -#endif +static inline void queued_spin_unlock(struct qspinlock *lock) +{ + smp_store_release(&lock->locked, 0); + if (_Q_SPIN_MISO_UNLOCK) + asm volatile("miso" ::: "memory"); +} -/* - * Queued spinlocks rely heavily on smp_cond_load_relaxed() to busy-wait, - * which was found to have performance problems if implemented with - * the preferred spin_begin()/spin_end() SMT priority pattern. Use the - * generic version instead. - */ +#define arch_spin_is_locked(l) queued_spin_is_locked(l) +#define arch_spin_is_contended(l) queued_spin_is_contended(l) +#define arch_spin_value_unlocked(l) queued_spin_value_unlocked(l) +#define arch_spin_lock(l) queued_spin_lock(l) +#define arch_spin_trylock(l) queued_spin_trylock(l) +#define arch_spin_unlock(l) queued_spin_unlock(l) -#include +#ifdef CONFIG_PARAVIRT_SPINLOCKS +void pv_spinlocks_init(void); +#else +static inline void pv_spinlocks_init(void) { } +#endif #endif /* _ASM_POWERPC_QSPINLOCK_H */ diff --git a/arch/powerpc/include/asm/qspinlock_paravirt.h b/arch/powerpc/include/asm/qspinlock_paravirt.h deleted file mode 100644 index 6b60e7736a47c303ea23825e4d94e7371e054a5c..0000000000000000000000000000000000000000 --- a/arch/powerpc/include/asm/qspinlock_paravirt.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef _ASM_POWERPC_QSPINLOCK_PARAVIRT_H -#define _ASM_POWERPC_QSPINLOCK_PARAVIRT_H - -EXPORT_SYMBOL(__pv_queued_spin_unlock); - -#endif /* _ASM_POWERPC_QSPINLOCK_PARAVIRT_H */ diff --git a/arch/powerpc/include/asm/qspinlock_types.h b/arch/powerpc/include/asm/qspinlock_types.h new file mode 100644 index 0000000000000000000000000000000000000000..4766a7aa03cbc30b538137c2956a5d729ba563e8 --- /dev/null +++ b/arch/powerpc/include/asm/qspinlock_types.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ASM_POWERPC_QSPINLOCK_TYPES_H +#define _ASM_POWERPC_QSPINLOCK_TYPES_H + +#include +#include + +typedef struct qspinlock { + union { + u32 val; + +#ifdef __LITTLE_ENDIAN + struct { + u16 locked; + u8 reserved[2]; + }; +#else + struct { + u8 reserved[2]; + u16 locked; + }; +#endif + }; +} arch_spinlock_t; + +#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = 0 } } + +/* + * Bitfields in the lock word: + * + * 0: locked bit + * 1-14: lock holder cpu + * 15: lock owner or queuer vcpus observed to be preempted bit + * 16: must queue bit + * 17-31: tail cpu (+1) + */ +#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\ + << _Q_ ## type ## _OFFSET) +/* 0x00000001 */ +#define _Q_LOCKED_OFFSET 0 +#define _Q_LOCKED_BITS 1 +#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) + +/* 0x00007ffe */ +#define _Q_OWNER_CPU_OFFSET 1 +#define _Q_OWNER_CPU_BITS 14 +#define _Q_OWNER_CPU_MASK _Q_SET_MASK(OWNER_CPU) + +#if CONFIG_NR_CPUS > (1U << _Q_OWNER_CPU_BITS) 
+#error "qspinlock does not support such large CONFIG_NR_CPUS" +#endif + +/* 0x00008000 */ +#define _Q_SLEEPY_OFFSET 15 +#define _Q_SLEEPY_BITS 1 +#define _Q_SLEEPY_VAL (1U << _Q_SLEEPY_OFFSET) + +/* 0x00010000 */ +#define _Q_MUST_Q_OFFSET 16 +#define _Q_MUST_Q_BITS 1 +#define _Q_MUST_Q_VAL (1U << _Q_MUST_Q_OFFSET) + +/* 0xfffe0000 */ +#define _Q_TAIL_CPU_OFFSET 17 +#define _Q_TAIL_CPU_BITS 15 +#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU) + +#if CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS) +#error "qspinlock does not support such large CONFIG_NR_CPUS" +#endif + +#endif /* _ASM_POWERPC_QSPINLOCK_TYPES_H */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 56319aea646e6a6b0f3fa7a0540f4f9963cd372c..479a95cb27701be8509a0a7b18ff12b2164c8145 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -33,21 +33,6 @@ #define RTAS_THREADS_ACTIVE -9005 /* Multiple processor threads active */ #define RTAS_OUTSTANDING_COPROC -9006 /* Outstanding coprocessor operations */ -/* - * In general to call RTAS use rtas_token("string") to lookup - * an RTAS token for the given string (e.g. "event-scan"). - * To actually perform the call use - * ret = rtas_call(token, n_in, n_out, ...) - * Where n_in is the number of input parameters and - * n_out is the number of output parameters - * - * If the "string" is invalid on this system, RTAS_UNKNOWN_SERVICE - * will be returned as a token. rtas_call() does look for this - * token and error out gracefully so rtas_call(rtas_token("str"), ...) - * may be safely used for one-shot calls to RTAS. - * - */ - /* RTAS event classes */ #define RTAS_INTERNAL_ERROR 0x80000000 /* set bit 0 */ #define RTAS_EPOW_WARNING 0x40000000 /* set bit 1 */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index bd75872a6334a4ce21d6d8ce86df80ecd0bf2884..7dafca8e3f02efe53f0030ab021df3909e4dfc29 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -13,7 +13,7 @@ /* See include/linux/spinlock.h */ #define smp_mb__after_spinlock() smp_mb() -#ifndef CONFIG_PARAVIRT_SPINLOCKS +#ifndef CONFIG_PPC_QUEUED_SPINLOCKS static inline void pv_spinlocks_init(void) { } #endif diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index d5f8a74ed2e8c3619404c283e1f94261afc84b32..40b01446cf755e862b99d0e9cef116089cf2b8b1 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ #endif #ifdef CONFIG_PPC_QUEUED_SPINLOCKS -#include +#include #include #else #include diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 4ce2a4aa3985436e3ff788c3b06223a931f86269..d24a59a98c0c9e88e66814b143a7b8d1fafe4b11 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -72,7 +72,7 @@ #endif #define STACK_PT_REGS_OFFSET(sym, val) \ - DEFINE(sym, STACK_FRAME_OVERHEAD + offsetof(struct pt_regs, val)) + DEFINE(sym, STACK_INT_FRAME_REGS + offsetof(struct pt_regs, val)) int main(void) { @@ -167,9 +167,8 @@ int main(void) OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state.vr); OFFSET(THREAD_CKVRSAVE, thread_struct, ckvrsave); OFFSET(THREAD_CKFPSTATE, thread_struct, ckfp_state.fpr); - /* Local pt_regs on stack for Transactional Memory funcs. 
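[Editor's aside — illustrative sketch, not part of the patch: decoding a lock word according to the qspinlock_types.h layout introduced above. The demo struct and its field names are descriptive, not kernel API:]

	struct qsl_fields {
		unsigned int locked, owner_cpu, sleepy, must_q, tail_cpu;
	};

	static struct qsl_fields qsl_decode(unsigned int val)
	{
		struct qsl_fields f;

		f.locked    = val & _Q_LOCKED_VAL;
		f.owner_cpu = (val & _Q_OWNER_CPU_MASK) >> _Q_OWNER_CPU_OFFSET;
		f.sleepy    = !!(val & _Q_SLEEPY_VAL);
		f.must_q    = !!(val & _Q_MUST_Q_VAL);
		/* the tail field stores cpu + 1, so 0 means "no queued waiters" */
		f.tail_cpu  = (val & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
		return f;
	}
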
*/ - DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD + - sizeof(struct pt_regs) + 16); + /* Local pt_regs on stack in int frame form, plus 16 bytes for TM */ + DEFINE(TM_FRAME_SIZE, STACK_INT_FRAME_SIZE + 16); #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags); @@ -261,7 +260,7 @@ int main(void) /* Interrupt register frame */ DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); - DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_WITH_PT_REGS); + DEFINE(SWITCH_FRAME_SIZE, STACK_SWITCH_FRAME_SIZE); STACK_PT_REGS_OFFSET(GPR0, gpr[0]); STACK_PT_REGS_OFFSET(GPR1, gpr[1]); STACK_PT_REGS_OFFSET(GPR2, gpr[2]); @@ -418,21 +417,18 @@ int main(void) /* book3s */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - OFFSET(KVM_TLB_SETS, kvm, arch.tlb_sets); OFFSET(KVM_SDR1, kvm, arch.sdr1); OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid); OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr); OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1); OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls); OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v); - OFFSET(KVM_RADIX, kvm, arch.radix); OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest); OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr); OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar); OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr); OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty); OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst); - OFFSET(VCPU_NESTED, kvm_vcpu, arch.nested); OFFSET(VCPU_CPU, kvm_vcpu, cpu); OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu); #endif @@ -449,16 +445,12 @@ int main(void) OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx); OFFSET(VCPU_DAWR0, kvm_vcpu, arch.dawr0); OFFSET(VCPU_DAWRX0, kvm_vcpu, arch.dawrx0); - OFFSET(VCPU_DAWR1, kvm_vcpu, arch.dawr1); - OFFSET(VCPU_DAWRX1, kvm_vcpu, arch.dawrx1); OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr); OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags); OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires); OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions); OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded); OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded); - OFFSET(VCPU_IRQ_PENDING, kvm_vcpu, arch.irq_pending); - OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request); OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr); OFFSET(VCPU_MMCRA, kvm_vcpu, arch.mmcra); OFFSET(VCPU_MMCRS, kvm_vcpu, arch.mmcrs); @@ -486,8 +478,6 @@ int main(void) OFFSET(VCPU_TCSCR, kvm_vcpu, arch.tcscr); OFFSET(VCPU_ACOP, kvm_vcpu, arch.acop); OFFSET(VCPU_WORT, kvm_vcpu, arch.wort); - OFFSET(VCPU_TID, kvm_vcpu, arch.tid); - OFFSET(VCPU_PSSCR, kvm_vcpu, arch.psscr); OFFSET(VCPU_HFSCR, kvm_vcpu, arch.hfscr); OFFSET(VCORE_ENTRY_EXIT, kvmppc_vcore, entry_exit_map); OFFSET(VCORE_IN_GUEST, kvmppc_vcore, in_guest); @@ -582,8 +572,6 @@ int main(void) HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state); HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore); - HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys); - HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt); HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi); HSTATE_FIELD(HSTATE_PTID, ptid); HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend); @@ -594,9 +582,6 @@ int main(void) HSTATE_FIELD(HSTATE_SDAR, host_mmcr[4]); HSTATE_FIELD(HSTATE_MMCR2, host_mmcr[5]); HSTATE_FIELD(HSTATE_SIER, host_mmcr[6]); - HSTATE_FIELD(HSTATE_MMCR3, host_mmcr[7]); - HSTATE_FIELD(HSTATE_SIER2, host_mmcr[8]); - HSTATE_FIELD(HSTATE_SIER3, host_mmcr[9]); HSTATE_FIELD(HSTATE_PMC1, host_pmc[0]); HSTATE_FIELD(HSTATE_PMC2, host_pmc[1]); HSTATE_FIELD(HSTATE_PMC3, host_pmc[2]); @@ -672,17 +657,6 @@ int main(void) OFFSET(VCPU_HOST_MAS6, kvm_vcpu, 
arch.host_mas6); #endif -#ifdef CONFIG_KVM_XICS - DEFINE(VCPU_XIVE_SAVED_STATE, offsetof(struct kvm_vcpu, - arch.xive_saved_state)); - DEFINE(VCPU_XIVE_CAM_WORD, offsetof(struct kvm_vcpu, - arch.xive_cam_word)); - DEFINE(VCPU_XIVE_PUSHED, offsetof(struct kvm_vcpu, arch.xive_pushed)); - DEFINE(VCPU_XIVE_ESC_ON, offsetof(struct kvm_vcpu, arch.xive_esc_on)); - DEFINE(VCPU_XIVE_ESC_RADDR, offsetof(struct kvm_vcpu, arch.xive_esc_raddr)); - DEFINE(VCPU_XIVE_ESC_VADDR, offsetof(struct kvm_vcpu, arch.xive_esc_vaddr)); -#endif - #ifdef CONFIG_KVM_EXIT_TIMING OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu); OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl); diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S index f8b5ff64b604f38023f73bbcaecaf0d9dd86906d..f29ce3dd6140f40c026a0d8f67e79858c86dabd9 100644 --- a/arch/powerpc/kernel/cpu_setup_6xx.S +++ b/arch/powerpc/kernel/cpu_setup_6xx.S @@ -4,6 +4,8 @@ * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) */ +#include + #include #include #include @@ -81,7 +83,7 @@ _GLOBAL(__setup_cpu_745x) blr /* Enable caches for 603's, 604, 750 & 7400 */ -setup_common_caches: +SYM_FUNC_START_LOCAL(setup_common_caches) mfspr r11,SPRN_HID0 andi. r0,r11,HID0_DCE ori r11,r11,HID0_ICE|HID0_DCE @@ -95,11 +97,12 @@ setup_common_caches: sync isync blr +SYM_FUNC_END(setup_common_caches) /* 604, 604e, 604ev, ... * Enable superscalar execution & branch history table */ -setup_604_hid0: +SYM_FUNC_START_LOCAL(setup_604_hid0) mfspr r11,SPRN_HID0 ori r11,r11,HID0_SIED|HID0_BHTE ori r8,r11,HID0_BTCD @@ -110,6 +113,7 @@ setup_604_hid0: sync isync blr +SYM_FUNC_END(setup_604_hid0) /* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some * erratas we work around here. @@ -125,13 +129,14 @@ setup_604_hid0: * needed once we have applied workaround #5 (though it's * not set by Apple's firmware at least). */ -setup_7400_workarounds: +SYM_FUNC_START_LOCAL(setup_7400_workarounds) mfpvr r3 rlwinm r3,r3,0,20,31 cmpwi 0,r3,0x0207 ble 1f blr -setup_7410_workarounds: +SYM_FUNC_END(setup_7400_workarounds) +SYM_FUNC_START_LOCAL(setup_7410_workarounds) mfpvr r3 rlwinm r3,r3,0,20,31 cmpwi 0,r3,0x0100 @@ -151,6 +156,7 @@ setup_7410_workarounds: sync isync blr +SYM_FUNC_END(setup_7410_workarounds) /* 740/750/7400/7410 * Enable Store Gathering (SGE), Address Broadcast (ABE), @@ -158,7 +164,7 @@ setup_7410_workarounds: * Dynamic Power Management (DPM), Speculative (SPD) * Clear Instruction cache throttling (ICTC) */ -setup_750_7400_hid0: +SYM_FUNC_START_LOCAL(setup_750_7400_hid0) mfspr r11,SPRN_HID0 ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC oris r11,r11,HID0_DPM@h @@ -177,12 +183,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM) sync isync blr +SYM_FUNC_END(setup_750_7400_hid0) /* 750cx specific * Looks like we have to disable NAP feature for some PLL settings... * (waiting for confirmation) */ -setup_750cx: +SYM_FUNC_START_LOCAL(setup_750cx) mfspr r10, SPRN_HID1 rlwinm r10,r10,4,28,31 cmpwi cr0,r10,7 @@ -196,11 +203,13 @@ setup_750cx: andc r6,r6,r7 stw r6,CPU_SPEC_FEATURES(r4) blr +SYM_FUNC_END(setup_750cx) /* 750fx specific */ -setup_750fx: +SYM_FUNC_START_LOCAL(setup_750fx) blr +SYM_FUNC_END(setup_750fx) /* MPC 745x * Enable Store Gathering (SGE), Branch Folding (FOLD) @@ -212,7 +221,7 @@ setup_750fx: * Clear Instruction cache throttling (ICTC) * Enable L2 HW prefetch */ -setup_745x_specifics: +SYM_FUNC_START_LOCAL(setup_745x_specifics) /* We check for the presence of an L3 cache setup by * the firmware. 
If any, we disable NAP capability as * it's known to be bogus on rev 2.1 and earlier @@ -270,6 +279,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM) sync isync blr +SYM_FUNC_END(setup_745x_specifics) /* * Initialize the FPU registers. This is needed to work around an errata diff --git a/arch/powerpc/kernel/cpu_setup_e500.S b/arch/powerpc/kernel/cpu_setup_e500.S index 2ab25161b0adc94deac23e15c587722bc04c4d66..077cfccc3461efdb1028b18adb82230467a4a0a4 100644 --- a/arch/powerpc/kernel/cpu_setup_e500.S +++ b/arch/powerpc/kernel/cpu_setup_e500.S @@ -8,6 +8,8 @@ * Benjamin Herrenschmidt */ +#include + #include #include #include @@ -274,7 +276,7 @@ _GLOBAL(flush_dcache_L1) blr -has_L2_cache: +SYM_FUNC_START_LOCAL(has_L2_cache) /* skip L2 cache on P2040/P2040E as they have no L2 cache */ mfspr r3, SPRN_SVR /* shift right by 8 bits and clear E bit of SVR */ @@ -290,9 +292,10 @@ has_L2_cache: 1: li r3, 0 blr +SYM_FUNC_END(has_L2_cache) /* flush backside L2 cache */ -flush_backside_L2_cache: +SYM_FUNC_START_LOCAL(flush_backside_L2_cache) mflr r10 bl has_L2_cache mtlr r10 @@ -313,6 +316,7 @@ flush_backside_L2_cache: bne 1b 2: blr +SYM_FUNC_END(flush_backside_L2_cache) _GLOBAL(cpu_down_flush_e500v2) mflr r0 diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 3fc7c9886bb709be106d1d5ebcb6c63b59301edb..5604c9a1ac22150372c8c952ef3686a1a84d3b7a 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -18,6 +18,8 @@ #include #include #include +#include + #include #include #include @@ -74,17 +76,18 @@ _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler) #endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */ #if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32) - .globl __kuep_lock -__kuep_lock: +SYM_FUNC_START(__kuep_lock) lwz r9, THREAD+THSR0(r2) update_user_segments_by_4 r9, r10, r11, r12 blr +SYM_FUNC_END(__kuep_lock) -__kuep_unlock: +SYM_FUNC_START_LOCAL(__kuep_unlock) lwz r9, THREAD+THSR0(r2) rlwinm r9,r9,0,~SR_NX update_user_segments_by_4 r9, r10, r11, r12 blr +SYM_FUNC_END(__kuep_unlock) .macro kuep_lock bl __kuep_lock @@ -114,7 +117,7 @@ transfer_to_syscall: addi r12,r12,STACK_FRAME_REGS_MARKER@l stw r9,_MSR(r1) li r2, INTERRUPT_SYSCALL - stw r12,8(r1) + stw r12,STACK_INT_FRAME_MARKER(r1) stw r2,_TRAP(r1) SAVE_GPR(0, r1) SAVE_GPRS(3, 8, r1) @@ -123,12 +126,12 @@ transfer_to_syscall: kuep_lock /* Calling convention has r3 = regs, r4 = orig r0 */ - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS mr r4,r0 bl system_call_exception ret_from_syscall: - addi r4,r1,STACK_FRAME_OVERHEAD + addi r4,r1,STACK_INT_FRAME_REGS li r5,0 bl syscall_exit_prepare #ifdef CONFIG_PPC_47x @@ -215,9 +218,9 @@ ret_from_kernel_thread: * in arch/ppc/kernel/process.c */ _GLOBAL(_switch) - stwu r1,-INT_FRAME_SIZE(r1) + stwu r1,-SWITCH_FRAME_SIZE(r1) mflr r0 - stw r0,INT_FRAME_SIZE+4(r1) + stw r0,SWITCH_FRAME_SIZE+4(r1) /* r3-r12 are caller saved -- Cort */ SAVE_NVGPRS(r1) stw r0,_NIP(r1) /* Return to switch caller */ @@ -248,7 +251,7 @@ _GLOBAL(_switch) lwz r4,_NIP(r1) /* Return to _switch caller in new task */ mtlr r4 - addi r1,r1,INT_FRAME_SIZE + addi r1,r1,SWITCH_FRAME_SIZE blr .globl fast_exception_return @@ -293,7 +296,7 @@ _ASM_NOKPROBE_SYMBOL(fast_exception_return) .globl interrupt_return interrupt_return: lwz r4,_MSR(r1) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS andi. 
r0,r4,MSR_PR beq .Lkernel_interrupt_return bl interrupt_exit_user_prepare diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 3e2e37e6ecabe9a8b81f74641e9130b28019af02..1bf1121e17f1c12853f7afabd7281eb9212d6a1c 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -14,6 +14,7 @@ * code, and exception/interrupt return code for PowerPC. */ +#include #include #include #include @@ -73,6 +74,7 @@ flush_branch_caches: // Flush the link stack .rept 64 + ANNOTATE_INTRA_FUNCTION_CALL bl .+4 .endr b 1f diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 2f68fb2ee4fc3253b16ed28b4da34d6b41bfa233..3f86091e68b3bd4231ec069412c7b5edecb8e60b 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -358,7 +358,6 @@ ret_from_mc_except: std r14,PACA_EXMC+EX_R14(r13); \ std r15,PACA_EXMC+EX_R15(r13) - /* Core exception code for all exceptions except TLB misses. */ #define EXCEPTION_COMMON_LVL(n, scratch, excf) \ exc_##n##_common: \ @@ -391,10 +390,11 @@ exc_##n##_common: \ std r10,_CCR(r1); /* store orig CR in stackframe */ \ std r9,GPR1(r1); /* store stack frame back link */ \ std r11,SOFTE(r1); /* and save it to stackframe */ \ - std r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \ + std r12,STACK_INT_FRAME_MARKER(r1); /* mark the frame */ \ std r3,_TRAP(r1); /* set trap number */ \ std r0,RESULT(r1); /* clear regs->result */ \ - SAVE_NVGPRS(r1); + SAVE_NVGPRS(r1); \ + SANITIZE_NVGPRS(); /* minimise speculation influence */ #define EXCEPTION_COMMON(n) \ EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN) @@ -455,7 +455,7 @@ exc_##n##_bad_stack: \ EXCEPTION_COMMON(trapnum) \ ack(r8); \ CHECK_NAPPING(); \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ + addi r3,r1,STACK_INT_FRAME_REGS; \ bl hdlr; \ b interrupt_return @@ -504,7 +504,7 @@ __end_interrupts: EXCEPTION_COMMON_CRIT(0x100) bl special_reg_save CHECK_NAPPING(); - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl unknown_nmi_exception b ret_from_crit_except @@ -515,7 +515,7 @@ __end_interrupts: EXCEPTION_COMMON_MC(0x000) bl special_reg_save CHECK_NAPPING(); - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl machine_check_exception b ret_from_mc_except @@ -570,7 +570,7 @@ __end_interrupts: std r14,_ESR(r1) ld r14,PACA_EXGEN+EX_R14(r13) EXCEPTION_COMMON(0x700) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl program_check_exception REST_NVGPRS(r1) b interrupt_return @@ -586,7 +586,7 @@ __end_interrupts: beq- 1f bl load_up_fpu b fast_interrupt_return -1: addi r3,r1,STACK_FRAME_OVERHEAD +1: addi r3,r1,STACK_INT_FRAME_REGS bl kernel_fp_unavailable_exception b interrupt_return @@ -606,7 +606,7 @@ BEGIN_FTR_SECTION 1: END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl altivec_unavailable_exception b interrupt_return @@ -616,7 +616,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) BOOKE_INTERRUPT_ALTIVEC_ASSIST, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x220) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION bl altivec_assist_exception @@ -643,7 +643,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) EXCEPTION_COMMON_CRIT(0x9f0) bl special_reg_save CHECK_NAPPING(); - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS #ifdef CONFIG_BOOKE_WDT bl WatchdogException #else @@ -664,7 +664,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 
NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0xf20) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl unknown_exception b interrupt_return @@ -731,7 +731,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) ld r14,PACA_EXCRIT+EX_R14(r13) ld r15,PACA_EXCRIT+EX_R15(r13) EXCEPTION_COMMON_CRIT(0xd00) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl DebugException REST_NVGPRS(r1) b interrupt_return @@ -802,7 +802,7 @@ kernel_dbg_exc: ld r14,PACA_EXDBG+EX_R14(r13) ld r15,PACA_EXDBG+EX_R15(r13) EXCEPTION_COMMON_DBG(0xd08) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl DebugException REST_NVGPRS(r1) b interrupt_return @@ -812,7 +812,7 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x260) CHECK_NAPPING() - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS /* * XXX: Returning from performance_monitor_exception taken as a * soft-NMI (Linux irqs disabled) may be risky to use interrupt_return @@ -834,7 +834,7 @@ kernel_dbg_exc: EXCEPTION_COMMON_CRIT(0x2a0) bl special_reg_save CHECK_NAPPING(); - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl unknown_nmi_exception b ret_from_crit_except @@ -846,7 +846,7 @@ kernel_dbg_exc: GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x2c0) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl unknown_exception b interrupt_return @@ -857,7 +857,7 @@ kernel_dbg_exc: EXCEPTION_COMMON_CRIT(0x2e0) bl special_reg_save CHECK_NAPPING(); - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl unknown_nmi_exception b ret_from_crit_except @@ -866,7 +866,7 @@ kernel_dbg_exc: NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x310) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl unknown_exception b interrupt_return @@ -875,7 +875,7 @@ kernel_dbg_exc: NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x320) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl unknown_exception b interrupt_return @@ -884,7 +884,7 @@ kernel_dbg_exc: NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x340) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl unknown_exception b interrupt_return @@ -979,7 +979,7 @@ masked_interrupt_book3e_0x2c0: * original values stashed away in the PACA */ storage_fault_common: - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl do_page_fault b interrupt_return @@ -988,7 +988,7 @@ storage_fault_common: * continues here. 
*/ alignment_more: - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl alignment_exception REST_NVGPRS(r1) b interrupt_return @@ -1069,7 +1069,7 @@ bad_stack_book3e: ZEROIZE_GPR(12) std r12,0(r11) LOAD_PACA_TOC() -1: addi r3,r1,STACK_FRAME_OVERHEAD +1: addi r3,r1,STACK_INT_FRAME_REGS bl kernel_bad_stack b 1b diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 651c36b056bde3f444ed93e472ff9d507563a085..6441a1ba57ace5b552ca407806ad13c3a1ed6aac 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -13,6 +13,7 @@ * */ +#include #include #include #include @@ -111,6 +112,7 @@ name: #define ISTACK .L_ISTACK_\name\() /* Set regular kernel stack */ #define __ISTACK(name) .L_ISTACK_ ## name #define IKUAP .L_IKUAP_\name\() /* Do KUAP lock */ +#define IMSR_R12 .L_IMSR_R12_\name\() /* Assumes MSR saved to r12 */ #define INT_DEFINE_BEGIN(n) \ .macro int_define_ ## n name @@ -176,6 +178,9 @@ do_define_int n .ifndef IKUAP IKUAP=1 .endif + .ifndef IMSR_R12 + IMSR_R12=0 + .endif .endm /* @@ -502,6 +507,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real, text) std r10,0(r1) /* make stack chain pointer */ std r0,GPR0(r1) /* save r0 in stackframe */ std r10,GPR1(r1) /* save r1 in stackframe */ + SANITIZE_GPR(0) /* Mark our [H]SRRs valid for return */ li r10,1 @@ -544,8 +550,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) std r9,GPR11(r1) std r10,GPR12(r1) std r11,GPR13(r1) + .if !IMSR_R12 + SANITIZE_GPRS(9, 12) + .else + SANITIZE_GPRS(9, 11) + .endif SAVE_NVGPRS(r1) + SANITIZE_NVGPRS() .if IDAR .if IISIDE @@ -577,8 +589,8 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_CFAR) ld r10,IAREA+EX_CTR(r13) std r10,_CTR(r1) - std r2,GPR2(r1) /* save r2 in stackframe */ - SAVE_GPRS(3, 8, r1) /* save r3 - r8 in stackframe */ + SAVE_GPRS(2, 8, r1) /* save r2 - r8 in stackframe */ + SANITIZE_GPRS(2, 8) mflr r9 /* Get LR, later save to stack */ LOAD_PACA_TOC() /* get kernel TOC into r2 */ std r9,_LINK(r1) @@ -591,7 +603,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) li r10,0 LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) std r10,RESULT(r1) /* clear regs->result */ - std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */ + std r11,STACK_INT_FRAME_MARKER(r1) /* mark the frame */ .endm /* @@ -696,6 +708,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) mtlr r9 ld r9,_CCR(r1) mtcr r9 + SANITIZE_RESTORE_NVGPRS() REST_GPRS(2, 13, r1) REST_GPR(0, r1) /* restore original r1. */ @@ -1061,7 +1074,7 @@ EXC_COMMON_BEGIN(system_reset_common) subi r1,r1,INT_FRAME_SIZE __GEN_COMMON_BODY system_reset - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl system_reset_exception /* Clear MSR_RI before setting SRR0 and SRR1. */ @@ -1208,7 +1221,7 @@ EXC_COMMON_BEGIN(machine_check_early_common) BEGIN_FTR_SECTION bl enable_machine_check END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS BEGIN_FTR_SECTION bl machine_check_early_boot END_FTR_SECTION(0, 1) // nop out after boot @@ -1298,7 +1311,7 @@ EXC_COMMON_BEGIN(machine_check_common) * save area: PACA_EXMC instead of PACA_EXGEN. */ GEN_COMMON machine_check - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl machine_check_exception_async b interrupt_return_srr @@ -1364,14 +1377,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) * This is the NMI version of the handler because we are called from * the early handler which is a true NMI. 
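[Editor's aside — illustrative sketch, not part of the patch: the IMSR_R12 flag added above tells the common entry macro that an interrupt's fast path keeps the saved MSR live in r12, so register sanitisation must spare r12. C pseudocode for that gating, where zeroize_gprs() is a hypothetical stand-in for the SANITIZE_GPRS asm macro:]

	static inline void sanitize_volatile_gprs(int imsr_r12)
	{
		if (!imsr_r12)
			zeroize_gprs(9, 12);	/* r12 not needed: clear r9-r12 */
		else
			zeroize_gprs(9, 11);	/* MSR lives in r12: clear r9-r11 */
	}
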
*/ - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl machine_check_exception /* * We will not reach here. Even if we did, there is no way out. * Call unrecoverable_exception and die. */ - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl unrecoverable_exception b . @@ -1422,7 +1435,7 @@ EXC_VIRT_END(data_access, 0x4300, 0x80) EXC_COMMON_BEGIN(data_access_common) GEN_COMMON data_access ld r4,_DSISR(r1) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS andis. r0,r4,DSISR_DABRMATCH@h bne- 1f #ifdef CONFIG_PPC_64S_HASH_MMU @@ -1441,7 +1454,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) * do_break() may have changed the NV GPRS while handling a breakpoint. * If so, we need to restore them with their updated values. */ - REST_NVGPRS(r1) + HANDLER_RESTORE_NVGPRS() b interrupt_return_srr @@ -1479,7 +1492,7 @@ EXC_COMMON_BEGIN(data_access_slb_common) #ifdef CONFIG_PPC_64S_HASH_MMU BEGIN_MMU_FTR_SECTION /* HPT case, do SLB fault */ - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl do_slb_fault cmpdi r3,0 bne- 1f @@ -1493,7 +1506,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) li r3,-EFAULT #endif std r3,RESULT(r1) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl do_bad_segment_interrupt b interrupt_return_srr @@ -1525,7 +1538,7 @@ EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80) EXC_VIRT_END(instruction_access, 0x4400, 0x80) EXC_COMMON_BEGIN(instruction_access_common) GEN_COMMON instruction_access - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS #ifdef CONFIG_PPC_64S_HASH_MMU BEGIN_MMU_FTR_SECTION bl do_hash_fault @@ -1567,7 +1580,7 @@ EXC_COMMON_BEGIN(instruction_access_slb_common) #ifdef CONFIG_PPC_64S_HASH_MMU BEGIN_MMU_FTR_SECTION /* HPT case, do SLB fault */ - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl do_slb_fault cmpdi r3,0 bne- 1f @@ -1581,7 +1594,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) li r3,-EFAULT #endif std r3,RESULT(r1) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl do_bad_segment_interrupt b interrupt_return_srr @@ -1635,7 +1648,7 @@ EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100) EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100) EXC_COMMON_BEGIN(hardware_interrupt_common) GEN_COMMON hardware_interrupt - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl do_IRQ BEGIN_FTR_SECTION b interrupt_return_hsrr @@ -1665,9 +1678,9 @@ EXC_VIRT_BEGIN(alignment, 0x4600, 0x100) EXC_VIRT_END(alignment, 0x4600, 0x100) EXC_COMMON_BEGIN(alignment_common) GEN_COMMON alignment - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl alignment_exception - REST_NVGPRS(r1) /* instruction emulation may change GPRs */ + HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */ b interrupt_return_srr @@ -1731,9 +1744,9 @@ EXC_COMMON_BEGIN(program_check_common) __GEN_COMMON_BODY program_check .Ldo_program_check: - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl program_check_exception - REST_NVGPRS(r1) /* instruction emulation may change GPRs */ + HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */ b interrupt_return_srr @@ -1751,6 +1764,7 @@ INT_DEFINE_BEGIN(fp_unavailable) #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE IKVM_REAL=1 #endif + IMSR_R12=1 INT_DEFINE_END(fp_unavailable) EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100) @@ -1762,7 +1776,7 @@ EXC_VIRT_END(fp_unavailable, 0x4800, 0x100) EXC_COMMON_BEGIN(fp_unavailable_common) GEN_COMMON 
fp_unavailable bne 1f /* if from user, just load it up */ - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl kernel_fp_unavailable_exception 0: trap EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 @@ -1780,7 +1794,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) b fast_interrupt_return_srr #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl fp_unavailable_tm b interrupt_return_srr #endif @@ -1824,7 +1838,7 @@ EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80) EXC_VIRT_END(decrementer, 0x4900, 0x80) EXC_COMMON_BEGIN(decrementer_common) GEN_COMMON decrementer - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl timer_interrupt b interrupt_return_srr @@ -1909,7 +1923,7 @@ EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100) EXC_VIRT_END(doorbell_super, 0x4a00, 0x100) EXC_COMMON_BEGIN(doorbell_super_common) GEN_COMMON doorbell_super - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS #ifdef CONFIG_PPC_DOORBELL bl doorbell_exception #else @@ -2076,7 +2090,7 @@ EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100) EXC_VIRT_END(single_step, 0x4d00, 0x100) EXC_COMMON_BEGIN(single_step_common) GEN_COMMON single_step - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl single_step_exception b interrupt_return_srr @@ -2110,7 +2124,7 @@ EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20) EXC_VIRT_END(h_data_storage, 0x4e00, 0x20) EXC_COMMON_BEGIN(h_data_storage_common) GEN_COMMON h_data_storage - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS BEGIN_MMU_FTR_SECTION bl do_bad_page_fault_segv MMU_FTR_SECTION_ELSE @@ -2139,7 +2153,7 @@ EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20) EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20) EXC_COMMON_BEGIN(h_instr_storage_common) GEN_COMMON h_instr_storage - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl unknown_exception b interrupt_return_hsrr @@ -2162,9 +2176,9 @@ EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20) EXC_VIRT_END(emulation_assist, 0x4e40, 0x20) EXC_COMMON_BEGIN(emulation_assist_common) GEN_COMMON emulation_assist - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl emulation_assist_interrupt - REST_NVGPRS(r1) /* instruction emulation may change GPRs */ + HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */ b interrupt_return_hsrr @@ -2222,7 +2236,7 @@ EXC_COMMON_BEGIN(hmi_exception_early_common) __GEN_COMMON_BODY hmi_exception_early - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl hmi_exception_realmode cmpdi cr0,r3,0 bne 1f @@ -2240,7 +2254,7 @@ EXC_COMMON_BEGIN(hmi_exception_early_common) EXC_COMMON_BEGIN(hmi_exception_common) GEN_COMMON hmi_exception - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl handle_hmi_exception b interrupt_return_hsrr @@ -2274,7 +2288,7 @@ EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20) EXC_VIRT_END(h_doorbell, 0x4e80, 0x20) EXC_COMMON_BEGIN(h_doorbell_common) GEN_COMMON h_doorbell - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS #ifdef CONFIG_PPC_DOORBELL bl doorbell_exception #else @@ -2310,7 +2324,7 @@ EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20) EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20) EXC_COMMON_BEGIN(h_virt_irq_common) GEN_COMMON h_virt_irq - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl do_IRQ b interrupt_return_hsrr @@ -2356,7 +2370,7 @@ EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20) EXC_VIRT_END(performance_monitor, 0x4f00, 0x20) 
EXC_COMMON_BEGIN(performance_monitor_common) GEN_COMMON performance_monitor - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS lbz r4,PACAIRQSOFTMASK(r13) cmpdi r4,IRQS_ENABLED bne 1f @@ -2384,6 +2398,7 @@ INT_DEFINE_BEGIN(altivec_unavailable) #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE IKVM_REAL=1 #endif + IMSR_R12=1 INT_DEFINE_END(altivec_unavailable) EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20) @@ -2410,14 +2425,14 @@ BEGIN_FTR_SECTION b fast_interrupt_return_srr #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl altivec_unavailable_tm b interrupt_return_srr #endif 1: END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl altivec_unavailable_exception b interrupt_return_srr @@ -2433,6 +2448,7 @@ INT_DEFINE_BEGIN(vsx_unavailable) #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE IKVM_REAL=1 #endif + IMSR_R12=1 INT_DEFINE_END(vsx_unavailable) EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20) @@ -2458,14 +2474,14 @@ BEGIN_FTR_SECTION b load_up_vsx #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl vsx_unavailable_tm b interrupt_return_srr #endif 1: END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl vsx_unavailable_exception b interrupt_return_srr @@ -2492,9 +2508,9 @@ EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20) EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20) EXC_COMMON_BEGIN(facility_unavailable_common) GEN_COMMON facility_unavailable - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl facility_unavailable_exception - REST_NVGPRS(r1) /* instruction emulation may change GPRs */ + HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */ b interrupt_return_srr @@ -2520,9 +2536,10 @@ EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20) EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20) EXC_COMMON_BEGIN(h_facility_unavailable_common) GEN_COMMON h_facility_unavailable - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl facility_unavailable_exception - REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */ + /* XXX Shouldn't be necessary in practice */ + HANDLER_RESTORE_NVGPRS() b interrupt_return_hsrr @@ -2550,7 +2567,7 @@ EXC_REAL_END(cbe_system_error, 0x1200, 0x100) EXC_VIRT_NONE(0x5200, 0x100) EXC_COMMON_BEGIN(cbe_system_error_common) GEN_COMMON cbe_system_error - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl cbe_system_error_exception b interrupt_return_hsrr @@ -2581,7 +2598,7 @@ EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100) EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100) EXC_COMMON_BEGIN(instruction_breakpoint_common) GEN_COMMON instruction_breakpoint - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl instruction_breakpoint_exception b interrupt_return_srr @@ -2703,7 +2720,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) EXC_COMMON_BEGIN(denorm_exception_common) GEN_COMMON denorm_exception - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl unknown_exception b interrupt_return_hsrr @@ -2720,7 +2737,7 @@ EXC_REAL_END(cbe_maintenance, 0x1600, 0x100) EXC_VIRT_NONE(0x5600, 0x100) EXC_COMMON_BEGIN(cbe_maintenance_common) GEN_COMMON cbe_maintenance - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl cbe_maintenance_exception b 
interrupt_return_hsrr @@ -2745,10 +2762,10 @@ EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100) EXC_VIRT_END(altivec_assist, 0x5700, 0x100) EXC_COMMON_BEGIN(altivec_assist_common) GEN_COMMON altivec_assist - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS #ifdef CONFIG_ALTIVEC bl altivec_assist_exception - REST_NVGPRS(r1) /* instruction emulation may change GPRs */ + HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */ #else bl unknown_exception #endif @@ -2767,7 +2784,7 @@ EXC_REAL_END(cbe_thermal, 0x1800, 0x100) EXC_VIRT_NONE(0x5800, 0x100) EXC_COMMON_BEGIN(cbe_thermal_common) GEN_COMMON cbe_thermal - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl cbe_thermal_exception b interrupt_return_hsrr @@ -2800,7 +2817,7 @@ EXC_COMMON_BEGIN(soft_nmi_common) subi r1,r1,INT_FRAME_SIZE __GEN_COMMON_BODY soft_nmi - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl soft_nmi_interrupt /* Clear MSR_RI before setting SRR0 and SRR1. */ @@ -3124,7 +3141,7 @@ _GLOBAL(enable_machine_check) blr /* MSR[RI] should be clear because this uses SRR[01] */ -disable_machine_check: +SYM_FUNC_START_LOCAL(disable_machine_check) mflr r0 bcl 20,31,$+4 0: mflr r3 @@ -3137,3 +3154,4 @@ disable_machine_check: RFI_TO_KERNEL 1: mtlr r0 blr +SYM_FUNC_END(disable_machine_check) diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h index c3286260a7d1c280213cece8a4f47a49f0ac42da..f8e2911478a79949103a9378c10f8cba8f91461f 100644 --- a/arch/powerpc/kernel/head_32.h +++ b/arch/powerpc/kernel/head_32.h @@ -112,7 +112,7 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt) stw r0,GPR0(r1) lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */ addi r10,r10,STACK_FRAME_REGS_MARKER@l - stw r10,8(r1) + stw r10,STACK_INT_FRAME_MARKER(r1) li r10, \trapno stw r10,_TRAP(r1) SAVE_GPRS(3, 8, r1) @@ -127,7 +127,7 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt) mfspr r10,SPRN_XER addi r2, r2, -THREAD stw r10,_XER(r1) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS .endm .macro prepare_transfer_to_handler diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 088f500896c78cba923446f9b6a5704e47dde30f..3f68a162464620be3f3f2dcabe2c06dcb6f762e7 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -28,6 +28,8 @@ #include #include #include +#include <linux/linkage.h> + #include #include #include @@ -602,7 +604,7 @@ start_here: lis r1,init_thread_union@ha addi r1,r1,init_thread_union@l li r0,0 - stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) + stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1) bl early_init /* We have to do this with MMU on */ @@ -662,7 +664,7 @@ start_here: * kernel initialization. This maps the first 32 MBytes of memory 1:1 * virtual to physical and more importantly sets the cache mode.
*/ -initial_mmu: +SYM_FUNC_START_LOCAL(initial_mmu) tlbia /* Invalidate all TLB entries */ isync @@ -711,6 +713,7 @@ initial_mmu: mtspr SPRN_EVPR,r0 blr +SYM_FUNC_END(initial_mmu) _GLOBAL(abort) mfspr r13,SPRN_DBCR0 diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index f15cb9fdb692f18522ae27a5ea9da8c017b27f4c..63a85c16fef460b81dbacf7294b33945118c9aa7 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -109,7 +109,7 @@ _GLOBAL(_start); lis r1,init_thread_union@h ori r1,r1,init_thread_union@l li r0,0 - stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) + stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1) bl early_init @@ -1012,7 +1012,7 @@ _GLOBAL(start_secondary_47x) */ lis r1,temp_boot_stack@h ori r1,r1,temp_boot_stack@l - addi r1,r1,1024-STACK_FRAME_OVERHEAD + addi r1,r1,1024-STACK_FRAME_MIN_SIZE li r0,0 stw r0,0(r1) bl mmu_init_secondary @@ -1025,7 +1025,7 @@ _GLOBAL(start_secondary_47x) lwz r1,TASK_STACK(r2) /* Current stack pointer */ - addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD + addi r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE li r0,0 stw r0,0(r1) diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index dedcc6fe2263a1d75159a12f5cd2f17ea384be15..7558ba4eb86429e4df0eab1c14c4f5b27bb461ae 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -18,6 +18,7 @@ * variants. */ +#include <linux/linkage.h> #include #include #include @@ -424,7 +425,7 @@ generic_secondary_common_init: /* Create a temp kernel stack for use before relocation is on. */ ld r1,PACAEMERGSP(r13) - subi r1,r1,STACK_FRAME_OVERHEAD + subi r1,r1,STACK_FRAME_MIN_SIZE /* See if we need to call a cpu state restore handler */ LOAD_REG_ADDR(r23, cur_cpu_spec) @@ -462,7 +463,7 @@ generic_secondary_common_init: * Assumes we're mapped EA == RA if the MMU is on. */ #ifdef CONFIG_PPC_BOOK3S -__mmu_off: +SYM_FUNC_START_LOCAL(__mmu_off) mfmsr r3 andi. r0,r3,MSR_IR|MSR_DR beqlr @@ -473,6 +474,7 @@ __mmu_off: sync rfid b . /* prevent speculative execution */ +SYM_FUNC_END(__mmu_off) #endif @@ -780,7 +782,7 @@ _GLOBAL(pmac_secondary_start) /* Create a temp kernel stack for use before relocation is on.
*/ ld r1,PACAEMERGSP(r13) - subi r1,r1,STACK_FRAME_OVERHEAD + subi r1,r1,STACK_FRAME_MIN_SIZE b __secondary_start @@ -869,7 +871,7 @@ _GLOBAL(start_secondary_resume) /* * This subroutine clobbers r11 and r12 */ -enable_64b_mode: +SYM_FUNC_START_LOCAL(enable_64b_mode) mfmsr r11 /* grab the current MSR */ #ifdef CONFIG_PPC_BOOK3E_64 oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ @@ -881,6 +883,7 @@ enable_64b_mode: isync #endif blr +SYM_FUNC_END(enable_64b_mode) /* * This puts the TOC pointer into r2, offset by 0x8000 (as expected @@ -958,7 +961,7 @@ start_here_multiplatform: LOAD_REG_IMMEDIATE(r1,THREAD_SIZE) add r1,r3,r1 li r0,0 - stdu r0,-STACK_FRAME_OVERHEAD(r1) + stdu r0,-STACK_FRAME_MIN_SIZE(r1) /* * Do very early kernel initializations, including initial hash table diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S index 52c0ab416326a8ee543b91292f9acf52d4801c7d..d438ca74e96cc9756735a3d934fedc96c3f3aa8c 100644 --- a/arch/powerpc/kernel/head_85xx.S +++ b/arch/powerpc/kernel/head_85xx.S @@ -29,6 +29,8 @@ #include #include #include +#include <linux/linkage.h> + #include #include #include @@ -229,7 +231,7 @@ set_ivor: lis r1,init_thread_union@h ori r1,r1,init_thread_union@l li r0,0 - stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) + stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1) #ifdef CONFIG_SMP stw r24, TASK_CPU(r2) @@ -885,7 +887,7 @@ KernelSPE: * Translate the effective addr in r3 to phys addr. The phys addr will be put * into r3(higher 32bit) and r4(lower 32bit) */ -get_phys_addr: +SYM_FUNC_START_LOCAL(get_phys_addr) mfmsr r8 mfspr r9,SPRN_PID rlwinm r9,r9,16,0x3fff0000 /* turn PID into MAS6[SPID] */ @@ -907,6 +909,7 @@ get_phys_addr: mfspr r3,SPRN_MAS7 #endif blr +SYM_FUNC_END(get_phys_addr) /* * Global functions @@ -972,10 +975,10 @@ _GLOBAL(__giveup_spe) li r4,THREAD_ACC evstddx evr6, r4, r3 /* save off accumulator */ beq 1f - lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + lwz r4,_MSR-STACK_INT_FRAME_REGS(r5) lis r3,MSR_SPE@h andc r4,r4,r3 /* disable SPE for previous task */ - stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) + stw r4,_MSR-STACK_INT_FRAME_REGS(r5) 1: blr #endif /* CONFIG_SPE */ @@ -1044,7 +1047,7 @@ __secondary_start: lwz r1,TASK_STACK(r2) /* stack */ - addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD + addi r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE li r0,0 stw r0,0(r1) diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 0b05f2be66b9fefcd00539cbd35e7ab0466f5188..a79751e05781da40427fc270255ad37e373cef19 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -18,6 +18,8 @@ #include #include #include +#include <linux/linkage.h> + #include #include #include @@ -537,7 +539,7 @@ start_here: ori r0, r0, STACK_END_MAGIC@l stw r0, 0(r1) li r0,0 - stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) + stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1) lis r6, swapper_pg_dir@ha tophys(r6,r6) @@ -625,7 +627,7 @@ start_here: * 24 Mbytes of data, and the 512k IMMR space. Anything not covered by * these mappings is mapped by page tables.
*/ -initial_mmu: +SYM_FUNC_START_LOCAL(initial_mmu) li r8, 0 mtspr SPRN_MI_CTR, r8 /* remove PINNED ITLB entries */ lis r10, MD_TWAM@h @@ -686,6 +688,7 @@ initial_mmu: #endif mtspr SPRN_DER, r8 blr +SYM_FUNC_END(initial_mmu) _GLOBAL(mmu_pin_tlb) lis r9, (1f - PAGE_OFFSET)@h diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S index 519b606951675b46932d7b727a31c3a13176efc8..c51f28b5abc05b35f14f0fc89961f863749b9ba6 100644 --- a/arch/powerpc/kernel/head_book3s_32.S +++ b/arch/powerpc/kernel/head_book3s_32.S @@ -18,6 +18,8 @@ #include #include +#include <linux/linkage.h> + #include #include #include @@ -840,7 +842,7 @@ __secondary_start: lwz r1,TASK_STACK(r1) /* stack */ - addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD + addi r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE li r0,0 tophys(r3,r1) stw r0,0(r3) @@ -877,7 +879,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE) * Load stuff into the MMU. Intended to be called with * IR=0 and DR=0. */ -early_hash_table: +SYM_FUNC_START_LOCAL(early_hash_table) sync /* Force all PTE updates to finish */ isync tlbia /* Clear all TLB entries */ @@ -888,8 +890,9 @@ early_hash_table: ori r6, r6, 3 /* 256kB table */ mtspr SPRN_SDR1, r6 blr +SYM_FUNC_END(early_hash_table) -load_up_mmu: +SYM_FUNC_START_LOCAL(load_up_mmu) sync /* Force all PTE updates to finish */ isync tlbia /* Clear all TLB entries */ @@ -918,6 +921,7 @@ BEGIN_MMU_FTR_SECTION LOAD_BAT(7,r3,r4,r5) END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) blr +SYM_FUNC_END(load_up_mmu) _GLOBAL(load_segment_registers) li r0, NUM_USER_SEGMENTS /* load up user segment register values */ @@ -966,7 +970,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE) lis r1,init_thread_union@ha addi r1,r1,init_thread_union@l li r0,0 - stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) + stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1) /* * Do early platform-specific initialization, * and set up the MMU. @@ -1028,7 +1032,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE) * this makes sure it's done. * -- Cort */ -clear_bats: +SYM_FUNC_START_LOCAL(clear_bats) li r10,0 mtspr SPRN_DBAT0U,r10 @@ -1072,6 +1076,7 @@ BEGIN_MMU_FTR_SECTION mtspr SPRN_IBAT7L,r10 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) blr +SYM_FUNC_END(clear_bats) _GLOBAL(update_bats) lis r4, 1f@h @@ -1108,15 +1113,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) mtspr SPRN_SRR1, r6 rfi -flush_tlbs: +SYM_FUNC_START_LOCAL(flush_tlbs) lis r10, 0x40 1: addic. r10, r10, -0x1000 tlbie r10 bgt 1b sync blr +SYM_FUNC_END(flush_tlbs) -mmu_off: +SYM_FUNC_START_LOCAL(mmu_off) addi r4, r3, __after_mmu_off - _start mfmsr r3 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled?
*/ @@ -1128,9 +1134,10 @@ mmu_off: mtspr SPRN_SRR1,r3 sync rfi +SYM_FUNC_END(mmu_off) /* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */ -initial_bats: +SYM_FUNC_START_LOCAL(initial_bats) lis r11,PAGE_OFFSET@h tophys(r8,r11) #ifdef CONFIG_SMP @@ -1146,9 +1153,10 @@ initial_bats: mtspr SPRN_IBAT0U,r11 isync blr +SYM_FUNC_END(initial_bats) #ifdef CONFIG_BOOTX_TEXT -setup_disp_bat: +SYM_FUNC_START_LOCAL(setup_disp_bat) /* * setup the display bat prepared for us in prom.c */ @@ -1164,10 +1172,11 @@ setup_disp_bat: mtspr SPRN_DBAT3L,r8 mtspr SPRN_DBAT3U,r11 blr +SYM_FUNC_END(setup_disp_bat) #endif /* CONFIG_BOOTX_TEXT */ #ifdef CONFIG_PPC_EARLY_DEBUG_CPM -setup_cpm_bat: +SYM_FUNC_START_LOCAL(setup_cpm_bat) lis r8, 0xf000 ori r8, r8, 0x002a mtspr SPRN_DBAT1L, r8 @@ -1177,10 +1186,11 @@ setup_cpm_bat: mtspr SPRN_DBAT1U, r11 blr +SYM_FUNC_END(setup_cpm_bat) #endif #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO -setup_usbgecko_bat: +SYM_FUNC_START_LOCAL(setup_usbgecko_bat) /* prepare a BAT for early io */ #if defined(CONFIG_GAMECUBE) lis r8, 0x0c00 @@ -1199,6 +1209,7 @@ setup_usbgecko_bat: mtspr SPRN_DBAT1L, r8 mtspr SPRN_DBAT1U, r11 blr +SYM_FUNC_END(setup_usbgecko_bat) #endif .data diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 1cb9d0f7cbf257962799cb4201512e2134adda0a..37d43c1726766f23ed5af1211ff403d9d1b26e15 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -84,7 +84,7 @@ END_BTB_FLUSH_SECTION stw r0,GPR0(r1) lis r10, STACK_FRAME_REGS_MARKER@ha /* exception frame marker */ addi r10, r10, STACK_FRAME_REGS_MARKER@l - stw r10, 8(r1) + stw r10, STACK_INT_FRAME_MARKER(r1) li r10, \trapno stw r10,_TRAP(r1) SAVE_GPRS(3, 8, r1) @@ -99,7 +99,7 @@ END_BTB_FLUSH_SECTION mfspr r10,SPRN_XER addi r2, r2, -THREAD stw r10,_XER(r1) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS .endm .macro prepare_transfer_to_handler diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 8db1a15d7acbe17815f2a843242b5ea5339fbcf1..e1b4e70c8fd0f0abadc2ad26536a906ca2a44e94 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -646,7 +646,7 @@ int hw_breakpoint_handler(struct die_args *args) ppc_inst_t instr = ppc_inst(0); int type = 0; int size = 0; - unsigned long ea; + unsigned long ea = 0; /* Disable breakpoints during exception handling */ hw_breakpoint_disable(); diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index a019ed6fc83937af635b1aa8c7206979d7c4c10e..fccc34489add8456bb060b8c0a476acfc3428a88 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -77,11 +77,11 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) std r11,_TRAP(r1) std r12,_CCR(r1) std r3,ORIG_GPR3(r1) + LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) + std r11,STACK_INT_FRAME_MARKER(r1) /* "regs" marker */ /* Calling convention has r3 = regs, r4 = orig r0 */ - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS mr r4,r0 - LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) - std r11,-16(r3) /* "regshere" marker */ BEGIN_FTR_SECTION HMT_MEDIUM @@ -96,10 +96,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) * but this is the best we can do. */ + /* + * Zero user registers to prevent influencing speculative execution + * state of kernel code. 
+ */ + SANITIZE_SYSCALL_GPRS() bl system_call_exception .Lsyscall_vectored_\name\()_exit: - addi r4,r1,STACK_FRAME_OVERHEAD + addi r4,r1,STACK_INT_FRAME_REGS li r5,1 /* scv */ bl syscall_exit_prepare std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */ @@ -124,6 +129,7 @@ BEGIN_FTR_SECTION HMT_MEDIUM_LOW END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) + SANITIZE_RESTORE_NVGPRS() cmpdi r3,0 bne .Lsyscall_vectored_\name\()_restore_regs @@ -159,7 +165,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r4,_LINK(r1) ld r5,_XER(r1) - REST_NVGPRS(r1) + HANDLER_RESTORE_NVGPRS() REST_GPR(0, r1) mtcr r2 mtctr r3 @@ -176,7 +182,7 @@ _ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart) ld r1,PACA_EXIT_SAVE_R1(r13) LOAD_PACA_TOC() ld r3,RESULT(r1) - addi r4,r1,STACK_FRAME_OVERHEAD + addi r4,r1,STACK_INT_FRAME_REGS li r11,IRQS_ALL_DISABLED stb r11,PACAIRQSOFTMASK(r13) bl syscall_exit_restart @@ -250,11 +256,11 @@ END_BTB_FLUSH_SECTION std r11,_TRAP(r1) std r12,_CCR(r1) std r3,ORIG_GPR3(r1) + LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) + std r11,STACK_INT_FRAME_MARKER(r1) /* "regs" marker */ /* Calling convention has r3 = regs, r4 = orig r0 */ - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS mr r4,r0 - LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) - std r11,-16(r3) /* "regshere" marker */ #ifdef CONFIG_PPC_BOOK3S li r11,1 @@ -275,10 +281,15 @@ END_BTB_FLUSH_SECTION wrteei 1 #endif + /* + * Zero user registers to prevent influencing speculative execution + * state of kernel code. + */ + SANITIZE_SYSCALL_GPRS() bl system_call_exception .Lsyscall_exit: - addi r4,r1,STACK_FRAME_OVERHEAD + addi r4,r1,STACK_INT_FRAME_REGS li r5,0 /* !scv */ bl syscall_exit_prepare std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */ @@ -315,6 +326,7 @@ BEGIN_FTR_SECTION stdcx. r0,0,r1 /* to clear the reservation */ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) + SANITIZE_RESTORE_NVGPRS() cmpdi r3,0 bne .Lsyscall_restore_regs /* Zero volatile regs that may contain sensitive kernel data */ @@ -342,7 +354,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) .Lsyscall_restore_regs: ld r3,_CTR(r1) ld r4,_XER(r1) - REST_NVGPRS(r1) + HANDLER_RESTORE_NVGPRS() mtctr r3 mtspr SPRN_XER,r4 REST_GPR(0, r1) @@ -357,7 +369,7 @@ _ASM_NOKPROBE_SYMBOL(syscall_restart) ld r1,PACA_EXIT_SAVE_R1(r13) LOAD_PACA_TOC() ld r3,RESULT(r1) - addi r4,r1,STACK_FRAME_OVERHEAD + addi r4,r1,STACK_INT_FRAME_REGS li r11,IRQS_ALL_DISABLED stb r11,PACAIRQSOFTMASK(r13) bl syscall_exit_restart @@ -388,7 +400,7 @@ _ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr) andi. r0,r5,MSR_RI li r3,0 /* 0 return value, no EMULATE_STACK_STORE */ bne+ .Lfast_kernel_interrupt_return_srr - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl unrecoverable_exception b . 
/* should not get here */ #else @@ -406,11 +418,13 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()) beq interrupt_return_\srr\()_kernel interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl interrupt_exit_user_prepare +#ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS cmpdi r3,0 bne- .Lrestore_nvgprs_\srr .Lrestore_nvgprs_\srr\()_cont: +#endif std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */ #ifdef CONFIG_PPC_BOOK3S .Linterrupt_return_\srr\()_user_rst_start: @@ -424,6 +438,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user) stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS .Lfast_user_interrupt_return_\srr\(): + SANITIZE_RESTORE_NVGPRS() #ifdef CONFIG_PPC_BOOK3S .ifc \srr,srr lbz r4,PACASRR_VALID(r13) @@ -493,9 +508,11 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) b . /* prevent speculative execution */ .Linterrupt_return_\srr\()_user_rst_end: +#ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS .Lrestore_nvgprs_\srr\(): REST_NVGPRS(r1) b .Lrestore_nvgprs_\srr\()_cont +#endif #ifdef CONFIG_PPC_BOOK3S interrupt_return_\srr\()_user_restart: @@ -503,7 +520,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart) GET_PACA(r13) ld r1,PACA_EXIT_SAVE_R1(r13) LOAD_PACA_TOC() - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS li r11,IRQS_ALL_DISABLED stb r11,PACAIRQSOFTMASK(r13) bl interrupt_exit_user_restart @@ -518,7 +535,7 @@ RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr .balign IFETCH_ALIGN_BYTES interrupt_return_\srr\()_kernel: _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel) - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS bl interrupt_exit_kernel_prepare std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */ @@ -585,6 +602,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel) stb r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS .Lfast_kernel_interrupt_return_\srr\(): + SANITIZE_RESTORE_NVGPRS() cmpdi cr1,r3,0 #ifdef CONFIG_PPC_BOOK3S .ifc \srr,srr @@ -637,7 +655,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse * the reliable stack unwinder later on. Clear it. 
*/ - std r0,STACK_FRAME_OVERHEAD-16(r1) + std r0,STACK_INT_FRAME_MARKER(r1) REST_GPRS(2, 5, r1) @@ -684,7 +702,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart) GET_PACA(r13) ld r1,PACA_EXIT_SAVE_R1(r13) LOAD_PACA_TOC() - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS li r11,IRQS_ALL_DISABLED stb r11,PACAIRQSOFTMASK(r13) bl interrupt_exit_kernel_restart diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 9ede61a5a469efb7f4252412c2a5c98c5f77c48b..c5b9ce8874834afccf3c822af51313cf5e544dcd 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -210,7 +210,7 @@ static __always_inline void call_do_softirq(const void *sp) PPC_LL " %%r1, 0(%%r1) ;" : // Outputs : // Inputs - [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD), + [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_MIN_SIZE), [callee] "i" (__do_softirq) : // Clobbers "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6", @@ -264,7 +264,7 @@ static __always_inline void call_do_irq(struct pt_regs *regs, void *sp) : // Outputs "+r" (r3) : // Inputs - [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD), + [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_MIN_SIZE), [callee] "i" (__do_irq) : // Clobbers "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6", diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index 1a1e9995dae35b5745a9e0ae7ed2afef07417ada..ebe4d1645ca1fd0118013b4d74bdac90050033f1 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -191,7 +191,7 @@ static int kgdb_break_match(struct pt_regs *regs) void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) { struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp + - STACK_FRAME_OVERHEAD); + STACK_INT_FRAME_REGS); unsigned long *ptr = gdb_regs; int reg; diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index bd7b1a035459488d12c3d5c615e2569524a3a7b3..b20ee72e873a1b881283ded2c8ffef94acf187f7 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -20,12 +20,12 @@ #include #include #include +#include #include #include #include #include #include -#include #include DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; @@ -134,10 +134,9 @@ void *alloc_insn_page(void) if (!page) return NULL; - if (strict_module_rwx_enabled()) { - set_memory_ro((unsigned long)page, 1); - set_memory_x((unsigned long)page, 1); - } + if (strict_module_rwx_enabled()) + set_memory_rox((unsigned long)page, 1); + return page; } @@ -158,9 +157,7 @@ int arch_prepare_kprobe(struct kprobe *p) printk("Cannot register a kprobe on the second word of prefixed instruction\n"); ret = -EINVAL; } - preempt_disable(); prev = get_kprobe(p->addr - 1); - preempt_enable_no_resched(); /* * When prev is a ftrace-based kprobe, we don't have an insn, and it @@ -371,7 +368,7 @@ int kprobe_handler(struct pt_regs *regs) if (ret > 0) { restore_previous_kprobe(kcb); - preempt_enable_no_resched(); + preempt_enable(); return 1; } } @@ -384,7 +381,7 @@ int kprobe_handler(struct pt_regs *regs) if (p->pre_handler && p->pre_handler(p, regs)) { /* handler changed execution path, so skip ss setup */ reset_current_kprobe(); - preempt_enable_no_resched(); + preempt_enable(); return 1; } @@ -397,7 +394,7 @@ int kprobe_handler(struct pt_regs *regs) kcb->kprobe_status = KPROBE_HIT_SSDONE; reset_current_kprobe(); - preempt_enable_no_resched(); + preempt_enable(); return 1; } } @@ -406,7 +403,7 @@ int 
kprobe_handler(struct pt_regs *regs) return 1; no_kprobe: - preempt_enable_no_resched(); + preempt_enable(); return ret; } NOKPROBE_SYMBOL(kprobe_handler); @@ -492,7 +489,7 @@ int kprobe_post_handler(struct pt_regs *regs) } reset_current_kprobe(); out: - preempt_enable_no_resched(); + preempt_enable(); /* * if somebody else is singlestepping across a probe point, msr @@ -531,7 +528,7 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) restore_previous_kprobe(kcb); else reset_current_kprobe(); - preempt_enable_no_resched(); + preempt_enable(); break; case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index e5127b19fec29c56f94cca89757db567dc490ef5..daf8f87d2372853b91be6810edb20b0663a1441d 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -382,7 +382,7 @@ EXPORT_SYMBOL(__bswapdi2) _GLOBAL(start_secondary_resume) /* Reset stack */ rlwinm r1, r1, 0, 0, 31 - THREAD_SHIFT - addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD + addi r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE li r3,0 stw r3,0(r1) /* Zero the stack frame pointer */ bl start_secondary diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 36184cada00b1107a67885da80dc6a96dfa5a4e8..c39c07a4c06ef120fc4e489409f10b33c04a2951 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -9,6 +9,7 @@ * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) */ +#include <linux/linkage.h> #include #include #include @@ -353,7 +354,7 @@ _GLOBAL(kexec_smp_wait) * * don't overwrite r3 here, it is live for kexec_wait above. */ -real_mode: /* assume normal blr return */ +SYM_FUNC_START_LOCAL(real_mode) /* assume normal blr return */ #ifdef CONFIG_PPC_BOOK3E_64 /* Create an identity mapping. */ b kexec_create_tlb @@ -370,6 +371,7 @@ real_mode: /* assume normal blr return */ mtspr SPRN_SRR0,r11 rfid #endif +SYM_FUNC_END(real_mode) /* * kexec_sequence(newstack, start, image, control, clear_all(), @@ -384,7 +386,7 @@ _GLOBAL(kexec_sequence) std r0,16(r1) /* switch stacks to newstack -- &kexec_stack.stack */ - stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) + stdu r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r3) mr r1,r3 li r0,0 @@ -401,7 +403,7 @@ _GLOBAL(kexec_sequence) std r26,-48(r1) std r25,-56(r1) - stdu r1,-STACK_FRAME_OVERHEAD-64(r1) + stdu r1,-STACK_FRAME_MIN_SIZE-64(r1) /* save args into preserved regs */ mr r31,r3 /* newstack (both) */ diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 7e45dc98df8a136e08bf3b4af2b0f1d76086effa..ff045644f13ffffe5e83f693ebd38752d11cbfc9 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -31,6 +31,16 @@ this, and makes other things simpler. Anton? --RR.
*/ +bool module_elf_check_arch(Elf_Ehdr *hdr) +{ + unsigned long abi_level = hdr->e_flags & 0x3; + + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2)) + return abi_level == 2; + else + return abi_level < 2; +} + #ifdef CONFIG_PPC64_ELF_ABI_V2 static func_desc_t func_desc(unsigned long addr) diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 3b1c2236cbee57fc373e9abf76c97f441d9ac95b..004fae2044a3e0f9388c37693fc65b60f994d8bd 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -112,7 +112,7 @@ static void optimized_callback(struct optimized_kprobe *op, __this_cpu_write(current_kprobe, NULL); } - preempt_enable_no_resched(); + preempt_enable(); } NOKPROBE_SYMBOL(optimized_callback); diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S index cd4e7bc32609d32e6892a670ade162992a737ea1..35932f45fb4ecb6190303619993517e8a2438c76 100644 --- a/arch/powerpc/kernel/optprobes_head.S +++ b/arch/powerpc/kernel/optprobes_head.S @@ -85,7 +85,7 @@ optprobe_template_op_address: TEMPLATE_FOR_IMM_LOAD_INSNS /* 2. pt_regs pointer in r4 */ - addi r4,r1,STACK_FRAME_OVERHEAD + addi r4,r1,STACK_INT_FRAME_REGS .global optprobe_template_call_handler optprobe_template_call_handler: @@ -96,7 +96,7 @@ optprobe_template_call_handler: * Parameters for instruction emulation: * 1. Pass SP in register r3. */ - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_INT_FRAME_REGS .global optprobe_template_insn optprobe_template_insn: diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S index 2d4d21bb46a97e78605d1645e0b2da8e6a5080fe..49813f98246813aedb229e46c87d8d4243ac2c52 100644 --- a/arch/powerpc/kernel/ppc_save_regs.S +++ b/arch/powerpc/kernel/ppc_save_regs.S @@ -21,60 +21,33 @@ * different ABIs, though). 
*/ _GLOBAL(ppc_save_regs) - PPC_STL r0,0*SZL(r3) + /* This allows stack frame accessor macros and offsets to be used */ + subi r3,r3,STACK_INT_FRAME_REGS + PPC_STL r0,GPR0(r3) #ifdef CONFIG_PPC32 - stmw r2, 2*SZL(r3) + stmw r2,GPR2(r3) #else - PPC_STL r2,2*SZL(r3) - PPC_STL r3,3*SZL(r3) - PPC_STL r4,4*SZL(r3) - PPC_STL r5,5*SZL(r3) - PPC_STL r6,6*SZL(r3) - PPC_STL r7,7*SZL(r3) - PPC_STL r8,8*SZL(r3) - PPC_STL r9,9*SZL(r3) - PPC_STL r10,10*SZL(r3) - PPC_STL r11,11*SZL(r3) - PPC_STL r12,12*SZL(r3) - PPC_STL r13,13*SZL(r3) - PPC_STL r14,14*SZL(r3) - PPC_STL r15,15*SZL(r3) - PPC_STL r16,16*SZL(r3) - PPC_STL r17,17*SZL(r3) - PPC_STL r18,18*SZL(r3) - PPC_STL r19,19*SZL(r3) - PPC_STL r20,20*SZL(r3) - PPC_STL r21,21*SZL(r3) - PPC_STL r22,22*SZL(r3) - PPC_STL r23,23*SZL(r3) - PPC_STL r24,24*SZL(r3) - PPC_STL r25,25*SZL(r3) - PPC_STL r26,26*SZL(r3) - PPC_STL r27,27*SZL(r3) - PPC_STL r28,28*SZL(r3) - PPC_STL r29,29*SZL(r3) - PPC_STL r30,30*SZL(r3) - PPC_STL r31,31*SZL(r3) + SAVE_GPRS(2, 31, r3) lbz r0,PACAIRQSOFTMASK(r13) - PPC_STL r0,SOFTE-STACK_FRAME_OVERHEAD(r3) + PPC_STL r0,SOFTE(r3) #endif /* go up one stack frame for SP */ PPC_LL r4,0(r1) - PPC_STL r4,1*SZL(r3) + PPC_STL r4,GPR1(r3) /* get caller's LR */ PPC_LL r0,LRSAVE(r4) - PPC_STL r0,_LINK-STACK_FRAME_OVERHEAD(r3) + PPC_STL r0,_LINK(r3) mflr r0 - PPC_STL r0,_NIP-STACK_FRAME_OVERHEAD(r3) + PPC_STL r0,_NIP(r3) mfmsr r0 - PPC_STL r0,_MSR-STACK_FRAME_OVERHEAD(r3) + PPC_STL r0,_MSR(r3) mfctr r0 - PPC_STL r0,_CTR-STACK_FRAME_OVERHEAD(r3) + PPC_STL r0,_CTR(r3) mfxer r0 - PPC_STL r0,_XER-STACK_FRAME_OVERHEAD(r3) + PPC_STL r0,_XER(r3) mfcr r0 - PPC_STL r0,_CCR-STACK_FRAME_OVERHEAD(r3) + PPC_STL r0,_CCR(r3) li r0,0 - PPC_STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3) - PPC_STL r0,ORIG_GPR3-STACK_FRAME_OVERHEAD(r3) + PPC_STL r0,_TRAP(r3) + PPC_STL r0,ORIG_GPR3(r3) blr diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index fcf604370c667381ec7c82cc438ea159735818c2..c22cc234672f93feaae608a80520fe6c7443bb61 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -862,10 +862,8 @@ static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk) return 0; } -void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk) +static void set_hw_breakpoint(int nr, struct arch_hw_breakpoint *brk) { - memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk)); - if (dawr_enabled()) // Power8 or later set_dawr(nr, brk); @@ -879,6 +877,12 @@ void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk) WARN_ON_ONCE(1); } +void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk) +{ + memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk)); + set_hw_breakpoint(nr, brk); +} + /* Check if we have DAWR or DABR hardware */ bool ppc_breakpoint_available(void) { @@ -891,6 +895,34 @@ bool ppc_breakpoint_available(void) } EXPORT_SYMBOL_GPL(ppc_breakpoint_available); +/* Disable the breakpoint in hardware without touching current_brk[] */ +void suspend_breakpoints(void) +{ + struct arch_hw_breakpoint brk = {0}; + int i; + + if (!ppc_breakpoint_available()) + return; + + for (i = 0; i < nr_wp_slots(); i++) + set_hw_breakpoint(i, &brk); +} + +/* + * Re-enable breakpoints suspended by suspend_breakpoints() in hardware + * from current_brk[] + */ +void restore_breakpoints(void) +{ + int i; + + if (!ppc_breakpoint_available()) + return; + + for (i = 0; i < nr_wp_slots(); i++) + set_hw_breakpoint(i, this_cpu_ptr(&current_brk[i])); +} + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM static inline bool tm_enabled(struct task_struct *tsk) @@ -1359,7 +1391,7 @@
static void show_instructions(struct pt_regs *regs) unsigned long nip = regs->nip; unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int)); - printk("Instruction dump:"); + printk("Code: "); /* * If we were executing with the MMU off for instructions, adjust pc @@ -1373,9 +1405,6 @@ static void show_instructions(struct pt_regs *regs) for (i = 0; i < NR_INSN_TO_PRINT; i++) { int instr; - if (!(i % 8)) - pr_cont("\n"); - if (!__kernel_text_address(pc) || get_kernel_nofault(instr, (const void *)pc)) { pr_cont("XXXXXXXX "); @@ -1726,13 +1755,17 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) klp_init_thread_info(p); + /* Create initial stack frame. */ + sp -= STACK_USER_INT_FRAME_SIZE; + *(unsigned long *)(sp + STACK_INT_FRAME_MARKER) = STACK_FRAME_REGS_MARKER; + /* Copy registers */ - sp -= sizeof(struct pt_regs); - childregs = (struct pt_regs *) sp; + childregs = (struct pt_regs *)(sp + STACK_INT_FRAME_REGS); if (unlikely(args->fn)) { /* kernel thread */ + ((unsigned long *)sp)[0] = 0; memset(childregs, 0, sizeof(struct pt_regs)); - childregs->gpr[1] = sp + sizeof(struct pt_regs); + childregs->gpr[1] = sp + STACK_USER_INT_FRAME_SIZE; /* function */ if (args->fn) childregs->gpr[14] = ppc_function_entry((void *)args->fn); @@ -1750,6 +1783,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) *childregs = *regs; if (usp) childregs->gpr[1] = usp; + ((unsigned long *)sp)[0] = childregs->gpr[1]; p->thread.regs = childregs; /* 64s sets this in ret_from_fork */ if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64)) @@ -1767,7 +1801,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) f = ret_from_fork; } childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX); - sp -= STACK_FRAME_OVERHEAD; /* * The way this works is that at some point in the future @@ -1777,11 +1810,12 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) * do some house keeping and then return from the fork or clone * system call, using the stack frame created above. */ - ((unsigned long *)sp)[0] = 0; - sp -= sizeof(struct pt_regs); - kregs = (struct pt_regs *) sp; - sp -= STACK_FRAME_OVERHEAD; + ((unsigned long *)sp)[STACK_FRAME_LR_SAVE] = (unsigned long)f; + sp -= STACK_SWITCH_FRAME_SIZE; + ((unsigned long *)sp)[0] = sp + STACK_SWITCH_FRAME_SIZE; + kregs = (struct pt_regs *)(sp + STACK_SWITCH_FRAME_REGS); p->thread.ksp = sp; + #ifdef CONFIG_HAVE_HW_BREAKPOINT for (i = 0; i < nr_wp_slots(); i++) p->thread.ptrace_bps[i] = NULL; @@ -2123,9 +2157,12 @@ static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p, return 0; } - -int validate_sp(unsigned long sp, struct task_struct *p, - unsigned long nbytes) +/* + * validate the stack frame of a particular minimum size, used for when we are + * looking at a certain object in the stack beyond the minimum. 
+ */ +int validate_sp_size(unsigned long sp, struct task_struct *p, + unsigned long nbytes) { unsigned long stack_page = (unsigned long)task_stack_page(p); @@ -2141,7 +2178,10 @@ int validate_sp(unsigned long sp, struct task_struct *p, return valid_emergency_stack(sp, p, nbytes); } -EXPORT_SYMBOL(validate_sp); +int validate_sp(unsigned long sp, struct task_struct *p) +{ + return validate_sp_size(sp, p, STACK_FRAME_MIN_SIZE); +} static unsigned long ___get_wchan(struct task_struct *p) { @@ -2149,13 +2189,12 @@ static unsigned long ___get_wchan(struct task_struct *p) int count = 0; sp = p->thread.ksp; - if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) + if (!validate_sp(sp, p)) return 0; do { sp = READ_ONCE_NOCHECK(*(unsigned long *)sp); - if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) || - task_is_running(p)) + if (!validate_sp(sp, p) || task_is_running(p)) return 0; if (count > 0) { ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]); @@ -2209,7 +2248,7 @@ void __no_sanitize_address show_stack(struct task_struct *tsk, lr = 0; printk("%sCall Trace:\n", loglvl); do { - if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) + if (!validate_sp(sp, tsk)) break; stack = (unsigned long *) sp; @@ -2230,12 +2269,16 @@ void __no_sanitize_address show_stack(struct task_struct *tsk, /* * See if this is an exception frame. - * We look for the "regshere" marker in the current frame. + * We look for the "regs" marker in the current frame. + * + * STACK_SWITCH_FRAME_SIZE being the smallest frame that + * could hold a pt_regs, if that does not fit then it can't + * have regs. */ - if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS) - && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { + if (validate_sp_size(sp, tsk, STACK_SWITCH_FRAME_SIZE) + && stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) { struct pt_regs *regs = (struct pt_regs *) - (sp + STACK_FRAME_OVERHEAD); + (sp + STACK_INT_FRAME_REGS); lr = regs->link; printk("%s--- interrupt: %lx at %pS\n", diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 1eed87d954ba8fb4129f258eb47f23bb5f6bf547..4f1c920aa13ed5b0c5b0cb2e6890269a5d10ac46 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -72,6 +72,7 @@ int __initdata iommu_is_off; int __initdata iommu_force_on; unsigned long tce_alloc_start, tce_alloc_end; u64 ppc64_rma_size; +unsigned int boot_cpu_node_count __ro_after_init; #endif static phys_addr_t first_memblock_size; static int __initdata boot_cpu_count; @@ -335,6 +336,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node, if (type == NULL || strcmp(type, "cpu") != 0) return 0; + if (IS_ENABLED(CONFIG_PPC64)) + boot_cpu_node_count++; + /* Get physical cpuid */ intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len); if (!intserv) diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index e847f9b1c5b9fdd8190817207156f05e86ad70f7..deded51a79784ba17e40f569ef2640d9331f51ad 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -7,43 +7,35 @@ * Copyright (C) 2001 IBM. 
*/ -#include -#include -#include -#include -#include -#include +#define pr_fmt(fmt) "rtas: " fmt + #include #include -#include -#include -#include -#include -#include +#include +#include +#include #include -#include +#include +#include #include +#include #include +#include +#include +#include #include -#include -#include +#include +#include +#include +#include #include -#include -#include #include -#include +#include #include -#include -#include -#include -#include -#include -#include -#include +#include #include -#include -#include +#include /* This is here deliberately so it's only used in this file */ void enter_rtas(unsigned long); @@ -353,6 +345,9 @@ int rtas_service_present(const char *service) EXPORT_SYMBOL(rtas_service_present); #ifdef CONFIG_RTAS_ERROR_LOGGING + +static u32 rtas_error_log_max __ro_after_init = RTAS_ERROR_LOG_MAX; + /* * Return the firmware-specified size of the error log buffer * for all rtas calls that require an error buffer argument. @@ -360,21 +355,30 @@ EXPORT_SYMBOL(rtas_service_present); */ int rtas_get_error_log_max(void) { - static int rtas_error_log_max; - if (rtas_error_log_max) - return rtas_error_log_max; - - rtas_error_log_max = rtas_token ("rtas-error-log-max"); - if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) || - (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) { - printk (KERN_WARNING "RTAS: bad log buffer size %d\n", - rtas_error_log_max); - rtas_error_log_max = RTAS_ERROR_LOG_MAX; - } return rtas_error_log_max; } EXPORT_SYMBOL(rtas_get_error_log_max); +static void __init init_error_log_max(void) +{ + static const char propname[] __initconst = "rtas-error-log-max"; + u32 max; + + if (of_property_read_u32(rtas.dev, propname, &max)) { + pr_warn("%s not found, using default of %u\n", + propname, RTAS_ERROR_LOG_MAX); + max = RTAS_ERROR_LOG_MAX; + } + + if (max > RTAS_ERROR_LOG_MAX) { + pr_warn("%s = %u, clamping max error log size to %u\n", + propname, max, RTAS_ERROR_LOG_MAX); + max = RTAS_ERROR_LOG_MAX; + } + + rtas_error_log_max = max; +} + static char rtas_err_buf[RTAS_ERROR_LOG_MAX]; static int rtas_last_error_token; @@ -432,6 +436,7 @@ static char *__fetch_rtas_last_error(char *altbuf) #else /* CONFIG_RTAS_ERROR_LOGGING */ #define __fetch_rtas_last_error(x) NULL #define get_errorlog_buffer() NULL +static void __init init_error_log_max(void) {} #endif @@ -467,6 +472,64 @@ void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, static int ibm_open_errinjct_token; static int ibm_errinjct_token; +/** + * rtas_call() - Invoke an RTAS firmware function. + * @token: Identifies the function being invoked. + * @nargs: Number of input parameters. Does not include token. + * @nret: Number of output parameters, including the call status. + * @outputs: Array of @nret output words. + * @....: List of @nargs input parameters. + * + * Invokes the RTAS function indicated by @token, which the caller + * should obtain via rtas_token(). + * + * The @nargs and @nret arguments must match the number of input and + * output parameters specified for the RTAS function. + * + * rtas_call() returns RTAS status codes, not conventional Linux errno + * values. Callers must translate any failure to an appropriate errno + * in syscall context. Most callers of RTAS functions that can return + * -2 or 990x should use rtas_busy_delay() to correctly handle those + * statuses before calling again. + * + * The return value descriptions are adapted from 7.2.8 [RTAS] Return + * Codes of the PAPR and CHRP specifications. 
+ * + * Context: Process context preferably, interrupt context if + * necessary. Acquires an internal spinlock and may perform + * GFP_ATOMIC slab allocation in error path. Unsafe for NMI + * context. + * Return: + * * 0 - RTAS function call succeeded. + * * -1 - RTAS function encountered a hardware or + * platform error, or the token is invalid, + * or the function is restricted by kernel policy. + * * -2 - Specs say "A necessary hardware device was busy, + * and the requested function could not be + * performed. The operation should be retried at + * a later time." This is misleading, at least with + * respect to current RTAS implementations. What it + * usually means in practice is that the function + * could not be completed while meeting RTAS's + * deadline for returning control to the OS (250us + * for PAPR/PowerVM, typically), but the call may be + * immediately reattempted to resume work on it. + * * -3 - Parameter error. + * * -7 - Unexpected state change. + * * 9000...9899 - Vendor-specific success codes. + * * 9900...9905 - Advisory extended delay. Caller should try + * again after ~10^x ms has elapsed, where x is + * the last digit of the status [0-5]. Again going + * beyond the PAPR text, 990x on PowerVM indicates + * contention for RTAS-internal resources. Other + * RTAS call sequences in progress should be + * allowed to complete before reattempting the + * call. + * * -9000 - Multi-level isolation error. + * * -9999...-9004 - Vendor-specific error codes. + * * Additional negative values - Function-specific error. + * * Additional positive values - Function-specific success. + */ int rtas_call(int token, int nargs, int nret, int *outputs, ...) { va_list list; @@ -657,8 +720,7 @@ static int rtas_error_rc(int rtas_rc) rc = -ENODEV; break; default: - printk(KERN_ERR "%s: unexpected RTAS error %d\n", - __func__, rtas_rc); + pr_err("%s: unexpected error %d\n", __func__, rtas_rc); rc = -ERANGE; break; } @@ -862,8 +924,8 @@ void __noreturn rtas_restart(char *cmd) { if (rtas_flash_term_hook) rtas_flash_term_hook(SYS_RESTART); - printk("RTAS system-reboot returned %d\n", - rtas_call(rtas_token("system-reboot"), 0, 1, NULL)); + pr_emerg("system-reboot returned %d\n", + rtas_call(rtas_token("system-reboot"), 0, 1, NULL)); for (;;); } @@ -872,8 +934,8 @@ void rtas_power_off(void) if (rtas_flash_term_hook) rtas_flash_term_hook(SYS_POWER_OFF); /* allow power on only with power button press */ - printk("RTAS power-off returned %d\n", - rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1)); + pr_emerg("power-off returned %d\n", + rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1)); for (;;); } @@ -882,13 +944,14 @@ void __noreturn rtas_halt(void) if (rtas_flash_term_hook) rtas_flash_term_hook(SYS_HALT); /* allow power on only with power button press */ - printk("RTAS power-off returned %d\n", - rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1)); + pr_emerg("power-off returned %d\n", + rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1)); for (;;); } /* Must be in the RMO region, so we place it here */ static char rtas_os_term_buf[2048]; +static s32 ibm_os_term_token = RTAS_UNKNOWN_SERVICE; void rtas_os_term(char *str) { @@ -900,19 +963,23 @@ void rtas_os_term(char *str) * this property may terminate the partition which we want to avoid * since it interferes with panic_timeout. 
*/ - if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") || - RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term")) + if (ibm_os_term_token == RTAS_UNKNOWN_SERVICE) return; snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str); + /* + * Keep calling as long as RTAS returns a "try again" status, + * but don't use rtas_busy_delay(), which potentially + * schedules. + */ do { - status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL, + status = rtas_call(ibm_os_term_token, 1, 1, NULL, __pa(rtas_os_term_buf)); - } while (rtas_busy_delay(status)); + } while (rtas_busy_delay_time(status)); if (status != 0) - printk(KERN_EMERG "ibm,os-term call failed %d\n", status); + pr_emerg("ibm,os-term call failed %d\n", status); } /** @@ -983,8 +1050,6 @@ noinstr struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log return NULL; } -#ifdef CONFIG_PPC_RTAS_FILTER - /* * The sys_rtas syscall, as originally designed, allows root to pass * arbitrary physical addresses to RTAS calls. A number of RTAS calls @@ -1133,20 +1198,6 @@ static void __init rtas_syscall_filter_init(void) rtas_filters[i].token = rtas_token(rtas_filters[i].name); } -#else - -static bool block_rtas_call(int token, int nargs, - struct rtas_args *args) -{ - return false; -} - -static void __init rtas_syscall_filter_init(void) -{ -} - -#endif /* CONFIG_PPC_RTAS_FILTER */ - /* We assume to be passed big endian arguments */ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) { @@ -1277,6 +1328,15 @@ void __init rtas_initialize(void) no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry); rtas.entry = no_entry ? rtas.base : entry; + init_error_log_max(); + + /* + * Discover these now to avoid device tree lookups in the + * panic path. + */ + if (of_property_read_bool(rtas.dev, "ibm,extended-os-term")) + ibm_os_term_token = rtas_token("ibm,os-term"); + /* If RTAS was found, allocate the RMO buffer for it and look for * the stop-self token if any */ diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index 5270b450bbde406f2f0258a39ccaf79ee609b0ee..cc56ac6ba4b0498b757d6908070ce0ded53a0ce6 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -499,6 +500,8 @@ EXPORT_SYMBOL_GPL(rtas_cancel_event_scan); static int __init rtas_event_scan_init(void) { + int err; + if (!machine_is(pseries) && !machine_is(chrp)) return 0; @@ -509,8 +512,8 @@ static int __init rtas_event_scan_init(void) return -ENODEV; } - rtas_event_scan_rate = rtas_token("rtas-event-scan-rate"); - if (rtas_event_scan_rate == RTAS_UNKNOWN_SERVICE) { + err = of_property_read_u32(rtas.dev, "rtas-event-scan-rate", &rtas_event_scan_rate); + if (err) { printk(KERN_ERR "rtasd: no rtas-event-scan-rate on system\n"); return -ENODEV; } diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 6d041993a45dccf868bccb2d7fca38d2583b15bc..9b10e57040c6c705f4b8ba0e17cd08d5e14b545e 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -59,6 +59,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 0da6e59161cd494678ebabce5feb8dd72e5fe017..6b90f10a6c819b2cf95accf64950e23ce92cc538 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -1249,7 +1249,7 @@ static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) #ifdef CONFIG_PPC64 
paca_ptrs[cpu]->__current = idle; paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) + - THREAD_SIZE - STACK_FRAME_OVERHEAD; + THREAD_SIZE - STACK_FRAME_MIN_SIZE; #endif task_thread_info(idle)->cpu = cpu; secondary_current = current_set[cpu] = idle; diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index a2443d61728ebc708423297aa877d2b456d02cec..5de8597eaab8dc428828abb0c6fba244e62fe23b 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -43,7 +43,7 @@ void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, unsigned long *stack = (unsigned long *) sp; unsigned long newsp, ip; - if (!validate_sp(sp, task, STACK_FRAME_OVERHEAD)) + if (!validate_sp(sp, task)) return; newsp = stack[0]; @@ -77,7 +77,7 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum /* * For user tasks, this is the SP value loaded on * kernel entry, see "PACAKSAVE(r13)" in _switch() and - * system_call_common()/EXCEPTION_PROLOG_COMMON(). + * system_call_common(). * * Likewise for non-swapper kernel threads, * this also happens to be the top of the stack @@ -88,13 +88,13 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum * an unreliable stack trace until it's been * _switch()'ed to for the first time. */ - stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); + stack_end -= STACK_USER_INT_FRAME_SIZE; } else { /* * idle tasks have a custom stack layout, * c.f. cpu_idle_thread_init(). */ - stack_end -= STACK_FRAME_OVERHEAD; + stack_end -= STACK_FRAME_MIN_SIZE; } if (task == current) @@ -136,7 +136,7 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum /* Mark stacktraces with exception frames as unreliable. */ if (sp <= stack_end - STACK_INT_FRAME_SIZE && - stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { + stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) { return -EINVAL; } diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S index e0cbd63007f21e01e9ee8c444672f97fcfca65f9..ffb79326483c0b4428330139c039aa5877265ea0 100644 --- a/arch/powerpc/kernel/swsusp_32.S +++ b/arch/powerpc/kernel/swsusp_32.S @@ -1,5 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include +#include <linux/linkage.h> + #include #include #include @@ -400,7 +402,7 @@ _ASM_NOKPROBE_SYMBOL(swsusp_arch_resume) /* FIXME:This construct is actually not useful since we don't shut * down the instruction MMU, we could just flip back MSR-DR on. */ -turn_on_mmu: +SYM_FUNC_START_LOCAL(turn_on_mmu) mflr r4 mtsrr0 r4 mtsrr1 r3 @@ -408,4 +410,5 @@ turn_on_mmu: isync rfi _ASM_NOKPROBE_SYMBOL(turn_on_mmu) +SYM_FUNC_END(turn_on_mmu) diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index a2ab397065c6667bdd26a2dcbd6561984b2158ed..d68de3618741e19b3b9eb3d90b5ff97ddbc09c26 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -130,7 +130,7 @@ unsigned long tb_ticks_per_jiffy; unsigned long tb_ticks_per_usec = 100; /* sane default */ EXPORT_SYMBOL(tb_ticks_per_usec); unsigned long tb_ticks_per_sec; -EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime_t conversions */ +EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime conversions */ DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL_GPL(rtc_lock); @@ -150,21 +150,6 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq); bool tb_invalid; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -/* - * Factor for converting from cputime_t (timebase ticks) to - * microseconds.
This is stored as 0.64 fixed-point binary fraction. - */ -u64 __cputime_usec_factor; -EXPORT_SYMBOL(__cputime_usec_factor); - -static void calc_cputime_factors(void) -{ - struct div_result res; - - div128_by_32(1000000, 0, tb_ticks_per_sec, &res); - __cputime_usec_factor = res.result_low; -} - /* * Read the SPURR on systems that have it, otherwise the PURR, * or if that doesn't exist return the timebase value passed in. @@ -369,10 +354,7 @@ void vtime_flush(struct task_struct *tsk) acct->hardirq_time = 0; acct->softirq_time = 0; } - -#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ -#define calc_cputime_factors() -#endif +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ void __delay(unsigned long loops) { @@ -914,7 +896,6 @@ void __init time_init(void) tb_ticks_per_jiffy = ppc_tb_freq / HZ; tb_ticks_per_sec = ppc_tb_freq; tb_ticks_per_usec = ppc_tb_freq / 1000000; - calc_cputime_factors(); /* * Compute scale factor for sched_clock. diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 5a0f023a26e90fd260fa1df9192febe19eed0031..9feab5e0485bf8387e1e55f34a9e74e8222ce31c 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -117,7 +117,7 @@ _GLOBAL(tm_reclaim) std r2, STK_GOT(r1) stdu r1, -TM_FRAME_SIZE(r1) - /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */ + /* We've a struct pt_regs at [r1+STACK_INT_FRAME_REGS]. */ std r3, STK_PARAM(R3)(r1) SAVE_NVGPRS(r1) @@ -222,7 +222,7 @@ _GLOBAL(tm_reclaim) * Make r7 look like an exception frame so that we can use the neat * GPRx(n) macros. r7 is NOT a pt_regs ptr! */ - subi r7, r7, STACK_FRAME_OVERHEAD + subi r7, r7, STACK_INT_FRAME_REGS /* Sync the userland GPRs 2-12, 14-31 to thread->regs: */ SAVE_GPR(0, r7) /* user r0 */ @@ -359,7 +359,7 @@ _GLOBAL(__tm_recheckpoint) stdu r1, -TM_FRAME_SIZE(r1) /* - * We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. + * We've a struct pt_regs at [r1+STACK_INT_FRAME_REGS]. * This is used for backing up the NVGPRs: */ SAVE_NVGPRS(r1) @@ -379,7 +379,7 @@ _GLOBAL(__tm_recheckpoint) * Make r7 look like an exception frame so that we can use the neat * GPRx(n) macros. r7 is now NOT a pt_regs ptr! */ - subi r7, r7, STACK_FRAME_OVERHEAD + subi r7, r7, STACK_INT_FRAME_REGS /* We need to setup MSR for FP/VMX/VSX register save instructions. 
*/ mfmsr r6 diff --git a/arch/powerpc/kernel/trace/ftrace_mprofile.S b/arch/powerpc/kernel/trace/ftrace_mprofile.S index d031093bc43671309e07c52ea300842e35246b04..ffb1db3868499896f89584acfa74d14872892557 100644 --- a/arch/powerpc/kernel/trace/ftrace_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_mprofile.S @@ -110,7 +110,7 @@ .endif /* Load &pt_regs in r6 for call below */ - addi r6, r1, STACK_FRAME_OVERHEAD + addi r6, r1, STACK_INT_FRAME_REGS .endm .macro ftrace_regs_exit allregs diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile index a2e7b0ce5b1913536e1b24fb7072c1ea49c4687f..6a977b0d8ffc3105ad3d884a29d0d164d293107a 100644 --- a/arch/powerpc/kernel/vdso/Makefile +++ b/arch/powerpc/kernel/vdso/Makefile @@ -102,3 +102,5 @@ quiet_cmd_vdso64ld_and_check = VDSO64L $@ cmd_vdso64ld_and_check = $(VDSOCC) $(c_flags) $(CC64FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) -z noexecstack ; $(cmd_vdso_check) quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(VDSOCC) $(a_flags) $(CC64FLAGS) $(AS64FLAGS) -c -o $@ $< + +OBJECT_FILES_NON_STANDARD := y diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index 5cf64740edb82db60c9d14d0625dbbaf2d98da17..ffe5d90abe17998ab8052eb36d05c2babbf9c013 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S @@ -1,4 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#include #include #include #include @@ -185,7 +186,7 @@ fphalf: * Internal routine to enable floating point and set FPSCR to 0. * Don't call it from C; it doesn't use the normal calling convention. */ -fpenable: +SYM_FUNC_START_LOCAL(fpenable) #ifdef CONFIG_PPC32 stwu r1,-64(r1) #else @@ -202,6 +203,7 @@ fpenable: mffs fr31 MTFSF_L(fr1) blr +SYM_FUNC_END(fpenable) fpdisable: mtlr r12 diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index 60e12b716d3c8c59a25fb66af119920b6f1e191e..af8854f9eae3c85645a8482cadbaf84524b017e4 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -26,6 +26,7 @@ #include #include #include +#include struct umem_info { u64 *buf; /* data buffer for usable-memory property */ @@ -928,6 +929,45 @@ out: return ret; } +/** + * cpu_node_size - Compute the size of a CPU node in the FDT. + * This should be done only once and the value is stored in + * a static variable. + * Returns the max size of a CPU node in the FDT. + */ +static unsigned int cpu_node_size(void) +{ + static unsigned int size; + struct device_node *dn; + struct property *pp; + + /* + * Don't compute it twice, we are assuming that the per CPU node size + * doesn't change during the system's life. + */ + if (size) + return size; + + dn = of_find_node_by_type(NULL, "cpu"); + if (WARN_ON_ONCE(!dn)) { + // Unlikely to happen + return 0; + } + + /* + * We compute the sub node size for a CPU node, assuming it + * will be the same for all. + */ + size += strlen(dn->name) + 5; + for_each_property_of_node(dn, pp) { + size += strlen(pp->name); + size += pp->length; + } + + of_node_put(dn); + return size; +} + /** * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to * setup FDT for kexec/kdump kernel.
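// Editor's aside on cpu_node_size() above (illustrative): of_find_node_by_type()
// returns its result with an elevated reference count, so every successful
// lookup must be paired with of_node_put(), exactly as done there. Minimal
// pattern, with a hypothetical helper name:
static unsigned int count_cpu_node_properties(void)
{
	struct device_node *dn = of_find_node_by_type(NULL, "cpu");
	struct property *pp;
	unsigned int n = 0;

	if (!dn)
		return 0;

	for_each_property_of_node(dn, pp)
		n++;

	of_node_put(dn);	/* drop the reference taken by the lookup */
	return n;
}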
@@ -937,6 +977,8 @@ out: */ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image) { + unsigned int cpu_nodes, extra_size; + struct device_node *dn; u64 usm_entries; if (image->type != KEXEC_TYPE_CRASH) @@ -949,7 +991,22 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image) */ usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) + (2 * (resource_size(&crashk_res) / drmem_lmb_size()))); - return (unsigned int)(usm_entries * sizeof(u64)); + + extra_size = (unsigned int)(usm_entries * sizeof(u64)); + + /* + * Get the number of CPU nodes in the current DT. This allows to + * reserve places for CPU nodes added since the boot time. + */ + cpu_nodes = 0; + for_each_node_by_type(dn, "cpu") { + cpu_nodes++; + } + + if (cpu_nodes > boot_cpu_node_count) + extra_size += (cpu_nodes - boot_cpu_node_count) * cpu_node_size(); + + return extra_size; } /** diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index e9744b41a226ca9a13b89f524d24f057dbfea9d0..7006bcbc2e3757c8fa4e092a044c123b6f6e225c 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -598,7 +598,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu, write_ok = true; } else { /* Call KVM generic code to do the slow-path check */ - pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, + pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, writing, &write_ok, NULL); if (is_error_noslot_pfn(pfn)) return -EFAULT; @@ -1202,7 +1202,7 @@ static int resize_hpt_allocate(struct kvm_resize_hpt *resize) if (rc < 0) return rc; - resize_hpt_debug(resize, "resize_hpt_allocate(): HPT @ 0x%lx\n", + resize_hpt_debug(resize, "%s(): HPT @ 0x%lx\n", __func__, resize->hpt.virt); return 0; @@ -1443,7 +1443,7 @@ static void resize_hpt_prepare_work(struct work_struct *work) */ mutex_unlock(&kvm->arch.mmu_setup_lock); - resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", + resize_hpt_debug(resize, "%s(): order = %d\n", __func__, resize->order); err = resize_hpt_allocate(resize); @@ -1887,8 +1887,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf, ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r, tmp); if (ret != H_SUCCESS) { - pr_err("kvm_htab_write ret %ld i=%ld v=%lx " - "r=%lx\n", ret, i, v, r); + pr_err("%s ret %ld i=%ld v=%lx r=%lx\n", __func__, ret, i, v, r); goto out; } if (!mmu_ready && is_vrma_hpte(v)) { diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 5d5e12f3bf864a89ad9985788a4bb26b78a87745..9d3743ca16d53e6baf9083bee97cd58101acd695 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -846,7 +846,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, unsigned long pfn; /* Call KVM generic code to do the slow-path check */ - pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, + pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, writing, upgrade_p, NULL); if (is_error_noslot_pfn(pfn)) return -EFAULT; diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 40864373ef876facf71e828a030001b6d70062df..95e738ef9062bfb925355906b891632d9c047b20 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -294,14 +294,14 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt = NULL; struct kvmppc_spapr_tce_table *siter; struct mm_struct *mm = kvm->mm; - unsigned long npages, size = args->size; + unsigned 
long npages; int ret; if (!args->size || args->page_shift < 12 || args->page_shift > 34 || (args->offset + args->size > (ULLONG_MAX >> args->page_shift))) return -EINVAL; - npages = kvmppc_tce_pages(size); + npages = kvmppc_tce_pages(args->size); ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true); if (ret) return ret; @@ -314,7 +314,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, stt->liobn = args->liobn; stt->page_shift = args->page_shift; stt->offset = args->offset; - stt->size = size; + stt->size = args->size; stt->kvm = kvm; mutex_init(&stt->alloc_lock); INIT_LIST_HEAD_RCU(&stt->iommu_tables); diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index 59d89e4b154a6d553345d7d84ecef914991f0499..c0deeea7eef30178271237068882858e55a27424 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S @@ -9,6 +9,7 @@ * Authors: Alexander Graf */ +#include #include #include #include @@ -107,7 +108,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) /* * void kvmhv_save_host_pmu(void) */ -kvmhv_save_host_pmu: +SYM_FUNC_START_LOCAL(kvmhv_save_host_pmu) BEGIN_FTR_SECTION /* Work around P8 PMAE bug */ li r3, -1 @@ -154,3 +155,4 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) stw r8, HSTATE_PMC5(r13) stw r9, HSTATE_PMC6(r13) 31: blr +SYM_FUNC_END(kvmhv_save_host_pmu) diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 5a05953ae13fe27e17dacc151b9c4b8e071c89fd..9182324dbef931df3dc314af659dc7501b9c8de4 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -265,7 +265,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, } pte = kvmppc_read_update_linux_pte(ptep, writing); if (pte_present(pte) && !pte_protnone(pte)) { - if (writing && !__pte_write(pte)) + if (writing && !pte_write(pte)) /* make the actual HPTE be read-only */ ptel = hpte_make_readonly(ptel); is_ci = pte_ci(pte); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 37f50861dd98fe538af9424d68424675dee3e612..acf80915f406e5857bdd491ad23833eca86fd526 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -10,6 +10,8 @@ * Authors: Alexander Graf */ +#include +#include #include #include #include @@ -1522,12 +1524,14 @@ kvm_flush_link_stack: /* Flush the link stack. On Power8 it's up to 32 entries in size. */ .rept 32 + ANNOTATE_INTRA_FUNCTION_CALL bl .+4 .endr /* And on Power9 it's up to 64. */ BEGIN_FTR_SECTION .rept 32 + ANNOTATE_INTRA_FUNCTION_CALL bl .+4 .endr END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) @@ -2358,7 +2362,7 @@ hmi_realmode: * This routine calls kvmppc_read_intr, a C function, if an external * interrupt is pending. */ -kvmppc_check_wake_reason: +SYM_FUNC_START_LOCAL(kvmppc_check_wake_reason) mfspr r6, SPRN_SRR1 BEGIN_FTR_SECTION rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ @@ -2427,6 +2431,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) addi r1, r1, PPC_MIN_STKFRM mtlr r0 blr +SYM_FUNC_END(kvmppc_check_wake_reason) /* * Save away FP, VMX and VSX registers. @@ -2434,7 +2439,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) * N.B. r30 and r31 are volatile across this function, * thus it is not callable from C. 
*/ -kvmppc_save_fp: +SYM_FUNC_START_LOCAL(kvmppc_save_fp) mflr r30 mr r31,r3 mfmsr r5 @@ -2462,6 +2467,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) stw r6,VCPU_VRSAVE(r31) mtlr r30 blr +SYM_FUNC_END(kvmppc_save_fp) /* * Load up FP, VMX and VSX registers @@ -2469,7 +2475,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) * N.B. r30 and r31 are volatile across this function, * thus it is not callable from C. */ -kvmppc_load_fp: +SYM_FUNC_START_LOCAL(kvmppc_load_fp) mflr r30 mr r31,r4 mfmsr r9 @@ -2498,6 +2504,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) mtlr r30 mr r4,r31 blr +SYM_FUNC_END(kvmppc_load_fp) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* @@ -2729,7 +2736,7 @@ kvmppc_bad_host_intr: std r6, SOFTE(r1) LOAD_PACA_TOC() LOAD_REG_IMMEDIATE(3, STACK_FRAME_REGS_MARKER) - std r3, STACK_FRAME_OVERHEAD-16(r1) + std r3, STACK_INT_FRAME_MARKER(r1) /* * XXX On POWER7 and POWER8, we just spin here since we don't @@ -2746,7 +2753,7 @@ kvmppc_bad_host_intr: * r9 has a vcpu pointer (in) * r0 is used as a scratch register */ -kvmppc_msr_interrupt: +SYM_FUNC_START_LOCAL(kvmppc_msr_interrupt) rldicl r0, r11, 64 - MSR_TS_S_LG, 62 cmpwi r0, 2 /* Check if we are in transactional state.. */ ld r11, VCPU_INTR_MSR(r9) @@ -2755,13 +2762,14 @@ kvmppc_msr_interrupt: li r0, 1 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG blr +SYM_FUNC_END(kvmppc_msr_interrupt) /* * void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu) * * Load up guest PMU state. R3 points to the vcpu struct. */ -kvmhv_load_guest_pmu: +SYM_FUNC_START_LOCAL(kvmhv_load_guest_pmu) mr r4, r3 mflr r0 li r3, 1 @@ -2811,13 +2819,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) isync mtlr r0 blr +SYM_FUNC_END(kvmhv_load_guest_pmu) /* * void kvmhv_load_host_pmu(void) * * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu. */ -kvmhv_load_host_pmu: +SYM_FUNC_START_LOCAL(kvmhv_load_host_pmu) mflr r0 lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */ cmpwi r4, 0 @@ -2859,6 +2868,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) isync mtlr r0 23: blr +SYM_FUNC_END(kvmhv_load_host_pmu) /* * void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use) @@ -2866,7 +2876,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) * Save guest PMU state into the vcpu struct. 
* r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA) */ -kvmhv_save_guest_pmu: +SYM_FUNC_START_LOCAL(kvmhv_save_guest_pmu) mr r9, r3 mr r8, r4 BEGIN_FTR_SECTION @@ -2942,6 +2952,7 @@ BEGIN_FTR_SECTION mtspr SPRN_MMCRS, r4 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 22: blr +SYM_FUNC_END(kvmhv_save_guest_pmu) /* * This works around a hardware bug on POWER8E processors, where diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index e2f11f9c3f2aa691bd4b1a130d06d38d989ecc5c..1d67baa5557a2ddd0c80e09ad8186fb7f0c2dea8 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -1190,8 +1190,7 @@ int kvmppc_uvmem_init(void) pfn_first = res->start >> PAGE_SHIFT; pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT); - kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first), - sizeof(unsigned long), GFP_KERNEL); + kvmppc_uvmem_bitmap = bitmap_zalloc(pfn_last - pfn_first, GFP_KERNEL); if (!kvmppc_uvmem_bitmap) { ret = -ENOMEM; goto out_unmap; @@ -1215,5 +1214,5 @@ void kvmppc_uvmem_free(void) memunmap_pages(&kvmppc_uvmem_pgmap); release_mem_region(kvmppc_uvmem_pgmap.range.start, range_len(&kvmppc_uvmem_pgmap.range)); - kfree(kvmppc_uvmem_bitmap); + bitmap_free(kvmppc_uvmem_bitmap); } diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 4ca23644f75258d23812eb15ac2d8f77e319ef88..f4115819e738cc039dd8f9b4bebe45b7820818b9 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -539,7 +539,7 @@ static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) if (irq == XICS_IPI || irq == 0) { /* * This barrier orders the setting of xc->cppr vs. - * subsquent test of xc->mfrr done inside + * subsequent test of xc->mfrr done inside * scan_interrupts and push_pending_to_hw */ smp_mb(); @@ -563,7 +563,7 @@ static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) /* * This barrier orders both setting of in_eoi above vs, * subsequent test of guest_priority, and the setting - * of xc->cppr vs. subsquent test of xc->mfrr done inside + * of xc->cppr vs. subsequent test of xc->mfrr done inside * scan_interrupts and push_pending_to_hw */ smp_mb(); @@ -1785,8 +1785,7 @@ void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu) * stale_p (because it has no easy way to address it). Hence we have * to adjust stale_p before shutting down the interrupt. */ -void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu, - struct kvmppc_xive_vcpu *xc, int irq) +void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu, int irq) { struct irq_data *d = irq_get_irq_data(irq); struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); @@ -1827,8 +1826,7 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { if (xc->esc_virq[i]) { if (kvmppc_xive_has_single_escalation(xc->xive)) - xive_cleanup_single_escalation(vcpu, xc, - xc->esc_virq[i]); + xive_cleanup_single_escalation(vcpu, xc->esc_virq[i]); free_irq(xc->esc_virq[i], vcpu); irq_dispose_mapping(xc->esc_virq[i]); kfree(xc->esc_virq_names[i]); @@ -2392,7 +2390,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) /* * Now, we select a target if we have one. If we don't we * leave the interrupt untargetted. It means that an interrupt - * can become "untargetted" accross migration if it was masked + * can become "untargetted" across migration if it was masked * by set_xive() but there is little we can do about it. 
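// Editor's aside on the kvmppc_uvmem_init() change above: bitmap_zalloc(nbits, gfp)
// replaces the open-coded kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long), gfp)
// and pairs with bitmap_free(), which is NULL-safe like kfree(). A minimal
// sketch with placeholder names:
static unsigned long *pfn_bitmap;

static int pfn_bitmap_init(unsigned long pfn_first, unsigned long pfn_last)
{
	pfn_bitmap = bitmap_zalloc(pfn_last - pfn_first, GFP_KERNEL);
	return pfn_bitmap ? 0 : -ENOMEM;
}

static void pfn_bitmap_exit(void)
{
	bitmap_free(pfn_bitmap);
}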
*/ diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index 1e48f72e8aa5d3ed8351dd148bd5361ffc3cfbd5..62bf39f537839472f97634eedc7b7ddda0c0e7ce 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -299,8 +299,7 @@ int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio); int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, bool single_escalation); struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type); -void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu, - struct kvmppc_xive_vcpu *xc, int irq); +void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu, int irq); int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp); int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr); bool kvmppc_xive_check_save_restore(struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 5271c33fe79e4d5f6e83c5afa21a089bfec92c81..4f566bea5e10f8861690877820d081f39e910b4e 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -93,8 +93,7 @@ void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) /* Free the escalation irq */ if (xc->esc_virq[i]) { if (kvmppc_xive_has_single_escalation(xc->xive)) - xive_cleanup_single_escalation(vcpu, xc, - xc->esc_virq[i]); + xive_cleanup_single_escalation(vcpu, xc->esc_virq[i]); free_irq(xc->esc_virq[i], vcpu); irq_dispose_mapping(xc->esc_virq[i]); kfree(xc->esc_virq_names[i]); diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 7b4920e9fd26304bcf47ee43dd746d162f8d63a8..0dce93ccaadfa23a81dde9ae98f8de92ee76f07a 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -1015,6 +1015,9 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr) u32 last_inst = KVM_INST_FETCH_FAILED; enum emulation_result emulated = EMULATE_DONE; + /* Fix irq state (pairs with kvmppc_fix_ee_before_entry()) */ + kvmppc_fix_ee_after_exit(); + /* update before a new last_exit_type is rewritten */ kvmppc_update_timing_stats(vcpu); diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index 8262c14fc9e63627b2aaf25bf9a47a120922baa7..b5fe6fb53c662d00c399dcc6b8669c69b076b32a 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S @@ -424,15 +424,6 @@ _GLOBAL(kvmppc_resume_host) mtspr SPRN_EPCR, r3 isync -#ifdef CONFIG_64BIT - /* - * We enter with interrupts disabled in hardware, but - * we need to call RECONCILE_IRQ_STATE to ensure - * that the software state is kept in sync. - */ - RECONCILE_IRQ_STATE(r3,r5) -#endif - /* Switch to kernel stack and jump to handler. 
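// Editor's sketch (an assumption -- the body of kvmppc_fix_ee_after_exit() is
// not shown in this patch): as the counterpart to the RECONCILE_IRQ_STATE
// deletion above, the hook called from kvmppc_handle_exit() presumably records
// that guest exit left interrupts hard-disabled, so the 64-bit lazy-EE
// bookkeeping matches the hardware state, roughly along these lines:
static inline void fix_ee_after_exit_sketch(void)
{
#ifdef CONFIG_PPC64
	/* hypothetical body: mark interrupts soft- and hard-disabled */
	irq_soft_mask_set(IRQS_ALL_DISABLED);
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#endif
}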
*/ mr r3, r4 mr r5, r14 /* intno */ diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S index 315c94946bad144f9283b18f3ec2b47de7d1e774..b68e7f26a81f417357a15b9bb6f0eaecc6824440 100644 --- a/arch/powerpc/kvm/fpu.S +++ b/arch/powerpc/kvm/fpu.S @@ -6,6 +6,8 @@ */ #include +#include + #include #include #include @@ -110,18 +112,22 @@ FPS_THREE_IN(fsel) * R8 = (double*)¶m3 [load_three] * LR = instruction call function */ -fpd_load_three: +SYM_FUNC_START_LOCAL(fpd_load_three) lfd 2,0(r8) /* load param3 */ -fpd_load_two: +SYM_FUNC_START_LOCAL(fpd_load_two) lfd 1,0(r7) /* load param2 */ -fpd_load_one: +SYM_FUNC_START_LOCAL(fpd_load_one) lfd 0,0(r6) /* load param1 */ -fpd_load_none: +SYM_FUNC_START_LOCAL(fpd_load_none) lfd 3,0(r3) /* load up fpscr value */ MTFSF_L(3) lwz r6, 0(r4) /* load cr */ mtcr r6 blr +SYM_FUNC_END(fpd_load_none) +SYM_FUNC_END(fpd_load_one) +SYM_FUNC_END(fpd_load_two) +SYM_FUNC_END(fpd_load_three) /* * End of double instruction processing @@ -131,13 +137,14 @@ fpd_load_none: * R5 = (double*)&result * LR = caller of instruction call function */ -fpd_return: +SYM_FUNC_START_LOCAL(fpd_return) mfcr r6 stfd 0,0(r5) /* save result */ mffs 0 stfd 0,0(r3) /* save new fpscr value */ stw r6,0(r4) /* save new cr value */ blr +SYM_FUNC_END(fpd_return) /* * Double operation with no input operand diff --git a/arch/powerpc/kvm/irq.h b/arch/powerpc/kvm/irq.h deleted file mode 100644 index e6463f866abc24bdc1a1f1e04e3c0b6333fbdedc..0000000000000000000000000000000000000000 --- a/arch/powerpc/kvm/irq.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __IRQ_H -#define __IRQ_H - -#include - -static inline int irqchip_in_kernel(struct kvm *kvm) -{ - int ret = 0; - -#ifdef CONFIG_KVM_MPIC - ret = ret || (kvm->arch.mpic != NULL); -#endif -#ifdef CONFIG_KVM_XICS - ret = ret || (kvm->arch.xics != NULL); - ret = ret || (kvm->arch.xive != NULL); -#endif - smp_rmb(); - return ret; -} - -#endif diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index b850b0efa201a35bc49b4c24bffc56581c8a34d4..04494a4fb37a0ad4026dde26ec7b11e285137f1f 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -36,7 +36,6 @@ #include #include "timing.h" -#include "irq.h" #include "../mm/mmu_decl.h" #define CREATE_TRACE_POINTS @@ -2165,10 +2164,25 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) return 0; } +bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) +{ + int ret = 0; + +#ifdef CONFIG_KVM_MPIC + ret = ret || (kvm->arch.mpic != NULL); +#endif +#ifdef CONFIG_KVM_XICS + ret = ret || (kvm->arch.xics != NULL); + ret = ret || (kvm->arch.xive != NULL); +#endif + smp_rmb(); + return ret; +} + int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, bool line_status) { - if (!irqchip_in_kernel(kvm)) + if (!kvm_arch_irqchip_in_kernel(kvm)) return -ENXIO; irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 8560c912186dfcc84cea889442359571ef9e2e22..4de71cbf6e8ee56efc95694dc3ff252403452f8c 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -52,7 +52,9 @@ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \ obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \ memcpy_64.o copy_mc_64.o -ifndef CONFIG_PPC_QUEUED_SPINLOCKS +ifdef CONFIG_PPC_QUEUED_SPINLOCKS +obj-$(CONFIG_SMP) += qspinlock.o +else obj64-$(CONFIG_SMP) += locks.o endif diff --git a/arch/powerpc/lib/code-patching.c 
b/arch/powerpc/lib/code-patching.c index ad0cf3108dd09c78c5fb50d1395253ef5d43d6f6..b00112d7ad467d30712d168538a418bac4d5c60c 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -4,12 +4,17 @@ */ #include +#include +#include #include #include #include #include #include +#include +#include +#include #include #include #include @@ -41,12 +46,59 @@ int raw_patch_instruction(u32 *addr, ppc_inst_t instr) return __patch_instruction(addr, instr, addr); } -#ifdef CONFIG_STRICT_KERNEL_RWX -static DEFINE_PER_CPU(struct vm_struct *, text_poke_area); +struct patch_context { + union { + struct vm_struct *area; + struct mm_struct *mm; + }; + unsigned long addr; + pte_t *pte; +}; + +static DEFINE_PER_CPU(struct patch_context, cpu_patching_context); static int map_patch_area(void *addr, unsigned long text_poke_addr); static void unmap_patch_area(unsigned long addr); +static bool mm_patch_enabled(void) +{ + return IS_ENABLED(CONFIG_SMP) && radix_enabled(); +} + +/* + * The following applies for Radix MMU. Hash MMU has different requirements, + * and so is not supported. + * + * Changing mm requires context synchronising instructions on both sides of + * the context switch, as well as a hwsync between the last instruction for + * which the address of an associated storage access was translated using + * the current context. + * + * switch_mm_irqs_off() performs an isync after the context switch. It is + * the responsibility of the caller to perform the CSI and hwsync before + * starting/stopping the temp mm. + */ +static struct mm_struct *start_using_temp_mm(struct mm_struct *temp_mm) +{ + struct mm_struct *orig_mm = current->active_mm; + + lockdep_assert_irqs_disabled(); + switch_mm_irqs_off(orig_mm, temp_mm, current); + + WARN_ON(!mm_is_thread_local(temp_mm)); + + suspend_breakpoints(); + return orig_mm; +} + +static void stop_using_temp_mm(struct mm_struct *temp_mm, + struct mm_struct *orig_mm) +{ + lockdep_assert_irqs_disabled(); + switch_mm_irqs_off(temp_mm, orig_mm, current); + restore_breakpoints(); +} + static int text_area_cpu_up(unsigned int cpu) { struct vm_struct *area; @@ -68,29 +120,108 @@ static int text_area_cpu_up(unsigned int cpu) unmap_patch_area(addr); - this_cpu_write(text_poke_area, area); + this_cpu_write(cpu_patching_context.area, area); + this_cpu_write(cpu_patching_context.addr, addr); + this_cpu_write(cpu_patching_context.pte, virt_to_kpte(addr)); return 0; } static int text_area_cpu_down(unsigned int cpu) { - free_vm_area(this_cpu_read(text_poke_area)); + free_vm_area(this_cpu_read(cpu_patching_context.area)); + this_cpu_write(cpu_patching_context.area, NULL); + this_cpu_write(cpu_patching_context.addr, 0); + this_cpu_write(cpu_patching_context.pte, NULL); + return 0; +} + +static void put_patching_mm(struct mm_struct *mm, unsigned long patching_addr) +{ + struct mmu_gather tlb; + + tlb_gather_mmu(&tlb, mm); + free_pgd_range(&tlb, patching_addr, patching_addr + PAGE_SIZE, 0, 0); + mmput(mm); +} + +static int text_area_cpu_up_mm(unsigned int cpu) +{ + struct mm_struct *mm; + unsigned long addr; + pte_t *pte; + spinlock_t *ptl; + + mm = mm_alloc(); + if (WARN_ON(!mm)) + goto fail_no_mm; + + /* + * Choose a random page-aligned address from the interval + * [PAGE_SIZE .. DEFAULT_MAP_WINDOW - PAGE_SIZE]. + * The lower address bound is PAGE_SIZE to avoid the zero-page. 
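// Editor's illustration of the address choice described above and computed
// just below: one of the (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2) page-aligned
// slots above the zero page is drawn uniformly at random. An equivalent
// helper (name is hypothetical):
static unsigned long pick_patching_addr(void)
{
	unsigned long nr_slots = DEFAULT_MAP_WINDOW / PAGE_SIZE - 2;

	return (1 + (get_random_long() % nr_slots)) << PAGE_SHIFT;
}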
+ */ + addr = (1 + (get_random_long() % (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2))) << PAGE_SHIFT; + + /* + * PTE allocation uses GFP_KERNEL which means we need to + * pre-allocate the PTE here because we cannot do the + * allocation during patching when IRQs are disabled. + * + * Using get_locked_pte() to avoid open coding, the lock + * is unnecessary. + */ + pte = get_locked_pte(mm, addr, &ptl); + if (!pte) + goto fail_no_pte; + pte_unmap_unlock(pte, ptl); + + this_cpu_write(cpu_patching_context.mm, mm); + this_cpu_write(cpu_patching_context.addr, addr); + + return 0; + +fail_no_pte: + put_patching_mm(mm, addr); +fail_no_mm: + return -ENOMEM; +} + +static int text_area_cpu_down_mm(unsigned int cpu) +{ + put_patching_mm(this_cpu_read(cpu_patching_context.mm), + this_cpu_read(cpu_patching_context.addr)); + + this_cpu_write(cpu_patching_context.mm, NULL); + this_cpu_write(cpu_patching_context.addr, 0); + return 0; } static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done); -/* - * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and - * we judge it as being preferable to a kernel that will crash later when - * someone tries to use patch_instruction(). - */ void __init poking_init(void) { - BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, - "powerpc/text_poke:online", text_area_cpu_up, - text_area_cpu_down)); + int ret; + + if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) + return; + + if (mm_patch_enabled()) + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "powerpc/text_poke_mm:online", + text_area_cpu_up_mm, + text_area_cpu_down_mm); + else + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "powerpc/text_poke:online", + text_area_cpu_up, + text_area_cpu_down); + + /* cpuhp_setup_state returns >= 0 on success */ + if (WARN_ON(ret < 0)) + return; + static_branch_enable(&poking_init_done); } @@ -147,6 +278,56 @@ static void unmap_patch_area(unsigned long addr) flush_tlb_kernel_range(addr, addr + PAGE_SIZE); } +static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr) +{ + int err; + u32 *patch_addr; + unsigned long text_poke_addr; + pte_t *pte; + unsigned long pfn = get_patch_pfn(addr); + struct mm_struct *patching_mm; + struct mm_struct *orig_mm; + spinlock_t *ptl; + + patching_mm = __this_cpu_read(cpu_patching_context.mm); + text_poke_addr = __this_cpu_read(cpu_patching_context.addr); + patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr)); + + pte = get_locked_pte(patching_mm, text_poke_addr, &ptl); + if (!pte) + return -ENOMEM; + + __set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0); + + /* order PTE update before use, also serves as the hwsync */ + asm volatile("ptesync": : :"memory"); + + /* order context switch after arbitrary prior code */ + isync(); + + orig_mm = start_using_temp_mm(patching_mm); + + err = __patch_instruction(addr, instr, patch_addr); + + /* hwsync performed by __patch_instruction (sync) if successful */ + if (err) + mb(); /* sync */ + + /* context synchronisation performed by __patch_instruction (isync or exception) */ + stop_using_temp_mm(patching_mm, orig_mm); + + pte_clear(patching_mm, text_poke_addr, pte); + /* + * ptesync to order PTE update before TLB invalidation done + * by radix__local_flush_tlb_page_psize (in _tlbiel_va) + */ + local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize); + + pte_unmap_unlock(pte, ptl); + + return err; +} + static int __do_patch_instruction(u32 *addr, ppc_inst_t instr) { int err; @@ -155,10 +336,10 @@ static int __do_patch_instruction(u32 *addr, 
ppc_inst_t instr) pte_t *pte; unsigned long pfn = get_patch_pfn(addr); - text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr & PAGE_MASK; + text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK; patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr)); - pte = virt_to_kpte(text_poke_addr); + pte = __this_cpu_read(cpu_patching_context.pte); __set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0); /* See ptesync comment in radix__set_pte_at() */ if (radix_enabled()) @@ -172,7 +353,7 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr) return err; } -static int do_patch_instruction(u32 *addr, ppc_inst_t instr) +int patch_instruction(u32 *addr, ppc_inst_t instr) { int err; unsigned long flags; @@ -182,34 +363,19 @@ static int do_patch_instruction(u32 *addr, ppc_inst_t instr) * when text_poke_area is not ready, but we still need * to allow patching. We just do the plain old patching */ - if (!static_branch_likely(&poking_init_done)) + if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) || + !static_branch_likely(&poking_init_done)) return raw_patch_instruction(addr, instr); local_irq_save(flags); - err = __do_patch_instruction(addr, instr); + if (mm_patch_enabled()) + err = __do_patch_instruction_mm(addr, instr); + else + err = __do_patch_instruction(addr, instr); local_irq_restore(flags); return err; } -#else /* !CONFIG_STRICT_KERNEL_RWX */ - -static int do_patch_instruction(u32 *addr, ppc_inst_t instr) -{ - return raw_patch_instruction(addr, instr); -} - -#endif /* CONFIG_STRICT_KERNEL_RWX */ - -__ro_after_init DEFINE_STATIC_KEY_FALSE(init_mem_is_free); - -int patch_instruction(u32 *addr, ppc_inst_t instr) -{ - /* Make sure we aren't patching a freed init section */ - if (static_branch_likely(&init_mem_is_free) && init_section_contains(addr, 4)) - return 0; - - return do_patch_instruction(addr, instr); -} NOKPROBE_SYMBOL(patch_instruction); int patch_branch(u32 *addr, unsigned long target, int flags) diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 31f40f544de547b826a9287d924376efa93a2f52..80def1c2afcb68ed93fd7fcc3685380dc1d8026f 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -117,10 +117,64 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) } } +#ifdef CONFIG_PPC_BARRIER_NOSPEC +static bool is_fixup_addr_valid(void *dest, size_t size) +{ + return system_state < SYSTEM_FREEING_INITMEM || + !init_section_contains(dest, size); +} + +static int do_patch_fixups(long *start, long *end, unsigned int *instrs, int num) +{ + int i; + + for (i = 0; start < end; start++, i++) { + int j; + unsigned int *dest = (void *)start + *start; + + if (!is_fixup_addr_valid(dest, sizeof(*instrs) * num)) + continue; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + + for (j = 0; j < num; j++) + patch_instruction(dest + j, ppc_inst(instrs[j])); + } + return i; +} +#endif + #ifdef CONFIG_PPC_BOOK3S_64 +static int do_patch_entry_fixups(long *start, long *end, unsigned int *instrs, + bool do_fallback, void *fallback) +{ + int i; + + for (i = 0; start < end; start++, i++) { + unsigned int *dest = (void *)start + *start; + + if (!is_fixup_addr_valid(dest, sizeof(*instrs) * 3)) + continue; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + + // See comment in do_entry_flush_fixups() RE order of patching + if (do_fallback) { + patch_instruction(dest, ppc_inst(instrs[0])); + patch_instruction(dest + 2, 
ppc_inst(instrs[2])); + patch_branch(dest + 1, (unsigned long)fallback, BRANCH_SET_LINK); + } else { + patch_instruction(dest + 1, ppc_inst(instrs[1])); + patch_instruction(dest + 2, ppc_inst(instrs[2])); + patch_instruction(dest, ppc_inst(instrs[0])); + } + } + return i; +} + static void do_stf_entry_barrier_fixups(enum stf_barrier_type types) { - unsigned int instrs[3], *dest; + unsigned int instrs[3]; long *start, *end; int i; @@ -144,23 +198,8 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types) instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */ } - for (i = 0; start < end; start++, i++) { - dest = (void *)start + *start; - - pr_devel("patching dest %lx\n", (unsigned long)dest); - - // See comment in do_entry_flush_fixups() RE order of patching - if (types & STF_BARRIER_FALLBACK) { - patch_instruction(dest, ppc_inst(instrs[0])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); - patch_branch(dest + 1, - (unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK); - } else { - patch_instruction(dest + 1, ppc_inst(instrs[1])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); - patch_instruction(dest, ppc_inst(instrs[0])); - } - } + i = do_patch_entry_fixups(start, end, instrs, types & STF_BARRIER_FALLBACK, + &stf_barrier_fallback); printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i, (types == STF_BARRIER_NONE) ? "no" : @@ -172,7 +211,7 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types) static void do_stf_exit_barrier_fixups(enum stf_barrier_type types) { - unsigned int instrs[6], *dest; + unsigned int instrs[6]; long *start, *end; int i; @@ -206,18 +245,8 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types) instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */ } - for (i = 0; start < end; start++, i++) { - dest = (void *)start + *start; + i = do_patch_fixups(start, end, instrs, ARRAY_SIZE(instrs)); - pr_devel("patching dest %lx\n", (unsigned long)dest); - - patch_instruction(dest, ppc_inst(instrs[0])); - patch_instruction(dest + 1, ppc_inst(instrs[1])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); - patch_instruction(dest + 3, ppc_inst(instrs[3])); - patch_instruction(dest + 4, ppc_inst(instrs[4])); - patch_instruction(dest + 5, ppc_inst(instrs[5])); - } printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i, (types == STF_BARRIER_NONE) ? "no" : (types == STF_BARRIER_FALLBACK) ? "fallback" : @@ -274,7 +303,7 @@ void do_stf_barrier_fixups(enum stf_barrier_type types) void do_uaccess_flush_fixups(enum l1d_flush_type types) { - unsigned int instrs[4], *dest; + unsigned int instrs[4]; long *start, *end; int i; @@ -300,17 +329,7 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types) if (types & L1D_FLUSH_MTTRIG) instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0); - for (i = 0; start < end; start++, i++) { - dest = (void *)start + *start; - - pr_devel("patching dest %lx\n", (unsigned long)dest); - - patch_instruction(dest, ppc_inst(instrs[0])); - - patch_instruction(dest + 1, ppc_inst(instrs[1])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); - patch_instruction(dest + 3, ppc_inst(instrs[3])); - } + i = do_patch_fixups(start, end, instrs, ARRAY_SIZE(instrs)); printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i, (types == L1D_FLUSH_NONE) ? 
"no" : @@ -325,7 +344,7 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types) static int __do_entry_flush_fixups(void *data) { enum l1d_flush_type types = *(enum l1d_flush_type *)data; - unsigned int instrs[3], *dest; + unsigned int instrs[3]; long *start, *end; int i; @@ -375,42 +394,13 @@ static int __do_entry_flush_fixups(void *data) start = PTRRELOC(&__start___entry_flush_fixup); end = PTRRELOC(&__stop___entry_flush_fixup); - for (i = 0; start < end; start++, i++) { - dest = (void *)start + *start; - - pr_devel("patching dest %lx\n", (unsigned long)dest); - - if (types == L1D_FLUSH_FALLBACK) { - patch_instruction(dest, ppc_inst(instrs[0])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); - patch_branch(dest + 1, - (unsigned long)&entry_flush_fallback, BRANCH_SET_LINK); - } else { - patch_instruction(dest + 1, ppc_inst(instrs[1])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); - patch_instruction(dest, ppc_inst(instrs[0])); - } - } + i = do_patch_entry_fixups(start, end, instrs, types == L1D_FLUSH_FALLBACK, + &entry_flush_fallback); start = PTRRELOC(&__start___scv_entry_flush_fixup); end = PTRRELOC(&__stop___scv_entry_flush_fixup); - for (; start < end; start++, i++) { - dest = (void *)start + *start; - - pr_devel("patching dest %lx\n", (unsigned long)dest); - - if (types == L1D_FLUSH_FALLBACK) { - patch_instruction(dest, ppc_inst(instrs[0])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); - patch_branch(dest + 1, - (unsigned long)&scv_entry_flush_fallback, BRANCH_SET_LINK); - } else { - patch_instruction(dest + 1, ppc_inst(instrs[1])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); - patch_instruction(dest, ppc_inst(instrs[0])); - } - } - + i += do_patch_entry_fixups(start, end, instrs, types == L1D_FLUSH_FALLBACK, + &scv_entry_flush_fallback); printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i, (types == L1D_FLUSH_NONE) ? "no" : @@ -438,7 +428,7 @@ void do_entry_flush_fixups(enum l1d_flush_type types) static int __do_rfi_flush_fixups(void *data) { enum l1d_flush_type types = *(enum l1d_flush_type *)data; - unsigned int instrs[3], *dest; + unsigned int instrs[3]; long *start, *end; int i; @@ -462,15 +452,7 @@ static int __do_rfi_flush_fixups(void *data) if (types & L1D_FLUSH_MTTRIG) instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0); - for (i = 0; start < end; start++, i++) { - dest = (void *)start + *start; - - pr_devel("patching dest %lx\n", (unsigned long)dest); - - patch_instruction(dest, ppc_inst(instrs[0])); - patch_instruction(dest + 1, ppc_inst(instrs[1])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); - } + i = do_patch_fixups(start, end, instrs, ARRAY_SIZE(instrs)); printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i, (types == L1D_FLUSH_NONE) ? 
"no" : @@ -512,7 +494,7 @@ void do_rfi_flush_fixups(enum l1d_flush_type types) void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end) { - unsigned int instr, *dest; + unsigned int instr; long *start, *end; int i; @@ -526,12 +508,7 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_ instr = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */ } - for (i = 0; start < end; start++, i++) { - dest = (void *)start + *start; - - pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, ppc_inst(instr)); - } + i = do_patch_fixups(start, end, &instr, 1); printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); } @@ -553,7 +530,7 @@ void do_barrier_nospec_fixups(bool enable) #ifdef CONFIG_PPC_E500 void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end) { - unsigned int instr[2], *dest; + unsigned int instr[2]; long *start, *end; int i; @@ -569,13 +546,7 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_ instr[1] = PPC_RAW_SYNC(); } - for (i = 0; start < end; start++, i++) { - dest = (void *)start + *start; - - pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, ppc_inst(instr[0])); - patch_instruction(dest + 1, ppc_inst(instr[1])); - } + i = do_patch_fixups(start, end, instr, ARRAY_SIZE(instr)); printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); } diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c new file mode 100644 index 0000000000000000000000000000000000000000..e4bd145255d0010df9e87737e4c3a12f0d8282fb --- /dev/null +++ b/arch/powerpc/lib/qspinlock.c @@ -0,0 +1,997 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_NODES 4 + +struct qnode { + struct qnode *next; + struct qspinlock *lock; + int cpu; + int yield_cpu; + u8 locked; /* 1 if lock acquired */ +}; + +struct qnodes { + int count; + struct qnode nodes[MAX_NODES]; +}; + +/* Tuning parameters */ +static int steal_spins __read_mostly = (1 << 5); +static int remote_steal_spins __read_mostly = (1 << 2); +#if _Q_SPIN_TRY_LOCK_STEAL == 1 +static const bool maybe_stealers = true; +#else +static bool maybe_stealers __read_mostly = true; +#endif +static int head_spins __read_mostly = (1 << 8); + +static bool pv_yield_owner __read_mostly = true; +static bool pv_yield_allow_steal __read_mostly = false; +static bool pv_spin_on_preempted_owner __read_mostly = false; +static bool pv_sleepy_lock __read_mostly = true; +static bool pv_sleepy_lock_sticky __read_mostly = false; +static u64 pv_sleepy_lock_interval_ns __read_mostly = 0; +static int pv_sleepy_lock_factor __read_mostly = 256; +static bool pv_yield_prev __read_mostly = true; +static bool pv_yield_propagate_owner __read_mostly = true; +static bool pv_prod_head __read_mostly = false; + +static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes); +static DEFINE_PER_CPU_ALIGNED(u64, sleepy_lock_seen_clock); + +#if _Q_SPIN_SPEC_BARRIER == 1 +#define spec_barrier() do { asm volatile("ori 31,31,0" ::: "memory"); } while (0) +#else +#define spec_barrier() do { } while (0) +#endif + +static __always_inline bool recently_sleepy(void) +{ + /* pv_sleepy_lock is true when this is called */ + if (pv_sleepy_lock_interval_ns) { + u64 seen = this_cpu_read(sleepy_lock_seen_clock); + + if (seen) { + u64 delta = sched_clock() - seen; + if (delta < pv_sleepy_lock_interval_ns) + return true; + 
this_cpu_write(sleepy_lock_seen_clock, 0); + } + } + + return false; +} + +static __always_inline int get_steal_spins(bool paravirt, bool sleepy) +{ + if (paravirt && sleepy) + return steal_spins * pv_sleepy_lock_factor; + else + return steal_spins; +} + +static __always_inline int get_remote_steal_spins(bool paravirt, bool sleepy) +{ + if (paravirt && sleepy) + return remote_steal_spins * pv_sleepy_lock_factor; + else + return remote_steal_spins; +} + +static __always_inline int get_head_spins(bool paravirt, bool sleepy) +{ + if (paravirt && sleepy) + return head_spins * pv_sleepy_lock_factor; + else + return head_spins; +} + +static inline u32 encode_tail_cpu(int cpu) +{ + return (cpu + 1) << _Q_TAIL_CPU_OFFSET; +} + +static inline int decode_tail_cpu(u32 val) +{ + return (val >> _Q_TAIL_CPU_OFFSET) - 1; +} + +static inline int get_owner_cpu(u32 val) +{ + return (val & _Q_OWNER_CPU_MASK) >> _Q_OWNER_CPU_OFFSET; +} + +/* + * Try to acquire the lock if it was not already locked. If the tail matches + * mytail then clear it, otherwise leave it unchanged. Return previous value. + * + * This is used by the head of the queue to acquire the lock and clean up + * its tail if it was the last one queued. + */ +static __always_inline u32 trylock_clean_tail(struct qspinlock *lock, u32 tail) +{ + u32 newval = queued_spin_encode_locked_val(); + u32 prev, tmp; + + asm volatile( +"1: lwarx %0,0,%2,%7 # trylock_clean_tail \n" + /* This test is necessary if there could be stealers */ +" andi. %1,%0,%5 \n" +" bne 3f \n" + /* Test whether the lock tail == mytail */ +" and %1,%0,%6 \n" +" cmpw 0,%1,%3 \n" + /* Merge the new locked value */ +" or %1,%1,%4 \n" +" bne 2f \n" + /* If the lock tail matched, then clear it, otherwise leave it. */ +" andc %1,%1,%6 \n" +"2: stwcx. %1,0,%2 \n" +" bne- 1b \n" +"\t" PPC_ACQUIRE_BARRIER " \n" +"3: \n" + : "=&r" (prev), "=&r" (tmp) + : "r" (&lock->val), "r"(tail), "r" (newval), + "i" (_Q_LOCKED_VAL), + "r" (_Q_TAIL_CPU_MASK), + "i" (_Q_SPIN_EH_HINT) + : "cr0", "memory"); + + return prev; +} + +/* + * Publish our tail, replacing previous tail. Return previous value. + * + * This provides a release barrier for publishing node, this pairs with the + * acquire barrier in get_tail_qnode() when the next CPU finds this tail + * value. + */ +static __always_inline u32 publish_tail_cpu(struct qspinlock *lock, u32 tail) +{ + u32 prev, tmp; + + asm volatile( +"\t" PPC_RELEASE_BARRIER " \n" +"1: lwarx %0,0,%2 # publish_tail_cpu \n" +" andc %1,%0,%4 \n" +" or %1,%1,%3 \n" +" stwcx. %1,0,%2 \n" +" bne- 1b \n" + : "=&r" (prev), "=&r"(tmp) + : "r" (&lock->val), "r" (tail), "r"(_Q_TAIL_CPU_MASK) + : "cr0", "memory"); + + return prev; +} + +static __always_inline u32 set_mustq(struct qspinlock *lock) +{ + u32 prev; + + asm volatile( +"1: lwarx %0,0,%1 # set_mustq \n" +" or %0,%0,%2 \n" +" stwcx. %0,0,%1 \n" +" bne- 1b \n" + : "=&r" (prev) + : "r" (&lock->val), "r" (_Q_MUST_Q_VAL) + : "cr0", "memory"); + + return prev; +} + +static __always_inline u32 clear_mustq(struct qspinlock *lock) +{ + u32 prev; + + asm volatile( +"1: lwarx %0,0,%1 # clear_mustq \n" +" andc %0,%0,%2 \n" +" stwcx.
%0,0,%1 \n" +" bne- 1b \n" + : "=&r" (prev) + : "r" (&lock->val), "r" (_Q_MUST_Q_VAL) + : "cr0", "memory"); + + return prev; +} + +static __always_inline bool try_set_sleepy(struct qspinlock *lock, u32 old) +{ + u32 prev; + u32 new = old | _Q_SLEEPY_VAL; + + BUG_ON(!(old & _Q_LOCKED_VAL)); + BUG_ON(old & _Q_SLEEPY_VAL); + + asm volatile( +"1: lwarx %0,0,%1 # try_set_sleepy \n" +" cmpw 0,%0,%2 \n" +" bne- 2f \n" +" stwcx. %3,0,%1 \n" +" bne- 1b \n" +"2: \n" + : "=&r" (prev) + : "r" (&lock->val), "r"(old), "r" (new) + : "cr0", "memory"); + + return likely(prev == old); +} + +static __always_inline void seen_sleepy_owner(struct qspinlock *lock, u32 val) +{ + if (pv_sleepy_lock) { + if (pv_sleepy_lock_interval_ns) + this_cpu_write(sleepy_lock_seen_clock, sched_clock()); + if (!(val & _Q_SLEEPY_VAL)) + try_set_sleepy(lock, val); + } +} + +static __always_inline void seen_sleepy_lock(void) +{ + if (pv_sleepy_lock && pv_sleepy_lock_interval_ns) + this_cpu_write(sleepy_lock_seen_clock, sched_clock()); +} + +static __always_inline void seen_sleepy_node(struct qspinlock *lock, u32 val) +{ + if (pv_sleepy_lock) { + if (pv_sleepy_lock_interval_ns) + this_cpu_write(sleepy_lock_seen_clock, sched_clock()); + if (val & _Q_LOCKED_VAL) { + if (!(val & _Q_SLEEPY_VAL)) + try_set_sleepy(lock, val); + } + } +} + +static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val) +{ + int cpu = decode_tail_cpu(val); + struct qnodes *qnodesp = per_cpu_ptr(&qnodes, cpu); + int idx; + + /* + * After publishing the new tail and finding a previous tail in the + * previous val (which is the control dependency), this barrier + * orders the release barrier in publish_tail_cpu performed by the + * last CPU, with subsequently looking at its qnode structures + * after the barrier. + */ + smp_acquire__after_ctrl_dep(); + + for (idx = 0; idx < MAX_NODES; idx++) { + struct qnode *qnode = &qnodesp->nodes[idx]; + if (qnode->lock == lock) + return qnode; + } + + BUG(); +} + +/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */ +static __always_inline bool __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt, bool mustq) +{ + int owner; + u32 yield_count; + bool preempted = false; + + BUG_ON(!(val & _Q_LOCKED_VAL)); + + if (!paravirt) + goto relax; + + if (!pv_yield_owner) + goto relax; + + owner = get_owner_cpu(val); + yield_count = yield_count_of(owner); + + if ((yield_count & 1) == 0) + goto relax; /* owner vcpu is running */ + + spin_end(); + + seen_sleepy_owner(lock, val); + preempted = true; + + /* + * Read the lock word after sampling the yield count. On the other side + * there may a wmb because the yield count update is done by the + * hypervisor preemption and the value update by the OS, however this + * ordering might reduce the chance of out of order accesses and + * improve the heuristic. + */ + smp_rmb(); + + if (READ_ONCE(lock->val) == val) { + if (mustq) + clear_mustq(lock); + yield_to_preempted(owner, yield_count); + if (mustq) + set_mustq(lock); + spin_begin(); + + /* Don't relax if we yielded. Maybe we should? */ + return preempted; + } + spin_begin(); +relax: + spin_cpu_relax(); + + return preempted; +} + +/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */ +static __always_inline bool yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt) +{ + return __yield_to_locked_owner(lock, val, paravirt, false); +} + +/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. 
*/ +static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt) +{ + bool mustq = false; + + if ((val & _Q_MUST_Q_VAL) && pv_yield_allow_steal) + mustq = true; + + return __yield_to_locked_owner(lock, val, paravirt, mustq); +} + +static __always_inline void propagate_yield_cpu(struct qnode *node, u32 val, int *set_yield_cpu, bool paravirt) +{ + struct qnode *next; + int owner; + + if (!paravirt) + return; + if (!pv_yield_propagate_owner) + return; + + owner = get_owner_cpu(val); + if (*set_yield_cpu == owner) + return; + + next = READ_ONCE(node->next); + if (!next) + return; + + if (vcpu_is_preempted(owner)) { + next->yield_cpu = owner; + *set_yield_cpu = owner; + } else if (*set_yield_cpu != -1) { + next->yield_cpu = owner; + *set_yield_cpu = owner; + } +} + +/* Called inside spin_begin() */ +static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, u32 val, bool paravirt) +{ + int prev_cpu = decode_tail_cpu(val); + u32 yield_count; + int yield_cpu; + bool preempted = false; + + if (!paravirt) + goto relax; + + if (!pv_yield_propagate_owner) + goto yield_prev; + + yield_cpu = READ_ONCE(node->yield_cpu); + if (yield_cpu == -1) { + /* Propagate back the -1 CPU */ + if (node->next && node->next->yield_cpu != -1) + node->next->yield_cpu = yield_cpu; + goto yield_prev; + } + + yield_count = yield_count_of(yield_cpu); + if ((yield_count & 1) == 0) + goto yield_prev; /* owner vcpu is running */ + + spin_end(); + + preempted = true; + seen_sleepy_node(lock, val); + + smp_rmb(); + + if (yield_cpu == node->yield_cpu) { + if (node->next && node->next->yield_cpu != yield_cpu) + node->next->yield_cpu = yield_cpu; + yield_to_preempted(yield_cpu, yield_count); + spin_begin(); + return preempted; + } + spin_begin(); + +yield_prev: + if (!pv_yield_prev) + goto relax; + + yield_count = yield_count_of(prev_cpu); + if ((yield_count & 1) == 0) + goto relax; /* owner vcpu is running */ + + spin_end(); + + preempted = true; + seen_sleepy_node(lock, val); + + smp_rmb(); /* See __yield_to_locked_owner comment */ + + if (!node->locked) { + yield_to_preempted(prev_cpu, yield_count); + spin_begin(); + return preempted; + } + spin_begin(); + +relax: + spin_cpu_relax(); + + return preempted; +} + +static __always_inline bool steal_break(u32 val, int iters, bool paravirt, bool sleepy) +{ + if (iters >= get_steal_spins(paravirt, sleepy)) + return true; + + if (IS_ENABLED(CONFIG_NUMA) && + (iters >= get_remote_steal_spins(paravirt, sleepy))) { + int cpu = get_owner_cpu(val); + if (numa_node_id() != cpu_to_node(cpu)) + return true; + } + return false; +} + +static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool paravirt) +{ + bool seen_preempted = false; + bool sleepy = false; + int iters = 0; + u32 val; + + if (!steal_spins) { + /* XXX: should spin_on_preempted_owner do anything here? 
*/ + return false; + } + + /* Attempt to steal the lock */ + spin_begin(); + do { + bool preempted = false; + + val = READ_ONCE(lock->val); + if (val & _Q_MUST_Q_VAL) + break; + spec_barrier(); + + if (unlikely(!(val & _Q_LOCKED_VAL))) { + spin_end(); + if (__queued_spin_trylock_steal(lock)) + return true; + spin_begin(); + } else { + preempted = yield_to_locked_owner(lock, val, paravirt); + } + + if (paravirt && pv_sleepy_lock) { + if (!sleepy) { + if (val & _Q_SLEEPY_VAL) { + seen_sleepy_lock(); + sleepy = true; + } else if (recently_sleepy()) { + sleepy = true; + } + } + if (pv_sleepy_lock_sticky && seen_preempted && + !(val & _Q_SLEEPY_VAL)) { + if (try_set_sleepy(lock, val)) + val |= _Q_SLEEPY_VAL; + } + } + + if (preempted) { + seen_preempted = true; + sleepy = true; + if (!pv_spin_on_preempted_owner) + iters++; + /* + * pv_spin_on_preempted_owner don't increase iters + * while the owner is preempted -- we won't interfere + * with it by definition. This could introduce some + * latency issue if we continually observe preempted + * owners, but hopefully that's a rare corner case of + * a badly oversubscribed system. + */ + } else { + iters++; + } + } while (!steal_break(val, iters, paravirt, sleepy)); + + spin_end(); + + return false; +} + +static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt) +{ + struct qnodes *qnodesp; + struct qnode *next, *node; + u32 val, old, tail; + bool seen_preempted = false; + bool sleepy = false; + bool mustq = false; + int idx; + int set_yield_cpu = -1; + int iters = 0; + + BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); + + qnodesp = this_cpu_ptr(&qnodes); + if (unlikely(qnodesp->count >= MAX_NODES)) { + spec_barrier(); + while (!queued_spin_trylock(lock)) + cpu_relax(); + return; + } + + idx = qnodesp->count++; + /* + * Ensure that we increment the head node->count before initialising + * the actual node. If the compiler is kind enough to reorder these + * stores, then an IRQ could overwrite our assignments. + */ + barrier(); + node = &qnodesp->nodes[idx]; + node->next = NULL; + node->lock = lock; + node->cpu = smp_processor_id(); + node->yield_cpu = -1; + node->locked = 0; + + tail = encode_tail_cpu(node->cpu); + + old = publish_tail_cpu(lock, tail); + + /* + * If there was a previous node; link it and wait until reaching the + * head of the waitqueue. + */ + if (old & _Q_TAIL_CPU_MASK) { + struct qnode *prev = get_tail_qnode(lock, old); + + /* Link @node into the waitqueue. */ + WRITE_ONCE(prev->next, node); + + /* Wait for mcs node lock to be released */ + spin_begin(); + while (!node->locked) { + spec_barrier(); + + if (yield_to_prev(lock, node, old, paravirt)) + seen_preempted = true; + } + spec_barrier(); + spin_end(); + + /* Clear out stale propagated yield_cpu */ + if (paravirt && pv_yield_propagate_owner && node->yield_cpu != -1) + node->yield_cpu = -1; + + smp_rmb(); /* acquire barrier for the mcs lock */ + + /* + * Generic qspinlocks have this prefetch here, but it seems + * like it could cause additional line transitions because + * the waiter will keep loading from it. + */ + if (_Q_SPIN_PREFETCH_NEXT) { + next = READ_ONCE(node->next); + if (next) + prefetchw(next); + } + } + + /* We're at the head of the waitqueue, wait for the lock. 
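// Editor's note at this step: the CPU has reached the head of the MCS queue,
// so from here it spins on the lock word itself rather than on its qnode,
// yielding to a preempted owner, and after get_head_spins() iterations it
// sets _Q_MUST_Q_VAL to fence off lock stealers.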
*/ +again: + spin_begin(); + for (;;) { + bool preempted; + + val = READ_ONCE(lock->val); + if (!(val & _Q_LOCKED_VAL)) + break; + spec_barrier(); + + if (paravirt && pv_sleepy_lock && maybe_stealers) { + if (!sleepy) { + if (val & _Q_SLEEPY_VAL) { + seen_sleepy_lock(); + sleepy = true; + } else if (recently_sleepy()) { + sleepy = true; + } + } + if (pv_sleepy_lock_sticky && seen_preempted && + !(val & _Q_SLEEPY_VAL)) { + if (try_set_sleepy(lock, val)) + val |= _Q_SLEEPY_VAL; + } + } + + propagate_yield_cpu(node, val, &set_yield_cpu, paravirt); + preempted = yield_head_to_locked_owner(lock, val, paravirt); + if (!maybe_stealers) + continue; + + if (preempted) + seen_preempted = true; + + if (paravirt && preempted) { + sleepy = true; + + if (!pv_spin_on_preempted_owner) + iters++; + } else { + iters++; + } + + if (!mustq && iters >= get_head_spins(paravirt, sleepy)) { + mustq = true; + set_mustq(lock); + val |= _Q_MUST_Q_VAL; + } + } + spec_barrier(); + spin_end(); + + /* If we're the last queued, must clean up the tail. */ + old = trylock_clean_tail(lock, tail); + if (unlikely(old & _Q_LOCKED_VAL)) { + BUG_ON(!maybe_stealers); + goto again; /* Can only be true if maybe_stealers. */ + } + + if ((old & _Q_TAIL_CPU_MASK) == tail) + goto release; /* We were the tail, no next. */ + + /* There is a next, must wait for node->next != NULL (MCS protocol) */ + next = READ_ONCE(node->next); + if (!next) { + spin_begin(); + while (!(next = READ_ONCE(node->next))) + cpu_relax(); + spin_end(); + } + spec_barrier(); + + /* + * Unlock the next mcs waiter node. Release barrier is not required + * here because the acquirer is only accessing the lock word, and + * the acquire barrier we took the lock with orders that update vs + * this store to locked. The corresponding barrier is the smp_rmb() + * acquire barrier for mcs lock, above. + */ + if (paravirt && pv_prod_head) { + int next_cpu = next->cpu; + WRITE_ONCE(next->locked, 1); + if (_Q_SPIN_MISO) + asm volatile("miso" ::: "memory"); + if (vcpu_is_preempted(next_cpu)) + prod_cpu(next_cpu); + } else { + WRITE_ONCE(next->locked, 1); + if (_Q_SPIN_MISO) + asm volatile("miso" ::: "memory"); + } + +release: + qnodesp->count--; /* release the node */ +} + +void queued_spin_lock_slowpath(struct qspinlock *lock) +{ + /* + * This looks funny, but it induces the compiler to inline both + * sides of the branch rather than share code as when the condition + * is passed as the paravirt argument to the functions. + */ + if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && is_shared_processor()) { + if (try_to_steal_lock(lock, true)) { + spec_barrier(); + return; + } + queued_spin_lock_mcs_queue(lock, true); + } else { + if (try_to_steal_lock(lock, false)) { + spec_barrier(); + return; + } + queued_spin_lock_mcs_queue(lock, false); + } +} +EXPORT_SYMBOL(queued_spin_lock_slowpath); + +#ifdef CONFIG_PARAVIRT_SPINLOCKS +void pv_spinlocks_init(void) +{ +} +#endif + +#include <linux/debugfs.h> +static int steal_spins_set(void *data, u64 val) +{ +#if _Q_SPIN_TRY_LOCK_STEAL == 1 + /* MAYBE_STEAL remains true */ + steal_spins = val; +#else + static DEFINE_MUTEX(lock); + + /* + * The lock slow path has a !maybe_stealers case that can assume + * the head of queue will not see concurrent waiters. That waiter + * is unsafe in the presence of stealers, so must keep them away + * from one another.
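+	 * The mode switch below therefore brackets the steal_spins update with synchronize_rcu(): slowpath spinners run with preemption disabled, so a grace period guarantees no CPU is still operating in the old mode.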
+ */ + + mutex_lock(&lock); + if (val && !steal_spins) { + maybe_stealers = true; + /* wait for queue head waiter to go away */ + synchronize_rcu(); + steal_spins = val; + } else if (!val && steal_spins) { + steal_spins = val; + /* wait for all possible stealers to go away */ + synchronize_rcu(); + maybe_stealers = false; + } else { + steal_spins = val; + } + mutex_unlock(&lock); +#endif + + return 0; +} + +static int steal_spins_get(void *data, u64 *val) +{ + *val = steal_spins; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_steal_spins, steal_spins_get, steal_spins_set, "%llu\n"); + +static int remote_steal_spins_set(void *data, u64 val) +{ + remote_steal_spins = val; + + return 0; +} + +static int remote_steal_spins_get(void *data, u64 *val) +{ + *val = remote_steal_spins; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_remote_steal_spins, remote_steal_spins_get, remote_steal_spins_set, "%llu\n"); + +static int head_spins_set(void *data, u64 val) +{ + head_spins = val; + + return 0; +} + +static int head_spins_get(void *data, u64 *val) +{ + *val = head_spins; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_head_spins, head_spins_get, head_spins_set, "%llu\n"); + +static int pv_yield_owner_set(void *data, u64 val) +{ + pv_yield_owner = !!val; + + return 0; +} + +static int pv_yield_owner_get(void *data, u64 *val) +{ + *val = pv_yield_owner; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_owner, pv_yield_owner_get, pv_yield_owner_set, "%llu\n"); + +static int pv_yield_allow_steal_set(void *data, u64 val) +{ + pv_yield_allow_steal = !!val; + + return 0; +} + +static int pv_yield_allow_steal_get(void *data, u64 *val) +{ + *val = pv_yield_allow_steal; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_allow_steal, pv_yield_allow_steal_get, pv_yield_allow_steal_set, "%llu\n"); + +static int pv_spin_on_preempted_owner_set(void *data, u64 val) +{ + pv_spin_on_preempted_owner = !!val; + + return 0; +} + +static int pv_spin_on_preempted_owner_get(void *data, u64 *val) +{ + *val = pv_spin_on_preempted_owner; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_pv_spin_on_preempted_owner, pv_spin_on_preempted_owner_get, pv_spin_on_preempted_owner_set, "%llu\n"); + +static int pv_sleepy_lock_set(void *data, u64 val) +{ + pv_sleepy_lock = !!val; + + return 0; +} + +static int pv_sleepy_lock_get(void *data, u64 *val) +{ + *val = pv_sleepy_lock; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock, pv_sleepy_lock_get, pv_sleepy_lock_set, "%llu\n"); + +static int pv_sleepy_lock_sticky_set(void *data, u64 val) +{ + pv_sleepy_lock_sticky = !!val; + + return 0; +} + +static int pv_sleepy_lock_sticky_get(void *data, u64 *val) +{ + *val = pv_sleepy_lock_sticky; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_sticky, pv_sleepy_lock_sticky_get, pv_sleepy_lock_sticky_set, "%llu\n"); + +static int pv_sleepy_lock_interval_ns_set(void *data, u64 val) +{ + pv_sleepy_lock_interval_ns = val; + + return 0; +} + +static int pv_sleepy_lock_interval_ns_get(void *data, u64 *val) +{ + *val = pv_sleepy_lock_interval_ns; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_interval_ns, pv_sleepy_lock_interval_ns_get, pv_sleepy_lock_interval_ns_set, "%llu\n"); + +static int pv_sleepy_lock_factor_set(void *data, u64 val) +{ + pv_sleepy_lock_factor = val; + + return 0; +} + +static int pv_sleepy_lock_factor_get(void *data, u64 *val) +{ + *val = pv_sleepy_lock_factor; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_factor, pv_sleepy_lock_factor_get, 
pv_sleepy_lock_factor_set, "%llu\n"); + +static int pv_yield_prev_set(void *data, u64 val) +{ + pv_yield_prev = !!val; + + return 0; +} + +static int pv_yield_prev_get(void *data, u64 *val) +{ + *val = pv_yield_prev; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_prev, pv_yield_prev_get, pv_yield_prev_set, "%llu\n"); + +static int pv_yield_propagate_owner_set(void *data, u64 val) +{ + pv_yield_propagate_owner = !!val; + + return 0; +} + +static int pv_yield_propagate_owner_get(void *data, u64 *val) +{ + *val = pv_yield_propagate_owner; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_propagate_owner, pv_yield_propagate_owner_get, pv_yield_propagate_owner_set, "%llu\n"); + +static int pv_prod_head_set(void *data, u64 val) +{ + pv_prod_head = !!val; + + return 0; +} + +static int pv_prod_head_get(void *data, u64 *val) +{ + *val = pv_prod_head; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_pv_prod_head, pv_prod_head_get, pv_prod_head_set, "%llu\n"); + +static __init int spinlock_debugfs_init(void) +{ + debugfs_create_file("qspl_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_steal_spins); + debugfs_create_file("qspl_remote_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_remote_steal_spins); + debugfs_create_file("qspl_head_spins", 0600, arch_debugfs_dir, NULL, &fops_head_spins); + if (is_shared_processor()) { + debugfs_create_file("qspl_pv_yield_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_owner); + debugfs_create_file("qspl_pv_yield_allow_steal", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_allow_steal); + debugfs_create_file("qspl_pv_spin_on_preempted_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_spin_on_preempted_owner); + debugfs_create_file("qspl_pv_sleepy_lock", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock); + debugfs_create_file("qspl_pv_sleepy_lock_sticky", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_sticky); + debugfs_create_file("qspl_pv_sleepy_lock_interval_ns", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_interval_ns); + debugfs_create_file("qspl_pv_sleepy_lock_factor", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_factor); + debugfs_create_file("qspl_pv_yield_prev", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_prev); + debugfs_create_file("qspl_pv_yield_propagate_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_propagate_owner); + debugfs_create_file("qspl_pv_prod_head", 0600, arch_debugfs_dir, NULL, &fops_pv_prod_head); + } + + return 0; +} +device_initcall(spinlock_debugfs_init); diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 398b5694aeb7065aae5e536a4e00a5a7e9efce3f..38158b77a80193f4448f1eba8787e71664000a93 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -2284,15 +2284,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, op->type = MKOP(STCX, 0, 4); break; -#ifdef __powerpc64__ - case 84: /* ldarx */ - op->type = MKOP(LARX, 0, 8); - break; - - case 214: /* stdcx. */ - op->type = MKOP(STCX, 0, 8); - break; - +#ifdef CONFIG_PPC_HAS_LBARX_LHARX case 52: /* lbarx */ op->type = MKOP(LARX, 0, 1); break; @@ -2308,6 +2300,15 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 726: /* sthcx. */ op->type = MKOP(STCX, 0, 2); break; +#endif +#ifdef __powerpc64__ + case 84: /* ldarx */ + op->type = MKOP(LARX, 0, 8); + break; + + case 214: /* stdcx. 
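+		  (doubleword store-conditional; pairs with the ldarx doubleword load-reserve above)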
*/ + op->type = MKOP(STCX, 0, 8); + break; case 276: /* lqarx */ if (!((rd & 1) || rd == ra || rd == rb)) @@ -3334,7 +3335,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) err = 0; val = 0; switch (size) { -#ifdef __powerpc64__ +#ifdef CONFIG_PPC_HAS_LBARX_LHARX case 1: __get_user_asmx(val, ea, err, "lbarx"); break; diff --git a/arch/powerpc/lib/test_emulate_step_exec_instr.S b/arch/powerpc/lib/test_emulate_step_exec_instr.S index 5473f9d03df3a00e3da2a8c8a57bc04a177cffde..e2b646a4f7fa190814d0abf7111d75e466215ea7 100644 --- a/arch/powerpc/lib/test_emulate_step_exec_instr.S +++ b/arch/powerpc/lib/test_emulate_step_exec_instr.S @@ -16,7 +16,7 @@ _GLOBAL(exec_instr) /* * Stack frame layout (INT_FRAME_SIZE bytes) - * In-memory pt_regs (SP + STACK_FRAME_OVERHEAD) + * In-memory pt_regs (SP + STACK_INT_FRAME_REGS) * Scratch space (SP + 8) * Back chain (SP + 0) */ diff --git a/arch/powerpc/mm/book3s64/hash_4k.c b/arch/powerpc/mm/book3s64/hash_4k.c index 7de1a8a0c62a8320e4aca5c9512e45f304956716..02acbfd05b460c1e096b639506437ce9033e0c3f 100644 --- a/arch/powerpc/mm/book3s64/hash_4k.c +++ b/arch/powerpc/mm/book3s64/hash_4k.c @@ -16,6 +16,8 @@ #include #include +#include "internal.h" + int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, unsigned long flags, int ssize, int subpg_prot) @@ -118,6 +120,9 @@ repeat: } new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE; new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE); + + if (stress_hpt()) + hpt_do_stress(ea, hpte_group); } *ptep = __pte(new_pte & ~H_PAGE_BUSY); return 0; diff --git a/arch/powerpc/mm/book3s64/hash_64k.c b/arch/powerpc/mm/book3s64/hash_64k.c index 998c6817ed47b11240cf873a4e9c80403460a230..954af420f358623d60494d57821b341668777792 100644 --- a/arch/powerpc/mm/book3s64/hash_64k.c +++ b/arch/powerpc/mm/book3s64/hash_64k.c @@ -16,6 +16,8 @@ #include #include +#include "internal.h" + /* * Return true, if the entry has a slot value which * the software considers as invalid. @@ -216,6 +218,9 @@ repeat: new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, PTRS_PER_PTE); new_pte |= H_PAGE_HASHPTE; + if (stress_hpt()) + hpt_do_stress(ea, hpte_group); + *ptep = __pte(new_pte & ~H_PAGE_BUSY); return 0; } @@ -327,7 +332,12 @@ repeat: new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE; new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE); + + if (stress_hpt()) + hpt_do_stress(ea, hpte_group); } + *ptep = __pte(new_pte & ~H_PAGE_BUSY); + return 0; } diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 6df4c6d38b66017c57f7ba0e25597cddf6261c2e..80a148c57de8137aba48022373d3475a567c219f 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -471,7 +471,7 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend, return ret; } -static bool disable_1tb_segments = false; +static bool disable_1tb_segments __ro_after_init; static int __init parse_disable_1tb_segments(char *p) { @@ -480,6 +480,40 @@ static int __init parse_disable_1tb_segments(char *p) } early_param("disable_1tb_segments", parse_disable_1tb_segments); +bool stress_hpt_enabled __initdata; + +static int __init parse_stress_hpt(char *p) +{ + stress_hpt_enabled = true; + return 0; +} +early_param("stress_hpt", parse_stress_hpt); + +__ro_after_init DEFINE_STATIC_KEY_FALSE(stress_hpt_key); + +/* + * per-CPU array allocated if we enable stress_hpt. 
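+ * Each CPU records the last hash page table group(s) it inserted into, so a later fault can evict those entries again (see hpt_do_stress() and hpt_clear_stress()), keeping the hash fault rate high.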
+ */ +#define STRESS_MAX_GROUPS 16 +struct stress_hpt_struct { + unsigned long last_group[STRESS_MAX_GROUPS]; +}; + +static inline int stress_nr_groups(void) +{ + /* + * LPAR H_REMOVE flushes TLB, so need some number > 1 of entries + * to allow practical forward progress. Bare metal returns 1, which + * seems to help uncover more bugs. + */ + if (firmware_has_feature(FW_FEATURE_LPAR)) + return STRESS_MAX_GROUPS; + else + return 1; +} + +static struct stress_hpt_struct *stress_hpt_struct; + static int __init htab_dt_scan_seg_sizes(unsigned long node, const char *uname, int depth, void *data) @@ -976,6 +1010,23 @@ static void __init hash_init_partition_table(phys_addr_t hash_table, pr_info("Partition table %p\n", partition_tb); } +void hpt_clear_stress(void); +static struct timer_list stress_hpt_timer; +void stress_hpt_timer_fn(struct timer_list *timer) +{ + int next_cpu; + + hpt_clear_stress(); + if (!firmware_has_feature(FW_FEATURE_LPAR)) + tlbiel_all(); + + next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); + if (next_cpu >= nr_cpu_ids) + next_cpu = cpumask_first(cpu_online_mask); + stress_hpt_timer.expires = jiffies + msecs_to_jiffies(10); + add_timer_on(&stress_hpt_timer, next_cpu); +} + static void __init htab_initialize(void) { unsigned long table; @@ -995,6 +1046,20 @@ static void __init htab_initialize(void) if (stress_slb_enabled) static_branch_enable(&stress_slb_key); + if (stress_hpt_enabled) { + unsigned long tmp; + static_branch_enable(&stress_hpt_key); + // Too early to use nr_cpu_ids, so use NR_CPUS + tmp = memblock_phys_alloc_range(sizeof(struct stress_hpt_struct) * NR_CPUS, + 0, 0, MEMBLOCK_ALLOC_ANYWHERE); + memset((void *)tmp, 0xff, sizeof(struct stress_hpt_struct) * NR_CPUS); + stress_hpt_struct = __va(tmp); + + timer_setup(&stress_hpt_timer, stress_hpt_timer_fn, 0); + stress_hpt_timer.expires = jiffies + msecs_to_jiffies(10); + add_timer(&stress_hpt_timer); + } + /* * Calculate the required size of the htab. We want the number of * PTEGs to equal one half the number of real pages. @@ -1980,6 +2045,69 @@ repeat: return slot; } +void hpt_clear_stress(void) +{ + int cpu = raw_smp_processor_id(); + int g; + + for (g = 0; g < stress_nr_groups(); g++) { + unsigned long last_group; + last_group = stress_hpt_struct[cpu].last_group[g]; + + if (last_group != -1UL) { + int i; + for (i = 0; i < HPTES_PER_GROUP; i++) { + if (mmu_hash_ops.hpte_remove(last_group) == -1) + break; + } + stress_hpt_struct[cpu].last_group[g] = -1; + } + } +} + +void hpt_do_stress(unsigned long ea, unsigned long hpte_group) +{ + unsigned long last_group; + int cpu = raw_smp_processor_id(); + + last_group = stress_hpt_struct[cpu].last_group[stress_nr_groups() - 1]; + if (hpte_group == last_group) + return; + + if (last_group != -1UL) { + int i; + /* + * Concurrent CPUs might be inserting into this group, so + * give up after a number of iterations, to prevent a live + * lock. + */ + for (i = 0; i < HPTES_PER_GROUP; i++) { + if (mmu_hash_ops.hpte_remove(last_group) == -1) + break; + } + stress_hpt_struct[cpu].last_group[stress_nr_groups() - 1] = -1; + } + + if (ea >= PAGE_OFFSET) { + /* + * We would really like to prefetch to get the TLB loaded, then + * remove the PTE before returning from fault interrupt, to + * increase the hash fault rate. + * + * Unfortunately QEMU TCG does not model the TLB in a way that + * makes this possible, and systemsim (mambo) emulator does not + * bring in TLBs with prefetches (although loads/stores do + * work for non-CI PTEs). 
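+	 * ("CI" here means cache-inhibited mappings.)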
+ * + * So remember this PTE and clear it on the next hash fault. + */ + memmove(&stress_hpt_struct[cpu].last_group[1], + &stress_hpt_struct[cpu].last_group[0], + (stress_nr_groups() - 1) * sizeof(unsigned long)); + stress_hpt_struct[cpu].last_group[0] = hpte_group; + } +} + #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE) static DEFINE_RAW_SPINLOCK(linear_map_hash_lock); diff --git a/arch/powerpc/mm/book3s64/internal.h b/arch/powerpc/mm/book3s64/internal.h index 5045048ce244da88439a469fe766b4e7577ca02a..a57a25f06a215b38417d26ef6e23a3827e7656c2 100644 --- a/arch/powerpc/mm/book3s64/internal.h +++ b/arch/powerpc/mm/book3s64/internal.h @@ -13,6 +13,17 @@ static inline bool stress_slb(void) return static_branch_unlikely(&stress_slb_key); } +extern bool stress_hpt_enabled; + +DECLARE_STATIC_KEY_FALSE(stress_hpt_key); + +static inline bool stress_hpt(void) +{ + return static_branch_unlikely(&stress_hpt_key); +} + +void hpt_do_stress(unsigned long ea, unsigned long hpte_group); + void slb_setup_new_exec(void); void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush); diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index f6151a5892982ac288934e3ce0f3533bb4743911..85c84e89e3eafcda18e9082f52384df33b605960 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -100,14 +100,14 @@ static void do_serialize(void *arg) } /* - * Serialize against find_current_mm_pte which does lock-less + * Serialize against __find_linux_pte() which does lock-less * lookup in page tables with local interrupts disabled. For huge pages * it casts pmd_t to pte_t. Since format of pte_t is different from * pmd_t we want to prevent transit from pmd pointing to page table * to pmd pointing to huge page (and back) while interrupts are disabled. * We clear pmd to possibly replace it with page table pointer in * different code paths. So make sure we wait for the parallel - * find_current_mm_pte to finish. + * __find_linux_pte() to finish. 
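+ * The wait is implemented by do_serialize() above: an IPI to every CPU in mm_cpumask() cannot be serviced until that CPU re-enables interrupts, so once the IPIs have completed, any lockless walk begun under the old pmd has finished.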
*/ void serialize_against_pte_lookup(struct mm_struct *mm) { diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 5852a86d990d66f6b0ddf24db4dcee6fd0ab7fb9..f1ba8d1e8c1a1d12c3fc392719e4d5ae5d3f79bf 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -506,43 +506,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, } while (addr = next, addr != end); } -struct page *follow_huge_pd(struct vm_area_struct *vma, - unsigned long address, hugepd_t hpd, - int flags, int pdshift) -{ - pte_t *ptep; - spinlock_t *ptl; - struct page *page = NULL; - unsigned long mask; - int shift = hugepd_shift(hpd); - struct mm_struct *mm = vma->vm_mm; - -retry: - /* - * hugepage directory entries are protected by mm->page_table_lock - * Use this instead of huge_pte_lockptr - */ - ptl = &mm->page_table_lock; - spin_lock(ptl); - - ptep = hugepte_offset(hpd, address, pdshift); - if (pte_present(*ptep)) { - mask = (1UL << shift) - 1; - page = pte_page(*ptep); - page += ((address & mask) >> PAGE_SHIFT); - if (flags & FOLL_GET) - get_page(page); - } else { - if (is_hugetlb_entry_migration(*ptep)) { - spin_unlock(ptl); - __migration_entry_wait(mm, ptep, ptl); - goto retry; - } - } - spin_unlock(ptl); - return page; -} - bool __init arch_hugetlb_valid_size(unsigned long size) { int shift = __ffs(size); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 84d171953ba44eb4fbcfb42772da104404554ff3..8b121df7b08f8677d72c08fa0d63b1d7509fef96 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -344,7 +344,6 @@ void free_initmem(void) { ppc_md.progress = ppc_printk_progress; mark_initmem_nx(); - static_branch_enable(&init_mem_is_free); free_initmem_default(POISON_FREE_INITMEM); ftrace_free_init_tramp(); } diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c index 0d04f9d5da8d2dd194cdb6be7f7d00805b9a29ee..2fb3edafe9ab65740ec1dcc7aefc55a71473a0bc 100644 --- a/arch/powerpc/mm/nohash/kaslr_booke.c +++ b/arch/powerpc/mm/nohash/kaslr_booke.c @@ -19,7 +19,6 @@ #include #include #include -#include struct regions { unsigned long pa_start; diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index 2c15c86c701571d8c4c1d7533da9dd5774cd4304..a903b308acc548d46dd616656844a61e242e9d25 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -184,6 +184,14 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) mmu_get_tsize(mmu_virtual_psize), 0); } EXPORT_SYMBOL(local_flush_tlb_page); + +void local_flush_tlb_page_psize(struct mm_struct *mm, + unsigned long vmaddr, int psize) +{ + __local_flush_tlb_page(mm, vmaddr, mmu_get_tsize(psize), 0); +} +EXPORT_SYMBOL(local_flush_tlb_page_psize); + #endif /* diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c index 082f6d0308a472916365d203e2cc19e1bd4563db..6b4434dd0ff307c5673e0bd0a8ff25e38023c005 100644 --- a/arch/powerpc/perf/callchain.c +++ b/arch/powerpc/perf/callchain.c @@ -27,7 +27,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp) { if (sp & 0xf) return 0; /* must be 16-byte aligned */ - if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) + if (!validate_sp(sp, current)) return 0; if (sp >= prev_sp + STACK_FRAME_MIN_SIZE) return 1; @@ -53,7 +53,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re sp = regs->gpr[1]; perf_callchain_store(entry, perf_instruction_pointer(regs)); - if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) + if 
(!validate_sp(sp, current)) return; for (;;) { @@ -61,12 +61,13 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re next_sp = fp[0]; if (next_sp == sp + STACK_INT_FRAME_SIZE && - fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { + validate_sp_size(sp, current, STACK_INT_FRAME_SIZE) && + fp[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) { /* * This looks like an interrupt frame for an * interrupt that occurred in the kernel */ - regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD); + regs = (struct pt_regs *)(sp + STACK_INT_FRAME_REGS); next_ip = regs->nip; lr = regs->link; level = 0; diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h index 8965b4463d433a7aaaa940b47215acf59dd64bfd..5e86371a20c7860afdb2ea4baa1c207804a9d8ca 100644 --- a/arch/powerpc/perf/hv-gpci-requests.h +++ b/arch/powerpc/perf/hv-gpci-requests.h @@ -79,6 +79,7 @@ REQUEST(__field(0, 8, partition_id) ) #include I(REQUEST_END) +#ifdef ENABLE_EVENTS_COUNTERINFO_V6 /* * Not available for counter_info_version >= 0x8, use * run_instruction_cycles_by_partition(0x100) instead. @@ -92,6 +93,7 @@ REQUEST(__field(0, 8, partition_id) __count(0x10, 8, cycles) ) #include I(REQUEST_END) +#endif #define REQUEST_NAME system_performance_capabilities #define REQUEST_NUM 0x40 @@ -103,6 +105,7 @@ REQUEST(__field(0, 1, perf_collect_privileged) ) #include I(REQUEST_END) +#ifdef ENABLE_EVENTS_COUNTERINFO_V6 #define REQUEST_NAME processor_bus_utilization_abc_links #define REQUEST_NUM 0x50 #define REQUEST_IDX_KIND "hw_chip_id=?" @@ -194,6 +197,7 @@ REQUEST(__field(0, 4, phys_processor_idx) __count(0x28, 8, instructions_completed) ) #include I(REQUEST_END) +#endif /* Processor_core_power_mode (0x95) skipped, no counters */ /* Affinity_domain_information_by_virtual_processor (0xA0) skipped, diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c index 5eb60ed5b5e8a8a593e46790f29682d7d433633f..7ff8ff3509f5f6f2f414b6475a86b051a30c335e 100644 --- a/arch/powerpc/perf/hv-gpci.c +++ b/arch/powerpc/perf/hv-gpci.c @@ -70,9 +70,9 @@ static const struct attribute_group format_group = { .attrs = format_attrs, }; -static const struct attribute_group event_group = { +static struct attribute_group event_group = { .name = "events", - .attrs = hv_gpci_event_attrs, + /* .attrs is set in init */ }; #define HV_CAPS_ATTR(_name, _format) \ @@ -330,6 +330,7 @@ static int hv_gpci_init(void) int r; unsigned long hret; struct hv_perf_caps caps; + struct hv_gpci_request_buffer *arg; hv_gpci_assert_offsets_correct(); @@ -353,6 +354,36 @@ static int hv_gpci_init(void) /* sampling not supported */ h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + arg = (void *)get_cpu_var(hv_gpci_reqb); + memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); + + /* + * hcall H_GET_PERF_COUNTER_INFO populates the output + * counter_info_version value based on the system hypervisor. + * Pass the counter request 0x10 corresponds to request type + * 'Dispatch_timebase_by_processor', to get the supported + * counter_info_version. + */ + arg->params.counter_request = cpu_to_be32(0x10); + + r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, + virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE); + if (r) { + pr_devel("hcall failed, can't get supported counter_info_version: 0x%x\n", r); + arg->params.counter_info_version_out = 0x8; + } + + /* + * Use counter_info_version_out value to assign + * required hv-gpci event list. 
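+	 * (Platforms reporting 0x8 or above dropped several deprecated events and so get the trimmed hv_gpci_event_attrs list; older platforms keep the fuller hv_gpci_event_attrs_v6 list.)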
+ */ + if (arg->params.counter_info_version_out >= 0x8) + event_group.attrs = hv_gpci_event_attrs; + else + event_group.attrs = hv_gpci_event_attrs_v6; + + put_cpu_var(hv_gpci_reqb); + r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1); if (r) return r; diff --git a/arch/powerpc/perf/hv-gpci.h b/arch/powerpc/perf/hv-gpci.h index 4d108262bed791d4ec8282987dfc20304c6661f9..c72020912dea570a8312d304c87f9d560e73ce48 100644 --- a/arch/powerpc/perf/hv-gpci.h +++ b/arch/powerpc/perf/hv-gpci.h @@ -26,6 +26,7 @@ enum { #define REQUEST_FILE "../hv-gpci-requests.h" #define NAME_LOWER hv_gpci #define NAME_UPPER HV_GPCI +#define ENABLE_EVENTS_COUNTERINFO_V6 #include "req-gen/perf.h" #undef REQUEST_FILE #undef NAME_LOWER diff --git a/arch/powerpc/perf/req-gen/perf.h b/arch/powerpc/perf/req-gen/perf.h index fa9bc804e67af1d3a31a9d3676457219a1c183b1..6b2a59fefffa79cd2194dcfffde4b71d431eacfa 100644 --- a/arch/powerpc/perf/req-gen/perf.h +++ b/arch/powerpc/perf/req-gen/perf.h @@ -139,6 +139,26 @@ PMU_EVENT_ATTR_STRING( \ #define REQUEST_(r_name, r_value, r_idx_1, r_fields) \ r_fields +/* Generate event list for platforms with counter_info_version 0x6 or below */ +static __maybe_unused struct attribute *hv_gpci_event_attrs_v6[] = { +#include REQUEST_FILE + NULL +}; + +/* + * Based on getPerfCountInfo v1.018 documentation, some of the hv-gpci + * events were deprecated for platform firmware that supports + * counter_info_version 0x8 or above. + * Those deprecated events are still part of platform firmware that + * support counter_info_version 0x6 and below. As per the getPerfCountInfo + * v1.018 documentation there is no counter_info_version 0x7. + * Undefining macro ENABLE_EVENTS_COUNTERINFO_V6, to disable the addition of + * deprecated events in "hv_gpci_event_attrs" attribute group, for platforms + * that supports counter_info_version 0x8 or above. 
+ */ +#undef ENABLE_EVENTS_COUNTERINFO_V6 + +/* Generate event list for platforms with counter_info_version 0x8 or above */ static __maybe_unused struct attribute *hv_gpci_event_attrs[] = { #include REQUEST_FILE NULL diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c index f03432ef010bd12836bfd0e194abf06bdb17a126..cefa313c09f0b4226aab6e1a46c795b2aaf24c3a 100644 --- a/arch/powerpc/platforms/44x/warp.c +++ b/arch/powerpc/platforms/44x/warp.c @@ -5,15 +5,17 @@ * Copyright (c) 2008-2009 PIKA Technologies * Sean MacLennan */ +#include <linux/err.h> #include #include +#include <linux/leds.h> #include #include #include #include #include -#include <linux/of_gpio.h> +#include <linux/gpio/consumer.h> #include #include @@ -92,8 +94,6 @@ static int __init warp_post_info(void) static LIST_HEAD(dtm_shutdown_list); static void __iomem *dtm_fpga; -static unsigned green_led, red_led; - struct dtm_shutdown { struct list_head list; @@ -101,7 +101,6 @@ struct dtm_shutdown { void *arg; }; - int pika_dtm_register_shutdown(void (*func)(void *arg), void *arg) { struct dtm_shutdown *shutdown; @@ -132,6 +131,35 @@ int pika_dtm_unregister_shutdown(void (*func)(void *arg), void *arg) return -EINVAL; } +#define WARP_GREEN_LED 0 +#define WARP_RED_LED 1 + +static struct gpio_led warp_gpio_led_pins[] = { + [WARP_GREEN_LED] = { + .name = "green", + .default_state = LEDS_DEFSTATE_KEEP, + .gpiod = NULL, /* to be filled by pika_setup_leds() */ + }, + [WARP_RED_LED] = { + .name = "red", + .default_state = LEDS_DEFSTATE_KEEP, + .gpiod = NULL, /* to be filled by pika_setup_leds() */ + }, +}; + +static struct gpio_led_platform_data warp_gpio_led_data = { + .leds = warp_gpio_led_pins, + .num_leds = ARRAY_SIZE(warp_gpio_led_pins), +}; + +static struct platform_device warp_gpio_leds = { + .name = "leds-gpio", + .id = -1, + .dev = { + .platform_data = &warp_gpio_led_data, + }, +}; + static irqreturn_t temp_isr(int irq, void *context) { struct dtm_shutdown *shutdown; @@ -139,7 +167,7 @@ static irqreturn_t temp_isr(int irq, void *context) local_irq_disable(); - gpio_set_value(green_led, 0); + gpiod_set_value(warp_gpio_led_pins[WARP_GREEN_LED].gpiod, 0); /* Run through the shutdown list. */ list_for_each_entry(shutdown, &dtm_shutdown_list, list) @@ -153,7 +181,7 @@ static irqreturn_t temp_isr(int irq, void *context) out_be32(dtm_fpga + 0x14, reset); } - gpio_set_value(red_led, value); + gpiod_set_value(warp_gpio_led_pins[WARP_RED_LED].gpiod, value); value ^= 1; mdelay(500); } @@ -162,25 +190,78 @@ static irqreturn_t temp_isr(int irq, void *context) return IRQ_HANDLED; } +/* + * The green and red power LEDs are normally driven by the leds-gpio driver, + * but on a critical temperature shutdown we want to drive them ourselves, so + * we acquire both GPIOs and then create the leds-gpio platform device + * ourselves instead of doing it through the device tree. This way we still + * keep access to the gpios and can use them when needed.
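+ * In particular, temp_isr() above drives the green and red descriptors directly while a critical temperature shutdown is in progress.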
+ */ static int pika_setup_leds(void) { struct device_node *np, *child; + struct gpio_desc *gpio; + struct gpio_led *led; + int led_count = 0; + int error; + int i; - np = of_find_compatible_node(NULL, NULL, "gpio-leds"); + np = of_find_compatible_node(NULL, NULL, "warp-power-leds"); if (!np) { printk(KERN_ERR __FILE__ ": Unable to find leds\n"); return -ENOENT; } - for_each_child_of_node(np, child) - if (of_node_name_eq(child, "green")) - green_led = of_get_gpio(child, 0); - else if (of_node_name_eq(child, "red")) - red_led = of_get_gpio(child, 0); + for_each_child_of_node(np, child) { + for (i = 0; i < ARRAY_SIZE(warp_gpio_led_pins); i++) { + led = &warp_gpio_led_pins[i]; + + if (!of_node_name_eq(child, led->name)) + continue; + + if (led->gpiod) { + printk(KERN_ERR __FILE__ ": %s led has already been defined\n", + led->name); + continue; + } + + gpio = fwnode_gpiod_get_index(of_fwnode_handle(child), + NULL, 0, GPIOD_ASIS, + led->name); + error = PTR_ERR_OR_ZERO(gpio); + if (error) { + printk(KERN_ERR __FILE__ ": Failed to get %s led gpio: %d\n", + led->name, error); + of_node_put(child); + goto err_cleanup_pins; + } + + led->gpiod = gpio; + led_count++; + } + } of_node_put(np); + /* Skip device registration if no leds have been defined */ + if (led_count) { + error = platform_device_register(&warp_gpio_leds); + if (error) { + printk(KERN_ERR __FILE__ ": Unable to add leds-gpio: %d\n", + error); + goto err_cleanup_pins; + } + } + return 0; + +err_cleanup_pins: + for (i = 0; i < ARRAY_SIZE(warp_gpio_led_pins); i++) { + led = &warp_gpio_led_pins[i]; + gpiod_put(led->gpiod); + led->gpiod = NULL; + } + return error; } static void pika_setup_critical_temp(struct device_node *np, diff --git a/arch/powerpc/platforms/4xx/hsta_msi.c b/arch/powerpc/platforms/4xx/hsta_msi.c index d4f7fff1fc8718b27be467880202c206a3037adb..e11b57a62b0543bd503ae7af0d24dcc2deeba713 100644 --- a/arch/powerpc/platforms/4xx/hsta_msi.c +++ b/arch/powerpc/platforms/4xx/hsta_msi.c @@ -115,6 +115,7 @@ static void hsta_teardown_msi_irqs(struct pci_dev *dev) msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1); pr_debug("%s: Teardown IRQ %u (index %u)\n", __func__, entry->irq, irq); + entry->irq = 0; } } diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S index afee8b1515a8e69f0d45eddd701e11cc04b008de..0b12647e7b420ceedd70676ce250a9367c82c342 100644 --- a/arch/powerpc/platforms/52xx/lite5200_sleep.S +++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S @@ -1,4 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/linkage.h> + #include #include #include @@ -178,7 +180,8 @@ sram_code: /* local udelay in sram is needed */ - udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */ +SYM_FUNC_START_LOCAL(udelay) + /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */ mullw r12, r12, r11 mftb r13 /* start */ add r12, r13, r12 /* end */ @@ -187,6 +190,7 @@ sram_code: cmp cr0, r13, r12 blt 1b blr +SYM_FUNC_END(udelay) sram_code_end: @@ -271,7 +275,7 @@ _ASM_NOKPROBE_SYMBOL(lite5200_wakeup) SAVE_SR(n+2, addr+2); \ SAVE_SR(n+3, addr+3); -save_regs: +SYM_FUNC_START_LOCAL(save_regs) stw r0, 0(r4) stw r1, 0x4(r4) stw r2, 0x8(r4) @@ -317,6 +321,7 @@ save_regs: SAVE_SPRN(TBRU, 0x5b) blr +SYM_FUNC_END(save_regs) /* restore registers */ @@ -336,7 +341,7 @@ save_regs: LOAD_SR(n+2, addr+2); \ LOAD_SR(n+3, addr+3); -restore_regs: +SYM_FUNC_START_LOCAL(restore_regs) lis r4, registers@h ori r4, r4, registers@l @@ -393,6 +398,7 @@ restore_regs: blr _ASM_NOKPROBE_SYMBOL(restore_regs)
+SYM_FUNC_END(restore_regs) @@ -403,7 +409,7 @@ _ASM_NOKPROBE_SYMBOL(restore_regs) * Flush data cache * Do this by just reading lots of stuff into the cache. */ -flush_data_cache: +SYM_FUNC_START_LOCAL(flush_data_cache) lis r3,CONFIG_KERNEL_START@h ori r3,r3,CONFIG_KERNEL_START@l li r4,NUM_CACHE_LINES @@ -413,3 +419,4 @@ flush_data_cache: addi r3,r3,L1_CACHE_BYTES /* Next line, please */ bdnz 1b blr +SYM_FUNC_END(flush_data_cache) diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index 48038aaedbd3672e42c92193cb3f794231785264..6d1dd6e874786538e6c2d8b847f35037040deb52 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c @@ -59,6 +59,8 @@ static struct mpc52xx_lpbfifo lpbfifo; /** * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred + * + * @req: Pointer to request structure */ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) { @@ -178,6 +180,8 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) /** * mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO + * @irq: IRQ number to be handled + * @dev_id: device ID cookie * * On transmit, the dma completion irq triggers before the fifo completion * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm @@ -216,6 +220,8 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) * or nested spinlock condition. The out path is non-trivial, so * extra fiddling is done to make sure all paths lead to the same * outbound code. + * + * Return: irqreturn code (%IRQ_HANDLED) */ static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id) { @@ -320,8 +326,12 @@ static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id) /** * mpc52xx_lpbfifo_bcom_irq - IRQ handler for LPB FIFO Bestcomm task + * @irq: IRQ number to be handled + * @dev_id: device ID cookie * * Only used when receiving data. + * + * Return: irqreturn code (%IRQ_HANDLED) */ static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id) { @@ -372,7 +382,7 @@ static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id) } /** - * mpc52xx_lpbfifo_bcom_poll - Poll for DMA completion + * mpc52xx_lpbfifo_poll - Poll for DMA completion */ void mpc52xx_lpbfifo_poll(void) { @@ -393,6 +403,8 @@ EXPORT_SYMBOL(mpc52xx_lpbfifo_poll); /** * mpc52xx_lpbfifo_submit - Submit an LPB FIFO transfer request. 
* @req: Pointer to request structure + * + * Return: %0 on success, -errno code on error */ int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req) { @@ -531,6 +543,7 @@ static int mpc52xx_lpbfifo_probe(struct platform_device *op) err_bcom_rx_irq: bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); err_bcom_rx: + free_irq(lpbfifo.irq, &lpbfifo); err_irq: iounmap(lpbfifo.regs); lpbfifo.regs = NULL; diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c index e12cb44e717f1f084226235b3386c7e79e96bc09..caa96edf0e72acce568ec38982f369150b84d0e5 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c @@ -107,7 +107,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, goto next; unreg: - platform_device_del(pdev); + platform_device_put(pdev); err: pr_err("%pOF: registration failed\n", np); next: diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c index e14d1b74d4e4c5b2864447e2f32df255b5cc47b7..751395cbf022270f27f77c24e2bec403878ac379 100644 --- a/arch/powerpc/platforms/85xx/sgy_cts1000.c +++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c @@ -7,10 +7,13 @@ * Copyright 2012 by Servergy, Inc. */ +#define pr_fmt(fmt) "gpio-halt: " fmt + +#include <linux/err.h> #include #include +#include <linux/gpio/consumer.h> #include -#include <linux/of_gpio.h> #include #include #include @@ -18,7 +21,8 @@ #include -static struct device_node *halt_node; +static struct gpio_desc *halt_gpio; +static int halt_irq; static const struct of_device_id child_match[] = { { @@ -36,23 +40,10 @@ static DECLARE_WORK(gpio_halt_wq, gpio_halt_wfn); static void __noreturn gpio_halt_cb(void) { - enum of_gpio_flags flags; - int trigger, gpio; - - if (!halt_node) - panic("No reset GPIO information was provided in DT\n"); - - gpio = of_get_gpio_flags(halt_node, 0, &flags); - - if (!gpio_is_valid(gpio)) - panic("Provided GPIO is invalid\n"); - - trigger = (flags == OF_GPIO_ACTIVE_LOW); - - printk(KERN_INFO "gpio-halt: triggering GPIO.\n"); + pr_info("triggering GPIO.\n"); /* Probably wont return */ - gpio_set_value(gpio, trigger); + gpiod_set_value(halt_gpio, 1); panic("Halt failed\n"); } @@ -61,95 +52,78 @@ static void __noreturn gpio_halt_cb(void) * to handle the shutdown/poweroff. */ static irqreturn_t gpio_halt_irq(int irq, void *__data) { - printk(KERN_INFO "gpio-halt: shutdown due to power button IRQ.\n"); + struct platform_device *pdev = __data; + + dev_info(&pdev->dev, "scheduling shutdown due to power button IRQ\n"); schedule_work(&gpio_halt_wq); return IRQ_HANDLED; }; -static int gpio_halt_probe(struct platform_device *pdev) +static int __gpio_halt_probe(struct platform_device *pdev, + struct device_node *halt_node) { - enum of_gpio_flags flags; - struct device_node *node = pdev->dev.of_node; - struct device_node *child_node; - int gpio, err, irq; - int trigger; - - if (!node) - return -ENODEV; - - /* If there's no matching child, this isn't really an error */ - child_node = of_find_matching_node(node, child_match); - if (!child_node) - return 0; - - /* Technically we could just read the first one, but punish - * DT writers for invalid form. */ - if (of_gpio_count(child_node) != 1) { - err = -EINVAL; - goto err_put; - } - - /* Get the gpio number relative to the dynamic base.
*/ - gpio = of_get_gpio_flags(child_node, 0, &flags); - if (!gpio_is_valid(gpio)) { - err = -EINVAL; - goto err_put; - } + int err; - err = gpio_request(gpio, "gpio-halt"); + halt_gpio = fwnode_gpiod_get_index(of_fwnode_handle(halt_node), + NULL, 0, GPIOD_OUT_LOW, "gpio-halt"); + err = PTR_ERR_OR_ZERO(halt_gpio); if (err) { - printk(KERN_ERR "gpio-halt: error requesting GPIO %d.\n", - gpio); - goto err_put; + dev_err(&pdev->dev, "failed to request halt GPIO: %d\n", err); + return err; } - trigger = (flags == OF_GPIO_ACTIVE_LOW); - - gpio_direction_output(gpio, !trigger); - /* Now get the IRQ which tells us when the power button is hit */ - irq = irq_of_parse_and_map(child_node, 0); - err = request_irq(irq, gpio_halt_irq, IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING, "gpio-halt", child_node); + halt_irq = irq_of_parse_and_map(halt_node, 0); + err = request_irq(halt_irq, gpio_halt_irq, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + "gpio-halt", pdev); if (err) { - printk(KERN_ERR "gpio-halt: error requesting IRQ %d for " - "GPIO %d.\n", irq, gpio); - gpio_free(gpio); - goto err_put; + dev_err(&pdev->dev, "failed to request IRQ %d: %d\n", + halt_irq, err); + gpiod_put(halt_gpio); + halt_gpio = NULL; + return err; } /* Register our halt function */ ppc_md.halt = gpio_halt_cb; pm_power_off = gpio_halt_cb; - printk(KERN_INFO "gpio-halt: registered GPIO %d (%d trigger, %d" - " irq).\n", gpio, trigger, irq); + dev_info(&pdev->dev, "registered halt GPIO, irq: %d\n", halt_irq); - halt_node = child_node; return 0; - -err_put: - of_node_put(child_node); - return err; } -static int gpio_halt_remove(struct platform_device *pdev) +static int gpio_halt_probe(struct platform_device *pdev) { - if (halt_node) { - int gpio = of_get_gpio(halt_node, 0); - int irq = irq_of_parse_and_map(halt_node, 0); + struct device_node *halt_node; + int ret; + + if (!pdev->dev.of_node) + return -ENODEV; + + /* If there's no matching child, this isn't really an error */ + halt_node = of_find_matching_node(pdev->dev.of_node, child_match); + if (!halt_node) + return -ENODEV; + + ret = __gpio_halt_probe(pdev, halt_node); + of_node_put(halt_node); - free_irq(irq, halt_node); + return ret; +} - ppc_md.halt = NULL; - pm_power_off = NULL; +static int gpio_halt_remove(struct platform_device *pdev) +{ + free_irq(halt_irq, pdev); + cancel_work_sync(&gpio_halt_wq); - gpio_free(gpio); + ppc_md.halt = NULL; + pm_power_off = NULL; - of_node_put(halt_node); - halt_node = NULL; - } + gpiod_put(halt_gpio); + halt_gpio = NULL; return 0; } diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 0c4eed9aea80623e673e93f370b38f8572fd82cd..9563336e3348f386d9a8cb554e22618829a17193 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -135,6 +135,7 @@ config GENERIC_CPU depends on PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN select ARCH_HAS_FAST_MULTIPLIER select PPC_64S_HASH_MMU + select PPC_HAS_LBARX_LHARX config POWERPC_CPU bool "Generic 32 bits powerpc" @@ -160,17 +161,20 @@ config POWER7_CPU depends on PPC_BOOK3S_64 select ARCH_HAS_FAST_MULTIPLIER select PPC_64S_HASH_MMU + select PPC_HAS_LBARX_LHARX config POWER8_CPU bool "POWER8" depends on PPC_BOOK3S_64 select ARCH_HAS_FAST_MULTIPLIER select PPC_64S_HASH_MMU + select PPC_HAS_LBARX_LHARX config POWER9_CPU bool "POWER9" depends on PPC_BOOK3S_64 select ARCH_HAS_FAST_MULTIPLIER + select PPC_HAS_LBARX_LHARX config POWER10_CPU bool "POWER10" @@ -184,6 +188,7 @@ config E5500_CPU config E6500_CPU bool "Freescale e6500" 
depends on PPC64 && PPC_E500 + select PPC_HAS_LBARX_LHARX config 405_CPU bool "40x family" @@ -575,10 +580,10 @@ config CPU_LITTLE_ENDIAN endchoice config PPC64_ELF_ABI_V1 - def_bool PPC64 && CPU_BIG_ENDIAN + def_bool PPC64 && (CPU_BIG_ENDIAN && !PPC64_BIG_ENDIAN_ELF_ABI_V2) config PPC64_ELF_ABI_V2 - def_bool PPC64 && CPU_LITTLE_ENDIAN + def_bool PPC64 && !PPC64_ELF_ABI_V1 config PPC64_BOOT_WRAPPER def_bool n diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c index 40f5ae5e1238d633ba71a020ba6a5b46ff296468..eb5bed33375035b9139f694df5c4edb9c6124110 100644 --- a/arch/powerpc/platforms/book3s/vas-api.c +++ b/arch/powerpc/platforms/book3s/vas-api.c @@ -53,7 +53,7 @@ struct coproc_instance { struct vas_window *txwin; }; -static char *coproc_devnode(struct device *dev, umode_t *mode) +static char *coproc_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev)); } diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 5b012abca773d03fc6d1c728f3aa5112c4550107..0c11aad896c7932e9ed80522cd8e8bc63c0700c3 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -289,6 +289,7 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) { irq_set_msi_desc(entry->irq, NULL); irq_dispose_mapping(entry->irq); + entry->irq = 0; } } diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index bf300167ad6bfca922d20f037595bb4ec3c2406d..913b77b92cea1e7ac2ebd3b8092d6c3b7a211f21 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c @@ -294,7 +294,7 @@ static struct platform_driver gpio_mdio_driver = }, }; -static int gpio_mdio_init(void) +static int __init gpio_mdio_init(void) { struct device_node *np; @@ -314,7 +314,7 @@ static int gpio_mdio_init(void) } module_init(gpio_mdio_init); -static void gpio_mdio_exit(void) +static void __exit gpio_mdio_exit(void) { platform_driver_unregister(&gpio_mdio_driver); if (gpio_regs) diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c index dc1846660005c0c3b2e403a0f7c730847e803048..166c97fff16d2f0baa5d06439d37e250431696af 100644 --- a/arch/powerpc/platforms/pasemi/msi.c +++ b/arch/powerpc/platforms/pasemi/msi.c @@ -66,6 +66,7 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev) hwirq = virq_to_hw(entry->irq); irq_set_msi_desc(entry->irq, NULL); irq_dispose_mapping(entry->irq); + entry->irq = 0; msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK); } } diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 04daa7f0a03c651228f4ac2a115e307dc8b4590b..4f7ee885a78ff4638a904ca18688139778d7588b 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -70,9 +70,7 @@ #undef SHOW_GATWICK_IRQS -int ppc_override_l2cr = 0; -int ppc_override_l2cr_value; -int has_l2cache = 0; +static int has_l2cache; int pmac_newworld; @@ -236,22 +234,16 @@ static void __init l2cr_init(void) const unsigned int *l2cr = of_get_property(np, "l2cr-value", NULL); if (l2cr) { - ppc_override_l2cr = 1; - ppc_override_l2cr_value = *l2cr; _set_L2CR(0); - _set_L2CR(ppc_override_l2cr_value); + _set_L2CR(*l2cr); + pr_info("L2CR overridden (0x%x), backside cache is %s\n", + *l2cr, ((*l2cr) & 0x80000000) ? 
+ "enabled" : "disabled"); } of_node_put(np); break; } } - - if (ppc_override_l2cr) - printk(KERN_INFO "L2CR overridden (0x%x), " - "backside cache is %s\n", - ppc_override_l2cr_value, - (ppc_override_l2cr_value & 0x80000000) - ? "enabled" : "disabled"); } #endif diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index 2502e9b17df4abe7b4eb9472e4b2d1e3d9187e78..38a7e02295c8f2663c1e23a2fd2c5c6bae5af3ef 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -466,7 +466,7 @@ static struct attribute *ps3_system_bus_dev_attrs[] = { }; ATTRIBUTE_GROUPS(ps3_system_bus_dev); -struct bus_type ps3_system_bus_type = { +static struct bus_type ps3_system_bus_type = { .name = "ps3_system_bus", .match = ps3_system_bus_match, .uevent = ps3_system_bus_uevent, diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 8e40ccac0f44eea03043d2668b6144ba4deda5d1..6b507b62ce8f1d3534a500b514970a23e6ffe24b 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -154,7 +154,7 @@ static int pseries_eeh_get_pe_config_addr(struct pci_dn *pdn) /** * pseries_eeh_phb_reset - Reset the specified PHB * @phb: PCI controller - * @config_adddr: the associated config address + * @config_addr: the associated config address * @option: reset option * * Reset the specified PHB/PE @@ -188,7 +188,7 @@ static int pseries_eeh_phb_reset(struct pci_controller *phb, int config_addr, in /** * pseries_eeh_phb_configure_bridge - Configure PCI bridges in the indicated PE * @phb: PCI controller - * @config_adddr: the associated config address + * @config_addr: the associated config address * * The function will be called to reconfigure the bridges included * in the specified PE so that the mulfunctional PE would be recovered @@ -848,16 +848,7 @@ static int __init eeh_pseries_init(void) } /* Initialize error log size */ - eeh_error_buf_size = rtas_token("rtas-error-log-max"); - if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) { - pr_info("%s: unknown EEH error log size\n", - __func__); - eeh_error_buf_size = 1024; - } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) { - pr_info("%s: EEH error log size %d exceeds the maximal %d\n", - __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX); - eeh_error_buf_size = RTAS_ERROR_LOG_MAX; - } + eeh_error_buf_size = rtas_get_error_log_max(); /* Set EEH probe mode */ eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG); diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index e0a7ac5db15d9fa22684b61ce6737c0d31efd80a..090ae5a1e0f5eefac69317a3f33bec2c9261addf 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -70,6 +70,7 @@ static void pseries_cpu_offline_self(void) xics_teardown_cpu(); unregister_slb_shadow(hwcpu); + unregister_vpa(hwcpu); rtas_stop_self(); /* Should never get here... */ diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index 762eb15d3bd42645d3a13287986eb7fef3c558a4..783c16ad648b86eec1aa4e8aa46e1b371a5a9795 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -27,7 +27,9 @@ hcall_tracepoint_refcount: /* * precall must preserve all registers. use unused STK_PARAM() - * areas to save snapshots and opcode. + * areas to save snapshots and opcode. 
STK_PARAM() in the caller's + * frame will be available even on ELFv2 because these are all + * variadic functions. */ #define HCALL_INST_PRECALL(FIRST_REG) \ mflr r0; \ @@ -41,29 +43,29 @@ hcall_tracepoint_refcount: std r10,STK_PARAM(R10)(r1); \ std r0,16(r1); \ addi r4,r1,STK_PARAM(FIRST_REG); \ - stdu r1,-STACK_FRAME_OVERHEAD(r1); \ + stdu r1,-STACK_FRAME_MIN_SIZE(r1); \ bl __trace_hcall_entry; \ - ld r3,STACK_FRAME_OVERHEAD+STK_PARAM(R3)(r1); \ - ld r4,STACK_FRAME_OVERHEAD+STK_PARAM(R4)(r1); \ - ld r5,STACK_FRAME_OVERHEAD+STK_PARAM(R5)(r1); \ - ld r6,STACK_FRAME_OVERHEAD+STK_PARAM(R6)(r1); \ - ld r7,STACK_FRAME_OVERHEAD+STK_PARAM(R7)(r1); \ - ld r8,STACK_FRAME_OVERHEAD+STK_PARAM(R8)(r1); \ - ld r9,STACK_FRAME_OVERHEAD+STK_PARAM(R9)(r1); \ - ld r10,STACK_FRAME_OVERHEAD+STK_PARAM(R10)(r1) + ld r3,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1); \ + ld r4,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1); \ + ld r5,STACK_FRAME_MIN_SIZE+STK_PARAM(R5)(r1); \ + ld r6,STACK_FRAME_MIN_SIZE+STK_PARAM(R6)(r1); \ + ld r7,STACK_FRAME_MIN_SIZE+STK_PARAM(R7)(r1); \ + ld r8,STACK_FRAME_MIN_SIZE+STK_PARAM(R8)(r1); \ + ld r9,STACK_FRAME_MIN_SIZE+STK_PARAM(R9)(r1); \ + ld r10,STACK_FRAME_MIN_SIZE+STK_PARAM(R10)(r1) /* * postcall is performed immediately before function return which * allows liberal use of volatile registers. */ #define __HCALL_INST_POSTCALL \ - ld r0,STACK_FRAME_OVERHEAD+STK_PARAM(R3)(r1); \ - std r3,STACK_FRAME_OVERHEAD+STK_PARAM(R3)(r1); \ + ld r0,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1); \ + std r3,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1); \ mr r4,r3; \ mr r3,r0; \ bl __trace_hcall_exit; \ - ld r0,STACK_FRAME_OVERHEAD+16(r1); \ - addi r1,r1,STACK_FRAME_OVERHEAD; \ + ld r0,STACK_FRAME_MIN_SIZE+16(r1); \ + addi r1,r1,STACK_FRAME_MIN_SIZE; \ ld r3,STK_PARAM(R3)(r1); \ mtlr r0 @@ -303,14 +305,14 @@ plpar_hcall9_trace: mr r7,r8 mr r8,r9 mr r9,r10 - ld r10,STACK_FRAME_OVERHEAD+STK_PARAM(R11)(r1) - ld r11,STACK_FRAME_OVERHEAD+STK_PARAM(R12)(r1) - ld r12,STACK_FRAME_OVERHEAD+STK_PARAM(R13)(r1) + ld r10,STACK_FRAME_MIN_SIZE+STK_PARAM(R11)(r1) + ld r11,STACK_FRAME_MIN_SIZE+STK_PARAM(R12)(r1) + ld r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R13)(r1) HVSC mr r0,r12 - ld r12,STACK_FRAME_OVERHEAD+STK_PARAM(R4)(r1) + ld r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1) std r4,0(r12) std r5,8(r12) std r6,16(r12) diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 561adac690229129cbca2eecd765edd743b3e9cb..c74b71d4733d409d3a916b75171cb66319c2ee6a 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -248,7 +248,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, * Set up the page with TCE data, looping through and setting * the values. */ - limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE); + limit = min_t(long, npages, 4096 / TCE_ENTRY_SIZE); for (l = 0; l < limit; l++) { tcep[l] = cpu_to_be64(proto_tce | rpn << tceshift); diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 634fac5db3f98da9400e235a107e4cf5318570d7..4cea71aa0f41d5b3527944196fb68627a8f69d15 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -635,10 +635,13 @@ retry: prod_others(); } /* - * Execution may have been suspended for several seconds, so - * reset the watchdog. + * Execution may have been suspended for several seconds, so reset + * the watchdogs. touch_nmi_watchdog() also touches the soft lockup + * watchdog. 
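+ * rcu_cpu_stall_reset() below likewise restarts RCU's CPU-stall timeout so the suspended interval is not reported as a stall.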
*/ + rcu_cpu_stall_reset(); touch_nmi_watchdog(); + return ret; } diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c index f4b5b5a64db3d3a0e9ae65282f39ec7c60e160e6..4edd1585e24573b384ee8948f579646d4376aecf 100644 --- a/arch/powerpc/platforms/pseries/plpks.c +++ b/arch/powerpc/platforms/pseries/plpks.c @@ -75,7 +75,7 @@ static int pseries_status_to_err(int rc) case H_FUNCTION: err = -ENXIO; break; - case H_P1: + case H_PARAMETER: case H_P2: case H_P3: case H_P4: @@ -111,7 +111,7 @@ static int pseries_status_to_err(int rc) err = -EEXIST; break; case H_ABORTED: - err = -EINTR; + err = -EIO; break; default: err = -EINVAL; @@ -162,19 +162,15 @@ static struct plpks_auth *construct_auth(u8 consumer) if (consumer > PKS_OS_OWNER) return ERR_PTR(-EINVAL); - auth = kmalloc(struct_size(auth, password, maxpwsize), GFP_KERNEL); + auth = kzalloc(struct_size(auth, password, maxpwsize), GFP_KERNEL); if (!auth) return ERR_PTR(-ENOMEM); auth->version = 1; auth->consumer = consumer; - auth->rsvd0 = 0; - auth->rsvd1 = 0; - if (consumer == PKS_FW_OWNER || consumer == PKS_BOOTLOADER_OWNER) { - auth->passwordlength = 0; + if (consumer == PKS_FW_OWNER || consumer == PKS_BOOTLOADER_OWNER) return auth; - } memcpy(auth->password, ospassword, ospasswordlength); @@ -312,10 +308,6 @@ int plpks_write_var(struct plpks_var var) if (!rc) rc = plpks_confirm_object_flushed(label, auth); - if (rc) - pr_err("Failed to write variable %s for component %s with error %d\n", - var.name, var.component, rc); - rc = pseries_status_to_err(rc); kfree(label); out: @@ -350,10 +342,6 @@ int plpks_remove_var(char *component, u8 varos, struct plpks_var_name vname) if (!rc) rc = plpks_confirm_object_flushed(label, auth); - if (rc) - pr_err("Failed to remove variable %s for component %s with error %d\n", - vname.name, component, rc); - rc = pseries_status_to_err(rc); kfree(label); out: @@ -366,22 +354,24 @@ static int plpks_read_var(u8 consumer, struct plpks_var *var) { unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 }; struct plpks_auth *auth; - struct label *label; + struct label *label = NULL; u8 *output; int rc; if (var->namelen > MAX_NAME_SIZE) return -EINVAL; - auth = construct_auth(PKS_OS_OWNER); + auth = construct_auth(consumer); if (IS_ERR(auth)) return PTR_ERR(auth); - label = construct_label(var->component, var->os, var->name, - var->namelen); - if (IS_ERR(label)) { - rc = PTR_ERR(label); - goto out_free_auth; + if (consumer == PKS_OS_OWNER) { + label = construct_label(var->component, var->os, var->name, + var->namelen); + if (IS_ERR(label)) { + rc = PTR_ERR(label); + goto out_free_auth; + } } output = kzalloc(maxobjsize, GFP_KERNEL); @@ -390,13 +380,17 @@ static int plpks_read_var(u8 consumer, struct plpks_var *var) goto out_free_label; } - rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth), - virt_to_phys(label), label->size, virt_to_phys(output), - maxobjsize); + if (consumer == PKS_OS_OWNER) + rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth), + virt_to_phys(label), label->size, virt_to_phys(output), + maxobjsize); + else + rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth), + virt_to_phys(var->name), var->namelen, virt_to_phys(output), + maxobjsize); + if (rc != H_SUCCESS) { - pr_err("Failed to read variable %s for component %s with error %d\n", - var->name, var->component, rc); rc = pseries_status_to_err(rc); goto out_free_output; } diff --git a/arch/powerpc/platforms/pseries/plpks.h b/arch/powerpc/platforms/pseries/plpks.h index 
c6a291367bb139c2029f77030501fdc54059842b..275ccd86bfb5e46ffff11c06e2adb4b992bc0657 100644 --- a/arch/powerpc/platforms/pseries/plpks.h +++ b/arch/powerpc/platforms/pseries/plpks.h @@ -17,7 +17,7 @@ #define WORLDREADABLE 0x08000000 #define SIGNEDUPDATE 0x01000000 -#define PLPKS_VAR_LINUX 0x01 +#define PLPKS_VAR_LINUX 0x02 #define PLPKS_VAR_COMMON 0x04 struct plpks_var { diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 73c2d70706c0add48ea9553fec807c6953cb5a7d..57978a44d55b691ac2c5ed9bff726fe3ff409974 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -132,6 +132,7 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev) msi_data = irq_get_chip_data(entry->irq); irq_set_msi_desc(entry->irq, NULL); irq_dispose_mapping(entry->irq); + entry->irq = 0; msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1); } } diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 974d3db6faab237dc7d56ce5a89eb0420e8e8c31..b7232c46b24481a8f6b35ef266fb2e074a21abdb 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -1138,6 +1138,19 @@ void __init fsl_pci_assign_primary(void) return; } + /* + * If there's no PCI host bridge with ISA then check for + * PCI host bridge with alias "pci0" (first PCI host bridge). + */ + np = of_find_node_by_path("pci0"); + if (np && of_match_node(pci_ids, np) && of_device_is_available(np)) { + fsl_pci_primary = np; + of_node_put(np); + return; + } + if (np) + of_node_put(np); + /* * If there's no PCI host bridge with ISA, arbitrarily * designate one as primary. This can go away once diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index a439e33eae0618e2d227e3f7338362781f33c396..d75064fb7d12fedbf991e4f8585dea8eb19790d9 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -20,7 +20,7 @@ #define MPIC_MSGR_REGISTERS_PER_BLOCK 4 #define MPIC_MSGR_STRIDE 0x10 -#define MPIC_MSGR_MER_OFFSET 0x100 +#define MPIC_MSGR_MER_OFFSET (0x100 / sizeof(u32)) #define MSGR_INUSE 0 #define MSGR_FREE 1 @@ -234,7 +234,7 @@ static int mpic_msgr_probe(struct platform_device *dev) reg_number = block_number * MPIC_MSGR_REGISTERS_PER_BLOCK + i; msgr->base = msgr_block_addr + i * MPIC_MSGR_STRIDE; - msgr->mer = (u32 *)((u8 *)msgr->base + MPIC_MSGR_MER_OFFSET); + msgr->mer = msgr->base + MPIC_MSGR_MER_OFFSET; msgr->in_use = MSGR_FREE; msgr->num = i; raw_spin_lock_init(&msgr->lock); diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c index 1d8cfdfdf115ec284f3b615d48dff21d5fdef414..492cb03c0b623581f9165fcef8d1e99d38fd5c89 100644 --- a/arch/powerpc/sysdev/mpic_u3msi.c +++ b/arch/powerpc/sysdev/mpic_u3msi.c @@ -108,6 +108,7 @@ static void u3msi_teardown_msi_irqs(struct pci_dev *pdev) hwirq = virq_to_hw(entry->irq); irq_set_msi_desc(entry->irq, NULL); irq_dispose_mapping(entry->irq); + entry->irq = 0; msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1); } } diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 3925825954bcceba55ba2a8dffcbbdc5df5dcf68..19d880ebc5e61b236184bfa90d98d59845b460cd 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -535,13 +535,13 @@ static bool __init xive_parse_provisioning(struct device_node *np) static void __init xive_native_setup_pools(void) { /* Allocate a pool big enough */ - pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids); + pr_debug("Allocating VP block for pool size %u\n", 
nr_cpu_ids); xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids); if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP)) - pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n"); + pr_err("Failed to allocate pool VP, KVM might not function\n"); - pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n", + pr_debug("Pool VPs allocated at 0x%x for %u max CPUs\n", xive_pool_vps, nr_cpu_ids); } diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index e2c8f93b535ba1d532e66c9110c89a4546865a66..e454192643910075d3e501382453aba8ee0fae08 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -439,6 +439,7 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift); if (!data->trig_mmio) { + iounmap(data->eoi_mmio); pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq); return -ENOMEM; } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index f51c882bf9023af3e8007c79fd4bbfee16b49a3e..0da66bc4823d4d5fc501427c1f73fd21353ccf9c 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1525,9 +1525,9 @@ bpt_cmds(void) cmd = inchar(); switch (cmd) { - static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; - int mode; - case 'd': /* bd - hardware data breakpoint */ + case 'd': { /* bd - hardware data breakpoint */ + static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; + int mode; if (xmon_is_ro) { printf(xmon_ro_msg); break; @@ -1560,6 +1560,7 @@ bpt_cmds(void) force_enable_xmon(); break; + } case 'i': /* bi - hardware instr breakpoint */ if (xmon_is_ro) { @@ -1720,7 +1721,6 @@ static void get_function_bounds(unsigned long pc, unsigned long *startp, } #define LRSAVE_OFFSET (STACK_FRAME_LR_SAVE * sizeof(unsigned long)) -#define MARKER_OFFSET (STACK_FRAME_MARKER * sizeof(unsigned long)) static void xmon_show_stack(unsigned long sp, unsigned long lr, unsigned long pc) @@ -1781,14 +1781,13 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr, xmon_print_symbol(ip, " ", "\n"); } - /* Look for "regshere" marker to see if this is + /* Look for "regs" marker to see if this is an exception frame. 
*/ - if (mread(sp + MARKER_OFFSET, &marker, sizeof(unsigned long)) + if (mread(sp + STACK_INT_FRAME_MARKER, &marker, sizeof(unsigned long)) && marker == STACK_FRAME_REGS_MARKER) { - if (mread(sp + STACK_FRAME_OVERHEAD, &regs, sizeof(regs)) - != sizeof(regs)) { + if (mread(sp + STACK_INT_FRAME_REGS, &regs, sizeof(regs)) != sizeof(regs)) { printf("Couldn't read registers at %lx\n", - sp + STACK_FRAME_OVERHEAD); + sp + STACK_INT_FRAME_REGS); break; } printf("--- Exception: %lx %s at ", regs.trap, diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 593cf09264d80a6afbe3704b176d44ff0f024929..e2b656043abf31128571abb8329327cc1044e96f 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -25,6 +25,7 @@ config RISCV select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_KCOV select ARCH_HAS_MMIOWB + select ARCH_HAS_PMEM_API select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_DIRECT_MAP if MMU select ARCH_HAS_SET_MEMORY if MMU @@ -72,6 +73,8 @@ config RISCV select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO select HARDIRQS_SW_RESEND select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP + select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL select HAVE_ARCH_KASAN if MMU && 64BIT @@ -99,6 +102,7 @@ config RISCV select HAVE_KPROBES if !XIP_KERNEL select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL select HAVE_KRETPROBES if !XIP_KERNEL + select HAVE_RETHOOK if !XIP_KERNEL select HAVE_MOVE_PMD select HAVE_MOVE_PUD select HAVE_PCI @@ -123,12 +127,18 @@ config RISCV select PCI_MSI if PCI select RISCV_INTC select RISCV_TIMER if RISCV_SBI + select SIFIVE_PLIC select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK select TRACE_IRQFLAGS_SUPPORT select UACCESS_MEMCPY if !MMU select ZONE_DMA32 if 64BIT + select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8) + select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE + select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER if !XIP_KERNEL config ARCH_MMAP_RND_BITS_MIN default 18 if 64BIT @@ -274,11 +284,6 @@ config ARCH_RV64I bool "RV64I" select 64BIT select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 - select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8) - select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE - select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL - select HAVE_FUNCTION_GRAPH_TRACER - select HAVE_FUNCTION_TRACER if !XIP_KERNEL select SWIOTLB if MMU endchoice @@ -502,7 +507,7 @@ config KEXEC_FILE select KEXEC_CORE select KEXEC_ELF select HAVE_IMA_KEXEC if IMA - depends on 64BIT + depends on 64BIT && MMU help This is a new version of the kexec system call. This system call is file based and takes file descriptors as system call arguments @@ -691,6 +696,8 @@ menu "CPU Power Management" source "drivers/cpuidle/Kconfig" +source "drivers/cpufreq/Kconfig" + endmenu # "CPU Power Management" source "arch/riscv/kvm/Kconfig" diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas index f3623df23b5fcd902339552e303c257a4dd6efa4..69621ae6d647aa369cee22c5751d3a55ec93e0b6 100644 --- a/arch/riscv/Kconfig.erratas +++ b/arch/riscv/Kconfig.erratas @@ -66,4 +66,17 @@ config ERRATA_THEAD_CMO If you don't know what to do here, say "Y".
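The ERRATA_THEAD_PMU option introduced below is consumed through the RISC-V alternatives machinery: errata_probe_pmu() in the errata.c hunk further on sets the errata bit, and ALTERNATIVE() patches the counter-overflow CSR read at boot. A condensed sketch of that consumer follows; it mirrors the ALT_SBI_PMU_OVERFLOW macro this series adds to errata_list.h, so no new API is assumed:

/* Read the overflow CSR; when the ERRATA_THEAD_PMU alternative is
 * active the standard Sscofpmf CSR read is swapped for the T-Head
 * C9xx vendor CSR.
 */
#define ALT_SBI_PMU_OVERFLOW(__ovl)					\
asm volatile(ALTERNATIVE(						\
	"csrr %0, " __stringify(CSR_SSCOUNTOVF),			\
	"csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF),		\
	THEAD_VENDOR_ID, ERRATA_THEAD_PMU,				\
	CONFIG_ERRATA_THEAD_PMU)					\
	: "=r" (__ovl) : : "memory")

Because the patching happens once during boot, parts without the erratum keep the original instruction and pay no per-read cost.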
+config ERRATA_THEAD_PMU + bool "Apply T-Head PMU errata" + depends on ERRATA_THEAD && RISCV_PMU_SBI + default y + help + The T-Head C9xx cores implement a PMU overflow extension very + similar to the core SSCOFPMF extension. + + This will apply the overflow errata to handle the non-standard + behaviour via the regular SBI PMU driver and interface. + + If you don't know what to do here, say "Y". + endmenu # "CPU errata selection" diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs index 75fb0390d6bd1fbcf4f2c1e77a81adae469eaa18..4b6deb2715f1c43f7b3b0e52329c4fa82b4ab5ae 100644 --- a/arch/riscv/Kconfig.socs +++ b/arch/riscv/Kconfig.socs @@ -3,7 +3,6 @@ menu "SoC selection" config SOC_MICROCHIP_POLARFIRE bool "Microchip PolarFire SoCs" select MCHP_CLK_MPFS - select SIFIVE_PLIC help This enables support for Microchip PolarFire SoC platforms. @@ -18,7 +17,6 @@ config SOC_SIFIVE select SERIAL_SIFIVE_CONSOLE if TTY select CLK_SIFIVE select CLK_SIFIVE_PRCI - select SIFIVE_PLIC select ERRATA_SIFIVE if !XIP_KERNEL help This enables support for SiFive SoC platform hardware. @@ -27,7 +25,6 @@ config SOC_STARFIVE bool "StarFive SoCs" select PINCTRL select RESET_CONTROLLER - select SIFIVE_PLIC help This enables support for StarFive SoC platform hardware. @@ -39,7 +36,6 @@ config SOC_VIRT select POWER_RESET_SYSCON_POWEROFF select GOLDFISH select RTC_DRV_GOLDFISH if RTC_CLASS - select SIFIVE_PLIC select PM_GENERIC_DOMAINS if PM select PM_GENERIC_DOMAINS_OF if PM && OF select RISCV_SBI_CPUIDLE if CPU_IDLE && RISCV_SBI @@ -52,7 +48,6 @@ config SOC_CANAAN select CLINT_TIMER if RISCV_M_MODE select SERIAL_SIFIVE if TTY select SERIAL_SIFIVE_CONSOLE if TTY - select SIFIVE_PLIC select ARCH_HAS_RESET_CONTROLLER select PINCTRL select COMMON_CLK diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 0d13b597cb55f5efdd779f8a4ca354251b886383..faf2c2177094bbb835face6568088c447ddd092a 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -37,7 +37,7 @@ else endif ifeq ($(CONFIG_LD_IS_LLD),y) -ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 150000; echo $$?),0) +ifeq ($(call test-lt, $(CONFIG_LLD_VERSION), 150000),y) KBUILD_CFLAGS += -mno-relax KBUILD_AFLAGS += -mno-relax ifndef CONFIG_AS_IS_LLVM diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile index d1a49adcb1d76283233785ec3bed58e2d1278e47..c72de7232abb2bf64a2bf279bb9359b2753c2667 100644 --- a/arch/riscv/boot/Makefile +++ b/arch/riscv/boot/Makefile @@ -56,6 +56,9 @@ $(obj)/Image.lzma: $(obj)/Image FORCE $(obj)/Image.lzo: $(obj)/Image FORCE $(call if_changed,lzo) +$(obj)/Image.zst: $(obj)/Image FORCE + $(call if_changed,zstd) + $(obj)/loader.bin: $(obj)/loader FORCE $(call if_changed,objcopy) diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index f7f32448f160c788911acdb8b9b91a7c9b1133b4..128dcf4c0814bb42fad778f13750073a824c35e8 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -39,6 +39,7 @@ CONFIG_KVM=m CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y +CONFIG_SPARSEMEM_MANUAL=y CONFIG_BLK_DEV_THROTTLING=y CONFIG_NET=y CONFIG_PACKET=y @@ -123,6 +124,7 @@ CONFIG_MICROSEMI_PHY=y CONFIG_INPUT_MOUSEDEV=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DW=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_SH_SCI=y CONFIG_VIRTIO_CONSOLE=y @@ -162,6 +164,7 @@ CONFIG_RPMSG_CHAR=y CONFIG_RPMSG_CTRL=y CONFIG_RPMSG_VIRTIO=y CONFIG_ARCH_R9A07G043=y +CONFIG_LIBNVDIMM=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y diff --git 
a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c index 21546937db39bf5fb968af60a7ee5b096242925b..fac5742d1c1e6f8e34771291b783ed58c7710251 100644 --- a/arch/riscv/errata/thead/errata.c +++ b/arch/riscv/errata/thead/errata.c @@ -47,6 +47,22 @@ static bool errata_probe_cmo(unsigned int stage, return true; } +static bool errata_probe_pmu(unsigned int stage, + unsigned long arch_id, unsigned long impid) +{ + if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PMU)) + return false; + + /* target-c9xx cores report arch_id and impid as 0 */ + if (arch_id != 0 || impid != 0) + return false; + + if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) + return false; + + return true; +} + static u32 thead_errata_probe(unsigned int stage, unsigned long archid, unsigned long impid) { @@ -58,6 +74,9 @@ static u32 thead_errata_probe(unsigned int stage, if (errata_probe_cmo(stage, archid, impid)) cpu_req_errata |= BIT(ERRATA_THEAD_CMO); + if (errata_probe_pmu(stage, archid, impid)) + cpu_req_errata |= BIT(ERRATA_THEAD_PMU); + return cpu_req_errata; } diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h index ec2f3f1b836f35171616030000209c45660d5f07..7226e2462584b65a78f7601c47e744ed7d6a54fc 100644 --- a/arch/riscv/include/asm/alternative-macros.h +++ b/arch/riscv/include/asm/alternative-macros.h @@ -33,7 +33,7 @@ .endif .endm -.macro __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable +.macro ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable 886 : .option push .option norvc @@ -44,30 +44,14 @@ ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c .endm -#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ - __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k) - -.macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \ - new_c_2, vendor_id_2, errata_id_2, enable_2 -886 : - .option push - .option norvc - .option norelax - \old_c - .option pop -887 : - ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1 +.macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \ + new_c_2, vendor_id_2, errata_id_2, enable_2 + ALTERNATIVE_CFG \old_c, \new_c_1, \vendor_id_1, \errata_id_1, \enable_1 ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2 .endm -#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ - CONFIG_k_1, \ - new_c_2, vendor_id_2, errata_id_2, \ - CONFIG_k_2) \ - __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, \ - IS_ENABLED(CONFIG_k_1), \ - new_c_2, vendor_id_2, errata_id_2, \ - IS_ENABLED(CONFIG_k_2) +#define __ALTERNATIVE_CFG(...) ALTERNATIVE_CFG __VA_ARGS__ +#define __ALTERNATIVE_CFG_2(...) 
ALTERNATIVE_CFG_2 __VA_ARGS__ #else /* !__ASSEMBLY__ */ @@ -109,63 +93,44 @@ "887 :\n" \ ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) -#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ - __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)) - -#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ - enable_1, \ - new_c_2, vendor_id_2, errata_id_2, \ - enable_2) \ - "886 :\n" \ - ".option push\n" \ - ".option norvc\n" \ - ".option norelax\n" \ - old_c "\n" \ - ".option pop\n" \ - "887 :\n" \ - ALT_NEW_CONTENT(vendor_id_1, errata_id_1, enable_1, new_c_1) \ +#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \ + new_c_2, vendor_id_2, errata_id_2, enable_2) \ + __ALTERNATIVE_CFG(old_c, new_c_1, vendor_id_1, errata_id_1, enable_1) \ ALT_NEW_CONTENT(vendor_id_2, errata_id_2, enable_2, new_c_2) -#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ - CONFIG_k_1, \ - new_c_2, vendor_id_2, errata_id_2, \ - CONFIG_k_2) \ - __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ - IS_ENABLED(CONFIG_k_1), \ - new_c_2, vendor_id_2, errata_id_2, \ - IS_ENABLED(CONFIG_k_2)) - #endif /* __ASSEMBLY__ */ +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)) + +#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ + new_c_2, vendor_id_2, errata_id_2, CONFIG_k_2) \ + __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, IS_ENABLED(CONFIG_k_1), \ + new_c_2, vendor_id_2, errata_id_2, IS_ENABLED(CONFIG_k_2)) + #else /* CONFIG_RISCV_ALTERNATIVE */ #ifdef __ASSEMBLY__ -.macro __ALTERNATIVE_CFG old_c +.macro ALTERNATIVE_CFG old_c \old_c .endm -#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ - __ALTERNATIVE_CFG old_c +#define _ALTERNATIVE_CFG(old_c, ...) \ + ALTERNATIVE_CFG old_c -#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ - CONFIG_k_1, \ - new_c_2, vendor_id_2, errata_id_2, \ - CONFIG_k_2) \ - __ALTERNATIVE_CFG old_c +#define _ALTERNATIVE_CFG_2(old_c, ...) \ + ALTERNATIVE_CFG old_c #else /* !__ASSEMBLY__ */ -#define __ALTERNATIVE_CFG(old_c) \ +#define __ALTERNATIVE_CFG(old_c) \ old_c "\n" -#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ +#define _ALTERNATIVE_CFG(old_c, ...) \ __ALTERNATIVE_CFG(old_c) -#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ - CONFIG_k_1, \ - new_c_2, vendor_id_2, errata_id_2, \ - CONFIG_k_2) \ - __ALTERNATIVE_CFG(old_c) +#define _ALTERNATIVE_CFG_2(old_c, ...) \ + __ALTERNATIVE_CFG(old_c) #endif /* __ASSEMBLY__ */ #endif /* CONFIG_RISCV_ALTERNATIVE */ @@ -193,13 +158,9 @@ * on the following sample code and then replace ALTERNATIVE() with * ALTERNATIVE_2() to append its customized content. 
*/ -#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, \ - errata_id_1, CONFIG_k_1, \ - new_content_2, vendor_id_2, \ - errata_id_2, CONFIG_k_2) \ - _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, \ - errata_id_1, CONFIG_k_1, \ - new_content_2, vendor_id_2, \ - errata_id_2, CONFIG_k_2) +#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ + new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) \ + _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ + new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) #endif diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h index f6fbe7042f1c847b5bae95f45b1e878617011a71..03e3b95ae6da5db11091827afbefc439862c93c6 100644 --- a/arch/riscv/include/asm/cacheflush.h +++ b/arch/riscv/include/asm/cacheflush.h @@ -17,6 +17,13 @@ static inline void local_flush_icache_all(void) static inline void flush_dcache_page(struct page *page) { + /* + * HugeTLB pages are always fully mapped and only head page will be + * set PG_dcache_clean (see comments in flush_icache_pte()). + */ + if (PageHuge(page)) + page = compound_head(page); + if (test_bit(PG_dcache_clean, &page->flags)) clear_bit(PG_dcache_clean, &page->flags); } diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h index 19a771085781a69da552b1daaa31b6b325a17a9a..4180312d2a70107cd532c1eb2a3bf7ed96f56b37 100644 --- a/arch/riscv/include/asm/errata_list.h +++ b/arch/riscv/include/asm/errata_list.h @@ -6,6 +6,7 @@ #define ASM_ERRATA_LIST_H #include +#include #include #ifdef CONFIG_ERRATA_SIFIVE @@ -17,7 +18,8 @@ #ifdef CONFIG_ERRATA_THEAD #define ERRATA_THEAD_PBMT 0 #define ERRATA_THEAD_CMO 1 -#define ERRATA_THEAD_NUMBER 2 +#define ERRATA_THEAD_PMU 2 +#define ERRATA_THEAD_NUMBER 3 #endif #define CPUFEATURE_SVPBMT 0 @@ -142,6 +144,18 @@ asm volatile(ALTERNATIVE_2( \ "r"((unsigned long)(_start) + (_size)) \ : "a0") +#define THEAD_C9XX_RV_IRQ_PMU 17 +#define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5 + +#define ALT_SBI_PMU_OVERFLOW(__ovl) \ +asm volatile(ALTERNATIVE( \ + "csrr %0, " __stringify(CSR_SSCOUNTOVF), \ + "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \ + THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \ + CONFIG_ERRATA_THEAD_PMU) \ + : "=r" (__ovl) : \ + : "memory") + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h index a5c2ca1d1cd8ba8a7a958318aa503836ead36ebd..ec19d6afc89653b59d883ba414dbc7ecff6e8397 100644 --- a/arch/riscv/include/asm/hugetlb.h +++ b/arch/riscv/include/asm/hugetlb.h @@ -5,4 +5,10 @@ #include #include +static inline void arch_clear_hugepage_flags(struct page *page) +{ + clear_bit(PG_dcache_clean, &page->flags); +} +#define arch_clear_hugepage_flags arch_clear_hugepage_flags + #endif /* _ASM_RISCV_HUGETLB_H */ diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index b2252529007301767f4dac92f4bc93d128b020b5..86328e3acb02ee3b5752642039c2cdd2806989a1 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -59,8 +59,9 @@ enum riscv_isa_ext_id { RISCV_ISA_EXT_ZIHINTPAUSE, RISCV_ISA_EXT_SSTC, RISCV_ISA_EXT_SVINVAL, - RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX, + RISCV_ISA_EXT_ID_MAX }; +static_assert(RISCV_ISA_EXT_ID_MAX <= RISCV_ISA_EXT_MAX); /* * This enum represents the logical ID for each RISC-V ISA extension static diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index 
92080a227937283b2d094c2d204b979928d9e16d..42497d487a174642bee085b3a950ee6211396fd3 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -135,4 +135,9 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) #include +#ifdef CONFIG_MMU +#define arch_memremap_wb(addr, size) \ + ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL)) +#endif + #endif /* _ASM_RISCV_IO_H */ diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h index eee260e8ab308e8a033995082e1119df4739eeb3..2b56769cb530cbcc164206a1fe0361a6a9fa013f 100644 --- a/arch/riscv/include/asm/kexec.h +++ b/arch/riscv/include/asm/kexec.h @@ -39,6 +39,7 @@ crash_setup_regs(struct pt_regs *newregs, #define ARCH_HAS_KIMAGE_ARCH struct kimage_arch { + void *fdt; /* For CONFIG_KEXEC_FILE */ unsigned long fdt_addr; }; @@ -62,6 +63,10 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, const Elf_Shdr *relsec, const Elf_Shdr *symtab); #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add + +struct kimage; +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup #endif #endif diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h index 217ef89f22b9f81f1f4e6de5ad12e16dcbcd9a80..e7882ccb0fd46a67da4e9e1d287066b121c5f188 100644 --- a/arch/riscv/include/asm/kprobes.h +++ b/arch/riscv/include/asm/kprobes.h @@ -40,8 +40,6 @@ void arch_remove_kprobe(struct kprobe *p); int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr); bool kprobe_breakpoint_handler(struct pt_regs *regs); bool kprobe_single_step_handler(struct pt_regs *regs); -void __kretprobe_trampoline(void); -void __kprobes *trampoline_probe_handler(struct pt_regs *regs); #endif /* CONFIG_KPROBES */ #endif /* _ASM_RISCV_KPROBES_H */ diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index 0099dc1161683ddd1e3c45460309e331b7e6b0a7..5ff1f19fd45c29b4fc7d2c8b44ac4984caf1e75c 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -19,6 +19,8 @@ typedef struct { #ifdef CONFIG_SMP /* A local icache flush is needed before user execution can resume. */ cpumask_t icache_stale_mask; + /* A local tlb flush is needed before user execution can resume. */ + cpumask_t tlb_stale_mask; #endif } mm_context_t; diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index ac70b0fd9a9a35c49a2513840e5d9306e41dced6..9f432c1b528990e8492b35ef5dda19c482a0037d 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -123,20 +123,20 @@ extern phys_addr_t phys_ram_base; ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE)) #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset)) -#define kernel_mapping_pa_to_va(y) ({ \ - unsigned long _y = y; \ - (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \ - (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) : \ - (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \ +#define kernel_mapping_pa_to_va(y) ({ \ + unsigned long _y = (unsigned long)(y); \ + (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? 
\ + (void *)(_y + kernel_map.va_kernel_xip_pa_offset) : \ + (void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \ }) #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x) #define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset) #define kernel_mapping_va_to_pa(y) ({ \ - unsigned long _y = y; \ - (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \ - ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : \ - ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \ + unsigned long _y = (unsigned long)(y); \ + (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \ + (_y - kernel_map.va_kernel_xip_pa_offset) : \ + (_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \ }) #define __va_to_pa_nodebug(x) ({ \ diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h index dc42375c2357136356df1ca7f42480eeea98a3dc..42a042c0e13ed6ddac188dc8f9940a4008feb6c7 100644 --- a/arch/riscv/include/asm/pgtable-64.h +++ b/arch/riscv/include/asm/pgtable-64.h @@ -25,7 +25,11 @@ extern bool pgtable_l5_enabled; #define PGDIR_MASK (~(PGDIR_SIZE - 1)) /* p4d is folded into pgd in case of 4-level page table */ -#define P4D_SHIFT 39 +#define P4D_SHIFT_L3 30 +#define P4D_SHIFT_L4 39 +#define P4D_SHIFT_L5 39 +#define P4D_SHIFT (pgtable_l5_enabled ? P4D_SHIFT_L5 : \ + (pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3)) #define P4D_SIZE (_AC(1, UL) << P4D_SHIFT) #define P4D_MASK (~(P4D_SIZE - 1)) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 92ec2d9d7273f450a03d17ce7682d9b6f7d5c240..4eba9a98d0e3d6444245d5bfbeb010225021708b 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -415,9 +415,12 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, * Relying on flush_tlb_fix_spurious_fault would suffice, but * the extra traps reduce performance. So, eagerly SFENCE.VMA. 
*/ - local_flush_tlb_page(address); + flush_tlb_page(vma, address); } +#define __HAVE_ARCH_UPDATE_MMU_TLB +#define update_mmu_tlb update_mmu_cache + static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { @@ -802,8 +805,6 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, #endif /* !CONFIG_MMU */ -#define kern_addr_valid(addr) (1) /* FIXME */ - extern char _start[]; extern void *_dtb_early_va; extern uintptr_t _dtb_early_pa; diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 2a0ef738695ed0847aa02c080489aa2f8f8981a6..4ca7fbacff42494bf3e222ce5a378b507d152383 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -327,4 +327,9 @@ int sbi_err_map_linux_errno(int err); static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; } static inline void sbi_init(void) {} #endif /* CONFIG_RISCV_SBI */ + +unsigned long riscv_cached_mvendorid(unsigned int cpu_id); +unsigned long riscv_cached_marchid(unsigned int cpu_id); +unsigned long riscv_cached_mimpid(unsigned int cpu_id); + #endif /* _ASM_RISCV_SBI_H */ diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 801019381dea3fec53f50fe443fb350c3ab92c43..907b9efd39a87dd3853c1f8f21f4baa2fdd1125c 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -22,6 +22,24 @@ static inline void local_flush_tlb_page(unsigned long addr) { ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory")); } + +static inline void local_flush_tlb_all_asid(unsigned long asid) +{ + __asm__ __volatile__ ("sfence.vma x0, %0" + : + : "r" (asid) + : "memory"); +} + +static inline void local_flush_tlb_page_asid(unsigned long addr, + unsigned long asid) +{ + __asm__ __volatile__ ("sfence.vma %0, %1" + : + : "r" (addr), "r" (asid) + : "memory"); +} + #else /* CONFIG_MMU */ #define local_flush_tlb_all() do { } while (0) #define local_flush_tlb_page(addr) do { } while (0) diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h index af981426fe0ff7b5c43b63d54670eac0c7f1cf7f..a7644f46d0e56e5f60ec38820b576ae22a4e7b68 100644 --- a/arch/riscv/include/asm/vdso.h +++ b/arch/riscv/include/asm/vdso.h @@ -10,7 +10,7 @@ /* * All systems with an MMU have a VDSO, but systems without an MMU don't - * support shared libraries and therefor don't have one. + * support shared libraries and therefore don't have one. 
*/ #ifdef CONFIG_MMU diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h index ff9abc00d1394f58933cb4f90d9b773c795b0f0d..48da5371f1e9acf96a5637df7baadf2d4a67ad20 100644 --- a/arch/riscv/include/asm/vmalloc.h +++ b/arch/riscv/include/asm/vmalloc.h @@ -1,4 +1,22 @@ #ifndef _ASM_RISCV_VMALLOC_H #define _ASM_RISCV_VMALLOC_H +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +#define IOREMAP_MAX_ORDER (PUD_SHIFT) + +#define arch_vmap_pud_supported arch_vmap_pud_supported +static inline bool arch_vmap_pud_supported(pgprot_t prot) +{ + return true; +} + +#define arch_vmap_pmd_supported arch_vmap_pmd_supported +static inline bool arch_vmap_pmd_supported(pgprot_t prot) +{ + return true; +} + +#endif + #endif /* _ASM_RISCV_VMALLOC_H */ diff --git a/arch/riscv/include/uapi/asm/ucontext.h b/arch/riscv/include/uapi/asm/ucontext.h index 44eb993950e5e32fe26267fe99c6071e5ddf1f2d..516bd0bb0da5c1b47f1acb20c937f461526e4db7 100644 --- a/arch/riscv/include/uapi/asm/ucontext.h +++ b/arch/riscv/include/uapi/asm/ucontext.h @@ -15,19 +15,23 @@ struct ucontext { struct ucontext *uc_link; stack_t uc_stack; sigset_t uc_sigmask; - /* There's some padding here to allow sigset_t to be expanded in the + /* + * There's some padding here to allow sigset_t to be expanded in the * future. Though this is unlikely, other architectures put uc_sigmask * at the end of this structure and explicitly state it can be - * expanded, so we didn't want to box ourselves in here. */ + * expanded, so we didn't want to box ourselves in here. + */ __u8 __unused[1024 / 8 - sizeof(sigset_t)]; - /* We can't put uc_sigmask at the end of this structure because we need + /* + * We can't put uc_sigmask at the end of this structure because we need * to be able to expand sigcontext in the future. For example, the * vector ISA extension will almost certainly add ISA state. We want * to ensure all user-visible ISA state can be saved and restored via a * ucontext, so we're putting this at the end in order to allow for * infinite extensibility. Since we know this will be extended and we * assume sigset_t won't be extended an extreme amount, we're - * prioritizing this. */ + * prioritizing this. 
+ */ struct sigcontext uc_mcontext; }; diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index db6e4b1294ba3761bed0a3e1a249dd2ac7e2d25e..4cf303a779ab906c313c9bc91f695bf2911c5687 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -81,6 +81,7 @@ obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KEXEC_CORE) += kexec_relocate.o crash_save_regs.o machine_kexec.o obj-$(CONFIG_KEXEC_FILE) += elf_kexec.o machine_kexec_file.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +obj-$(CONFIG_CRASH_CORE) += crash_core.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index 852ecccd8920f7e608616bcaff07694441212ad5..1b9a5a66e55ab8eb0bbd7c228565674bd8b91569 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -70,8 +70,6 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid) return -1; } -#ifdef CONFIG_PROC_FS - struct riscv_cpuinfo { unsigned long mvendorid; unsigned long marchid; @@ -79,6 +77,30 @@ struct riscv_cpuinfo { }; static DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo); +unsigned long riscv_cached_mvendorid(unsigned int cpu_id) +{ + struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id); + + return ci->mvendorid; +} +EXPORT_SYMBOL(riscv_cached_mvendorid); + +unsigned long riscv_cached_marchid(unsigned int cpu_id) +{ + struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id); + + return ci->marchid; +} +EXPORT_SYMBOL(riscv_cached_marchid); + +unsigned long riscv_cached_mimpid(unsigned int cpu_id) +{ + struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id); + + return ci->mimpid; +} +EXPORT_SYMBOL(riscv_cached_mimpid); + static int riscv_cpuinfo_starting(unsigned int cpu) { struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo); @@ -113,7 +135,9 @@ static int __init riscv_cpuinfo_init(void) return 0; } -device_initcall(riscv_cpuinfo_init); +arch_initcall(riscv_cpuinfo_init); + +#ifdef CONFIG_PROC_FS #define __RISCV_ISA_EXT_DATA(UPROP, EXTID) \ { \ diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 694267d1fe8149a6a29aab095e441aafaa07b0ec..93e45560af307c21533946fe1e2b328a947d46f3 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -68,21 +69,38 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit) } EXPORT_SYMBOL_GPL(__riscv_isa_extension_available); +static bool riscv_isa_extension_check(int id) +{ + switch (id) { + case RISCV_ISA_EXT_ZICBOM: + if (!riscv_cbom_block_size) { + pr_err("Zicbom detected in ISA string, but no cbom-block-size found\n"); + return false; + } else if (!is_power_of_2(riscv_cbom_block_size)) { + pr_err("cbom-block-size present, but is not a power-of-2\n"); + return false; + } + return true; + } + + return true; +} + void __init riscv_fill_hwcap(void) { struct device_node *node; const char *isa; char print_str[NUM_ALPHA_EXTS + 1]; int i, j, rc; - static unsigned long isa2hwcap[256] = {0}; + unsigned long isa2hwcap[26] = {0}; unsigned long hartid; - isa2hwcap['i'] = isa2hwcap['I'] = COMPAT_HWCAP_ISA_I; - isa2hwcap['m'] = isa2hwcap['M'] = COMPAT_HWCAP_ISA_M; - isa2hwcap['a'] = isa2hwcap['A'] = COMPAT_HWCAP_ISA_A; - isa2hwcap['f'] = isa2hwcap['F'] = COMPAT_HWCAP_ISA_F; - isa2hwcap['d'] = isa2hwcap['D'] = COMPAT_HWCAP_ISA_D; - isa2hwcap['c'] = isa2hwcap['C'] = COMPAT_HWCAP_ISA_C; + isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I; + isa2hwcap['m' - 'a'] = 
COMPAT_HWCAP_ISA_M; + isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A; + isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F; + isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D; + isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C; elf_hwcap = 0; @@ -189,15 +207,20 @@ void __init riscv_fill_hwcap(void) #define SET_ISA_EXT_MAP(name, bit) \ do { \ if ((ext_end - ext == sizeof(name) - 1) && \ - !memcmp(ext, name, sizeof(name) - 1)) \ + !memcmp(ext, name, sizeof(name) - 1) && \ + riscv_isa_extension_check(bit)) \ set_bit(bit, this_isa); \ } while (false) \ if (unlikely(ext_err)) continue; if (!ext_long) { - this_hwcap |= isa2hwcap[(unsigned char)(*ext)]; - set_bit(*ext - 'a', this_isa); + int nr = *ext - 'a'; + + if (riscv_isa_extension_check(nr)) { + this_hwcap |= isa2hwcap[nr]; + set_bit(nr, this_isa); + } } else { SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF); SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT); diff --git a/arch/riscv/kernel/crash_core.c b/arch/riscv/kernel/crash_core.c new file mode 100644 index 0000000000000000000000000000000000000000..b351a3c013555650b6ad4f75e0b7e2de28c8ba81 --- /dev/null +++ b/arch/riscv/kernel/crash_core.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +void arch_crash_save_vmcoreinfo(void) +{ + VMCOREINFO_NUMBER(VA_BITS); + VMCOREINFO_NUMBER(phys_ram_base); + + vmcoreinfo_append_str("NUMBER(PAGE_OFFSET)=0x%lx\n", PAGE_OFFSET); + vmcoreinfo_append_str("NUMBER(VMALLOC_START)=0x%lx\n", VMALLOC_START); + vmcoreinfo_append_str("NUMBER(VMALLOC_END)=0x%lx\n", VMALLOC_END); + vmcoreinfo_append_str("NUMBER(VMEMMAP_START)=0x%lx\n", VMEMMAP_START); + vmcoreinfo_append_str("NUMBER(VMEMMAP_END)=0x%lx\n", VMEMMAP_END); +#ifdef CONFIG_64BIT + vmcoreinfo_append_str("NUMBER(MODULES_VADDR)=0x%lx\n", MODULES_VADDR); + vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END); +#endif + vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR); +} diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c index 0cb94992c15b32a852716ca877d95b5f14f63c6f..5372b708fae21a072f72434625d27d792b83abc7 100644 --- a/arch/riscv/kernel/elf_kexec.c +++ b/arch/riscv/kernel/elf_kexec.c @@ -21,6 +21,18 @@ #include #include +int arch_kimage_file_post_load_cleanup(struct kimage *image) +{ + kvfree(image->arch.fdt); + image->arch.fdt = NULL; + + vfree(image->elf_headers); + image->elf_headers = NULL; + image->elf_headers_sz = 0; + + return kexec_image_post_load_cleanup_default(image); +} + static int riscv_kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, struct kexec_elf_info *elf_info, unsigned long old_pbase, unsigned long new_pbase) @@ -298,6 +310,8 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf, pr_err("Error add DTB kbuf ret=%d\n", ret); goto out_free_fdt; } + /* Cache the fdt buffer address for memory cleanup */ + image->arch.fdt = fdt; pr_notice("Loaded device tree at 0x%lx\n", kbuf.mem); goto out; diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 186abd146eaffca06c6c3db10902c42ffa769dbc..99d38fdf8b18f2f18fe3bae2dce2d7a3e5b11b5a 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -248,7 +248,7 @@ ret_from_syscall_rejected: andi t0, t0, _TIF_SYSCALL_WORK bnez t0, handle_syscall_trace_exit -ret_from_exception: +SYM_CODE_START_NOALIGN(ret_from_exception) REG_L s0, PT_STATUS(sp) csrc CSR_STATUS, SR_IE #ifdef CONFIG_TRACE_IRQFLAGS @@ -262,13 +262,13 @@ ret_from_exception: andi s0, s0, SR_SPP #endif bnez s0, resume_kernel +SYM_CODE_END(ret_from_exception) 
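The entry.S changes below collapse the old work_pending/work_resched/work_notifysig assembly into a single C slow path. A condensed sketch of that loop, taken from the do_work_pending() implementation in the signal.c hunk later in this patch, entered with IRQs disabled:

	/* Loop until no work bits remain, re-reading the flags with IRQs
	 * off so nothing is lost between the final check and the return
	 * to userspace.
	 */
	do {
		if (thread_info_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			local_irq_enable();
			if (thread_info_flags & _TIF_UPROBE)
				uprobe_notify_resume(regs);
			if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
				do_signal(regs);
			if (thread_info_flags & _TIF_NOTIFY_RESUME)
				resume_user_mode_work(regs);
		}
		local_irq_disable();
		thread_info_flags = read_thread_flags();
	} while (thread_info_flags & _TIF_WORK_MASK);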
-resume_userspace: /* Interrupts must be disabled here so flags are checked atomically */ REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */ andi s1, s0, _TIF_WORK_MASK - bnez s1, work_pending - + bnez s1, resume_userspace_slow +resume_userspace: #ifdef CONFIG_CONTEXT_TRACKING_USER call user_enter_callable #endif @@ -368,19 +368,12 @@ resume_kernel: j restore_all #endif -work_pending: +resume_userspace_slow: /* Enter slow path for supplementary processing */ - la ra, ret_from_exception - andi s1, s0, _TIF_NEED_RESCHED - bnez s1, work_resched -work_notifysig: - /* Handle pending signals and notify-resume requests */ - csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */ move a0, sp /* pt_regs */ move a1, s0 /* current_thread_info->flags */ - tail do_notify_resume -work_resched: - tail schedule + call do_work_pending + j resume_userspace /* Slow paths for ptrace. */ handle_syscall_trace_enter: diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S index 6d462681c9c0238e012daa973a3441a849ed34a9..30102aadc4d73ce463dce7b88bc7e73c124a109c 100644 --- a/arch/riscv/kernel/mcount.S +++ b/arch/riscv/kernel/mcount.S @@ -15,8 +15,8 @@ .macro SAVE_ABI_STATE addi sp, sp, -16 - sd s0, 0(sp) - sd ra, 8(sp) + REG_S s0, 0*SZREG(sp) + REG_S ra, 1*SZREG(sp) addi s0, sp, 16 .endm @@ -25,24 +25,26 @@ * register if a0 was not saved. */ .macro SAVE_RET_ABI_STATE - addi sp, sp, -32 - sd s0, 16(sp) - sd ra, 24(sp) - sd a0, 8(sp) - addi s0, sp, 32 + addi sp, sp, -4*SZREG + REG_S s0, 2*SZREG(sp) + REG_S ra, 3*SZREG(sp) + REG_S a0, 1*SZREG(sp) + REG_S a1, 0*SZREG(sp) + addi s0, sp, 4*SZREG .endm .macro RESTORE_ABI_STATE - ld ra, 8(sp) - ld s0, 0(sp) + REG_L ra, 1*SZREG(sp) + REG_L s0, 0*SZREG(sp) addi sp, sp, 16 .endm .macro RESTORE_RET_ABI_STATE - ld ra, 24(sp) - ld s0, 16(sp) - ld a0, 8(sp) - addi sp, sp, 32 + REG_L ra, 3*SZREG(sp) + REG_L s0, 2*SZREG(sp) + REG_L a0, 1*SZREG(sp) + REG_L a1, 0*SZREG(sp) + addi sp, sp, 4*SZREG .endm ENTRY(ftrace_stub) @@ -71,9 +73,9 @@ ENTRY(return_to_handler) mv a0, t6 #endif call ftrace_return_to_handler - mv a1, a0 + mv a2, a0 RESTORE_RET_ABI_STATE - jalr a1 + jalr a2 ENDPROC(return_to_handler) #endif @@ -82,16 +84,16 @@ ENTRY(MCOUNT_NAME) la t4, ftrace_stub #ifdef CONFIG_FUNCTION_GRAPH_TRACER la t0, ftrace_graph_return - ld t1, 0(t0) + REG_L t1, 0(t0) bne t1, t4, do_ftrace_graph_caller la t3, ftrace_graph_entry - ld t2, 0(t3) + REG_L t2, 0(t3) la t6, ftrace_graph_entry_stub bne t2, t6, do_ftrace_graph_caller #endif la t3, ftrace_trace_function - ld t5, 0(t3) + REG_L t5, 0(t3) bne t5, t4, do_trace ret @@ -101,10 +103,10 @@ ENTRY(MCOUNT_NAME) * prepare_to_return(&ra_to_caller_of_caller, ra_to_caller) */ do_ftrace_graph_caller: - addi a0, s0, -8 + addi a0, s0, -SZREG mv a1, ra #ifdef HAVE_FUNCTION_GRAPH_FP_TEST - ld a2, -16(s0) + REG_L a2, -2*SZREG(s0) #endif SAVE_ABI_STATE call prepare_ftrace_return @@ -117,7 +119,7 @@ do_ftrace_graph_caller: * (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller) */ do_trace: - ld a1, -8(s0) + REG_L a1, -SZREG(s0) mv a0, ra SAVE_ABI_STATE diff --git a/arch/riscv/kernel/probes/Makefile b/arch/riscv/kernel/probes/Makefile index 7f0840dcc31bc78eeecd6c3ff45a5a645f2b2283..c40139e9ca4781e414946bd9e58648f15c89d25a 100644 --- a/arch/riscv/kernel/probes/Makefile +++ b/arch/riscv/kernel/probes/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o -obj-$(CONFIG_KPROBES) += kprobes_trampoline.o +obj-$(CONFIG_RETHOOK) += rethook.o 
rethook_trampoline.o obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE) diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index e6e950b7cf32742ed5902b6cd74f6249e0bde94d..f21592d203062ac2f9db083364f2e7d526e0ba8e 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ b/arch/riscv/kernel/probes/kprobes.c @@ -345,19 +345,6 @@ int __init arch_populate_kprobe_blacklist(void) return ret; } -void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) -{ - return (void *)kretprobe_trampoline_handler(regs, NULL); -} - -void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, - struct pt_regs *regs) -{ - ri->ret_addr = (kprobe_opcode_t *)regs->ra; - ri->fp = NULL; - regs->ra = (unsigned long) &__kretprobe_trampoline; -} - int __kprobes arch_trampoline_kprobe(struct kprobe *p) { return 0; diff --git a/arch/riscv/kernel/probes/rethook.c b/arch/riscv/kernel/probes/rethook.c new file mode 100644 index 0000000000000000000000000000000000000000..5c27c1f509894d69825d71196182837fb79ca000 --- /dev/null +++ b/arch/riscv/kernel/probes/rethook.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic return hook for riscv. + */ + +#include +#include +#include "rethook.h" + +/* This is called from arch_rethook_trampoline() */ +unsigned long __used arch_rethook_trampoline_callback(struct pt_regs *regs) +{ + return rethook_trampoline_handler(regs, regs->s0); +} + +NOKPROBE_SYMBOL(arch_rethook_trampoline_callback); + +void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount) +{ + rhn->ret_addr = regs->ra; + rhn->frame = regs->s0; + + /* replace return addr with trampoline */ + regs->ra = (unsigned long)arch_rethook_trampoline; +} + +NOKPROBE_SYMBOL(arch_rethook_prepare); diff --git a/arch/riscv/kernel/probes/rethook.h b/arch/riscv/kernel/probes/rethook.h new file mode 100644 index 0000000000000000000000000000000000000000..4758f7e3ce881f78d0134e058223f03580736194 --- /dev/null +++ b/arch/riscv/kernel/probes/rethook.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __RISCV_RETHOOK_H +#define __RISCV_RETHOOK_H + +unsigned long arch_rethook_trampoline_callback(struct pt_regs *regs); +void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount); + +#endif diff --git a/arch/riscv/kernel/probes/kprobes_trampoline.S b/arch/riscv/kernel/probes/rethook_trampoline.S similarity index 94% rename from arch/riscv/kernel/probes/kprobes_trampoline.S rename to arch/riscv/kernel/probes/rethook_trampoline.S index 7bdb09ded39b8c3b1dc3f064c51bf65d1b72c801..21bac92a170a9bd43be973f213cb8be3329175ef 100644 --- a/arch/riscv/kernel/probes/kprobes_trampoline.S +++ b/arch/riscv/kernel/probes/rethook_trampoline.S @@ -75,13 +75,13 @@ REG_L x31, PT_T6(sp) .endm -ENTRY(__kretprobe_trampoline) +ENTRY(arch_rethook_trampoline) addi sp, sp, -(PT_SIZE_ON_STACK) save_all_base_regs move a0, sp /* pt_regs */ - call trampoline_probe_handler + call arch_rethook_trampoline_callback /* use the result as the return-address */ move ra, a0 @@ -90,4 +90,4 @@ ENTRY(__kretprobe_trampoline) addi sp, sp, PT_SIZE_ON_STACK ret -ENDPROC(__kretprobe_trampoline) +ENDPROC(arch_rethook_trampoline) diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 5c591123c440910594f0051be1bb6387d72d05d6..bfb2afa4135f89690b90da20d281252a539d8cda 100644 --- a/arch/riscv/kernel/signal.c +++ 
b/arch/riscv/kernel/signal.c @@ -313,19 +313,27 @@ static void do_signal(struct pt_regs *regs) } /* - * notification of userspace execution resumption - * - triggered by the _TIF_WORK_MASK flags + * Handle any pending work on the resume-to-userspace path, as indicated by + * _TIF_WORK_MASK. Entered from assembly with IRQs off. */ -asmlinkage __visible void do_notify_resume(struct pt_regs *regs, - unsigned long thread_info_flags) +asmlinkage __visible void do_work_pending(struct pt_regs *regs, + unsigned long thread_info_flags) { - if (thread_info_flags & _TIF_UPROBE) - uprobe_notify_resume(regs); - - /* Handle pending signal delivery */ - if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) - do_signal(regs); - - if (thread_info_flags & _TIF_NOTIFY_RESUME) - resume_user_mode_work(regs); + do { + if (thread_info_flags & _TIF_NEED_RESCHED) { + schedule(); + } else { + local_irq_enable(); + if (thread_info_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + /* Handle pending signal delivery */ + if (thread_info_flags & (_TIF_SIGPENDING | + _TIF_NOTIFY_SIGNAL)) + do_signal(regs); + if (thread_info_flags & _TIF_NOTIFY_RESUME) + resume_user_mode_work(regs); + } + local_irq_disable(); + thread_info_flags = read_thread_flags(); + } while (thread_info_flags & _TIF_WORK_MASK); } diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index 08d11a53f39e7a767a643b27b996b2ce36a3fb16..75c8dd64fc48e02ba56942d2d1a029ff88c061a4 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -16,6 +16,8 @@ #ifdef CONFIG_FRAME_POINTER +extern asmlinkage void ret_from_exception(void); + void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg) { @@ -58,7 +60,14 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, } else { fp = frame->fp; pc = ftrace_graph_ret_addr(current, NULL, frame->ra, - (unsigned long *)(fp - 8)); + &frame->ra); + if (pc == (unsigned long)ret_from_exception) { + if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc))) + break; + + pc = ((struct pt_regs *)sp)->epc; + fp = ((struct pt_regs *)sp)->s0; + } } } diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 7abd8e4c4df63e29ec4b88ca31900e0bce377483..549bde5c970a13e9412148c7c45aca1ef2034825 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -208,18 +208,18 @@ int is_valid_bugaddr(unsigned long pc) #endif /* CONFIG_GENERIC_BUG */ #ifdef CONFIG_VMAP_STACK +/* + * Extra stack space that allows us to provide panic messages when the kernel + * has overflowed its stack. + */ static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)__aligned(16); /* - * shadow stack, handled_ kernel_ stack_ overflow(in kernel/entry.S) is used - * to get per-cpu overflow stack(get_overflow_stack). + * A temporary stack for use by handle_kernel_stack_overflow. This is used so + * we can call into C code to get the per-hart overflow stack. Usage of this + * stack must be protected by spin_shadow_stack. 
*/ -long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)]; -asmlinkage unsigned long get_overflow_stack(void) -{ - return (unsigned long)this_cpu_ptr(overflow_stack) + - OVERFLOW_STACK_SIZE; -} +long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16); /* * A pseudo spinlock to protect the shadow stack from being used by multiple @@ -230,6 +230,12 @@ asmlinkage unsigned long get_overflow_stack(void) */ unsigned long spin_shadow_stack; +asmlinkage unsigned long get_overflow_stack(void) +{ + return (unsigned long)this_cpu_ptr(overflow_stack) + + OVERFLOW_STACK_SIZE; +} + asmlinkage void handle_bad_stack(struct pt_regs *regs) { unsigned long tsk_stk = (unsigned long)current->stack; diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index d76aabf4b94d6a8bf4ef20eab680de4d4be16ff9..2ac177c053520b5fa2f6eb3c4f2006f494c8fd3c 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -13,6 +13,8 @@ obj-y += extable.o obj-$(CONFIG_MMU) += fault.o pageattr.o obj-y += cacheflush.o obj-y += context.o +obj-y += pgtable.o +obj-y += pmem.o ifeq ($(CONFIG_MMU),y) obj-$(CONFIG_SMP) += tlbflush.o diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c index 57b40a3504206411ac8e152b6a13d8e183477c43..3cc07ed45aeb3c3e68399c01f046f62856a58b61 100644 --- a/arch/riscv/mm/cacheflush.c +++ b/arch/riscv/mm/cacheflush.c @@ -83,6 +83,13 @@ void flush_icache_pte(pte_t pte) { struct page *page = pte_page(pte); + /* + * HugeTLB pages are always fully mapped, so only setting head page's + * PG_dcache_clean flag is enough. + */ + if (PageHuge(page)) + page = compound_head(page); + if (!test_and_set_bit(PG_dcache_clean, &page->flags)) flush_icache_all(); } diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c index 7acbfbd14557e600ba69b888c7389fef64f72428..80ce9caba8d225979426f01f9a636d11890f9de6 100644 --- a/arch/riscv/mm/context.c +++ b/arch/riscv/mm/context.c @@ -196,6 +196,16 @@ switch_mm_fast: if (need_flush_tlb) local_flush_tlb_all(); +#ifdef CONFIG_SMP + else { + cpumask_t *mask = &mm->context.tlb_stale_mask; + + if (cpumask_test_cpu(cpu, mask)) { + cpumask_clear_cpu(cpu, mask); + local_flush_tlb_all_asid(cntx & asid_mask); + } + } +#endif } static void set_mm_noasid(struct mm_struct *mm) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 50a1b6edd491823517fd9c31948c3a9e2d4d7caa..478d6763a01a1ebde626635a2f7703f4415649a7 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -672,10 +672,11 @@ void __init create_pgd_mapping(pgd_t *pgdp, static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size) { /* Upgrade to PMD_SIZE mappings whenever possible */ - if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1))) - return PAGE_SIZE; + base &= PMD_SIZE - 1; + if (!base && size >= PMD_SIZE) + return PMD_SIZE; - return PMD_SIZE; + return PAGE_SIZE; } #ifdef CONFIG_XIP_KERNEL @@ -926,15 +927,15 @@ static void __init pt_ops_set_early(void) */ static void __init pt_ops_set_fixmap(void) { - pt_ops.alloc_pte = kernel_mapping_pa_to_va((uintptr_t)alloc_pte_fixmap); - pt_ops.get_pte_virt = kernel_mapping_pa_to_va((uintptr_t)get_pte_virt_fixmap); + pt_ops.alloc_pte = kernel_mapping_pa_to_va(alloc_pte_fixmap); + pt_ops.get_pte_virt = kernel_mapping_pa_to_va(get_pte_virt_fixmap); #ifndef __PAGETABLE_PMD_FOLDED - pt_ops.alloc_pmd = kernel_mapping_pa_to_va((uintptr_t)alloc_pmd_fixmap); - pt_ops.get_pmd_virt = kernel_mapping_pa_to_va((uintptr_t)get_pmd_virt_fixmap); - pt_ops.alloc_pud = kernel_mapping_pa_to_va((uintptr_t)alloc_pud_fixmap); - 
pt_ops.get_pud_virt = kernel_mapping_pa_to_va((uintptr_t)get_pud_virt_fixmap); - pt_ops.alloc_p4d = kernel_mapping_pa_to_va((uintptr_t)alloc_p4d_fixmap); - pt_ops.get_p4d_virt = kernel_mapping_pa_to_va((uintptr_t)get_p4d_virt_fixmap); + pt_ops.alloc_pmd = kernel_mapping_pa_to_va(alloc_pmd_fixmap); + pt_ops.get_pmd_virt = kernel_mapping_pa_to_va(get_pmd_virt_fixmap); + pt_ops.alloc_pud = kernel_mapping_pa_to_va(alloc_pud_fixmap); + pt_ops.get_pud_virt = kernel_mapping_pa_to_va(get_pud_virt_fixmap); + pt_ops.alloc_p4d = kernel_mapping_pa_to_va(alloc_p4d_fixmap); + pt_ops.get_p4d_virt = kernel_mapping_pa_to_va(get_p4d_virt_fixmap); #endif } @@ -1110,9 +1111,9 @@ static void __init setup_vm_final(void) if (end >= __pa(PAGE_OFFSET) + memory_limit) end = __pa(PAGE_OFFSET) + memory_limit; - map_size = best_map_size(start, end - start); for (pa = start; pa < end; pa += map_size) { va = (uintptr_t)__va(pa); + map_size = best_map_size(pa, end - pa); create_pgd_mapping(swapper_pg_dir, va, pa, map_size, pgprot_from_va(va)); diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c new file mode 100644 index 0000000000000000000000000000000000000000..6645ead1a7c16d40dbe232c5358b4a484e10909f --- /dev/null +++ b/arch/riscv/mm/pgtable.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} + +void p4d_clear_huge(p4d_t *p4d) +{ +} + +int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot) +{ + pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot); + + set_pud(pud, new_pud); + return 1; +} + +int pud_clear_huge(pud_t *pud) +{ + if (!pud_leaf(READ_ONCE(*pud))) + return 0; + pud_clear(pud); + return 1; +} + +int pud_free_pmd_page(pud_t *pud, unsigned long addr) +{ + pmd_t *pmd = pud_pgtable(*pud); + int i; + + pud_clear(pud); + + flush_tlb_kernel_range(addr, addr + PUD_SIZE); + + for (i = 0; i < PTRS_PER_PMD; i++) { + if (!pmd_none(pmd[i])) { + pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]); + + pte_free_kernel(NULL, pte); + } + } + + pmd_free(NULL, pmd); + + return 1; +} + +int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot) +{ + pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot); + + set_pmd(pmd, new_pmd); + return 1; +} + +int pmd_clear_huge(pmd_t *pmd) +{ + if (!pmd_leaf(READ_ONCE(*pmd))) + return 0; + pmd_clear(pmd); + return 1; +} + +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) +{ + pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd); + + pmd_clear(pmd); + + flush_tlb_kernel_range(addr, addr + PMD_SIZE); + pte_free_kernel(NULL, pte); + return 1; +} + +#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c index 19cf25a74ee29dc4fafb31fa1ef5eda17255906c..9b18bda74154e8a3b2b807cd0ae2065eb7c28612 100644 --- a/arch/riscv/mm/physaddr.c +++ b/arch/riscv/mm/physaddr.c @@ -22,7 +22,7 @@ EXPORT_SYMBOL(__virt_to_phys); phys_addr_t __phys_addr_symbol(unsigned long x) { unsigned long kernel_start = kernel_map.virt_addr; - unsigned long kernel_end = (unsigned long)_end; + unsigned long kernel_end = kernel_start + kernel_map.size; /* * Boundary checking against the kernel image mapping. diff --git a/arch/riscv/mm/pmem.c b/arch/riscv/mm/pmem.c new file mode 100644 index 0000000000000000000000000000000000000000..089df92ae8760a4e27d3c53c0ef05e5f4035eae3 --- /dev/null +++ b/arch/riscv/mm/pmem.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */ + +#include +#include + +#include + +void arch_wb_cache_pmem(void *addr, size_t size) +{ + ALT_CMO_OP(clean, addr, size, riscv_cbom_block_size); +} +EXPORT_SYMBOL_GPL(arch_wb_cache_pmem); + +void arch_invalidate_pmem(void *addr, size_t size) +{ + ALT_CMO_OP(inval, addr, size, riscv_cbom_block_size); +} +EXPORT_SYMBOL_GPL(arch_invalidate_pmem); diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index 37ed760d007c3eef9c302adda361cd4ad7d1db9d..ce7dfc81bb3fe386748557f44310aa4b1a86f3a9 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -5,23 +5,7 @@ #include #include #include - -static inline void local_flush_tlb_all_asid(unsigned long asid) -{ - __asm__ __volatile__ ("sfence.vma x0, %0" - : - : "r" (asid) - : "memory"); -} - -static inline void local_flush_tlb_page_asid(unsigned long addr, - unsigned long asid) -{ - __asm__ __volatile__ ("sfence.vma %0, %1" - : - : "r" (addr), "r" (asid) - : "memory"); -} +#include void flush_tlb_all(void) { @@ -31,6 +15,7 @@ void flush_tlb_all(void) static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start, unsigned long size, unsigned long stride) { + struct cpumask *pmask = &mm->context.tlb_stale_mask; struct cpumask *cmask = mm_cpumask(mm); unsigned int cpuid; bool broadcast; @@ -44,6 +29,15 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start, if (static_branch_unlikely(&use_asid_allocator)) { unsigned long asid = atomic_long_read(&mm->context.id); + /* + * TLB will be immediately flushed on harts concurrently + * executing this MM context. TLB flush on other harts + * is deferred until this MM context migrates there. + */ + cpumask_setall(pmask); + cpumask_clear_cpu(cpuid, pmask); + cpumask_andnot(pmask, pmask, cmask); + if (broadcast) { sbi_remote_sfence_vma_asid(cmask, start, size, asid); } else if (size <= stride) { diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index b1e98a9ed152b91365f014c7a54b3b5cfbeff459..d67ce719d16a994e8bbfc36ea53d5ffb69e4a50b 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -142,8 +142,7 @@ struct mcck_volatile_info { CR14_EXTERNAL_DAMAGE_SUBMASK) #define SIDAD_SIZE_MASK 0xff -#define sida_origin(sie_block) \ - ((sie_block)->sidad & PAGE_MASK) +#define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK) #define sida_size(sie_block) \ ((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE) @@ -276,6 +275,7 @@ struct kvm_s390_sie_block { #define ECB3_AES 0x04 #define ECB3_RI 0x01 __u8 ecb3; /* 0x0063 */ +#define ESCA_SCAOL_MASK ~0x3fU __u32 scaol; /* 0x0064 */ __u8 sdf; /* 0x0068 */ __u8 epdx; /* 0x0069 */ @@ -942,6 +942,8 @@ struct kvm_s390_pv { unsigned long stor_base; void *stor_var; bool dumping; + void *set_aside; + struct list_head need_cleanup; struct mmu_notifier mmu_notifier; }; @@ -1017,7 +1019,13 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm); void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, unsigned long *aqm, unsigned long *adm); -extern int sie64a(struct kvm_s390_sie_block *, u64 *); +int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa); + +static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa) +{ + return __sie64a(virt_to_phys(sie_block), sie_block, rsa); +} + extern char sie_exit; extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc); diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h index 
08a8b96606d7eb3eaab19ebe498f6d33f91aadee..b85e13505a0f2e7a771199160b4524718ca39116 100644 --- a/arch/s390/include/asm/mem_encrypt.h +++ b/arch/s390/include/asm/mem_encrypt.h @@ -4,8 +4,8 @@ #ifndef __ASSEMBLY__ -int set_memory_encrypted(unsigned long addr, int numpages); -int set_memory_decrypted(unsigned long addr, int numpages); +int set_memory_encrypted(unsigned long vaddr, int numpages); +int set_memory_decrypted(unsigned long vaddr, int numpages); #endif /* __ASSEMBLY__ */ diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 108e732d7b140def7918271a0c3fb75f038dde3a..b248694e00247b57b5be3338535038e9910335d1 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -117,7 +117,9 @@ struct zpci_bus { struct zpci_dev { struct zpci_bus *zbus; struct list_head entry; /* list of all zpci_devices, needed for hotplug, etc. */ + struct list_head iommu_list; struct kref kref; + struct rcu_head rcu; struct hotplug_slot hotplug_slot; enum zpci_state state; @@ -155,7 +157,6 @@ struct zpci_dev { /* DMA stuff */ unsigned long *dma_table; - spinlock_t dma_table_lock; int tlb_refresh; spinlock_t iommu_bitmap_lock; @@ -220,7 +221,7 @@ void zpci_device_reserved(struct zpci_dev *zdev); bool zpci_is_device_configured(struct zpci_dev *zdev); int zpci_hot_reset_device(struct zpci_dev *zdev); -int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64); +int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64, u8 *); int zpci_unregister_ioat(struct zpci_dev *, u8); void zpci_remove_reserved_devices(void); void zpci_update_fh(struct zpci_dev *zdev, u32 fh); diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 11e901286414c5bc6e941d8d8d3972690fa576e6..b26cbf1c533c9fa4034a6fc4aac648f0ac9c2068 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1774,8 +1774,6 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#define kern_addr_valid(addr) (1) - extern int vmem_add_mapping(unsigned long start, unsigned long size); extern void vmem_remove_mapping(unsigned long start, unsigned long size); extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc); diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h index b23c658dce77f19e6b03a9fdb093e6fdf913f9c9..1802be5abb5dc3256de62bcb07b509f3b3afd3b3 100644 --- a/arch/s390/include/asm/stacktrace.h +++ b/arch/s390/include/asm/stacktrace.h @@ -46,6 +46,7 @@ struct stack_frame { unsigned long sie_savearea; unsigned long sie_reason; unsigned long sie_flags; + unsigned long sie_control_block_phys; }; }; unsigned long gprs[10]; diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 3a5c8fb590e55ddfda72e6908ced1f4626e2c7c0..b91f4a9b044cd9e40d86b46ee42d0bfff3a2679a 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -25,7 +25,8 @@ void __tlb_remove_table(void *_table); static inline void tlb_flush(struct mmu_gather *tlb); static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, - struct page *page, int page_size); + struct encoded_page *page, + int page_size); #define tlb_flush tlb_flush #define pte_free_tlb pte_free_tlb @@ -40,11 +41,15 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, * Release the page cache reference for a pte removed by * tlb_ptep_clear_flush. 
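The encoded_page conversion in the tlb.h hunk here (its comment continues just below) revolves around pointer tagging: generic mm code may stash a flag in the low bits of a page pointer, and the architecture strips it before use. A small sketch of the idea, assuming pointers aligned to at least 4 bytes; encode_ptr()/decode_ptr()/decode_flags() are illustrative names, not the kernel API:

#include <stdint.h>

#define TAG_MASK ((uintptr_t)3) /* two low bits are free on aligned pointers */

static inline void *encode_ptr(void *p, unsigned int flags)
{
        return (void *)((uintptr_t)p | ((uintptr_t)flags & TAG_MASK));
}

static inline void *decode_ptr(void *tagged)
{
        return (void *)((uintptr_t)tagged & ~TAG_MASK);
}

static inline unsigned int decode_flags(void *tagged)
{
        return (unsigned int)((uintptr_t)tagged & TAG_MASK);
}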
In both flush modes the tlb for a page cache page * has already been freed, so just do free_page_and_swap_cache. + * + * s390 doesn't delay rmap removal, so there is nothing encoded in + * the page pointer. */ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, - struct page *page, int page_size) + struct encoded_page *page, + int page_size) { - free_page_and_swap_cache(page); + free_page_and_swap_cache(encoded_page_ptr(page)); return false; } diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h index be3ef9dd697265dad3dbc72fc58edacf14755dc9..28a9ad57b6f16ca30cf2be582eb047425bc7827b 100644 --- a/arch/s390/include/asm/uv.h +++ b/arch/s390/include/asm/uv.h @@ -34,6 +34,7 @@ #define UVC_CMD_INIT_UV 0x000f #define UVC_CMD_CREATE_SEC_CONF 0x0100 #define UVC_CMD_DESTROY_SEC_CONF 0x0101 +#define UVC_CMD_DESTROY_SEC_CONF_FAST 0x0102 #define UVC_CMD_CREATE_SEC_CPU 0x0120 #define UVC_CMD_DESTROY_SEC_CPU 0x0121 #define UVC_CMD_CONV_TO_SEC_STOR 0x0200 @@ -81,6 +82,7 @@ enum uv_cmds_inst { BIT_UVC_CMD_UNSHARE_ALL = 20, BIT_UVC_CMD_PIN_PAGE_SHARED = 21, BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22, + BIT_UVC_CMD_DESTROY_SEC_CONF_FAST = 23, BIT_UVC_CMD_DUMP_INIT = 24, BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE = 25, BIT_UVC_CMD_DUMP_CPU = 26, @@ -230,6 +232,14 @@ struct uv_cb_nodata { u64 reserved20[4]; } __packed __aligned(8); +/* Destroy Configuration Fast */ +struct uv_cb_destroy_fast { + struct uv_cb_header header; + u64 reserved08[2]; + u64 handle; + u64 reserved20[5]; +} __packed __aligned(8); + /* Set Shared Access */ struct uv_cb_share { struct uv_cb_header header; diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index d8ce965c0a97c6cbf5668ca247e936f1362bad2f..3f8e760298c2201c3bf207433d2e25ecdcca2dd8 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -62,6 +62,7 @@ int main(void) OFFSET(__SF_SIE_SAVEAREA, stack_frame, sie_savearea); OFFSET(__SF_SIE_REASON, stack_frame, sie_reason); OFFSET(__SF_SIE_FLAGS, stack_frame, sie_flags); + OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys); DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame)); BLANK(); /* idle data offsets */ diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index e0d11f3adfccd05dfec8fef7dc2b920eef67cf62..0f423e9df09565b20c3442309e5d031ee55fdc16 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -207,18 +207,20 @@ ENDPROC(__switch_to) #if IS_ENABLED(CONFIG_KVM) /* - * sie64a calling convention: - * %r2 pointer to sie control block - * %r3 guest register save area + * __sie64a calling convention: + * %r2 pointer to sie control block phys + * %r3 pointer to sie control block virt + * %r4 guest register save area */ -ENTRY(sie64a) +ENTRY(__sie64a) stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers lg %r12,__LC_CURRENT - stg %r2,__SF_SIE_CONTROL(%r15) # save control block pointer - stg %r3,__SF_SIE_SAVEAREA(%r15) # save guest register save area + stg %r2,__SF_SIE_CONTROL_PHYS(%r15) # save sie block physical.. 
+ stg %r3,__SF_SIE_CONTROL(%r15) # ...and virtual addresses + stg %r4,__SF_SIE_SAVEAREA(%r15) # save guest register save area xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0 mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags - lmg %r0,%r13,0(%r3) # load guest gprs 0-13 + lmg %r0,%r13,0(%r4) # load guest gprs 0-13 lg %r14,__LC_GMAP # get gmap pointer ltgr %r14,%r14 jz .Lsie_gmap @@ -230,6 +232,7 @@ ENTRY(sie64a) jnz .Lsie_skip TSTMSK __LC_CPU_FLAGS,_CIF_FPU jo .Lsie_skip # exit if fp/vx regs changed + lg %r14,__SF_SIE_CONTROL_PHYS(%r15) # get sie block phys addr BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) .Lsie_entry: sie 0(%r14) @@ -240,13 +243,14 @@ ENTRY(sie64a) BPOFF BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) .Lsie_skip: + lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce .Lsie_done: # some program checks are suppressing. C code (e.g. do_protection_exception) # will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There # are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. -# Other instructions between sie64a and .Lsie_done should not cause program +# Other instructions between __sie64a and .Lsie_done should not cause program # interrupts. So let's use 3 nops as a landing pad for all possible rewinds. .Lrewind_pad6: nopr 7 @@ -275,8 +279,8 @@ sie_exit: EX_TABLE(.Lrewind_pad4,.Lsie_fault) EX_TABLE(.Lrewind_pad2,.Lsie_fault) EX_TABLE(sie_exit,.Lsie_fault) -ENDPROC(sie64a) -EXPORT_SYMBOL(sie64a) +ENDPROC(__sie64a) +EXPORT_SYMBOL(__sie64a) EXPORT_SYMBOL(sie_exit) #endif @@ -355,7 +359,7 @@ ENTRY(pgm_check_handler) j 3f # -> fault in user space .Lpgm_skip_asce: #if IS_ENABLED(CONFIG_KVM) - # cleanup critical section for program checks in sie64a + # cleanup critical section for program checks in __sie64a OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f SIEEXIT lghi %r10,_PIF_GUEST_FAULT diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 2094f575c5329470ac3fc57adee3136faa99f8cd..2b6091349daa2553c504a362ed1e7339aff8bd62 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -52,6 +52,7 @@ #include #include +#include #include #include #include diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index f9810d2a267c68b37bcb940e59d3896444ef024e..9f18a4af9c13198468602bc56f22342b5e4a373b 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -255,6 +255,13 @@ static int make_secure_pte(pte_t *ptep, unsigned long addr, */ static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm) { + /* + * The misc feature indicates, among other things, that importing a + * shared page from a different protected VM will automatically also + * transfer its ownership.
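The feature check that follows uses test_bit_inv(), i.e. s390's MSB-0 ("inverted") bit numbering, where bit 0 is the most significant bit of the word. A minimal model of that numbering for a 64-bit word (test_bit_inv64() is an illustrative helper, not the kernel primitive):

#include <stdbool.h>
#include <stdint.h>

/* MSB-0 bit test: bit number n maps to LSB-0 position (n ^ 63). */
static inline bool test_bit_inv64(unsigned int n, uint64_t word)
{
        return (word >> (n ^ 63)) & 1;
}

/* Example: bit 0 sits in the most significant position,
 * so test_bit_inv64(0, 1ULL << 63) is true. */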
+ */ + if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications)) + return false; if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED) return false; return atomic_read(&mm->context.protected_count) > 1; diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 88112065d94116f852fdbcc9b1a8d8241a6fca6e..0ee02dae14b2bdc1294d4685825f25f042715fac 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -217,7 +217,7 @@ static int handle_itdb(struct kvm_vcpu *vcpu) return 0; if (current->thread.per_flags & PER_FLAG_NO_TE) return 0; - itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba; + itdb = phys_to_virt(vcpu->arch.sie_block->itdba); rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb)); if (rc) return rc; @@ -409,8 +409,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu) out: if (!cc) { if (kvm_s390_pv_cpu_is_protected(vcpu)) { - memcpy((void *)(sida_origin(vcpu->arch.sie_block)), - sctns, PAGE_SIZE); + memcpy(sida_addr(vcpu->arch.sie_block), sctns, PAGE_SIZE); } else { r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE); if (r) { @@ -464,7 +463,7 @@ static int handle_operexc(struct kvm_vcpu *vcpu) static int handle_pv_spx(struct kvm_vcpu *vcpu) { - u32 pref = *(u32 *)vcpu->arch.sie_block->sidad; + u32 pref = *(u32 *)sida_addr(vcpu->arch.sie_block); kvm_s390_set_prefix(vcpu, pref); trace_kvm_s390_handle_prefix(vcpu, 1, pref); @@ -497,7 +496,7 @@ static int handle_pv_sclp(struct kvm_vcpu *vcpu) static int handle_pv_uvc(struct kvm_vcpu *vcpu) { - struct uv_cb_share *guest_uvcb = (void *)vcpu->arch.sie_block->sidad; + struct uv_cb_share *guest_uvcb = sida_addr(vcpu->arch.sie_block); struct uv_cb_cts uvcb = { .header.cmd = UVC_CMD_UNPIN_PAGE_SHARED, .header.len = sizeof(uvcb), diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index ab569faf0df241a8bbc03a05e9aa28870c22b61c..1dae78deddf28602d32d051dc5ad956be6dd5315 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -314,11 +314,6 @@ static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa) return READ_ONCE(gisa->ipm); } -static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) -{ - clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); -} - static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) { return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); diff --git a/arch/s390/kvm/irq.h b/arch/s390/kvm/irq.h deleted file mode 100644 index 484608c71dd01185b88f9db1a11e3f523bcefebf..0000000000000000000000000000000000000000 --- a/arch/s390/kvm/irq.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * s390 irqchip routines - * - * Copyright IBM Corp. 
2014 - * - * Author(s): Cornelia Huck - */ -#ifndef __KVM_IRQ_H -#define __KVM_IRQ_H - -#include - -static inline int irqchip_in_kernel(struct kvm *kvm) -{ - return 1; -} - -#endif diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index bc491a73815c34803d56060d798f5620446bba8b..e4890e04b2108cdd484fb0288f4ae203797c3c17 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -209,6 +209,14 @@ unsigned int diag9c_forwarding_hz; module_param(diag9c_forwarding_hz, uint, 0644); MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off"); +/* + * allow asynchronous deinit for protected guests; enable by default since + * the feature is opt-in anyway + */ +static int async_destroy = 1; +module_param(async_destroy, int, 0444); +MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests"); + /* * For now we handle at most 16 double words as this is what the s390 base * kernel handles and stores in the prefix page. If we ever need to go beyond @@ -616,6 +624,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_BPB: r = test_facility(82); break; + case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE: + r = async_destroy && is_prot_virt_host(); + break; case KVM_CAP_S390_PROTECTED: r = is_prot_virt_host(); break; @@ -2519,9 +2530,13 @@ static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd, static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd) { + const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM); + void __user *argp = (void __user *)cmd->data; int r = 0; u16 dummy; - void __user *argp = (void __user *)cmd->data; + + if (need_lock) + mutex_lock(&kvm->lock); switch (cmd->cmd) { case KVM_PV_ENABLE: { @@ -2555,6 +2570,31 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd) set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); break; } + case KVM_PV_ASYNC_CLEANUP_PREPARE: + r = -EINVAL; + if (!kvm_s390_pv_is_protected(kvm) || !async_destroy) + break; + + r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); + /* + * If a CPU could not be destroyed, destroy VM will also fail. + * There is no point in trying to destroy it. Instead return + * the rc and rrc from the first CPU that failed destroying. + */ + if (r) + break; + r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc); + + /* no need to block service interrupts any more */ + clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); + break; + case KVM_PV_ASYNC_CLEANUP_PERFORM: + r = -EINVAL; + if (!async_destroy) + break; + /* kvm->lock must not be held; this is asserted inside the function. 
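The need_lock logic earlier in kvm_s390_handle_pv() is a conditional-locking pattern: every subcommand runs under kvm->lock except the long-running asynchronous cleanup, which must be callable while other ioctls take the lock. A compressed userspace sketch of the same shape, with a pthread mutex standing in for the kvm mutex:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;

enum pv_cmd { PV_ENABLE, PV_ASYNC_CLEANUP_PERFORM /* , ... */ };

static int handle_pv(enum pv_cmd cmd)
{
        /* The async perform step may run for a long time and must not
         * serialize against the other subcommands. */
        bool need_lock = (cmd != PV_ASYNC_CLEANUP_PERFORM);
        int r = 0;

        if (need_lock)
                pthread_mutex_lock(&vm_lock);
        /* ... dispatch on cmd ... */
        if (need_lock)
                pthread_mutex_unlock(&vm_lock);
        return r;
}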
*/ + r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc); + break; case KVM_PV_DISABLE: { r = -EINVAL; if (!kvm_s390_pv_is_protected(kvm)) @@ -2568,7 +2608,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd) */ if (r) break; - r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc); + r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc); /* no need to block service interrupts any more */ clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); @@ -2718,6 +2758,9 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd) default: r = -ENOTTY; } + if (need_lock) + mutex_unlock(&kvm->lock); + return r; } @@ -2922,9 +2965,8 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EINVAL; break; } - mutex_lock(&kvm->lock); + /* must be called without kvm->lock */ r = kvm_s390_handle_pv(kvm, &args); - mutex_unlock(&kvm->lock); if (copy_to_user(argp, &args, sizeof(args))) { r = -EFAULT; break; @@ -3243,6 +3285,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm_s390_vsie_init(kvm); if (use_gisa) kvm_s390_gisa_init(kvm); + INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup); + kvm->arch.pv.set_aside = NULL; KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); return 0; @@ -3287,11 +3331,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm) /* * We are already at the end of life and kvm->lock is not taken. * This is ok as the file descriptor is closed by now and nobody - * can mess with the pv state. To avoid lockdep_assert_held from - * complaining we do not use kvm_s390_pv_is_protected. + * can mess with the pv state. */ - if (kvm_s390_pv_get_handle(kvm)) - kvm_s390_pv_deinit_vm(kvm, &rc, &rrc); + kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc); /* * Remove the mmu notifier only when the whole KVM VM is torn down, * and only if one was registered to begin with. 
If the VM is @@ -3344,28 +3386,30 @@ static void sca_del_vcpu(struct kvm_vcpu *vcpu) static void sca_add_vcpu(struct kvm_vcpu *vcpu) { if (!kvm_s390_use_sca_entries()) { - struct bsca_block *sca = vcpu->kvm->arch.sca; + phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca); /* we still need the basic sca for the ipte control */ - vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); - vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; + vcpu->arch.sie_block->scaoh = sca_phys >> 32; + vcpu->arch.sie_block->scaol = sca_phys; return; } read_lock(&vcpu->kvm->arch.sca_lock); if (vcpu->kvm->arch.use_esca) { struct esca_block *sca = vcpu->kvm->arch.sca; + phys_addr_t sca_phys = virt_to_phys(sca); - sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; - vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); - vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU; + sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); + vcpu->arch.sie_block->scaoh = sca_phys >> 32; + vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK; vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); } else { struct bsca_block *sca = vcpu->kvm->arch.sca; + phys_addr_t sca_phys = virt_to_phys(sca); - sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; - vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); - vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; + sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); + vcpu->arch.sie_block->scaoh = sca_phys >> 32; + vcpu->arch.sie_block->scaol = sca_phys; set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); } read_unlock(&vcpu->kvm->arch.sca_lock); @@ -3396,6 +3440,7 @@ static int sca_switch_to_extended(struct kvm *kvm) struct kvm_vcpu *vcpu; unsigned long vcpu_idx; u32 scaol, scaoh; + phys_addr_t new_sca_phys; if (kvm->arch.use_esca) return 0; @@ -3404,8 +3449,9 @@ static int sca_switch_to_extended(struct kvm *kvm) if (!new_sca) return -ENOMEM; - scaoh = (u32)((u64)(new_sca) >> 32); - scaol = (u32)(u64)(new_sca) & ~0x3fU; + new_sca_phys = virt_to_phys(new_sca); + scaoh = new_sca_phys >> 32; + scaol = new_sca_phys & ESCA_SCAOL_MASK; kvm_s390_vcpu_block_all(kvm); write_lock(&kvm->arch.sca_lock); @@ -3625,15 +3671,18 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu) { - free_page(vcpu->arch.sie_block->cbrlo); + free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo)); vcpu->arch.sie_block->cbrlo = 0; } int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) { - vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT); - if (!vcpu->arch.sie_block->cbrlo) + void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); + + if (!cbrlo_page) return -ENOMEM; + + vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page); return 0; } @@ -3643,7 +3692,7 @@ static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->ibc = model->ibc; if (test_kvm_facility(vcpu->kvm, 7)) - vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list; + vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list); } static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu) @@ -3700,9 +3749,8 @@ static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu) VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u", vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id); } - vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx) - | SDNXC; - vcpu->arch.sie_block->riccbd = (unsigned long) 
&vcpu->run->s.regs.riccb; + vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC; + vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb); if (sclp.has_kss) kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS); @@ -3752,7 +3800,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) return -ENOMEM; vcpu->arch.sie_block = &sie_page->sie_block; - vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; + vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb); /* the real guest size will always be smaller than msl */ vcpu->arch.sie_block->mso = 0; @@ -5169,6 +5217,7 @@ static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu, struct kvm_s390_mem_op *mop) { void __user *uaddr = (void __user *)mop->buf; + void *sida_addr; int r = 0; if (mop->flags || !mop->size) @@ -5180,16 +5229,16 @@ static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu, if (!kvm_s390_pv_cpu_is_protected(vcpu)) return -EINVAL; + sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset; + switch (mop->op) { case KVM_S390_MEMOP_SIDA_READ: - if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) + - mop->sida_offset), mop->size)) + if (copy_to_user(uaddr, sida_addr, mop->size)) r = -EFAULT; break; case KVM_S390_MEMOP_SIDA_WRITE: - if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) + - mop->sida_offset), uaddr, mop->size)) + if (copy_from_user(sida_addr, uaddr, mop->size)) r = -EFAULT; break; } @@ -5567,6 +5616,11 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } +bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) +{ + return true; +} + /* Section: memory related */ int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old, diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 4755492dfabc65037608b84f2fdba29b4e8f66f6..d48588c207d8ba87dc3cb5fa37f7b11bbec299e9 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -23,7 +23,8 @@ /* Transactional Memory Execution related macros */ #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE)) #define TDB_FORMAT1 1 -#define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) +#define IS_ITDB_VALID(vcpu) \ + ((*(char *)phys_to_virt((vcpu)->arch.sie_block->itdba) == TDB_FORMAT1)) extern debug_info_t *kvm_s390_dbf; extern debug_info_t *kvm_s390_dbf_uv; @@ -233,7 +234,7 @@ static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots) static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm) { - u32 gd = (u32)(u64)kvm->arch.gisa_int.origin; + u32 gd = virt_to_phys(kvm->arch.gisa_int.origin); if (gd && sclp.has_gisaf) gd |= GISA_FORMAT1; @@ -243,6 +244,9 @@ static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm) /* implemented in pv.c */ int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc); int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc); +int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc); +int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc); +int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc); int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc); int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc); int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc, diff --git a/arch/s390/kvm/pci.c b/arch/s390/kvm/pci.c index ded1af2ddae99b2321a377ae7d6280c490d138a7..ec51e810e381edfe97074f572993a740d0a0ac24 100644 --- a/arch/s390/kvm/pci.c +++ 
b/arch/s390/kvm/pci.c @@ -434,6 +434,7 @@ static void kvm_s390_pci_dev_release(struct zpci_dev *zdev) static int kvm_s390_pci_register_kvm(void *opaque, struct kvm *kvm) { struct zpci_dev *zdev = opaque; + u8 status; int rc; if (!zdev) @@ -486,7 +487,7 @@ static int kvm_s390_pci_register_kvm(void *opaque, struct kvm *kvm) /* Re-register the IOMMU that was already created */ rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, - virt_to_phys(zdev->dma_table)); + virt_to_phys(zdev->dma_table), &status); if (rc) goto clear_gisa; @@ -516,6 +517,7 @@ static void kvm_s390_pci_unregister_kvm(void *opaque) { struct zpci_dev *zdev = opaque; struct kvm *kvm; + u8 status; if (!zdev) return; @@ -554,7 +556,7 @@ static void kvm_s390_pci_unregister_kvm(void *opaque) /* Re-register the IOMMU that was already created */ zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, - virt_to_phys(zdev->dma_table)); + virt_to_phys(zdev->dma_table), &status); out: spin_lock(&kvm->arch.kzdev_list_lock); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 3335fa09b6f1d205a518beb3b84848225966b2e5..9f8a192bd750f9b4150779aabbaed2000c962cab 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -924,8 +924,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) return -EREMOTE; } if (kvm_s390_pv_cpu_is_protected(vcpu)) { - memcpy((void *)sida_origin(vcpu->arch.sie_block), (void *)mem, - PAGE_SIZE); + memcpy(sida_addr(vcpu->arch.sie_block), (void *)mem, PAGE_SIZE); rc = 0; } else { rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE); diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c index 7cb7799a0acb4000157554df8beec25634c6084b..e032ebbf51b976827e3c48934a72e49e90db111e 100644 --- a/arch/s390/kvm/pv.c +++ b/arch/s390/kvm/pv.c @@ -18,6 +18,29 @@ #include #include "kvm-s390.h" +/** + * struct pv_vm_to_be_destroyed - Represents a protected VM that needs to + * be destroyed + * + * @list: list head for the list of leftover VMs + * @old_gmap_table: the gmap table of the leftover protected VM + * @handle: the handle of the leftover protected VM + * @stor_var: pointer to the variable storage of the leftover protected VM + * @stor_base: address of the base storage of the leftover protected VM + * + * Represents a protected VM that is still registered with the Ultravisor, + * but which does not correspond any longer to an active KVM VM. It should + * be destroyed at some point later, either asynchronously or when the + * process terminates. 
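The kdoc above (closed just below) describes a single parking slot: at most one torn-down protected VM may be set aside at a time, and the asynchronous teardown later takes it out of the slot under the lock. A userspace model of that hand-off, with illustrative names:

#include <errno.h>
#include <stddef.h>

struct vm_state {
        void *set_aside; /* NULL while the slot is free */
};

/* Park teardown state; fails if a VM is already parked. */
static int park(struct vm_state *vm, void *state)
{
        if (vm->set_aside)
                return -EINVAL;
        vm->set_aside = state;
        return 0;
}

/* The async teardown claims the slot, emptying it; park() and take()
 * stay consistent when both run under the same lock. */
static void *take(struct vm_state *vm)
{
        void *p = vm->set_aside;

        vm->set_aside = NULL;
        return p;
}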
+ */ +struct pv_vm_to_be_destroyed { + struct list_head list; + unsigned long old_gmap_table; + u64 handle; + void *stor_var; + unsigned long stor_base; +}; + static void kvm_s390_clear_pv_state(struct kvm *kvm) { kvm->arch.pv.handle = 0; @@ -44,7 +67,7 @@ int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc) free_pages(vcpu->arch.pv.stor_base, get_order(uv_info.guest_cpu_stor_len)); - free_page(sida_origin(vcpu->arch.sie_block)); + free_page((unsigned long)sida_addr(vcpu->arch.sie_block)); vcpu->arch.sie_block->pv_handle_cpu = 0; vcpu->arch.sie_block->pv_handle_config = 0; memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv)); @@ -66,6 +89,7 @@ int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc) .header.cmd = UVC_CMD_CREATE_SEC_CPU, .header.len = sizeof(uvcb), }; + void *sida_addr; int cc; if (kvm_s390_pv_cpu_get_handle(vcpu)) @@ -79,16 +103,17 @@ int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc) /* Input */ uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm); uvcb.num = vcpu->arch.sie_block->icpua; - uvcb.state_origin = (u64)vcpu->arch.sie_block; - uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base; + uvcb.state_origin = virt_to_phys(vcpu->arch.sie_block); + uvcb.stor_origin = virt_to_phys((void *)vcpu->arch.pv.stor_base); /* Alloc Secure Instruction Data Area Designation */ - vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); - if (!vcpu->arch.sie_block->sidad) { + sida_addr = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (!sida_addr) { free_pages(vcpu->arch.pv.stor_base, get_order(uv_info.guest_cpu_stor_len)); return -ENOMEM; } + vcpu->arch.sie_block->sidad = virt_to_phys(sida_addr); cc = uv_call(0, (u64)&uvcb); *rc = uvcb.header.rc; @@ -159,23 +184,192 @@ out_err: return -ENOMEM; } -/* this should not fail, but if it does, we must not free the donated memory */ -int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc) +/** + * kvm_s390_pv_dispose_one_leftover - Clean up one leftover protected VM. + * @kvm: the KVM that was associated with this leftover protected VM + * @leftover: details about the leftover protected VM that needs a clean up + * @rc: the RC code of the Destroy Secure Configuration UVC + * @rrc: the RRC code of the Destroy Secure Configuration UVC + * + * Destroy one leftover protected VM. + * On success, kvm->mm->context.protected_count will be decremented atomically + * and all other resources used by the VM will be freed. + * + * Return: 0 in case of success, otherwise 1 + */ +static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm, + struct pv_vm_to_be_destroyed *leftover, + u16 *rc, u16 *rrc) { int cc; - cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), - UVC_CMD_DESTROY_SEC_CONF, rc, rrc); + /* It used the destroy-fast UVC, nothing left to do here */ + if (!leftover->handle) + goto done_fast; + cc = uv_cmd_nodata(leftover->handle, UVC_CMD_DESTROY_SEC_CONF, rc, rrc); + KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x", *rc, *rrc); + WARN_ONCE(cc, "protvirt destroy leftover vm failed rc %x rrc %x", *rc, *rrc); + if (cc) + return cc; + /* + * Intentionally leak unusable memory. If the UVC fails, the memory + * used for the VM and its metadata is permanently unusable. + * This can only happen in case of a serious KVM or hardware bug; it + * is not expected to happen in normal operation. 
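The early return above deliberately leaks the donated storage: once the destroy UVC fails, that memory may never be handed back to the allocator. A compact model of the pattern; firmware_destroy() is a hypothetical stand-in for the Ultravisor call:

#include <stdlib.h>

struct leftover_mem {
        void *stor;           /* donated backing storage */
        unsigned long handle;
};

static int firmware_destroy(unsigned long handle) { (void)handle; return 0; }

static int dispose(struct leftover_mem *l)
{
        if (firmware_destroy(l->handle))
                return 1;  /* intentionally leak l->stor: it is unusable now */
        free(l->stor);     /* only safe to reuse after a successful destroy */
        free(l);
        return 0;
}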
+ */ + free_pages(leftover->stor_base, get_order(uv_info.guest_base_stor_len)); + free_pages(leftover->old_gmap_table, CRST_ALLOC_ORDER); + vfree(leftover->stor_var); +done_fast: + atomic_dec(&kvm->mm->context.protected_count); + return 0; +} + +/** + * kvm_s390_destroy_lower_2g - Destroy the first 2GB of protected guest memory. + * @kvm: the VM whose memory is to be cleared. + * + * Destroy the first 2GB of guest memory, to avoid prefix issues after reboot. + * The CPUs of the protected VM need to be destroyed beforehand. + */ +static void kvm_s390_destroy_lower_2g(struct kvm *kvm) +{ + const unsigned long pages_2g = SZ_2G / PAGE_SIZE; + struct kvm_memory_slot *slot; + unsigned long len; + int srcu_idx; + + srcu_idx = srcu_read_lock(&kvm->srcu); + + /* Take the memslot containing guest absolute address 0 */ + slot = gfn_to_memslot(kvm, 0); + /* Clear all slots or parts thereof that are below 2GB */ + while (slot && slot->base_gfn < pages_2g) { + len = min_t(u64, slot->npages, pages_2g - slot->base_gfn) * PAGE_SIZE; + s390_uv_destroy_range(kvm->mm, slot->userspace_addr, slot->userspace_addr + len); + /* Take the next memslot */ + slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages); + } + + srcu_read_unlock(&kvm->srcu, srcu_idx); +} + +static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc) +{ + struct uv_cb_destroy_fast uvcb = { + .header.cmd = UVC_CMD_DESTROY_SEC_CONF_FAST, + .header.len = sizeof(uvcb), + .handle = kvm_s390_pv_get_handle(kvm), + }; + int cc; + + cc = uv_call_sched(0, (u64)&uvcb); + if (rc) + *rc = uvcb.header.rc; + if (rrc) + *rrc = uvcb.header.rrc; WRITE_ONCE(kvm->arch.gmap->guest_handle, 0); + KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM FAST: rc %x rrc %x", + uvcb.header.rc, uvcb.header.rrc); + WARN_ONCE(cc, "protvirt destroy vm fast failed handle %llx rc %x rrc %x", + kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc); + /* Intended memory leak on "impossible" error */ + if (!cc) + kvm_s390_pv_dealloc_vm(kvm); + return cc ? -EIO : 0; +} + +static inline bool is_destroy_fast_available(void) +{ + return test_bit_inv(BIT_UVC_CMD_DESTROY_SEC_CONF_FAST, uv_info.inst_calls_list); +} + +/** + * kvm_s390_pv_set_aside - Set aside a protected VM for later teardown. + * @kvm: the VM + * @rc: return value for the RC field of the UVCB + * @rrc: return value for the RRC field of the UVCB + * + * Set aside the protected VM for a subsequent teardown. The VM will be able + * to continue immediately as a non-secure VM, and the information needed to + * properly tear down the protected VM is set aside. If another protected VM + * was already set aside without starting its teardown, this function will + * fail. + * The CPUs of the protected VM need to be destroyed beforehand. + * + * Context: kvm->lock needs to be held + * + * Return: 0 in case of success, -EINVAL if another protected VM was already set + * aside, -ENOMEM if the system ran out of memory. + */ +int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc) +{ + struct pv_vm_to_be_destroyed *priv; + int res = 0; + + lockdep_assert_held(&kvm->lock); /* - * if the mm still has a mapping, make all its pages accessible - * before destroying the guest + * If another protected VM was already prepared for teardown, refuse. + * A normal deinitialization has to be performed instead.
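Back in kvm_s390_destroy_lower_2g() above, the per-slot length is the slot size clamped to whatever remains below the 2 GB mark, converted from pages to bytes. The arithmetic in isolation, with illustrative slot values:

#include <stdio.h>

#define PAGE_SZ 4096ULL
#define PAGES_2G (0x80000000ULL / PAGE_SZ)

int main(void)
{
        unsigned long long base_gfn = 0x7ff00; /* slot start, in pages */
        unsigned long long npages = 0x40000;   /* slot size, in pages */

        /* Only the part of the slot below 2G is destroyed. */
        unsigned long long pages = npages < PAGES_2G - base_gfn ?
                                   npages : PAGES_2G - base_gfn;

        printf("len = %#llx bytes\n", pages * PAGE_SZ); /* 0x100000 */
        return 0;
}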
*/ - if (mmget_not_zero(kvm->mm)) { - s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE); - mmput(kvm->mm); + if (kvm->arch.pv.set_aside) + return -EINVAL; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + if (is_destroy_fast_available()) { + res = kvm_s390_pv_deinit_vm_fast(kvm, rc, rrc); + } else { + priv->stor_var = kvm->arch.pv.stor_var; + priv->stor_base = kvm->arch.pv.stor_base; + priv->handle = kvm_s390_pv_get_handle(kvm); + priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table; + WRITE_ONCE(kvm->arch.gmap->guest_handle, 0); + if (s390_replace_asce(kvm->arch.gmap)) + res = -ENOMEM; } + if (res) { + kfree(priv); + return res; + } + + kvm_s390_destroy_lower_2g(kvm); + kvm_s390_clear_pv_state(kvm); + kvm->arch.pv.set_aside = priv; + + *rc = UVC_RC_EXECUTED; + *rrc = 42; + return 0; +} + +/** + * kvm_s390_pv_deinit_vm - Deinitialize the current protected VM + * @kvm: the KVM whose protected VM needs to be deinitialized + * @rc: the RC code of the UVC + * @rrc: the RRC code of the UVC + * + * Deinitialize the current protected VM. This function will destroy and + * cleanup the current protected VM, but it will not cleanup the guest + * memory. This function should only be called when the protected VM has + * just been created and therefore does not have any guest memory, or when + * the caller cleans up the guest memory separately. + * + * This function should not fail, but if it does, the donated memory must + * not be freed. + * + * Context: kvm->lock needs to be held + * + * Return: 0 in case of success, otherwise -EIO + */ +int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc) +{ + int cc; + + cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), + UVC_CMD_DESTROY_SEC_CONF, rc, rrc); + WRITE_ONCE(kvm->arch.gmap->guest_handle, 0); if (!cc) { atomic_dec(&kvm->mm->context.protected_count); kvm_s390_pv_dealloc_vm(kvm); @@ -189,11 +383,137 @@ int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc) return cc ? -EIO : 0; } +/** + * kvm_s390_pv_deinit_cleanup_all - Clean up all protected VMs associated + * with a specific KVM. + * @kvm: the KVM to be cleaned up + * @rc: the RC code of the first failing UVC + * @rrc: the RRC code of the first failing UVC + * + * This function will clean up all protected VMs associated with a KVM. + * This includes the active one, the one prepared for deinitialization with + * kvm_s390_pv_set_aside, and any still pending in the need_cleanup list. + * + * Context: kvm->lock needs to be held unless being called from + * kvm_arch_destroy_vm. 
+ * + * Return: 0 if all VMs are successfully cleaned up, otherwise -EIO + */ +int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc) +{ + struct pv_vm_to_be_destroyed *cur; + bool need_zap = false; + u16 _rc, _rrc; + int cc = 0; + + /* Make sure the counter does not reach 0 before calling s390_uv_destroy_range */ + atomic_inc(&kvm->mm->context.protected_count); + + *rc = 1; + /* If the current VM is protected, destroy it */ + if (kvm_s390_pv_get_handle(kvm)) { + cc = kvm_s390_pv_deinit_vm(kvm, rc, rrc); + need_zap = true; + } + + /* If a previous protected VM was set aside, put it in the need_cleanup list */ + if (kvm->arch.pv.set_aside) { + list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup); + kvm->arch.pv.set_aside = NULL; + } + + /* Cleanup all protected VMs in the need_cleanup list */ + while (!list_empty(&kvm->arch.pv.need_cleanup)) { + cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list); + need_zap = true; + if (kvm_s390_pv_dispose_one_leftover(kvm, cur, &_rc, &_rrc)) { + cc = 1; + /* + * Only return the first error rc and rrc, so make + * sure it is not overwritten. All destroys will + * additionally be reported via KVM_UV_EVENT(). + */ + if (*rc == UVC_RC_EXECUTED) { + *rc = _rc; + *rrc = _rrc; + } + } + list_del(&cur->list); + kfree(cur); + } + + /* + * If the mm still has a mapping, try to mark all its pages as + * accessible. The counter should not reach zero before this + * cleanup has been performed. + */ + if (need_zap && mmget_not_zero(kvm->mm)) { + s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE); + mmput(kvm->mm); + } + + /* Now the counter can safely reach 0 */ + atomic_dec(&kvm->mm->context.protected_count); + return cc ? -EIO : 0; +} + +/** + * kvm_s390_pv_deinit_aside_vm - Tear down a previously set aside protected VM. + * @kvm: the VM previously associated with the protected VM + * @rc: return value for the RC field of the UVCB + * @rrc: return value for the RRC field of the UVCB + * + * Tear down the protected VM that had been previously prepared for teardown + * using kvm_s390_pv_set_aside(). Ideally this should be called by + * userspace asynchronously from a separate thread. + * + * Context: kvm->lock must not be held. + * + * Return: 0 in case of success, -EINVAL if no protected VM had been + * prepared for asynchronous teardown, -EIO in case of other errors. + */ +int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc) +{ + struct pv_vm_to_be_destroyed *p; + int ret = 0; + + lockdep_assert_not_held(&kvm->lock); + mutex_lock(&kvm->lock); + p = kvm->arch.pv.set_aside; + kvm->arch.pv.set_aside = NULL; + mutex_unlock(&kvm->lock); + if (!p) + return -EINVAL; + + /* When a fatal signal is received, stop immediately */ + if (s390_uv_destroy_range_interruptible(kvm->mm, 0, TASK_SIZE_MAX)) + goto done; + if (kvm_s390_pv_dispose_one_leftover(kvm, p, rc, rrc)) + ret = -EIO; + kfree(p); + p = NULL; +done: + /* + * p is not NULL if we aborted because of a fatal signal, in which + * case queue the leftover for later cleanup.
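The tail of kvm_s390_pv_deinit_aside_vm() (closing just below) has a common shape: do the long work interruptibly, and if a fatal signal cuts it short, hand the remainder to a queue that a final, non-interruptible pass will drain. A sketch of that shape with illustrative names; zap_interruptible() stands in for the interruptible zapping step:

#include <stdbool.h>
#include <stddef.h>

struct job {
        struct job *next;
};

static struct job *need_cleanup; /* drained later, cannot be skipped */

static bool zap_interruptible(void) { return false; /* true = aborted */ }
static int finish(struct job *j) { (void)j; return 0; }

static int teardown_async(struct job *j)
{
        if (zap_interruptible()) {
                /* Aborted by a signal: re-queue the leftover for the
                 * final cleanup pass and report success regardless. */
                j->next = need_cleanup;
                need_cleanup = j;
                return 0;
        }
        return finish(j);
}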
+ */ + if (p) { + mutex_lock(&kvm->lock); + list_add(&p->list, &kvm->arch.pv.need_cleanup); + mutex_unlock(&kvm->lock); + /* Did not finish, but pretend things went well */ + *rc = UVC_RC_EXECUTED; + *rrc = 42; + } + return ret; +} + static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription, struct mm_struct *mm) { struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier); u16 dummy; + int r; /* * No locking is needed since this is the last thread of the last user of this @@ -202,7 +522,9 @@ static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription, * unregistered. This means that if this notifier runs, then the * struct kvm is still valid. */ - kvm_s390_cpus_from_pv(kvm, &dummy, &dummy); + r = kvm_s390_cpus_from_pv(kvm, &dummy, &dummy); + if (!r && is_destroy_fast_available() && kvm_s390_pv_get_handle(kvm)) + kvm_s390_pv_deinit_vm_fast(kvm, &dummy, &dummy); } static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = { @@ -226,8 +548,9 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc) uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */ uvcb.guest_stor_len = kvm->arch.pv.guest_len; uvcb.guest_asce = kvm->arch.gmap->asce; - uvcb.guest_sca = (unsigned long)kvm->arch.sca; - uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base; + uvcb.guest_sca = virt_to_phys(kvm->arch.sca); + uvcb.conf_base_stor_origin = + virt_to_phys((void *)kvm->arch.pv.stor_base); uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var; cc = uv_call_sched(0, (u64)&uvcb); diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index ace2541ababd383448ff654f917b66983f3b33a4..b6a0219e470a4a4ba4609c5fb2131a06d9ddc0c7 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -656,7 +656,7 @@ static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa) page = gfn_to_page(kvm, gpa_to_gfn(gpa)); if (is_error_page(page)) return -EINVAL; - *hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK); + *hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK); return 0; } @@ -871,7 +871,7 @@ static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, WARN_ON_ONCE(rc); return 1; } - vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa; + vsie_page->scb_o = phys_to_virt(hpa); return 0; } diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 02d15c8dc92e994692f2322cf3232f85a032f76a..74e1d873dce050fae2c1bc282498371ba3f981bb 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -72,7 +72,7 @@ static struct gmap *gmap_alloc(unsigned long limit) goto out_free; page->index = 0; list_add(&page->lru, &gmap->crst_list); - table = (unsigned long *) page_to_phys(page); + table = page_to_virt(page); crst_table_init(table, etype); gmap->table = table; gmap->asce = atype | _ASCE_TABLE_LENGTH | @@ -311,12 +311,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table, page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER); if (!page) return -ENOMEM; - new = (unsigned long *) page_to_phys(page); + new = page_to_virt(page); crst_table_init(new, init); spin_lock(&gmap->guest_table_lock); if (*table & _REGION_ENTRY_INVALID) { list_add(&page->lru, &gmap->crst_list); - *table = (unsigned long) new | _REGION_ENTRY_LENGTH | + *table = __pa(new) | _REGION_ENTRY_LENGTH | (*table & _REGION_ENTRY_TYPE_MASK); page->index = gaddr; page = NULL; @@ -336,12 +336,11 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table, static unsigned long __gmap_segment_gaddr(unsigned long *entry) { struct page *page; - unsigned 
long offset, mask; + unsigned long offset; offset = (unsigned long) entry / sizeof(unsigned long); offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE; - mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1); - page = virt_to_page((void *)((unsigned long) entry & mask)); + page = pmd_pgtable_page((pmd_t *) entry); return page->index + offset; } @@ -557,7 +556,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr) gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY, gaddr & _REGION1_MASK)) return -ENOMEM; - table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = __va(*table & _REGION_ENTRY_ORIGIN); } if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) { table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT; @@ -565,7 +564,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr) gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY, gaddr & _REGION2_MASK)) return -ENOMEM; - table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = __va(*table & _REGION_ENTRY_ORIGIN); } if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) { table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT; @@ -573,7 +572,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr) gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY, gaddr & _REGION3_MASK)) return -ENOMEM; - table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = __va(*table & _REGION_ENTRY_ORIGIN); } table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT; /* Walk the parent mm page table */ @@ -813,7 +812,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap, break; if (*table & _REGION_ENTRY_INVALID) return NULL; - table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = __va(*table & _REGION_ENTRY_ORIGIN); fallthrough; case _ASCE_TYPE_REGION2: table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT; @@ -821,7 +820,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap, break; if (*table & _REGION_ENTRY_INVALID) return NULL; - table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = __va(*table & _REGION_ENTRY_ORIGIN); fallthrough; case _ASCE_TYPE_REGION3: table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT; @@ -829,7 +828,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap, break; if (*table & _REGION_ENTRY_INVALID) return NULL; - table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = __va(*table & _REGION_ENTRY_ORIGIN); fallthrough; case _ASCE_TYPE_SEGMENT: table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT; @@ -837,7 +836,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap, break; if (*table & _REGION_ENTRY_INVALID) return NULL; - table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); + table = __va(*table & _SEGMENT_ENTRY_ORIGIN); table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT; } return table; @@ -1150,7 +1149,7 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val) if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) { address = pte_val(pte) & PAGE_MASK; address += gaddr & ~PAGE_MASK; - *val = *(unsigned long *) address; + *val = *(unsigned long *)__va(address); set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_YOUNG))); /* Do *NOT* clear the _PAGE_INVALID bit! 
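gmap_read_table() above assembles the host address from two parts, the page frame taken from the PTE and the byte offset taken from the guest address, and only then converts the physical result with __va() for the dereference. The address arithmetic on its own, with illustrative values:

#include <stdio.h>

#define PAGE_SIZE_SIM 4096UL
#define PAGE_MASK_SIM (~(PAGE_SIZE_SIM - 1))

int main(void)
{
        unsigned long pte = 0x12345067UL;   /* frame bits plus flag bits */
        unsigned long gaddr = 0x00007abcUL; /* guest address */

        /* Frame from the PTE, offset from the guest address. */
        unsigned long phys = (pte & PAGE_MASK_SIM) + (gaddr & ~PAGE_MASK_SIM);

        printf("phys = %#lx\n", phys); /* 0x12345abc */
        return 0;
}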
*/ rc = 0; @@ -1335,7 +1334,8 @@ static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr, */ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr) { - unsigned long sto, *ste, *pgt; + unsigned long *ste; + phys_addr_t sto, pgt; struct page *page; BUG_ON(!gmap_is_shadow(sg)); @@ -1343,13 +1343,13 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr) if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN)) return; gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1); - sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT)); + sto = __pa(ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT)); gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr); - pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN); + pgt = *ste & _SEGMENT_ENTRY_ORIGIN; *ste = _SEGMENT_ENTRY_EMPTY; - __gmap_unshadow_pgt(sg, raddr, pgt); + __gmap_unshadow_pgt(sg, raddr, __va(pgt)); /* Free page table */ - page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT); + page = phys_to_page(pgt); list_del(&page->lru); page_table_free_pgste(page); } @@ -1365,19 +1365,19 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr) static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr, unsigned long *sgt) { - unsigned long *pgt; struct page *page; + phys_addr_t pgt; int i; BUG_ON(!gmap_is_shadow(sg)); for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) { if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN)) continue; - pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN); + pgt = sgt[i] & _REGION_ENTRY_ORIGIN; sgt[i] = _SEGMENT_ENTRY_EMPTY; - __gmap_unshadow_pgt(sg, raddr, pgt); + __gmap_unshadow_pgt(sg, raddr, __va(pgt)); /* Free page table */ - page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT); + page = phys_to_page(pgt); list_del(&page->lru); page_table_free_pgste(page); } @@ -1392,7 +1392,8 @@ static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr, */ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr) { - unsigned long r3o, *r3e, *sgt; + unsigned long r3o, *r3e; + phys_addr_t sgt; struct page *page; BUG_ON(!gmap_is_shadow(sg)); @@ -1401,12 +1402,12 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr) return; gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1); r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT)); - gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr); - sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN); + gmap_idte_one(__pa(r3o) | _ASCE_TYPE_REGION3, raddr); + sgt = *r3e & _REGION_ENTRY_ORIGIN; *r3e = _REGION3_ENTRY_EMPTY; - __gmap_unshadow_sgt(sg, raddr, sgt); + __gmap_unshadow_sgt(sg, raddr, __va(sgt)); /* Free segment table */ - page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT); + page = phys_to_page(sgt); list_del(&page->lru); __free_pages(page, CRST_ALLOC_ORDER); } @@ -1422,19 +1423,19 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr) static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr, unsigned long *r3t) { - unsigned long *sgt; struct page *page; + phys_addr_t sgt; int i; BUG_ON(!gmap_is_shadow(sg)); for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) { if (!(r3t[i] & _REGION_ENTRY_ORIGIN)) continue; - sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN); + sgt = r3t[i] & _REGION_ENTRY_ORIGIN; r3t[i] = _REGION3_ENTRY_EMPTY; - __gmap_unshadow_sgt(sg, raddr, sgt); + __gmap_unshadow_sgt(sg, raddr, __va(sgt)); /* Free segment table */ - page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT); + page = phys_to_page(sgt); list_del(&page->lru); __free_pages(page, 
CRST_ALLOC_ORDER); } @@ -1449,7 +1450,8 @@ static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr, */ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr) { - unsigned long r2o, *r2e, *r3t; + unsigned long r2o, *r2e; + phys_addr_t r3t; struct page *page; BUG_ON(!gmap_is_shadow(sg)); @@ -1458,12 +1460,12 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr) return; gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1); r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT)); - gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr); - r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN); + gmap_idte_one(__pa(r2o) | _ASCE_TYPE_REGION2, raddr); + r3t = *r2e & _REGION_ENTRY_ORIGIN; *r2e = _REGION2_ENTRY_EMPTY; - __gmap_unshadow_r3t(sg, raddr, r3t); + __gmap_unshadow_r3t(sg, raddr, __va(r3t)); /* Free region 3 table */ - page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT); + page = phys_to_page(r3t); list_del(&page->lru); __free_pages(page, CRST_ALLOC_ORDER); } @@ -1479,7 +1481,7 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr) static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr, unsigned long *r2t) { - unsigned long *r3t; + phys_addr_t r3t; struct page *page; int i; @@ -1487,11 +1489,11 @@ static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr, for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) { if (!(r2t[i] & _REGION_ENTRY_ORIGIN)) continue; - r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN); + r3t = r2t[i] & _REGION_ENTRY_ORIGIN; r2t[i] = _REGION2_ENTRY_EMPTY; - __gmap_unshadow_r3t(sg, raddr, r3t); + __gmap_unshadow_r3t(sg, raddr, __va(r3t)); /* Free region 3 table */ - page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT); + page = phys_to_page(r3t); list_del(&page->lru); __free_pages(page, CRST_ALLOC_ORDER); } @@ -1506,8 +1508,9 @@ static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr, */ static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr) { - unsigned long r1o, *r1e, *r2t; + unsigned long r1o, *r1e; struct page *page; + phys_addr_t r2t; BUG_ON(!gmap_is_shadow(sg)); r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */ @@ -1515,12 +1518,12 @@ static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr) return; gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1); r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT)); - gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr); - r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN); + gmap_idte_one(__pa(r1o) | _ASCE_TYPE_REGION1, raddr); + r2t = *r1e & _REGION_ENTRY_ORIGIN; *r1e = _REGION1_ENTRY_EMPTY; - __gmap_unshadow_r2t(sg, raddr, r2t); + __gmap_unshadow_r2t(sg, raddr, __va(r2t)); /* Free region 2 table */ - page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT); + page = phys_to_page(r2t); list_del(&page->lru); __free_pages(page, CRST_ALLOC_ORDER); } @@ -1536,22 +1539,23 @@ static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr) static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr, unsigned long *r1t) { - unsigned long asce, *r2t; + unsigned long asce; struct page *page; + phys_addr_t r2t; int i; BUG_ON(!gmap_is_shadow(sg)); - asce = (unsigned long) r1t | _ASCE_TYPE_REGION1; + asce = __pa(r1t) | _ASCE_TYPE_REGION1; for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) { if (!(r1t[i] & _REGION_ENTRY_ORIGIN)) continue; - r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN); - __gmap_unshadow_r2t(sg, raddr, r2t); + r2t = r1t[i] & 
_REGION_ENTRY_ORIGIN; + __gmap_unshadow_r2t(sg, raddr, __va(r2t)); /* Clear entry and flush translation r1t -> r2t */ gmap_idte_one(asce, raddr); r1t[i] = _REGION1_ENTRY_EMPTY; /* Free region 2 table */ - page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT); + page = phys_to_page(r2t); list_del(&page->lru); __free_pages(page, CRST_ALLOC_ORDER); } @@ -1573,7 +1577,7 @@ static void gmap_unshadow(struct gmap *sg) sg->removed = 1; gmap_call_notifier(sg, 0, -1UL); gmap_flush_tlb(sg); - table = (unsigned long *)(sg->asce & _ASCE_ORIGIN); + table = __va(sg->asce & _ASCE_ORIGIN); switch (sg->asce & _ASCE_TYPE_MASK) { case _ASCE_TYPE_REGION1: __gmap_unshadow_r1t(sg, 0, table); @@ -1748,7 +1752,8 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t, int fake) { unsigned long raddr, origin, offset, len; - unsigned long *s_r2t, *table; + unsigned long *table; + phys_addr_t s_r2t; struct page *page; int rc; @@ -1760,7 +1765,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t, page->index = r2t & _REGION_ENTRY_ORIGIN; if (fake) page->index |= GMAP_SHADOW_FAKE_TABLE; - s_r2t = (unsigned long *) page_to_phys(page); + s_r2t = page_to_phys(page); /* Install shadow region second table */ spin_lock(&sg->guest_table_lock); table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */ @@ -1775,9 +1780,9 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t, rc = -EAGAIN; /* Race with shadow */ goto out_free; } - crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY); + crst_table_init(__va(s_r2t), _REGION2_ENTRY_EMPTY); /* mark as invalid as long as the parent table is not protected */ - *table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH | + *table = s_r2t | _REGION_ENTRY_LENGTH | _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID; if (sg->edat_level >= 1) *table |= (r2t & _REGION_ENTRY_PROTECT); @@ -1798,8 +1803,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t, spin_lock(&sg->guest_table_lock); if (!rc) { table = gmap_table_walk(sg, saddr, 4); - if (!table || (*table & _REGION_ENTRY_ORIGIN) != - (unsigned long) s_r2t) + if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_r2t) rc = -EAGAIN; /* Race with unshadow */ else *table &= ~_REGION_ENTRY_INVALID; @@ -1832,7 +1836,8 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, int fake) { unsigned long raddr, origin, offset, len; - unsigned long *s_r3t, *table; + unsigned long *table; + phys_addr_t s_r3t; struct page *page; int rc; @@ -1844,7 +1849,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, page->index = r3t & _REGION_ENTRY_ORIGIN; if (fake) page->index |= GMAP_SHADOW_FAKE_TABLE; - s_r3t = (unsigned long *) page_to_phys(page); + s_r3t = page_to_phys(page); /* Install shadow region second table */ spin_lock(&sg->guest_table_lock); table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */ @@ -1859,9 +1864,9 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, rc = -EAGAIN; /* Race with shadow */ goto out_free; } - crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY); + crst_table_init(__va(s_r3t), _REGION3_ENTRY_EMPTY); /* mark as invalid as long as the parent table is not protected */ - *table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH | + *table = s_r3t | _REGION_ENTRY_LENGTH | _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID; if (sg->edat_level >= 1) *table |= (r3t & _REGION_ENTRY_PROTECT); @@ -1882,8 +1887,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned 
long r3t, spin_lock(&sg->guest_table_lock); if (!rc) { table = gmap_table_walk(sg, saddr, 3); - if (!table || (*table & _REGION_ENTRY_ORIGIN) != - (unsigned long) s_r3t) + if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_r3t) rc = -EAGAIN; /* Race with unshadow */ else *table &= ~_REGION_ENTRY_INVALID; @@ -1916,7 +1920,8 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt, int fake) { unsigned long raddr, origin, offset, len; - unsigned long *s_sgt, *table; + unsigned long *table; + phys_addr_t s_sgt; struct page *page; int rc; @@ -1928,7 +1933,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt, page->index = sgt & _REGION_ENTRY_ORIGIN; if (fake) page->index |= GMAP_SHADOW_FAKE_TABLE; - s_sgt = (unsigned long *) page_to_phys(page); + s_sgt = page_to_phys(page); /* Install shadow region second table */ spin_lock(&sg->guest_table_lock); table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */ @@ -1943,9 +1948,9 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt, rc = -EAGAIN; /* Race with shadow */ goto out_free; } - crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY); + crst_table_init(__va(s_sgt), _SEGMENT_ENTRY_EMPTY); /* mark as invalid as long as the parent table is not protected */ - *table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH | + *table = s_sgt | _REGION_ENTRY_LENGTH | _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID; if (sg->edat_level >= 1) *table |= sgt & _REGION_ENTRY_PROTECT; @@ -1966,8 +1971,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt, spin_lock(&sg->guest_table_lock); if (!rc) { table = gmap_table_walk(sg, saddr, 2); - if (!table || (*table & _REGION_ENTRY_ORIGIN) != - (unsigned long) s_sgt) + if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_sgt) rc = -EAGAIN; /* Race with unshadow */ else *table &= ~_REGION_ENTRY_INVALID; @@ -2040,8 +2044,9 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt, int fake) { unsigned long raddr, origin; - unsigned long *s_pgt, *table; + unsigned long *table; struct page *page; + phys_addr_t s_pgt; int rc; BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE)); @@ -2052,7 +2057,7 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt, page->index = pgt & _SEGMENT_ENTRY_ORIGIN; if (fake) page->index |= GMAP_SHADOW_FAKE_TABLE; - s_pgt = (unsigned long *) page_to_phys(page); + s_pgt = page_to_phys(page); /* Install shadow page table */ spin_lock(&sg->guest_table_lock); table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */ @@ -2085,8 +2090,7 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt, spin_lock(&sg->guest_table_lock); if (!rc) { table = gmap_table_walk(sg, saddr, 1); - if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) != - (unsigned long) s_pgt) + if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) != s_pgt) rc = -EAGAIN; /* Race with unshadow */ else *table &= ~_SEGMENT_ENTRY_INVALID; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 1a25d456d8657664f52149f68e0bdbbd0241a028..30ab55f868f6d87dbbb80c3d81b24105ee00de4c 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -141,25 +141,25 @@ void mark_rodata_ro(void) debug_checkwx(); } -int set_memory_encrypted(unsigned long addr, int numpages) +int set_memory_encrypted(unsigned long vaddr, int numpages) { int i; /* make specified pages unshared, (swiotlb, dma_free) */ for (i = 0; i < numpages; ++i) { - uv_remove_shared(addr); - addr += PAGE_SIZE; + 
uv_remove_shared(virt_to_phys((void *)vaddr)); + vaddr += PAGE_SIZE; } return 0; } -int set_memory_decrypted(unsigned long addr, int numpages) +int set_memory_decrypted(unsigned long vaddr, int numpages) { int i; /* make specified pages shared (swiotlb, dma_alloc) */ for (i = 0; i < numpages; ++i) { - uv_set_shared(addr); - addr += PAGE_SIZE; + uv_set_shared(virt_to_phys((void *)vaddr)); + vaddr += PAGE_SIZE; } return 0; } diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 73cdc55393847adee690f015d1ef0ce37714f97e..ef38b1514c77aedb56b66f004b5c2a4fcfc23e4f 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -116,20 +116,20 @@ EXPORT_SYMBOL_GPL(pci_proc_domain); /* Modify PCI: Register I/O address translation parameters */ int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas, - u64 base, u64 limit, u64 iota) + u64 base, u64 limit, u64 iota, u8 *status) { u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT); struct zpci_fib fib = {0}; - u8 cc, status; + u8 cc; WARN_ON_ONCE(iota & 0x3fff); fib.pba = base; fib.pal = limit; fib.iota = iota | ZPCI_IOTA_RTTO_FLAG; fib.gd = zdev->gisa; - cc = zpci_mod_fc(req, &fib, &status); + cc = zpci_mod_fc(req, &fib, status); if (cc) - zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status); + zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, *status); return cc; } EXPORT_SYMBOL_GPL(zpci_register_ioat); @@ -764,6 +764,7 @@ EXPORT_SYMBOL_GPL(zpci_disable_device); */ int zpci_hot_reset_device(struct zpci_dev *zdev) { + u8 status; int rc; zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh); @@ -787,7 +788,7 @@ int zpci_hot_reset_device(struct zpci_dev *zdev) if (zdev->dma_table) rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, - virt_to_phys(zdev->dma_table)); + virt_to_phys(zdev->dma_table), &status); else rc = zpci_dma_init_device(zdev); if (rc) { @@ -995,7 +996,7 @@ void zpci_release_device(struct kref *kref) break; } zpci_dbg(3, "rem fid:%x\n", zdev->fid); - kfree(zdev); + kfree_rcu(zdev, rcu); } int zpci_report_error(struct pci_dev *pdev, diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 227cf0a62800b4cd353aead646240fe6bc93f90d..ea478d11fbd132221fc4ad56d39eefd9da95d9c1 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -63,37 +63,55 @@ static void dma_free_page_table(void *table) kmem_cache_free(dma_page_table_cache, table); } -static unsigned long *dma_get_seg_table_origin(unsigned long *entry) +static unsigned long *dma_get_seg_table_origin(unsigned long *rtep) { + unsigned long old_rte, rte; unsigned long *sto; - if (reg_entry_isvalid(*entry)) - sto = get_rt_sto(*entry); - else { + rte = READ_ONCE(*rtep); + if (reg_entry_isvalid(rte)) { + sto = get_rt_sto(rte); + } else { sto = dma_alloc_cpu_table(); if (!sto) return NULL; - set_rt_sto(entry, virt_to_phys(sto)); - validate_rt_entry(entry); - entry_clr_protected(entry); + set_rt_sto(&rte, virt_to_phys(sto)); + validate_rt_entry(&rte); + entry_clr_protected(&rte); + + old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte); + if (old_rte != ZPCI_TABLE_INVALID) { + /* Someone else was faster, use theirs */ + dma_free_cpu_table(sto); + sto = get_rt_sto(old_rte); + } } return sto; } -static unsigned long *dma_get_page_table_origin(unsigned long *entry) +static unsigned long *dma_get_page_table_origin(unsigned long *step) { + unsigned long old_ste, ste; unsigned long *pto; - if (reg_entry_isvalid(*entry)) - pto = get_st_pto(*entry); - else { + ste = READ_ONCE(*step); + if (reg_entry_isvalid(ste))
{ + pto = get_st_pto(ste); + } else { pto = dma_alloc_page_table(); if (!pto) return NULL; - set_st_pto(entry, virt_to_phys(pto)); - validate_st_entry(entry); - entry_clr_protected(entry); + set_st_pto(&ste, virt_to_phys(pto)); + validate_st_entry(&ste); + entry_clr_protected(&ste); + + old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste); + if (old_ste != ZPCI_TABLE_INVALID) { + /* Someone else was faster, use theirs */ + dma_free_page_table(pto); + pto = get_st_pto(old_ste); + } } return pto; } @@ -117,19 +135,24 @@ unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr) return &pto[px]; } -void dma_update_cpu_trans(unsigned long *entry, phys_addr_t page_addr, int flags) +void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags) { + unsigned long pte; + + pte = READ_ONCE(*ptep); if (flags & ZPCI_PTE_INVALID) { - invalidate_pt_entry(entry); + invalidate_pt_entry(&pte); } else { - set_pt_pfaa(entry, page_addr); - validate_pt_entry(entry); + set_pt_pfaa(&pte, page_addr); + validate_pt_entry(&pte); } if (flags & ZPCI_TABLE_PROTECTED) - entry_set_protected(entry); + entry_set_protected(&pte); else - entry_clr_protected(entry); + entry_clr_protected(&pte); + + xchg(ptep, pte); } static int __dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa, @@ -137,18 +160,14 @@ static int __dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa, { unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; phys_addr_t page_addr = (pa & PAGE_MASK); - unsigned long irq_flags; unsigned long *entry; int i, rc = 0; if (!nr_pages) return -EINVAL; - spin_lock_irqsave(&zdev->dma_table_lock, irq_flags); - if (!zdev->dma_table) { - rc = -EINVAL; - goto out_unlock; - } + if (!zdev->dma_table) + return -EINVAL; for (i = 0; i < nr_pages; i++) { entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); @@ -173,8 +192,6 @@ undo_cpu_trans: dma_update_cpu_trans(entry, page_addr, flags); } } -out_unlock: - spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); return rc; } @@ -547,6 +564,7 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int zpci_dma_init_device(struct zpci_dev *zdev) { + u8 status; int rc; /* @@ -557,7 +575,6 @@ int zpci_dma_init_device(struct zpci_dev *zdev) WARN_ON(zdev->s390_domain); spin_lock_init(&zdev->iommu_bitmap_lock); - spin_lock_init(&zdev->dma_table_lock); zdev->dma_table = dma_alloc_cpu_table(); if (!zdev->dma_table) { @@ -598,7 +615,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev) } if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, - virt_to_phys(zdev->dma_table))) { + virt_to_phys(zdev->dma_table), &status)) { rc = -EIO; goto free_bitmap; } diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 5f220e903e5abad8342e31125fbfd2a2f5c79811..0665ac0add0b4991fd6ed82d4928dfa86063ffe7 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -24,7 +24,7 @@ config SUPERH select GENERIC_PCI_IOMAP if PCI select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD - select GUP_GET_PTE_LOW_HIGH if X2TLB + select GUP_GET_PXX_LOW_HIGH if X2TLB select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_KGDB select HAVE_ARCH_SECCOMP_FILTER diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig index 492a0a2e0e36707c56526ad6c8938f9f79bdd870..7037320b654ad65b83d0df1d71c24dfc8e73ba59 100644 --- a/arch/sh/configs/landisk_defconfig +++ b/arch/sh/configs/landisk_defconfig @@ -92,7 +92,6 @@ CONFIG_USB_SERIAL_PL2303=m CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m CONFIG_USB_SISUSBVGA=m -CONFIG_USB_SISUSBVGA_CON=y CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h index cdced80a7ffa3944535eeebb3a1fc49764a6bdd9..a889a3a938bab85510dc136d73386c96615bb32d 100644 --- a/arch/sh/include/asm/pgtable-3level.h +++ b/arch/sh/include/asm/pgtable-3level.h @@ -28,9 +28,15 @@ #define pmd_ERROR(e) \ printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) -typedef struct { unsigned long long pmd; } pmd_t; +typedef struct { + struct { + unsigned long pmd_low; + unsigned long pmd_high; + }; + unsigned long long pmd; +} pmd_t; #define pmd_val(x) ((x).pmd) -#define __pmd(x) ((pmd_t) { (x) } ) +#define __pmd(x) ((pmd_t) { .pmd = (x) } ) static inline pmd_t *pud_pgtable(pud_t pud) { diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 6fb9ec54cf9b413ab7c1aa8a99a294f509eb161a..3ce30becf6dfa9a85f85e80a9a7cbd198816b70b 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -92,8 +92,6 @@ static inline unsigned long phys_addr_mask(void) typedef pte_t *pte_addr_t; -#define kern_addr_valid(addr) (1) - #define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) struct vm_area_struct; diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index 8ff549004fac4920037fd7622b723c2062062a9b..5acc05b572e65f4dfe1f1c70a6f08649fd8f6b65 100644 --- a/arch/sparc/include/asm/pgtable_32.h +++ b/arch/sparc/include/asm/pgtable_32.h @@ -368,12 +368,6 @@ __get_iospace (unsigned long addr) } } -extern unsigned long *sparc_valid_addr_bitmap; - -/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ -#define kern_addr_valid(addr) \ - (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap)) - /* * For sparc32&64, the pfn in io_remap_pfn_range() carries in * its high 4 bits. These macros/functions put it there or get it from there. 
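The SuperH hunks above switch GUP_GET_PTE_LOW_HIGH to the renamed GUP_GET_PXX_LOW_HIGH and give pmd_t explicit pmd_low/pmd_high halves. The point of the split is that lockless GUP on a 32-bit kernel cannot load a 64-bit entry atomically, so it reads the two halves with a retry loop. A minimal sketch of that pattern follows; it is modeled on the kernel's generic lockless pte read, simplified, and pmdp_get_lockless_sketch() is a hypothetical name used only for illustration:

/* Illustrative sketch, not part of the patch. */
static pmd_t pmdp_get_lockless_sketch(pmd_t *pmdp)
{
	pmd_t pmd;

	do {
		pmd.pmd_low = READ_ONCE(pmdp->pmd_low);
		smp_rmb();
		pmd.pmd_high = READ_ONCE(pmdp->pmd_high);
		smp_rmb();
		/* Retry if the low half moved while the high half was read. */
	} while (unlikely(pmd.pmd_low != READ_ONCE(pmdp->pmd_low)));

	return pmd;
}

This works because updaters clear the low (valid) half first and write it last, so any torn read is caught by the low-half recheck.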
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index d88e774c8eb49ea778b39c76d1631fdb02dea7e0..9c0ea457bdf05571e816ccb83e18a3321cd6ed03 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -37,8 +37,7 @@ #include "mm_32.h" -unsigned long *sparc_valid_addr_bitmap; -EXPORT_SYMBOL(sparc_valid_addr_bitmap); +static unsigned long *sparc_valid_addr_bitmap; unsigned long phys_base; EXPORT_SYMBOL(phys_base); diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index d6faee23c77dd04b8e4fd62d8541d52a7d36d3f7..04f9db0c31117936733dfe32d87438a78fcb818c 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1667,7 +1667,6 @@ bool kern_addr_valid(unsigned long addr) return pfn_valid(pte_pfn(*pte)); } -EXPORT_SYMBOL(kern_addr_valid); static unsigned long __ref kernel_map_hugepud(unsigned long vstart, unsigned long vend, diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c index 32b3341fe9707ada1069ff0962a594b76c0e11c3..da985e0dc69a5c3722180f9106c69f2696f6f5e6 100644 --- a/arch/um/drivers/random.c +++ b/arch/um/drivers/random.c @@ -82,7 +82,6 @@ static int __init rng_init (void) sigio_broken(random_fd); hwrng.name = RNG_MODULE_NAME; hwrng.read = rng_dev_read; - hwrng.quality = 1024; err = hwrng_register(&hwrng); if (err) { diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c index acb55b302b14c34d7dfe14fb6ace9067d5441308..3ac220dafec4a506ac7c2ed993f2baa58043ba6a 100644 --- a/arch/um/drivers/virt-pci.c +++ b/arch/um/drivers/virt-pci.c @@ -97,7 +97,8 @@ static int um_pci_send_cmd(struct um_pci_device *dev, } buf = get_cpu_var(um_pci_msg_bufs); - memcpy(buf, cmd, cmd_size); + if (buf) + memcpy(buf, cmd, cmd_size); if (posted) { u8 *ncmd = kmalloc(cmd_size + extra_size, GFP_ATOMIC); @@ -182,6 +183,7 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset, struct um_pci_message_buffer *buf; u8 *data; unsigned long ret = ULONG_MAX; + size_t bytes = sizeof(buf->data); if (!dev) return ULONG_MAX; @@ -189,7 +191,8 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset, buf = get_cpu_var(um_pci_msg_bufs); data = buf->data; - memset(buf->data, 0xff, sizeof(buf->data)); + if (buf) + memset(data, 0xff, bytes); switch (size) { case 1: @@ -204,7 +207,7 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset, goto out; } - if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, 8)) + if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, bytes)) goto out; switch (size) { diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h index cb896e6121c8e611641e32c491dd97687bd781eb..8a5032ec231fd969b7e0dc916e76073a9c0105b7 100644 --- a/arch/um/include/asm/pgtable-3level.h +++ b/arch/um/include/asm/pgtable-3level.h @@ -58,11 +58,7 @@ #define pud_populate(mm, pud, pmd) \ set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd))) -#ifdef CONFIG_64BIT -#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval)) -#else #define set_pud(pudptr, pudval) (*(pudptr) = (pudval)) -#endif static inline int pgd_newpage(pgd_t pgd) { @@ -71,11 +67,7 @@ static inline int pgd_newpage(pgd_t pgd) static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; } -#ifdef CONFIG_64BIT -#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval)) -#else #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) -#endif static inline void pud_clear (pud_t *pud) { diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h 
index 66bc3f99d9bef1103fca18fb583c24b3145ced90..4e3052f2671a04c4b7f6d23c0812575942ffe67b 100644 --- a/arch/um/include/asm/pgtable.h +++ b/arch/um/include/asm/pgtable.h @@ -298,8 +298,6 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#define kern_addr_valid(addr) (1) - /* Clear a kernel PTE and flush it from the TLB */ #define kpte_clear_flush(ptep, vaddr) \ do { \ diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 8adf8e89b25588763799f12a676958a3f99e75ae..786b44dc20c98c14bdb704f7b34ae20137212eaa 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -444,6 +444,11 @@ void apply_returns(s32 *start, s32 *end) { } +void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, + s32 *start_cfi, s32 *end_cfi) +{ +} + void apply_alternatives(struct alt_instr *start, struct alt_instr *end) { } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9f8c03b347f9e2a2a7159332268913882e42cc93..3604074a878b8ac91d7ace3a81e0a74103f38829 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -159,7 +159,7 @@ config X86 select GENERIC_TIME_VSYSCALL select GENERIC_GETTIMEOFDAY select GENERIC_VDSO_TIME_NS - select GUP_GET_PTE_LOW_HIGH if X86_PAE + select GUP_GET_PXX_LOW_HIGH if X86_PAE select HARDIRQS_SW_RESEND select HARDLOCKUP_CHECK_TIMESTAMP if X86_64 select HAVE_ACPI_APEI if ACPI @@ -197,6 +197,7 @@ config X86 select HAVE_CONTEXT_TRACKING_USER_OFFSTACK if HAVE_CONTEXT_TRACKING_USER select HAVE_C_RECORDMCOUNT select HAVE_OBJTOOL_MCOUNT if HAVE_OBJTOOL + select HAVE_OBJTOOL_NOP_MCOUNT if HAVE_OBJTOOL_MCOUNT select HAVE_BUILDTIME_MCOUNT_SORT select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS @@ -292,6 +293,8 @@ config X86 select X86_FEATURE_NAMES if PROC_FS select PROC_PID_ARCH_STATUS if PROC_FS select HAVE_ARCH_NODE_DEV_GROUP if X86_SGX + select FUNCTION_ALIGNMENT_16B if X86_64 || X86_ALIGNMENT_16 + select FUNCTION_ALIGNMENT_4B imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI select HAVE_DYNAMIC_FTRACE_NO_PATCHABLE @@ -359,11 +362,6 @@ config ARCH_HAS_CPU_RELAX config ARCH_HIBERNATION_POSSIBLE def_bool y -config ARCH_NR_GPIO - int - default 1024 if X86_64 - default 512 - config ARCH_SUSPEND_POSSIBLE def_bool y @@ -1855,7 +1853,7 @@ config CC_HAS_IBT config X86_KERNEL_IBT prompt "Indirect Branch Tracking" - bool + def_bool y depends on X86_64 && CC_HAS_IBT && HAVE_OBJTOOL # https://github.com/llvm/llvm-project/commit/9d7001eba9c4cb311e03cd8cdc231f9e579f2d0f depends on !LD_IS_LLD || LLD_VERSION >= 140000 @@ -2492,6 +2490,46 @@ config CC_HAS_SLS config CC_HAS_RETURN_THUNK def_bool $(cc-option,-mfunction-return=thunk-extern) +config CC_HAS_ENTRY_PADDING + def_bool $(cc-option,-fpatchable-function-entry=16,16) + +config FUNCTION_PADDING_CFI + int + default 59 if FUNCTION_ALIGNMENT_64B + default 27 if FUNCTION_ALIGNMENT_32B + default 11 if FUNCTION_ALIGNMENT_16B + default 3 if FUNCTION_ALIGNMENT_8B + default 0 + +# Basically: FUNCTION_ALIGNMENT - 5*CFI_CLANG +# except Kconfig can't do arithmetic :/ +config FUNCTION_PADDING_BYTES + int + default FUNCTION_PADDING_CFI if CFI_CLANG + default FUNCTION_ALIGNMENT + +config CALL_PADDING + def_bool n + depends on CC_HAS_ENTRY_PADDING && OBJTOOL + select FUNCTION_ALIGNMENT_16B + +config FINEIBT + def_bool y + depends on X86_KERNEL_IBT && CFI_CLANG && RETPOLINE + select CALL_PADDING + +config HAVE_CALL_THUNKS + def_bool y + depends on CC_HAS_ENTRY_PADDING && RETHUNK && OBJTOOL + +config CALL_THUNKS + def_bool n + 
select CALL_PADDING + +config PREFIX_SYMBOLS + def_bool y + depends on CALL_PADDING && !CFI_CLANG + menuconfig SPECULATION_MITIGATIONS bool "Mitigations for speculative execution vulnerabilities" default y @@ -2543,6 +2581,37 @@ config CPU_UNRET_ENTRY help Compile the kernel with support for the retbleed=unret mitigation. +config CALL_DEPTH_TRACKING + bool "Mitigate RSB underflow with call depth tracking" + depends on CPU_SUP_INTEL && HAVE_CALL_THUNKS + select HAVE_DYNAMIC_FTRACE_NO_PATCHABLE + select CALL_THUNKS + default y + help + Compile the kernel with call depth tracking to mitigate the Intel + SKL Return-Stack-Buffer (RSB) underflow issue. The + mitigation is off by default and needs to be enabled on the + kernel command line via the retbleed=stuff option. For + non-affected systems the overhead of this option is marginal as + the call depth tracking is using run-time generated call thunks + in a compiler generated padding area and call patching. This + increases text size by ~5%. For non-affected systems this space + is unused. On affected SKL systems this results in a significant + performance gain over the IBRS mitigation. + +config CALL_THUNKS_DEBUG + bool "Enable call thunks and call depth tracking debugging" + depends on CALL_DEPTH_TRACKING + select FUNCTION_ALIGNMENT_32B + default n + help + Enable call/ret counters for imbalance detection and build in + a noisy dmesg about callthunks generation and call patching for + troubleshooting. The debug prints need to be enabled on the + kernel command line with 'debug-callthunks'. + Only enable this when you are debugging call thunks, as this + creates a noticeable runtime overhead. If unsure say N. + config CPU_IBPB_ENTRY bool "Enable IBPB on kernel entry" depends on CPU_SUP_AMD && X86_64 diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 415a5d138de47c37c90890a71056775d023f5d7d..9cf07322875a42f914f16505a16003250f02cceb 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -208,10 +208,16 @@ ifdef CONFIG_SLS KBUILD_CFLAGS += -mharden-sls=all endif +ifdef CONFIG_CALL_PADDING +PADDING_CFLAGS := -fpatchable-function-entry=$(CONFIG_FUNCTION_PADDING_BYTES),$(CONFIG_FUNCTION_PADDING_BYTES) +KBUILD_CFLAGS += $(PADDING_CFLAGS) +export PADDING_CFLAGS +endif + KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE) ifdef CONFIG_LTO_CLANG -ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0) +ifeq ($(call test-lt, $(CONFIG_LLD_VERSION), 130000),y) KBUILD_LDFLAGS += -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8) endif endif diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 3dc5db651dd0a1974d24b2a41c1c7490a1db8e7b..1acff356d97a9cd7f39a0955450819b1c67d24c1 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -68,7 +68,7 @@ KBUILD_LDFLAGS += $(call ld-option,--no-ld-generated-unwind-info) # address by the bootloader. LDFLAGS_vmlinux := -pie $(call ld-option, --no-dynamic-linker) ifdef CONFIG_LD_ORPHAN_WARN -LDFLAGS_vmlinux += --orphan-handling=warn +LDFLAGS_vmlinux += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL) endif LDFLAGS_vmlinux += -z noexecstack ifeq ($(CONFIG_LD_IS_BFD),y) diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index d4c4281db635c57f3701b212815273a1e2f5c909..a75712991df3e93647a793f4bf404108ca60496a 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -37,6 +37,14 @@ #include #include "pgtable.h" +/* + * Fix alignment at 16 bytes.
Following CONFIG_FUNCTION_ALIGNMENT will result + * in assembly errors due to trying to move .org backward due to the excessive + * alignment. + */ +#undef __ALIGN +#define __ALIGN .balign 16, 0x90 + /* * Locally defined symbols should be marked hidden: */ diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 3b1d701a4f6c509841998e8b3699096e11ea7dce..3e7a329235bdb8290799a80af50ff296425f21b3 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -107,3 +107,6 @@ quiet_cmd_perlasm = PERLASM $@ cmd_perlasm = $(PERL) $< > $@ $(obj)/%.S: $(src)/%.pl FORCE $(call if_changed,perlasm) + +# Disable GCOV in odd or sensitive code +GCOV_PROFILE_curve25519-x86_64.o := n diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S index b48ddebb474894eecfb695efda35c74c5984c63d..cdf3215ec272ced263a234efab1dc754d240debd 100644 --- a/arch/x86/crypto/aegis128-aesni-asm.S +++ b/arch/x86/crypto/aegis128-aesni-asm.S @@ -7,6 +7,7 @@ */ #include +#include #include #define STATE0 %xmm0 @@ -402,7 +403,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_ad) * void crypto_aegis128_aesni_enc(void *state, unsigned int length, * const void *src, void *dst); */ -SYM_FUNC_START(crypto_aegis128_aesni_enc) +SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc) FRAME_BEGIN cmp $0x10, LEN @@ -499,7 +500,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_enc) * void crypto_aegis128_aesni_enc_tail(void *state, unsigned int length, * const void *src, void *dst); */ -SYM_FUNC_START(crypto_aegis128_aesni_enc_tail) +SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc_tail) FRAME_BEGIN /* load the state: */ @@ -556,7 +557,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_enc_tail) * void crypto_aegis128_aesni_dec(void *state, unsigned int length, * const void *src, void *dst); */ -SYM_FUNC_START(crypto_aegis128_aesni_dec) +SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec) FRAME_BEGIN cmp $0x10, LEN @@ -653,7 +654,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_dec) * void crypto_aegis128_aesni_dec_tail(void *state, unsigned int length, * const void *src, void *dst); */ -SYM_FUNC_START(crypto_aegis128_aesni_dec_tail) +SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail) FRAME_BEGIN /* load the state: */ diff --git a/arch/x86/crypto/aria-aesni-avx-asm_64.S b/arch/x86/crypto/aria-aesni-avx-asm_64.S index c75fd7d015ed8c958819f30d363b59b8b972321c..03ae4cd1d976a2e0fab0e18dc5b597545c4fadd1 100644 --- a/arch/x86/crypto/aria-aesni-avx-asm_64.S +++ b/arch/x86/crypto/aria-aesni-avx-asm_64.S @@ -7,6 +7,7 @@ */ #include +#include #include /* struct aria_ctx: */ @@ -913,7 +914,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_crypt_16way) RET; SYM_FUNC_END(__aria_aesni_avx_crypt_16way) -SYM_FUNC_START(aria_aesni_avx_encrypt_16way) +SYM_TYPED_FUNC_START(aria_aesni_avx_encrypt_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -938,7 +939,7 @@ SYM_FUNC_START(aria_aesni_avx_encrypt_16way) RET; SYM_FUNC_END(aria_aesni_avx_encrypt_16way) -SYM_FUNC_START(aria_aesni_avx_decrypt_16way) +SYM_TYPED_FUNC_START(aria_aesni_avx_decrypt_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -1039,7 +1040,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_ctr_gen_keystream_16way) RET; SYM_FUNC_END(__aria_aesni_avx_ctr_gen_keystream_16way) -SYM_FUNC_START(aria_aesni_avx_ctr_crypt_16way) +SYM_TYPED_FUNC_START(aria_aesni_avx_ctr_crypt_16way) /* input: * %rdi: ctx * %rsi: dst @@ -1208,7 +1209,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way) RET; SYM_FUNC_END(__aria_aesni_avx_gfni_crypt_16way) -SYM_FUNC_START(aria_aesni_avx_gfni_encrypt_16way) 
+SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_encrypt_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -1233,7 +1234,7 @@ SYM_FUNC_START(aria_aesni_avx_gfni_encrypt_16way) RET; SYM_FUNC_END(aria_aesni_avx_gfni_encrypt_16way) -SYM_FUNC_START(aria_aesni_avx_gfni_decrypt_16way) +SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_decrypt_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -1258,7 +1259,7 @@ SYM_FUNC_START(aria_aesni_avx_gfni_decrypt_16way) RET; SYM_FUNC_END(aria_aesni_avx_gfni_decrypt_16way) -SYM_FUNC_START(aria_aesni_avx_gfni_ctr_crypt_16way) +SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_ctr_crypt_16way) /* input: * %rdi: ctx * %rsi: dst diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S index 2e1658ddbe1a903b05a07f2cbcece4a0ba50d238..4a30618281ec2e9e85af99f0d0e1ad6ec3dcaa72 100644 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S @@ -712,7 +712,6 @@ SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) .text -.align 8 SYM_FUNC_START_LOCAL(__camellia_enc_blk16) /* input: * %rdi: ctx, CTX @@ -799,7 +798,6 @@ SYM_FUNC_START_LOCAL(__camellia_enc_blk16) jmp .Lenc_done; SYM_FUNC_END(__camellia_enc_blk16) -.align 8 SYM_FUNC_START_LOCAL(__camellia_dec_blk16) /* input: * %rdi: ctx, CTX diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index 0e4e9abbf4de365d8b979822abde3dab53e6163f..deaf62aa73a6b09cb002cb99562438206f43ef43 100644 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S @@ -221,7 +221,6 @@ * Size optimization... with inlined roundsm32 binary would be over 5 times * larger and would only marginally faster. */ -.align 8 SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, @@ -229,7 +228,6 @@ SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_c RET; SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) -.align 8 SYM_FUNC_START_LOCAL(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3, %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11, @@ -748,7 +746,6 @@ SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) .text -.align 8 SYM_FUNC_START_LOCAL(__camellia_enc_blk32) /* input: * %rdi: ctx, CTX @@ -835,7 +832,6 @@ SYM_FUNC_START_LOCAL(__camellia_enc_blk32) jmp .Lenc_done; SYM_FUNC_END(__camellia_enc_blk32) -.align 8 SYM_FUNC_START_LOCAL(__camellia_dec_blk32) /* input: * %rdi: ctx, CTX diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S index b258af420c92c7739fd59293585bed169bba11cd..0326a01503c3a554bf89482ed158eb7c8719c9ce 100644 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S @@ -208,7 +208,6 @@ .text -.align 16 SYM_FUNC_START_LOCAL(__cast5_enc_blk16) /* input: * %rdi: ctx @@ -282,7 +281,6 @@ SYM_FUNC_START_LOCAL(__cast5_enc_blk16) RET; SYM_FUNC_END(__cast5_enc_blk16) -.align 16 SYM_FUNC_START_LOCAL(__cast5_dec_blk16) /* input: * %rdi: ctx diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S index 721474abfb719003625b68d4dd0c54b1cd9f14bf..5286db5b8165e63244d305304fe9286ba5dd8fc3 100644 --- a/arch/x86/crypto/crct10dif-pcl-asm_64.S +++ 
b/arch/x86/crypto/crct10dif-pcl-asm_64.S @@ -94,7 +94,6 @@ # # Assumes len >= 16. # -.align 16 SYM_FUNC_START(crc_t10dif_pcl) movdqa .Lbswap_mask(%rip), BSWAP_MASK diff --git a/arch/x86/crypto/nh-avx2-x86_64.S b/arch/x86/crypto/nh-avx2-x86_64.S index 6a0b15e7196a8d711252482e725e5da1e3bc7d18..ef73a3ab87263e4637e5fcbbe92a11632c83fe42 100644 --- a/arch/x86/crypto/nh-avx2-x86_64.S +++ b/arch/x86/crypto/nh-avx2-x86_64.S @@ -8,6 +8,7 @@ */ #include +#include #define PASS0_SUMS %ymm0 #define PASS1_SUMS %ymm1 @@ -65,11 +66,11 @@ /* * void nh_avx2(const u32 *key, const u8 *message, size_t message_len, - * u8 hash[NH_HASH_BYTES]) + * __le64 hash[NH_NUM_PASSES]) * * It's guaranteed that message_len % 16 == 0. */ -SYM_FUNC_START(nh_avx2) +SYM_TYPED_FUNC_START(nh_avx2) vmovdqu 0x00(KEY), K0 vmovdqu 0x10(KEY), K1 diff --git a/arch/x86/crypto/nh-sse2-x86_64.S b/arch/x86/crypto/nh-sse2-x86_64.S index 34c567bbcb4faa9f570b648330cb3593f67efcd1..75fb994b6d177eb3f4f8fdea39182aec2cca6e1e 100644 --- a/arch/x86/crypto/nh-sse2-x86_64.S +++ b/arch/x86/crypto/nh-sse2-x86_64.S @@ -8,6 +8,7 @@ */ #include +#include #define PASS0_SUMS %xmm0 #define PASS1_SUMS %xmm1 @@ -67,11 +68,11 @@ /* * void nh_sse2(const u32 *key, const u8 *message, size_t message_len, - * u8 hash[NH_HASH_BYTES]) + * __le64 hash[NH_NUM_PASSES]) * * It's guaranteed that message_len % 16 == 0. */ -SYM_FUNC_START(nh_sse2) +SYM_TYPED_FUNC_START(nh_sse2) movdqu 0x00(KEY), K0 movdqu 0x10(KEY), K1 diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c index 8ea5ab0f1ca74bfa041e607ccc0d790d6166c42b..46b036204ed918dc047e51f5448941a546484418 100644 --- a/arch/x86/crypto/nhpoly1305-avx2-glue.c +++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c @@ -14,14 +14,7 @@ #include asmlinkage void nh_avx2(const u32 *key, const u8 *message, size_t message_len, - u8 hash[NH_HASH_BYTES]); - -/* wrapper to avoid indirect call to assembly, which doesn't work with CFI */ -static void _nh_avx2(const u32 *key, const u8 *message, size_t message_len, - __le64 hash[NH_NUM_PASSES]) -{ - nh_avx2(key, message, message_len, (u8 *)hash); -} + __le64 hash[NH_NUM_PASSES]); static int nhpoly1305_avx2_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) @@ -33,7 +26,7 @@ static int nhpoly1305_avx2_update(struct shash_desc *desc, unsigned int n = min_t(unsigned int, srclen, SZ_4K); kernel_fpu_begin(); - crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2); + crypto_nhpoly1305_update_helper(desc, src, n, nh_avx2); kernel_fpu_end(); src += n; srclen -= n; diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c index 2b353d42ed13f5c6bd58d02c747ae5daf2e51e75..4a4970d751076826a5ae52293c00ce1c128de52d 100644 --- a/arch/x86/crypto/nhpoly1305-sse2-glue.c +++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c @@ -14,14 +14,7 @@ #include asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len, - u8 hash[NH_HASH_BYTES]); - -/* wrapper to avoid indirect call to assembly, which doesn't work with CFI */ -static void _nh_sse2(const u32 *key, const u8 *message, size_t message_len, - __le64 hash[NH_NUM_PASSES]) -{ - nh_sse2(key, message, message_len, (u8 *)hash); -} + __le64 hash[NH_NUM_PASSES]); static int nhpoly1305_sse2_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) @@ -33,7 +26,7 @@ static int nhpoly1305_sse2_update(struct shash_desc *desc, unsigned int n = min_t(unsigned int, srclen, SZ_4K); kernel_fpu_begin(); - crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2); + 
crypto_nhpoly1305_update_helper(desc, src, n, nh_sse2); kernel_fpu_end(); src += n; srclen -= n; diff --git a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl index 2077ce7a5647933478727ea1df764af5335ba62a..b9abcd79c1f43b89de38e57a888b22dcc5150b63 100644 --- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl +++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl @@ -108,7 +108,6 @@ if (!$kernel) { sub declare_function() { my ($name, $align, $nargs) = @_; if($kernel) { - $code .= ".align $align\n"; $code .= "SYM_FUNC_START($name)\n"; $code .= ".L$name:\n"; } else { diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S index 82f2313f512b8664a31d6744bd3225591eaaaca6..97e2836218518029fb5649cf253136b433ad1f8e 100644 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S @@ -550,7 +550,6 @@ #define write_blocks(x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) -.align 8 SYM_FUNC_START_LOCAL(__serpent_enc_blk8_avx) /* input: * %rdi: ctx, CTX @@ -604,7 +603,6 @@ SYM_FUNC_START_LOCAL(__serpent_enc_blk8_avx) RET; SYM_FUNC_END(__serpent_enc_blk8_avx) -.align 8 SYM_FUNC_START_LOCAL(__serpent_dec_blk8_avx) /* input: * %rdi: ctx, CTX diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S index 8ea34c9b9316021609302bf353ce6318410399ac..6d60c50593a9b81c67e25a3b7ddf9e34001a639a 100644 --- a/arch/x86/crypto/serpent-avx2-asm_64.S +++ b/arch/x86/crypto/serpent-avx2-asm_64.S @@ -550,7 +550,6 @@ #define write_blocks(x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) -.align 8 SYM_FUNC_START_LOCAL(__serpent_enc_blk16) /* input: * %rdi: ctx, CTX @@ -604,7 +603,6 @@ SYM_FUNC_START_LOCAL(__serpent_enc_blk16) RET; SYM_FUNC_END(__serpent_enc_blk16) -.align 8 SYM_FUNC_START_LOCAL(__serpent_dec_blk16) /* input: * %rdi: ctx, CTX diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S index 2f94ec0e763bfefb088b7558b34c4cfefe177757..cade913d48822019fbf216144670ba86de964c41 100644 --- a/arch/x86/crypto/sha1_ni_asm.S +++ b/arch/x86/crypto/sha1_ni_asm.S @@ -54,6 +54,7 @@ */ #include +#include #define DIGEST_PTR %rdi /* 1st arg */ #define DATA_PTR %rsi /* 2nd arg */ @@ -92,8 +93,7 @@ * numBlocks: Number of blocks to process */ .text -.align 32 -SYM_FUNC_START(sha1_ni_transform) +SYM_TYPED_FUNC_START(sha1_ni_transform) push %rbp mov %rsp, %rbp sub $FRAME_SIZE, %rsp diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S index 263f916362e025ead9d059c81017e3974be263ae..f54988c80eb405126de5e56ad87e747663d5d563 100644 --- a/arch/x86/crypto/sha1_ssse3_asm.S +++ b/arch/x86/crypto/sha1_ssse3_asm.S @@ -25,6 +25,7 @@ */ #include +#include #define CTX %rdi // arg1 #define BUF %rsi // arg2 @@ -67,7 +68,7 @@ * param: function's name */ .macro SHA1_VECTOR_ASM name - SYM_FUNC_START(\name) + SYM_TYPED_FUNC_START(\name) push %rbx push %r12 diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S index 3baa1ec390974a86064819234f66c199af1169ba..5555b5d5215a449eff7624f1505829036b917139 100644 --- a/arch/x86/crypto/sha256-avx-asm.S +++ b/arch/x86/crypto/sha256-avx-asm.S @@ -48,6 +48,7 @@ ######################################################################## #include +#include ## assume buffers not aligned #define VMOVDQ vmovdqu @@ -346,8 +347,7 @@ a = TMP_ ## arg 3 : Num blocks ######################################################################## .text 
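# The SYM_TYPED_FUNC_START conversions throughout these crypto files serve
# one purpose: with kCFI (CONFIG_CFI_CLANG), an indirect call verifies a
# type hash placed just before the callee's entry point, and plain
# SYM_FUNC_START emits no such hash -- which is why the C glue needed the
# now-deleted _nh_avx2()/_nh_sse2() wrappers to avoid calling assembly
# through a function pointer. A sketch of the pattern, assuming the kCFI
# scheme and SYM_TYPED_FUNC_START from linux/cfi_types.h (the checking
# sequence itself is compiler-generated):
#
#	SYM_TYPED_FUNC_START(some_asm_transform)	# emits the type hash
#		...					# function body
#		RET
#	SYM_FUNC_END(some_asm_transform)
#
# after which the asm routine can be stored in, and called through, a C
# function pointer without tripping the CFI check.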
-SYM_FUNC_START(sha256_transform_avx) -.align 32 +SYM_TYPED_FUNC_START(sha256_transform_avx) pushq %rbx pushq %r12 pushq %r13 diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S index 9bcdbc47b8b4beeee9f8caaf7a825cd09bea8311..3eada94168526665bbe79920886d70ace9938db0 100644 --- a/arch/x86/crypto/sha256-avx2-asm.S +++ b/arch/x86/crypto/sha256-avx2-asm.S @@ -49,6 +49,7 @@ ######################################################################## #include +#include ## assume buffers not aligned #define VMOVDQ vmovdqu @@ -523,8 +524,7 @@ STACK_SIZE = _CTX + _CTX_SIZE ## arg 3 : Num blocks ######################################################################## .text -SYM_FUNC_START(sha256_transform_rorx) -.align 32 +SYM_TYPED_FUNC_START(sha256_transform_rorx) pushq %rbx pushq %r12 pushq %r13 diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S index c4a5db612c3276380d914bbe65170811ddc1e7d1..959288eecc6898917164bdbe9d2ba5896ad8cebf 100644 --- a/arch/x86/crypto/sha256-ssse3-asm.S +++ b/arch/x86/crypto/sha256-ssse3-asm.S @@ -47,6 +47,7 @@ ######################################################################## #include +#include ## assume buffers not aligned #define MOVDQ movdqu @@ -355,8 +356,7 @@ a = TMP_ ## arg 3 : Num blocks ######################################################################## .text -SYM_FUNC_START(sha256_transform_ssse3) -.align 32 +SYM_TYPED_FUNC_START(sha256_transform_ssse3) pushq %rbx pushq %r12 pushq %r13 diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S index 94d50dd27cb532edb5c89fd7e939372fa275eb7c..537b6dcd7ed80ef1f285019e87f27cdba0634e58 100644 --- a/arch/x86/crypto/sha256_ni_asm.S +++ b/arch/x86/crypto/sha256_ni_asm.S @@ -54,6 +54,7 @@ */ #include +#include #define DIGEST_PTR %rdi /* 1st arg */ #define DATA_PTR %rsi /* 2nd arg */ @@ -96,8 +97,7 @@ */ .text -.align 32 -SYM_FUNC_START(sha256_ni_transform) +SYM_TYPED_FUNC_START(sha256_ni_transform) shl $6, NUM_BLKS /* convert to bytes */ jz .Ldone_hash diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S index 1fefe6dd3a9e2f90de0deff603b04bafa08dc5c2..b0984f19fdb408e952773793fa0244f2a50ff8c9 100644 --- a/arch/x86/crypto/sha512-avx-asm.S +++ b/arch/x86/crypto/sha512-avx-asm.S @@ -48,6 +48,7 @@ ######################################################################## #include +#include .text @@ -273,7 +274,7 @@ frame_size = frame_WK + WK_SIZE # of SHA512 message blocks. # "blocks" is the message length in SHA512 blocks ######################################################################## -SYM_FUNC_START(sha512_transform_avx) +SYM_TYPED_FUNC_START(sha512_transform_avx) test msglen, msglen je nowork diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S index 5cdaab7d6901544a868f95bb0cf894cb8bbe8f3a..b1ca99055ef994f5301a53b2e1ccfc7754e7f03e 100644 --- a/arch/x86/crypto/sha512-avx2-asm.S +++ b/arch/x86/crypto/sha512-avx2-asm.S @@ -50,6 +50,7 @@ ######################################################################## #include +#include .text @@ -565,7 +566,7 @@ frame_size = frame_CTX + CTX_SIZE # of SHA512 message blocks. 
# "blocks" is the message length in SHA512 blocks ######################################################################## -SYM_FUNC_START(sha512_transform_rorx) +SYM_TYPED_FUNC_START(sha512_transform_rorx) # Save GPRs push %rbx push %r12 diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S index b84c22e06c5f7987f8b458810b23e96127f6e7b0..c06afb5270e5f5789ba0d45c948a5f68ac4235f3 100644 --- a/arch/x86/crypto/sha512-ssse3-asm.S +++ b/arch/x86/crypto/sha512-ssse3-asm.S @@ -48,6 +48,7 @@ ######################################################################## #include +#include .text @@ -274,7 +275,7 @@ frame_size = frame_WK + WK_SIZE # of SHA512 message blocks. # "blocks" is the message length in SHA512 blocks. ######################################################################## -SYM_FUNC_START(sha512_transform_ssse3) +SYM_TYPED_FUNC_START(sha512_transform_ssse3) test msglen, msglen je nowork diff --git a/arch/x86/crypto/sm3-avx-asm_64.S b/arch/x86/crypto/sm3-avx-asm_64.S index b12b9efb5ec5142dea9da76087e1dd84625aa96a..503bab450a9157200bc752dceaf03b9bbd020886 100644 --- a/arch/x86/crypto/sm3-avx-asm_64.S +++ b/arch/x86/crypto/sm3-avx-asm_64.S @@ -12,6 +12,7 @@ */ #include +#include #include /* Context structure */ @@ -327,8 +328,7 @@ * void sm3_transform_avx(struct sm3_state *state, * const u8 *data, int nblocks); */ -.align 16 -SYM_FUNC_START(sm3_transform_avx) +SYM_TYPED_FUNC_START(sm3_transform_avx) /* input: * %rdi: ctx, CTX * %rsi: data (64*nblks bytes) diff --git a/arch/x86/crypto/sm4-aesni-avx-asm_64.S b/arch/x86/crypto/sm4-aesni-avx-asm_64.S index 4767ab61ff489bebf206cd2a6ddea678b3d2304b..e2668d2fe6ce181650807a8fa5abef864b76bd1b 100644 --- a/arch/x86/crypto/sm4-aesni-avx-asm_64.S +++ b/arch/x86/crypto/sm4-aesni-avx-asm_64.S @@ -14,6 +14,7 @@ */ #include +#include #include #define rRIP (%rip) @@ -139,13 +140,11 @@ .text -.align 16 /* * void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst, * const u8 *src, int nblocks) */ -.align 8 SYM_FUNC_START(sm4_aesni_avx_crypt4) /* input: * %rdi: round key array, CTX @@ -249,7 +248,6 @@ SYM_FUNC_START(sm4_aesni_avx_crypt4) RET; SYM_FUNC_END(sm4_aesni_avx_crypt4) -.align 8 SYM_FUNC_START_LOCAL(__sm4_crypt_blk8) /* input: * %rdi: round key array, CTX @@ -363,7 +361,6 @@ SYM_FUNC_END(__sm4_crypt_blk8) * void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst, * const u8 *src, int nblocks) */ -.align 8 SYM_FUNC_START(sm4_aesni_avx_crypt8) /* input: * %rdi: round key array, CTX @@ -419,8 +416,7 @@ SYM_FUNC_END(sm4_aesni_avx_crypt8) * void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst, * const u8 *src, u8 *iv) */ -.align 8 -SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8) +SYM_TYPED_FUNC_START(sm4_aesni_avx_ctr_enc_blk8) /* input: * %rdi: round key array, CTX * %rsi: dst (8 blocks) @@ -494,8 +490,7 @@ SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8) * void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst, * const u8 *src, u8 *iv) */ -.align 8 -SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8) +SYM_TYPED_FUNC_START(sm4_aesni_avx_cbc_dec_blk8) /* input: * %rdi: round key array, CTX * %rsi: dst (8 blocks) @@ -544,8 +539,7 @@ SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8) * void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst, * const u8 *src, u8 *iv) */ -.align 8 -SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8) +SYM_TYPED_FUNC_START(sm4_aesni_avx_cfb_dec_blk8) /* input: * %rdi: round key array, CTX * %rsi: dst (8 blocks) diff --git a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S index 
4732fe8bb65b6579e2ce8a4489dd7df8e89be74c..98ede94592877c4bb9f2b40614eb34f2055246aa 100644 --- a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S @@ -14,6 +14,7 @@ */ #include +#include #include #define rRIP (%rip) @@ -153,9 +154,6 @@ .long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef .text -.align 16 - -.align 8 SYM_FUNC_START_LOCAL(__sm4_crypt_blk16) /* input: * %rdi: round key array, CTX @@ -281,8 +279,7 @@ SYM_FUNC_END(__sm4_crypt_blk16) * void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst, * const u8 *src, u8 *iv) */ -.align 8 -SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16) +SYM_TYPED_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16) /* input: * %rdi: round key array, CTX * %rsi: dst (16 blocks) @@ -394,8 +391,7 @@ SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16) * void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst, * const u8 *src, u8 *iv) */ -.align 8 -SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16) +SYM_TYPED_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16) /* input: * %rdi: round key array, CTX * %rsi: dst (16 blocks) @@ -448,8 +444,7 @@ SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16) * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst, * const u8 *src, u8 *iv) */ -.align 8 -SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16) +SYM_TYPED_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16) /* input: * %rdi: round key array, CTX * %rsi: dst (16 blocks) diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S index 31f9b2ec3857d4d083a0d7b895cbf93c709cc134..12fde271cd3f6b2f20e80aff87c73658f9a7f5d3 100644 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S @@ -228,7 +228,6 @@ vpxor x2, wkey, x2; \ vpxor x3, wkey, x3; -.align 8 SYM_FUNC_START_LOCAL(__twofish_enc_blk8) /* input: * %rdi: ctx, CTX @@ -270,7 +269,6 @@ SYM_FUNC_START_LOCAL(__twofish_enc_blk8) RET; SYM_FUNC_END(__twofish_enc_blk8) -.align 8 SYM_FUNC_START_LOCAL(__twofish_dec_blk8) /* input: * %rdi: ctx, CTX diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c index f9c4adc274040a605718890689e018e81affd422..0614beece27932df8a1af3fd8a7f64e99b6d0db2 100644 --- a/arch/x86/crypto/twofish_glue.c +++ b/arch/x86/crypto/twofish_glue.c @@ -38,8 +38,8 @@ * Third Edition. */ +#include #include -#include #include #include #include diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index e309e7156038935ab92cd025616c5c765e3a5214..91397f58ac3008556fc0cfbb2437eee45da706ff 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -1181,7 +1181,7 @@ SYM_CODE_START(asm_exc_nmi) * is using the thread stack right now, so it's safe for us to use it. */ movl %esp, %ebx - movl PER_CPU_VAR(cpu_current_top_of_stack), %esp + movl PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %esp call exc_nmi movl %ebx, %esp @@ -1243,7 +1243,7 @@ SYM_CODE_START(rewind_stack_and_make_dead) /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp - movl PER_CPU_VAR(cpu_current_top_of_stack), %esi + movl PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %esi leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp call make_task_dead diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 9953d966d12443115bc7ecb3d5c083c9b5c8847f..15739a2c09833ba30ac8b2eb6a9be9e93608f7ea 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -92,7 +92,7 @@ SYM_CODE_START(entry_SYSCALL_64) /* tss.sp2 is scratch space. 
*/ movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2) SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp - movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp + movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL) ANNOTATE_NOENDBR @@ -252,7 +252,7 @@ SYM_FUNC_START(__switch_to_asm) #ifdef CONFIG_STACKPROTECTOR movq TASK_stack_canary(%rsi), %rbx - movq %rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset + movq %rbx, PER_CPU_VAR(fixed_percpu_data) + FIXED_stack_canary #endif /* @@ -284,9 +284,11 @@ SYM_FUNC_END(__switch_to_asm) * r12: kernel thread arg */ .pushsection .text, "ax" -SYM_CODE_START(ret_from_fork) + __FUNC_ALIGN +SYM_CODE_START_NOALIGN(ret_from_fork) UNWIND_HINT_EMPTY ANNOTATE_NOENDBR // copy_thread + CALL_DEPTH_ACCOUNT movq %rax, %rdi call schedule_tail /* rdi: 'prev' task parameter */ @@ -326,11 +328,12 @@ SYM_CODE_END(ret_from_fork) #endif .endm -SYM_CODE_START_LOCAL(xen_error_entry) +SYM_CODE_START(xen_error_entry) + ANNOTATE_NOENDBR UNWIND_HINT_FUNC PUSH_AND_CLEAR_REGS save_ret=1 ENCODE_FRAME_POINTER 8 - UNTRAIN_RET + UNTRAIN_RET_FROM_CALL RET SYM_CODE_END(xen_error_entry) @@ -600,13 +603,13 @@ SYM_CODE_END(\asmsym) * shared between 32 and 64 bit and emit the __irqentry_text_* markers * so the stacktrace boundary checks work. */ - .align 16 + __ALIGN .globl __irqentry_text_start __irqentry_text_start: #include - .align 16 + __ALIGN .globl __irqentry_text_end __irqentry_text_end: ANNOTATE_NOENDBR @@ -828,7 +831,8 @@ EXPORT_SYMBOL(asm_load_gs_index) * * C calling convention: exc_xen_hypervisor_callback(struct *pt_regs) */ -SYM_CODE_START_LOCAL(exc_xen_hypervisor_callback) + __FUNC_ALIGN +SYM_CODE_START_LOCAL_NOALIGN(exc_xen_hypervisor_callback) /* * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will @@ -856,7 +860,8 @@ SYM_CODE_END(exc_xen_hypervisor_callback) * We distinguish between categories by comparing each saved segment register * with its current contents: any discrepancy means we in category 1. */ -SYM_CODE_START(xen_failsafe_callback) + __FUNC_ALIGN +SYM_CODE_START_NOALIGN(xen_failsafe_callback) UNWIND_HINT_EMPTY ENDBR movl %ds, %ecx @@ -903,7 +908,8 @@ SYM_CODE_END(xen_failsafe_callback) * R14 - old CR3 * R15 - old SPEC_CTRL */ -SYM_CODE_START_LOCAL(paranoid_entry) +SYM_CODE_START(paranoid_entry) + ANNOTATE_NOENDBR UNWIND_HINT_FUNC PUSH_AND_CLEAR_REGS save_ret=1 ENCODE_FRAME_POINTER 8 @@ -972,7 +978,7 @@ SYM_CODE_START_LOCAL(paranoid_entry) * CR3 above, keep the old value in a callee saved register. */ IBRS_ENTER save_reg=%r15 - UNTRAIN_RET + UNTRAIN_RET_FROM_CALL RET SYM_CODE_END(paranoid_entry) @@ -1038,7 +1044,8 @@ SYM_CODE_END(paranoid_exit) /* * Switch GS and CR3 if needed. */ -SYM_CODE_START_LOCAL(error_entry) +SYM_CODE_START(error_entry) + ANNOTATE_NOENDBR UNWIND_HINT_FUNC PUSH_AND_CLEAR_REGS save_ret=1 @@ -1056,14 +1063,11 @@ SYM_CODE_START_LOCAL(error_entry) /* We have user CR3. Change to kernel CR3. */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax IBRS_ENTER - UNTRAIN_RET + UNTRAIN_RET_FROM_CALL leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */ -.Lerror_entry_from_usermode_after_swapgs: - /* Put us onto the real thread stack. 
*/ - call sync_regs - RET + jmp sync_regs /* * There are two places in the kernel that can potentially fault with @@ -1094,6 +1098,7 @@ SYM_CODE_START_LOCAL(error_entry) */ .Lerror_entry_done_lfence: FENCE_SWAPGS_KERNEL_ENTRY + CALL_DEPTH_ACCOUNT leaq 8(%rsp), %rax /* return pt_regs pointer */ ANNOTATE_UNRET_END RET @@ -1112,7 +1117,7 @@ SYM_CODE_START_LOCAL(error_entry) FENCE_SWAPGS_USER_ENTRY SWITCH_TO_KERNEL_CR3 scratch_reg=%rax IBRS_ENTER - UNTRAIN_RET + UNTRAIN_RET_FROM_CALL /* * Pretend that the exception came from user mode: set up pt_regs @@ -1121,7 +1126,7 @@ SYM_CODE_START_LOCAL(error_entry) leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */ call fixup_bad_iret mov %rax, %rdi - jmp .Lerror_entry_from_usermode_after_swapgs + jmp sync_regs SYM_CODE_END(error_entry) SYM_CODE_START_LOCAL(error_return) @@ -1206,7 +1211,7 @@ SYM_CODE_START(asm_exc_nmi) FENCE_SWAPGS_USER_ENTRY SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx movq %rsp, %rdx - movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp + movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp UNWIND_HINT_IRET_REGS base=%rdx offset=8 pushq 5*8(%rdx) /* pt_regs->ss */ pushq 4*8(%rdx) /* pt_regs->rsp */ @@ -1516,12 +1521,13 @@ SYM_CODE_END(ignore_sysret) #endif .pushsection .text, "ax" -SYM_CODE_START(rewind_stack_and_make_dead) + __FUNC_ALIGN +SYM_CODE_START_NOALIGN(rewind_stack_and_make_dead) UNWIND_HINT_FUNC /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp - movq PER_CPU_VAR(cpu_current_top_of_stack), %rax + movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rax leaq -PTREGS_SIZE(%rax), %rsp UNWIND_HINT_REGS diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 59b93901660db94092d1e881d58affe919234673..70150298f8bdf5ea1cfa9f895a0443b521e5b60b 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -58,7 +58,7 @@ SYM_CODE_START(entry_SYSENTER_compat) SWITCH_TO_KERNEL_CR3 scratch_reg=%rax popq %rax - movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp + movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp /* Construct struct pt_regs on stack */ pushq $__USER_DS /* pt_regs->ss */ @@ -128,7 +128,6 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL) popfq jmp .Lsysenter_flags_fixed SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL) - ANNOTATE_NOENDBR // is_sysenter_singlestep SYM_CODE_END(entry_SYSENTER_compat) /* @@ -191,7 +190,7 @@ SYM_CODE_START(entry_SYSCALL_compat) SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp /* Switch to the kernel stack */ - movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp + movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp SYM_INNER_LABEL(entry_SYSCALL_compat_safe_stack, SYM_L_GLOBAL) ANNOTATE_NOENDBR @@ -332,7 +331,7 @@ SYM_CODE_START(entry_INT80_compat) ALTERNATIVE "", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV movq %rsp, %rax - movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp + movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp pushq 5*8(%rax) /* regs->ss */ pushq 4*8(%rax) /* regs->rsp */ diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S index f38b07d2768bbfc186bf5df4963768166dc738f8..5e37f41e5f14d483283c8dc81f30463ff228be4b 100644 --- a/arch/x86/entry/thunk_64.S +++ b/arch/x86/entry/thunk_64.S @@ -11,7 +11,7 @@ /* rdi: arg1 ... normal C conventions. rax is saved/restored. 
*/ .macro THUNK name, func -SYM_FUNC_START_NOALIGN(\name) +SYM_FUNC_START(\name) pushq %rbp movq %rsp, %rbp @@ -36,7 +36,7 @@ SYM_FUNC_END(\name) EXPORT_SYMBOL(preempt_schedule_thunk) EXPORT_SYMBOL(preempt_schedule_notrace_thunk) -SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore) +SYM_CODE_START_LOCAL(__thunk_restore) popq %r11 popq %r10 popq %r9 diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 3e88b9df8c8f137104566f4fb7c4b3d27ef03f09..838613ac15b8205ef7d982d2186b371c8b0fb1b6 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -33,11 +33,12 @@ vobjs32-y += vdso32/vclock_gettime.o vobjs-$(CONFIG_X86_SGX) += vsgx.o # files to link into kernel -obj-y += vma.o extable.o -KASAN_SANITIZE_vma.o := y -UBSAN_SANITIZE_vma.o := y -KCSAN_SANITIZE_vma.o := y -OBJECT_FILES_NON_STANDARD_vma.o := n +obj-y += vma.o extable.o +KASAN_SANITIZE_vma.o := y +UBSAN_SANITIZE_vma.o := y +KCSAN_SANITIZE_vma.o := y +OBJECT_FILES_NON_STANDARD_vma.o := n +OBJECT_FILES_NON_STANDARD_extable.o := n # vDSO images to build vdso_img-$(VDSO64-y) += 64 @@ -94,7 +95,7 @@ ifneq ($(RETPOLINE_VDSO_CFLAGS),) endif endif -$(vobjs): KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) +$(vobjs): KBUILD_CFLAGS := $(filter-out $(PADDING_CFLAGS) $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) $(vobjs): KBUILD_AFLAGS += -DBUILD_VDSO # @@ -157,6 +158,7 @@ KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(CC_FLAGS_CFI),$(KBUILD_CFLAGS_32)) +KBUILD_CFLAGS_32 := $(filter-out $(PADDING_CFLAGS),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic KBUILD_CFLAGS_32 += -fno-stack-protector KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 017baba56b01923bf974c4d756590828d55e72e7..1f21f576ca77fee6040a4d555eeac87ec706d638 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1603,10 +1603,8 @@ clear_arch_lbr: * x86_perf_get_lbr - get the LBR records information * * @lbr: the caller's memory to store the LBR records information - * - * Returns: 0 indicates the LBR info has been successfully obtained */ -int x86_perf_get_lbr(struct x86_pmu_lbr *lbr) +void x86_perf_get_lbr(struct x86_pmu_lbr *lbr) { int lbr_fmt = x86_pmu.intel_cap.lbr_format; @@ -1614,8 +1612,6 @@ int x86_perf_get_lbr(struct x86_pmu_lbr *lbr) lbr->from = x86_pmu.lbr_from; lbr->to = x86_pmu.lbr_to; lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? 
x86_pmu.lbr_info : 0; - - return 0; } EXPORT_SYMBOL_GPL(x86_perf_get_lbr); diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 9542c582d546b2ae270f3568794c57de4478d0b4..7659217f4d492efe7badbd5cf5d8e866c12833f2 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -78,8 +78,43 @@ extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); extern void apply_retpolines(s32 *start, s32 *end); extern void apply_returns(s32 *start, s32 *end); extern void apply_ibt_endbr(s32 *start, s32 *end); +extern void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, + s32 *start_cfi, s32 *end_cfi); struct module; +struct paravirt_patch_site; + +struct callthunk_sites { + s32 *call_start, *call_end; + struct paravirt_patch_site *pv_start, *pv_end; +}; + +#ifdef CONFIG_CALL_THUNKS +extern void callthunks_patch_builtin_calls(void); +extern void callthunks_patch_module_calls(struct callthunk_sites *sites, + struct module *mod); +extern void *callthunks_translate_call_dest(void *dest); +extern bool is_callthunk(void *addr); +extern int x86_call_depth_emit_accounting(u8 **pprog, void *func); +#else +static __always_inline void callthunks_patch_builtin_calls(void) {} +static __always_inline void +callthunks_patch_module_calls(struct callthunk_sites *sites, + struct module *mod) {} +static __always_inline void *callthunks_translate_call_dest(void *dest) +{ + return dest; +} +static __always_inline bool is_callthunk(void *addr) +{ + return false; +} +static __always_inline int x86_call_depth_emit_accounting(u8 **pprog, + void *func) +{ + return 0; +} +#endif #ifdef CONFIG_SMP extern void alternatives_smp_module_add(struct module *mod, char *name, @@ -347,6 +382,7 @@ static inline int alternatives_text_reserved(void *start, void *end) #define old_len 141b-140b #define new_len1 144f-143f #define new_len2 145f-144f +#define new_len3 146f-145f /* * gas compatible max based on the idea from: @@ -354,7 +390,8 @@ static inline int alternatives_text_reserved(void *start, void *end) * * The additional "-" is needed because gas uses a "true" value of -1.
*/ -#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b))))) +#define alt_max_2(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b))))) +#define alt_max_3(a, b, c) (alt_max_2(alt_max_2(a, b), c)) /* @@ -366,13 +403,36 @@ static inline int alternatives_text_reserved(void *start, void *end) 140: \oldinstr 141: - .skip -((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \ - (alt_max_short(new_len1, new_len2) - (old_len)),0x90 + .skip -((alt_max_2(new_len1, new_len2) - (old_len)) > 0) * \ + (alt_max_2(new_len1, new_len2) - (old_len)),0x90 +142: + + .pushsection .altinstructions,"a" + altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f + altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f + .popsection + + .pushsection .altinstr_replacement,"ax" +143: + \newinstr1 +144: + \newinstr2 +145: + .popsection +.endm + +.macro ALTERNATIVE_3 oldinstr, newinstr1, feature1, newinstr2, feature2, newinstr3, feature3 +140: + \oldinstr +141: + .skip -((alt_max_3(new_len1, new_len2, new_len3) - (old_len)) > 0) * \ + (alt_max_3(new_len1, new_len2, new_len3) - (old_len)),0x90 142: .pushsection .altinstructions,"a" altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f + altinstruction_entry 140b,145f,\feature3,142b-140b,146f-145f .popsection .pushsection .altinstr_replacement,"ax" @@ -381,6 +441,8 @@ static inline int alternatives_text_reserved(void *start, void *end) 144: \newinstr2 145: + \newinstr3 +146: .popsection .endm diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h index 215f5a65790fd52e0b64ad3c93f3bf51b5a4ff3e..6ba80ce9438dde9f93a741f290fda08ac8c596a6 100644 --- a/arch/x86/include/asm/cmpxchg_32.h +++ b/arch/x86/include/asm/cmpxchg_32.h @@ -7,34 +7,6 @@ * you need to test for the feature in boot_cpu_data. */ -/* - * CMPXCHG8B only writes to the target if we had the previous - * value in registers, otherwise it acts as a read and gives us the - * "new previous" value. That is why there is a loop. Preloading - * EDX:EAX is a performance optimization: in the common case it means - * we need only one locked operation. - * - * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very - * least an FPU save and/or %cr0.ts manipulation. - * - * cmpxchg8b must be used with the lock prefix here to allow the - * instruction to be executed atomically. We need to have the reader - * side to see the coherent 64bit value. 
- */ -static inline void set_64bit(volatile u64 *ptr, u64 value) -{ - u32 low = value; - u32 high = value >> 32; - u64 prev = *ptr; - - asm volatile("\n1:\t" - LOCK_PREFIX "cmpxchg8b %0\n\t" - "jnz 1b" - : "=m" (*ptr), "+A" (prev) - : "b" (low), "c" (high) - : "memory"); -} - #ifdef CONFIG_X86_CMPXCHG64 #define arch_cmpxchg64(ptr, o, n) \ ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h index 250187ac824842a4dabcb77bf394902447c0609a..0d3beb27b7fe4fbc43c25f204f5ce76c592409a1 100644 --- a/arch/x86/include/asm/cmpxchg_64.h +++ b/arch/x86/include/asm/cmpxchg_64.h @@ -2,11 +2,6 @@ #ifndef _ASM_X86_CMPXCHG_64_H #define _ASM_X86_CMPXCHG_64_H -static inline void set_64bit(volatile u64 *ptr, u64 val) -{ - *ptr = val; -} - #define arch_cmpxchg64(ptr, o, n) \ ({ \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h index 75efc4c6f0766c3a601d023cb1012c81d7ada7c6..462fc34f131766bbc2e7f26bde19d592f7fc54bf 100644 --- a/arch/x86/include/asm/cpu_entry_area.h +++ b/arch/x86/include/asm/cpu_entry_area.h @@ -130,10 +130,6 @@ struct cpu_entry_area { }; #define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) -#define CPU_ENTRY_AREA_ARRAY_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS) - -/* Total size includes the readonly IDT mapping page as well: */ -#define CPU_ENTRY_AREA_TOTAL_SIZE (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE) DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks); diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index c9f4730bb113f06b247d42b1e5aee2a45fdf4ef0..61012476d66e0e131939f2146d248e1215b7b152 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -305,13 +305,15 @@ #define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */ #define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */ #define X86_FEATURE_SGX_EDECCSSA (11*32+18) /* "" SGX EDECCSSA user leaf function */ - - +#define X86_FEATURE_CALL_DEPTH (11*32+19) /* "" Call depth tracking for RSB stuffing */ #define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ +#define X86_FEATURE_CMPCCXADD (12*32+ 7) /* "" CMPccXADD instructions */ +#define X86_FEATURE_AMX_FP16 (12*32+21) /* "" AMX fp16 Support */ +#define X86_FEATURE_AVX_IFMA (12*32+23) /* "" Support for VPMADD52[H,L]UQ */ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h index 3e204e6140b51fb9567276c9886d21a56aafceed..a1168e7b69e5b747276b61ac62b755ad748e5534 100644 --- a/arch/x86/include/asm/current.h +++ b/arch/x86/include/asm/current.h @@ -3,16 +3,42 @@ #define _ASM_X86_CURRENT_H #include -#include #ifndef __ASSEMBLY__ + +#include +#include + struct task_struct; -DECLARE_PER_CPU(struct task_struct *, current_task); +struct pcpu_hot { + union { + struct { + struct task_struct *current_task; + int preempt_count; + int cpu_number; +#ifdef CONFIG_CALL_DEPTH_TRACKING + u64 call_depth; +#endif + unsigned 
long top_of_stack; + void *hardirq_stack_ptr; + u16 softirq_pending; +#ifdef CONFIG_X86_64 + bool hardirq_stack_inuse; +#else + void *softirq_stack_ptr; +#endif + }; + u8 pad[64]; + }; +}; +static_assert(sizeof(struct pcpu_hot) == 64); + +DECLARE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot); static __always_inline struct task_struct *get_current(void) { - return this_cpu_read_stable(current_task); + return this_cpu_read_stable(pcpu_hot.current_task); } #define current get_current() diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index cfdf307ddc0129019f64efebc01bf5a5159578fa..b049d950612fdd7c01b0508adcb9d8014ad9545f 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -2,8 +2,8 @@ #ifndef _ASM_X86_DEBUGREG_H #define _ASM_X86_DEBUGREG_H - #include +#include #include DECLARE_PER_CPU(unsigned long, cpu_dr7); diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index c862552d7d6d92e3b48e06ba4dcbe1e7f0065575..c44b56f7ffba0d0e4bf881c035a3a3668be9929e 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -69,6 +69,12 @@ # define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31)) #endif +#ifdef CONFIG_CALL_DEPTH_TRACKING +# define DISABLE_CALL_DEPTH_TRACKING 0 +#else +# define DISABLE_CALL_DEPTH_TRACKING (1 << (X86_FEATURE_CALL_DEPTH & 31)) +#endif + #ifdef CONFIG_INTEL_IOMMU_SVM # define DISABLE_ENQCMD 0 #else @@ -107,7 +113,8 @@ #define DISABLED_MASK8 (DISABLE_XENPV|DISABLE_TDX_GUEST) #define DISABLED_MASK9 (DISABLE_SGX) #define DISABLED_MASK10 0 -#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET) +#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET| \ + DISABLE_CALL_DEPTH_TRACKING) #define DISABLED_MASK12 0 #define DISABLED_MASK13 0 #define DISABLED_MASK14 0 diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 275e7fd20310f7a237d2d22f790a2c0928c170a9..66837b8c67f1a9f794f9b65008bace6278f1e3d3 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -3,9 +3,9 @@ #define _ASM_X86_HARDIRQ_H #include +#include typedef struct { - u16 __softirq_pending; #if IS_ENABLED(CONFIG_KVM_INTEL) u8 kvm_cpu_l1tf_flush_l1d; #endif @@ -60,6 +60,7 @@ extern u64 arch_irq_stat_cpu(unsigned int cpu); extern u64 arch_irq_stat(void); #define arch_irq_stat arch_irq_stat +#define local_softirq_pending_ref pcpu_hot.softirq_pending #if IS_ENABLED(CONFIG_KVM_INTEL) static inline void kvm_set_cpu_l1tf_flush_l1d(void) diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index 6d9368ea3701ca11b441a7324ba6cceed2e66308..08e822bd7aa601314980dfada7688407df539cbb 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -61,6 +61,8 @@ #define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10) /* Support for debug MSRs available */ #define HV_FEATURE_DEBUG_MSRS_AVAILABLE BIT(11) +/* Support for extended gva ranges for flush hypercalls available */ +#define HV_FEATURE_EXT_GVA_RANGES_FLUSH BIT(14) /* * Support for returning hypercall output block via XMM * registers is available @@ -607,6 +609,41 @@ struct hv_enlightened_vmcs { #define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF +/* + * Note, Hyper-V isn't actually stealing bit 28 from Intel, just abusing it by + * pairing it with architecturally impossible exit reasons. 
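+ * (The synthetic value defined below is bit 28 ORed with exit reason 49,
+ * EPT misconfiguration, a pairing that real hardware can never produce.)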
Bit 28 is set only + * on SMI exits to a SMI transfer monitor (STM) and if and only if a MTF VM-Exit + * is pending. I.e. it will never be set by hardware for non-SMI exits (there + * are only three), nor will it ever be set unless the VMM is an STM. + */ +#define HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH 0x10000031 + +/* + * Hyper-V uses the software reserved 32 bytes in VMCB control area to expose + * SVM enlightenments to guests. + */ +struct hv_vmcb_enlightenments { + struct __packed hv_enlightenments_control { + u32 nested_flush_hypercall:1; + u32 msr_bitmap:1; + u32 enlightened_npt_tlb: 1; + u32 reserved:29; + } __packed hv_enlightenments_control; + u32 hv_vp_id; + u64 hv_vm_id; + u64 partition_assist_page; + u64 reserved; +} __packed; + +/* + * Hyper-V uses the software reserved clean bit in VMCB. + */ +#define HV_VMCB_NESTED_ENLIGHTENMENTS 31 + +/* Synthetic VM-Exit */ +#define HV_SVM_EXITCODE_ENL 0xf0000000 +#define HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH (1) + struct hv_partition_assist_pg { u32 tlb_lock_count; }; diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h index 147cb8fdda92e9e0a9a2e74bb876968af8ba29dc..798183867d78969b2a7fd7cc3169cf310926fa8e 100644 --- a/arch/x86/include/asm/irq_stack.h +++ b/arch/x86/include/asm/irq_stack.h @@ -116,7 +116,7 @@ ASM_CALL_ARG2 #define call_on_irqstack(func, asm_call, argconstr...) \ - call_on_stack(__this_cpu_read(hardirq_stack_ptr), \ + call_on_stack(__this_cpu_read(pcpu_hot.hardirq_stack_ptr), \ func, asm_call, argconstr) /* Macros to assert type correctness for run_*_on_irqstack macros */ @@ -135,7 +135,7 @@ * User mode entry and interrupt on the irq stack do not \ * switch stacks. If from user mode the task stack is empty. \ */ \ - if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) { \ + if (user_mode(regs) || __this_cpu_read(pcpu_hot.hardirq_stack_inuse)) { \ irq_enter_rcu(); \ func(c_args); \ irq_exit_rcu(); \ @@ -146,9 +146,9 @@ * places. Invoke the stack switch macro with the call \ * sequence which matches the above direct invocation. 
\ */ \ - __this_cpu_write(hardirq_stack_inuse, true); \ + __this_cpu_write(pcpu_hot.hardirq_stack_inuse, true); \ call_on_irqstack(func, asm_call, constr); \ - __this_cpu_write(hardirq_stack_inuse, false); \ + __this_cpu_write(pcpu_hot.hardirq_stack_inuse, false); \ } \ } @@ -212,9 +212,9 @@ */ #define do_softirq_own_stack() \ { \ - __this_cpu_write(hardirq_stack_inuse, true); \ + __this_cpu_write(pcpu_hot.hardirq_stack_inuse, true); \ call_on_irqstack(__do_softirq, ASM_CALL_ARG0); \ - __this_cpu_write(hardirq_stack_inuse, false); \ + __this_cpu_write(pcpu_hot.hardirq_stack_inuse, false); \ } #endif diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h index 13e70da38bedaaba39ad1624d78d57a0de947a35..de75306b932efd25d8ed4183f4b475fffe4d3bd1 100644 --- a/arch/x86/include/asm/kasan.h +++ b/arch/x86/include/asm/kasan.h @@ -28,9 +28,12 @@ #ifdef CONFIG_KASAN void __init kasan_early_init(void); void __init kasan_init(void); +void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid); #else static inline void kasan_early_init(void) { } static inline void kasan_init(void) { } +static inline void kasan_populate_shadow_for_vaddr(void *va, size_t size, + int nid) { } #endif #endif diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 82ba4a564e5875abd05f5e1a34e76473eb20a109..abccd51dcfca1b5b6848a1b481797fbaa043870e 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -110,10 +110,12 @@ KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt) KVM_X86_OP_OPTIONAL(set_hv_timer) KVM_X86_OP_OPTIONAL(cancel_hv_timer) KVM_X86_OP(setup_mce) +#ifdef CONFIG_KVM_SMM KVM_X86_OP(smi_allowed) KVM_X86_OP(enter_smm) KVM_X86_OP(leave_smm) KVM_X86_OP(enable_smi_window) +#endif KVM_X86_OP_OPTIONAL(mem_enc_ioctl) KVM_X86_OP_OPTIONAL(mem_enc_register_region) KVM_X86_OP_OPTIONAL(mem_enc_unregister_region) @@ -123,7 +125,7 @@ KVM_X86_OP_OPTIONAL(guest_memory_reclaimed) KVM_X86_OP(get_msr_feature) KVM_X86_OP(can_emulate_instruction) KVM_X86_OP(apic_init_signal_blocked) -KVM_X86_OP_OPTIONAL(enable_direct_tlbflush) +KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush) KVM_X86_OP_OPTIONAL(migrate_timers) KVM_X86_OP(msr_filter_changed) KVM_X86_OP(complete_emulated_msr) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f05ebaa26f0ff52a43294ea0c9e60175037c123e..f35f1ff4427bb85273abab37fe40a9154bf11cd9 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -81,7 +82,9 @@ #define KVM_REQ_NMI KVM_ARCH_REQ(9) #define KVM_REQ_PMU KVM_ARCH_REQ(10) #define KVM_REQ_PMI KVM_ARCH_REQ(11) +#ifdef CONFIG_KVM_SMM #define KVM_REQ_SMI KVM_ARCH_REQ(12) +#endif #define KVM_REQ_MASTERCLOCK_UPDATE KVM_ARCH_REQ(13) #define KVM_REQ_MCLOCK_INPROGRESS \ KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) @@ -108,6 +111,8 @@ KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_MMU_FREE_OBSOLETE_ROOTS \ KVM_ARCH_REQ_FLAGS(31, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_HV_TLB_FLUSH \ + KVM_ARCH_REQ_FLAGS(32, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define CR0_RESERVED_BITS \ (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \ @@ -204,6 +209,7 @@ typedef enum exit_fastpath_completion fastpath_t; struct x86_emulate_ctxt; struct x86_exception; +union kvm_smram; enum x86_intercept; enum x86_intercept_stage; @@ -253,16 +259,16 @@ enum 
x86_intercept_stage; #define PFERR_GUEST_PAGE_BIT 33 #define PFERR_IMPLICIT_ACCESS_BIT 48 -#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT) -#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT) -#define PFERR_USER_MASK (1U << PFERR_USER_BIT) -#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT) -#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT) -#define PFERR_PK_MASK (1U << PFERR_PK_BIT) -#define PFERR_SGX_MASK (1U << PFERR_SGX_BIT) -#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT) -#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT) -#define PFERR_IMPLICIT_ACCESS (1ULL << PFERR_IMPLICIT_ACCESS_BIT) +#define PFERR_PRESENT_MASK BIT(PFERR_PRESENT_BIT) +#define PFERR_WRITE_MASK BIT(PFERR_WRITE_BIT) +#define PFERR_USER_MASK BIT(PFERR_USER_BIT) +#define PFERR_RSVD_MASK BIT(PFERR_RSVD_BIT) +#define PFERR_FETCH_MASK BIT(PFERR_FETCH_BIT) +#define PFERR_PK_MASK BIT(PFERR_PK_BIT) +#define PFERR_SGX_MASK BIT(PFERR_SGX_BIT) +#define PFERR_GUEST_FINAL_MASK BIT_ULL(PFERR_GUEST_FINAL_BIT) +#define PFERR_GUEST_PAGE_MASK BIT_ULL(PFERR_GUEST_PAGE_BIT) +#define PFERR_IMPLICIT_ACCESS BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT) #define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \ PFERR_WRITE_MASK | \ @@ -488,17 +494,19 @@ enum pmc_type { struct kvm_pmc { enum pmc_type type; u8 idx; + bool is_paused; + bool intr; u64 counter; + u64 prev_counter; u64 eventsel; struct perf_event *perf_event; struct kvm_vcpu *vcpu; /* + * only for creating or reusing perf_event, * eventsel value for general purpose counters, * ctrl value for fixed counters. */ u64 current_config; - bool is_paused; - bool intr; }; /* More counters may conflict with other existing Architectural MSRs */ @@ -524,7 +532,16 @@ struct kvm_pmu { struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC]; struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED]; struct irq_work irq_work; - DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX); + + /* + * Overlay the bitmap with a 64-bit atomic so that all bits can be + * set in a single access, e.g. to reprogram all counters when the PMU + * filter changes. + */ + union { + DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX); + atomic64_t __reprogram_pmi; + }; DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX); DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX); @@ -602,6 +619,29 @@ struct kvm_vcpu_hv_synic { bool dont_zero_synic_pages; }; +/* The maximum number of entries on the TLB flush fifo. */ +#define KVM_HV_TLB_FLUSH_FIFO_SIZE (16) +/* + * Note: the following 'magic' entry is made up by KVM to avoid putting + * anything besides GVA on the TLB flush fifo. It is theoretically possible + * to observe a request to flush 4095 PFNs starting from 0xfffffffffffff000 + * which will look identical. KVM's action to 'flush everything' instead of + * flushing these particular addresses is, however, fully legitimate as + * flushing more than requested is always OK. 
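+ * (In the TLFS encoding a flush entry carries the GVA page number in bits
+ * 63:12 and an "additional pages" count in bits 11:0, so a base of
+ * 0xfffffffffffff000 with a count of 0xfff is the one encoding that
+ * collides with (u64)-1.)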
+ */ +#define KVM_HV_TLB_FLUSHALL_ENTRY ((u64)-1) + +enum hv_tlb_flush_fifos { + HV_L1_TLB_FLUSH_FIFO, + HV_L2_TLB_FLUSH_FIFO, + HV_NR_TLB_FLUSH_FIFOS, +}; + +struct kvm_vcpu_hv_tlb_flush_fifo { + spinlock_t write_lock; + DECLARE_KFIFO(entries, u64, KVM_HV_TLB_FLUSH_FIFO_SIZE); +}; + /* Hyper-V per vcpu emulation context */ struct kvm_vcpu_hv { struct kvm_vcpu *vcpu; @@ -623,6 +663,19 @@ struct kvm_vcpu_hv { u32 nested_eax; /* HYPERV_CPUID_NESTED_FEATURES.EAX */ u32 nested_ebx; /* HYPERV_CPUID_NESTED_FEATURES.EBX */ } cpuid_cache; + + struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo[HV_NR_TLB_FLUSH_FIFOS]; + + /* Preallocated buffer for handling hypercalls passing sparse vCPU set */ + u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS]; + + struct hv_vp_assist_page vp_assist_page; + + struct { + u64 pa_page_gpa; + u64 vm_id; + u32 vp_id; + } nested; }; /* Xen HVM per vcpu emulation context */ @@ -633,6 +686,7 @@ struct kvm_vcpu_xen { struct gfn_to_pfn_cache vcpu_info_cache; struct gfn_to_pfn_cache vcpu_time_info_cache; struct gfn_to_pfn_cache runstate_cache; + struct gfn_to_pfn_cache runstate2_cache; u64 last_steal; u64 runstate_entry_time; u64 runstate_times[4]; @@ -1059,6 +1113,7 @@ struct msr_bitmap_range { struct kvm_xen { u32 xen_version; bool long_mode; + bool runstate_update_flag; u8 upcall_vector; struct gfn_to_pfn_cache shinfo_cache; struct idr evtchn_ports; @@ -1156,7 +1211,18 @@ struct kvm_arch { struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; struct list_head active_mmu_pages; struct list_head zapped_obsolete_pages; - struct list_head lpage_disallowed_mmu_pages; + /* + * A list of kvm_mmu_page structs that, if zapped, could possibly be + * replaced by an NX huge page. A shadow page is on this list if its + * existence disallows an NX huge page (nx_huge_page_disallowed is set) + * and there are no other conditions that prevent a huge page, e.g. + * the backing host page is huge, dirty logging is not enabled for its + * memslot, etc... Note, zapping shadow pages on this list doesn't + * guarantee an NX huge page will be created in its stead, e.g. if the + * guest attempts to execute from the region then KVM obviously can't + * create an NX huge page (without hanging the guest). + */ + struct list_head possible_nx_huge_pages; struct kvm_page_track_notifier_node mmu_sp_tracker; struct kvm_page_track_notifier_head track_notifier_head; /* @@ -1272,7 +1338,7 @@ struct kvm_arch { bool sgx_provisioning_allowed; struct kvm_pmu_event_filter __rcu *pmu_event_filter; - struct task_struct *nx_lpage_recovery_thread; + struct task_struct *nx_huge_page_recovery_thread; #ifdef CONFIG_X86_64 /* @@ -1284,6 +1350,9 @@ struct kvm_arch { */ bool tdp_mmu_enabled; + /* The number of TDP MMU pages across all roots. */ + atomic64_t tdp_mmu_pages; + /* * List of kvm_mmu_page structs being used as roots. * All kvm_mmu_page structs in the list should have @@ -1304,21 +1373,13 @@ struct kvm_arch { */ struct list_head tdp_mmu_roots; - /* - * List of kvm_mmu_page structs not being used as roots. - * All kvm_mmu_page structs in the list should have - * tdp_mmu_page set and a tdp_mmu_root_count of 0.
- */ - struct list_head tdp_mmu_pages; - /* * Protects accesses to the following fields when the MMU lock * is held in read mode: * - tdp_mmu_roots (above) - * - tdp_mmu_pages (above) * - the link field of kvm_mmu_page structs used by the TDP MMU - * - lpage_disallowed_mmu_pages - * - the lpage_disallowed_link field of kvm_mmu_page structs used + * - possible_nx_huge_pages; + * - the possible_nx_huge_page_link field of kvm_mmu_page structs used * by the TDP MMU * It is acceptable, but not necessary, to acquire this lock when * the thread holds the MMU lock in write mode. @@ -1612,10 +1673,12 @@ struct kvm_x86_ops { void (*setup_mce)(struct kvm_vcpu *vcpu); +#ifdef CONFIG_KVM_SMM int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection); - int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate); - int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate); + int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram); + int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram); void (*enable_smi_window)(struct kvm_vcpu *vcpu); +#endif int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp); int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp); @@ -1630,7 +1693,7 @@ struct kvm_x86_ops { void *insn, int insn_len); bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu); - int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu); + int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu); void (*migrate_timers)(struct kvm_vcpu *vcpu); void (*msr_filter_changed)(struct kvm_vcpu *vcpu); @@ -1663,6 +1726,7 @@ struct kvm_x86_nested_ops { int (*enable_evmcs)(struct kvm_vcpu *vcpu, uint16_t *vmcs_version); uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu); + void (*hv_inject_synthetic_vmexit_post_tlb_flush)(struct kvm_vcpu *vcpu); }; struct kvm_x86_init_ops { @@ -1844,6 +1908,7 @@ int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu); int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu); void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); +void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg); void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector); @@ -1909,8 +1974,6 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu, void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu); gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception); -gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, - struct x86_exception *exception); gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception); gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, @@ -1994,14 +2057,18 @@ enum { #define HF_NMI_MASK (1 << 3) #define HF_IRET_MASK (1 << 4) #define HF_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */ + +#ifdef CONFIG_KVM_SMM #define HF_SMM_MASK (1 << 6) #define HF_SMM_INSIDE_NMI_MASK (1 << 7) -#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE -#define KVM_ADDRESS_SPACE_NUM 2 - -#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0) -#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm) +# define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE +# define KVM_ADDRESS_SPACE_NUM 2 +# define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 
1 : 0) +# define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm) +#else +# define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, 0) +#endif #define KVM_ARCH_WANT_MMU_NOTIFIER @@ -2089,14 +2156,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu) #endif } -#define put_smstate(type, buf, offset, val) \ - *(type *)((buf) + (offset) - 0x7e00) = val - -#define GET_SMSTATE(type, buf, offset) \ - (*(type *)((buf) + (offset) - 0x7e00)) - -int kvm_cpu_dirty_log_size(void); - int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages); #define KVM_CLOCK_VALID_FLAGS \ diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index f484d656d34ee0a8ae607d85c3f586ca608c5d90..dd9b8118f7848e279cf1fec9247f814812e8ce5f 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h @@ -12,13 +12,26 @@ #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) #endif /* CONFIG_X86_32 */ -#ifdef __ASSEMBLY__ - -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16) -#define __ALIGN .p2align 4, 0x90 +#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT, 0x90; #define __ALIGN_STR __stringify(__ALIGN) + +#if defined(CONFIG_CALL_PADDING) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) +#define FUNCTION_PADDING .skip CONFIG_FUNCTION_ALIGNMENT, 0x90; +#else +#define FUNCTION_PADDING +#endif + +#if (CONFIG_FUNCTION_ALIGNMENT > 8) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) +# define __FUNC_ALIGN __ALIGN; FUNCTION_PADDING +#else +# define __FUNC_ALIGN __ALIGN #endif +#define ASM_FUNC_ALIGN __stringify(__FUNC_ALIGN) +#define SYM_F_ALIGN __FUNC_ALIGN + +#ifdef __ASSEMBLY__ + #if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) #define RET jmp __x86_return_thunk #else /* CONFIG_RETPOLINE */ @@ -43,11 +56,45 @@ #endif /* __ASSEMBLY__ */ +/* + * Depending on -fpatchable-function-entry=N,N usage (CONFIG_CALL_PADDING) the + * CFI symbol layout changes. + * + * Without CALL_THUNKS: + * + * .align FUNCTION_ALIGNMENT + * __cfi_##name: + * .skip FUNCTION_PADDING, 0x90 + * .byte 0xb8 + * .long __kcfi_typeid_##name + * name: + * + * With CALL_THUNKS: + * + * .align FUNCTION_ALIGNMENT + * __cfi_##name: + * .byte 0xb8 + * .long __kcfi_typeid_##name + * .skip FUNCTION_PADDING, 0x90 + * name: + * + * In both cases the whole thing is FUNCTION_ALIGNMENT aligned and sized.
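+ *
+ * For instance, with 16 byte FUNCTION_ALIGNMENT and 11 bytes of padding,
+ * __cfi_##name is 16 bytes total: 11 NOPs (0x90) plus the 5 byte
+ * "mov $__kcfi_typeid_##name, %eax" (opcode 0xb8) emitted by __CFI_TYPE().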
+ */ + +#ifdef CONFIG_CALL_PADDING +#define CFI_PRE_PADDING +#define CFI_POST_PADDING .skip CONFIG_FUNCTION_PADDING_BYTES, 0x90; +#else +#define CFI_PRE_PADDING .skip CONFIG_FUNCTION_PADDING_BYTES, 0x90; +#define CFI_POST_PADDING +#endif + #define __CFI_TYPE(name) \ SYM_START(__cfi_##name, SYM_L_LOCAL, SYM_A_NONE) \ - .fill 11, 1, 0x90 ASM_NL \ + CFI_PRE_PADDING \ .byte 0xb8 ASM_NL \ .long __kcfi_typeid_##name ASM_NL \ + CFI_POST_PADDING \ SYM_FUNC_END(__cfi_##name) /* SYM_TYPED_FUNC_START -- use for indirectly called globals, w/ CFI type */ @@ -57,7 +104,7 @@ /* SYM_FUNC_START -- use for global functions */ #define SYM_FUNC_START(name) \ - SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \ + SYM_START(name, SYM_L_GLOBAL, SYM_F_ALIGN) \ ENDBR /* SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment */ @@ -67,7 +114,7 @@ /* SYM_FUNC_START_LOCAL -- use for local functions */ #define SYM_FUNC_START_LOCAL(name) \ - SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) \ + SYM_START(name, SYM_L_LOCAL, SYM_F_ALIGN) \ ENDBR /* SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o alignment */ @@ -77,7 +124,7 @@ /* SYM_FUNC_START_WEAK -- use for weak functions */ #define SYM_FUNC_START_WEAK(name) \ - SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN) \ + SYM_START(name, SYM_L_WEAK, SYM_F_ALIGN) \ ENDBR /* SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index dfdb103ae4f6ff993f6b1fea9a6bd4fe942ec12a..771b0a2b7a34721185fb9eef37c8f0aeefcf3f45 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -12,8 +12,104 @@ #include #include #include +#include -#define RETPOLINE_THUNK_SIZE 32 +/* + * Call depth tracking for Intel SKL CPUs to address the RSB underflow + * issue in software. + * + * The tracking does not use a counter. It uses arithmetic shift + * right on call entry and logical shift left on return. + * + * The depth tracking variable is initialized to 0x8000.... when the call + * depth is zero. The arithmetic shift right sign extends the MSB and + * saturates after the 12th call. The shift count is 5 for both directions + * so the tracking covers 12 nested calls. + * + * Call + * 0: 0x8000000000000000 0x0000000000000000 + * 1: 0xfc00000000000000 0xf000000000000000 + * ... + * 11: 0xfffffffffffffff8 0xfffffffffffffc00 + * 12: 0xffffffffffffffff 0xffffffffffffffe0 + * + * After a return buffer fill the depth is credited 12 calls before the + * next stuffing has to take place. + * + * There is an inaccuracy for situations like this: + * + * 10 calls + * 5 returns + * 3 calls + * 4 returns + * 3 calls + * .... + * + * The shift count might cause this to be off by one in either direction, + * but there is still a cushion vs. the RSB depth. The algorithm does not + * claim to be perfect and it can be speculated around by the CPU, but it + * is considered that it obfuscates the problem enough to make exploitation + * extremely difficult.
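+ *
+ * As a plain C sketch (for illustration only; the real updates are the
+ * INCREMENT_CALL_DEPTH asm below and the logical shift in the return thunk):
+ *
+ *	depth = (s64)depth >> RET_DEPTH_SHIFT;	// call: arithmetic shift right
+ *	depth = (u64)depth << RET_DEPTH_SHIFT;	// return: logical shift left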
+ */ +#define RET_DEPTH_SHIFT 5 +#define RSB_RET_STUFF_LOOPS 16 +#define RET_DEPTH_INIT 0x8000000000000000ULL +#define RET_DEPTH_INIT_FROM_CALL 0xfc00000000000000ULL +#define RET_DEPTH_CREDIT 0xffffffffffffffffULL + +#ifdef CONFIG_CALL_THUNKS_DEBUG +# define CALL_THUNKS_DEBUG_INC_CALLS \ + incq %gs:__x86_call_count; +# define CALL_THUNKS_DEBUG_INC_RETS \ + incq %gs:__x86_ret_count; +# define CALL_THUNKS_DEBUG_INC_STUFFS \ + incq %gs:__x86_stuffs_count; +# define CALL_THUNKS_DEBUG_INC_CTXSW \ + incq %gs:__x86_ctxsw_count; +#else +# define CALL_THUNKS_DEBUG_INC_CALLS +# define CALL_THUNKS_DEBUG_INC_RETS +# define CALL_THUNKS_DEBUG_INC_STUFFS +# define CALL_THUNKS_DEBUG_INC_CTXSW +#endif + +#if defined(CONFIG_CALL_DEPTH_TRACKING) && !defined(COMPILE_OFFSETS) + +#include + +#define CREDIT_CALL_DEPTH \ + movq $-1, PER_CPU_VAR(pcpu_hot + X86_call_depth); + +#define ASM_CREDIT_CALL_DEPTH \ + movq $-1, PER_CPU_VAR(pcpu_hot + X86_call_depth); + +#define RESET_CALL_DEPTH \ + mov $0x80, %rax; \ + shl $56, %rax; \ + movq %rax, PER_CPU_VAR(pcpu_hot + X86_call_depth); + +#define RESET_CALL_DEPTH_FROM_CALL \ + mov $0xfc, %rax; \ + shl $56, %rax; \ + movq %rax, PER_CPU_VAR(pcpu_hot + X86_call_depth); \ + CALL_THUNKS_DEBUG_INC_CALLS + +#define INCREMENT_CALL_DEPTH \ + sarq $5, %gs:pcpu_hot + X86_call_depth; \ + CALL_THUNKS_DEBUG_INC_CALLS + +#define ASM_INCREMENT_CALL_DEPTH \ + sarq $5, PER_CPU_VAR(pcpu_hot + X86_call_depth); \ + CALL_THUNKS_DEBUG_INC_CALLS + +#else +#define CREDIT_CALL_DEPTH +#define ASM_CREDIT_CALL_DEPTH +#define RESET_CALL_DEPTH +#define INCREMENT_CALL_DEPTH +#define ASM_INCREMENT_CALL_DEPTH +#define RESET_CALL_DEPTH_FROM_CALL +#endif /* * Fill the CPU return stack buffer. @@ -32,6 +128,7 @@ * from C via asm(".include ") but let's not go there. */ +#define RETPOLINE_THUNK_SIZE 32 #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ /* @@ -60,7 +157,9 @@ dec reg; \ jnz 771b; \ /* barrier for jnz misprediction */ \ - lfence; + lfence; \ + ASM_CREDIT_CALL_DEPTH \ + CALL_THUNKS_DEBUG_INC_CTXSW #else /* * i386 doesn't unconditionally have LFENCE, as such it can't @@ -185,11 +284,32 @@ * where we have a stack but before any RET instruction. 
*/ .macro UNTRAIN_RET -#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) +#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ + defined(CONFIG_CALL_DEPTH_TRACKING) ANNOTATE_UNRET_END - ALTERNATIVE_2 "", \ - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ - "call entry_ibpb", X86_FEATURE_ENTRY_IBPB + ALTERNATIVE_3 "", \ + CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ + __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH +#endif +.endm + +.macro UNTRAIN_RET_FROM_CALL +#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ + defined(CONFIG_CALL_DEPTH_TRACKING) + ANNOTATE_UNRET_END + ALTERNATIVE_3 "", \ + CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ + __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH +#endif +.endm + + +.macro CALL_DEPTH_ACCOUNT +#ifdef CONFIG_CALL_DEPTH_TRACKING + ALTERNATIVE "", \ + __stringify(ASM_INCREMENT_CALL_DEPTH), X86_FEATURE_CALL_DEPTH #endif .endm @@ -203,11 +323,45 @@ typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE]; extern retpoline_thunk_t __x86_indirect_thunk_array[]; +extern retpoline_thunk_t __x86_indirect_call_thunk_array[]; +extern retpoline_thunk_t __x86_indirect_jump_thunk_array[]; extern void __x86_return_thunk(void); extern void zen_untrain_ret(void); extern void entry_ibpb(void); +#ifdef CONFIG_CALL_THUNKS +extern void (*x86_return_thunk)(void); +#else +#define x86_return_thunk (&__x86_return_thunk) +#endif + +#ifdef CONFIG_CALL_DEPTH_TRACKING +extern void __x86_return_skl(void); + +static inline void x86_set_skl_return_thunk(void) +{ + x86_return_thunk = &__x86_return_skl; +} + +#define CALL_DEPTH_ACCOUNT \ + ALTERNATIVE("", \ + __stringify(INCREMENT_CALL_DEPTH), \ + X86_FEATURE_CALL_DEPTH) + +#ifdef CONFIG_CALL_THUNKS_DEBUG +DECLARE_PER_CPU(u64, __x86_call_count); +DECLARE_PER_CPU(u64, __x86_ret_count); +DECLARE_PER_CPU(u64, __x86_stuffs_count); +DECLARE_PER_CPU(u64, __x86_ctxsw_count); +#endif +#else +static inline void x86_set_skl_return_thunk(void) {} + +#define CALL_DEPTH_ACCOUNT "" + +#endif + #ifdef CONFIG_RETPOLINE #define GEN(reg) \ @@ -215,6 +369,16 @@ extern void entry_ibpb(void); #include #undef GEN +#define GEN(reg) \ + extern retpoline_thunk_t __x86_indirect_call_thunk_ ## reg; +#include +#undef GEN + +#define GEN(reg) \ + extern retpoline_thunk_t __x86_indirect_jump_thunk_ ## reg; +#include +#undef GEN + #ifdef CONFIG_X86_64 /* diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index a506a411474d4ad0213d1cdfb64fe6598ab1f9ca..86bd4311daf8a5046e5fa4cb71979535667a2e3d 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -11,20 +11,14 @@ #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) -#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) -#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) - -#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) -#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) - #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) -/* Cast *PAGE_MASK to a signed type so that it is sign-extended if +/* Cast P*D_MASK to a signed type so that it is sign-extended if virtual addresses are 32-bits but physical addresses are larger (ie, 32-bit PAE). 
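   For instance, the 32-bit signed PAGE_MASK value 0xfffff000 widens to
   0xfffffffffffff000 when ANDed with the 64-bit __PHYSICAL_MASK, so the
   physical address bits above bit 31 survive the masking.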
*/ #define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) -#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK) -#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK) +#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_MASK) & __PHYSICAL_MASK) +#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_MASK) & __PHYSICAL_MASK) #define HPAGE_SHIFT PMD_SHIFT #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 2a0b8dd4ec33553c30584f520bed2f818f85ce62..73e9522db7c155eeb627f8c1907e39c805aa1786 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -4,13 +4,13 @@ /* Various instructions on x86 need to be replaced for * para-virtualization: those hooks are defined here. */ +#include + #ifdef CONFIG_PARAVIRT #include #include #include -#include - #ifndef __ASSEMBLY__ #include #include @@ -665,6 +665,7 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu); asm(".pushsection " section ", \"ax\";" \ ".globl " PV_THUNK_NAME(func) ";" \ ".type " PV_THUNK_NAME(func) ", @function;" \ + ASM_FUNC_ALIGN \ PV_THUNK_NAME(func) ":" \ ASM_ENDBR \ FRAME_BEGIN \ @@ -730,6 +731,18 @@ static __always_inline unsigned long arch_local_irq_save(void) #undef PVOP_VCALL4 #undef PVOP_CALL4 +#define DEFINE_PARAVIRT_ASM(func, instr, sec) \ + asm (".pushsection " #sec ", \"ax\"\n" \ + ".global " #func "\n\t" \ + ".type " #func ", @function\n\t" \ + ASM_FUNC_ALIGN "\n" \ + #func ":\n\t" \ + ASM_ENDBR \ + instr "\n\t" \ + ASM_RET \ + ".size " #func ", . - " #func "\n\t" \ + ".popsection") + extern void default_banner(void); #else /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index f72bf0f6ccf71fe628f39a1aeef2f5041a61fa33..8c1da419260f96040e5e63a64c576126c35eee4b 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -2,6 +2,24 @@ #ifndef _ASM_X86_PARAVIRT_TYPES_H #define _ASM_X86_PARAVIRT_TYPES_H +#ifndef __ASSEMBLY__ +/* These all sit in the .parainstructions section to tell us what to patch. */ +struct paravirt_patch_site { + u8 *instr; /* original instructions */ + u8 type; /* type of this instruction */ + u8 len; /* length of original instruction */ +}; + +/* Lazy mode for batching updates / context switch */ +enum paravirt_lazy_mode { + PARAVIRT_LAZY_NONE, + PARAVIRT_LAZY_MMU, + PARAVIRT_LAZY_CPU, +}; +#endif + +#ifdef CONFIG_PARAVIRT + #ifndef __ASSEMBLY__ #include @@ -534,13 +552,6 @@ int paravirt_disable_iospace(void); __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) -/* Lazy mode for batching updates / context switch */ -enum paravirt_lazy_mode { - PARAVIRT_LAZY_NONE, - PARAVIRT_LAZY_MMU, - PARAVIRT_LAZY_CPU, -}; - enum paravirt_lazy_mode paravirt_get_lazy_mode(void); void paravirt_start_context_switch(struct task_struct *prev); void paravirt_end_context_switch(struct task_struct *next); @@ -556,16 +567,9 @@ unsigned long paravirt_ret0(void); #define paravirt_nop ((void *)_paravirt_nop) -/* These all sit in the .parainstructions section to tell us what to patch. 
*/ -struct paravirt_patch_site { - u8 *instr; /* original instructions */ - u8 type; /* type of this instruction */ - u8 len; /* length of original instruction */ -}; - extern struct paravirt_patch_site __parainstructions[], __parainstructions_end[]; #endif /* __ASSEMBLY__ */ - +#endif /* CONFIG_PARAVIRT */ #endif /* _ASM_X86_PARAVIRT_TYPES_H */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 9ac46dbe57d48fc3d6eb53471ef725899917f1d7..5d0f6891ae611aa069710b85585bee9467720114 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -543,12 +543,12 @@ static inline void perf_check_microcode(void) { } #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data); -extern int x86_perf_get_lbr(struct x86_pmu_lbr *lbr); +extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr); #else struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data); -static inline int x86_perf_get_lbr(struct x86_pmu_lbr *lbr) +static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr) { - return -1; + memset(lbr, 0, sizeof(*lbr)); } #endif diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 28421a88720939e20ef4354684012e3a5f021bd8..967b135fa2c01eca1807a1bcb10fc6353fd74ed9 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -2,8 +2,6 @@ #ifndef _ASM_X86_PGTABLE_3LEVEL_H #define _ASM_X86_PGTABLE_3LEVEL_H -#include - /* * Intel Physical Address Extension (PAE) Mode - three-level page * tables on PPro+ CPUs. @@ -21,7 +19,15 @@ pr_err("%s:%d: bad pgd %p(%016Lx)\n", \ __FILE__, __LINE__, &(e), pgd_val(e)) -/* Rules for using set_pte: the pte being assigned *must* be +#define pxx_xchg64(_pxx, _ptr, _val) ({ \ + _pxx##val_t *_p = (_pxx##val_t *)_ptr; \ + _pxx##val_t _o = *_p; \ + do { } while (!try_cmpxchg64(_p, &_o, (_val))); \ + native_make_##_pxx(_o); \ +}) + +/* + * Rules for using set_pte: the pte being assigned *must* be * either not present or in a state where the hardware will * not attempt to update the pte. In places where this is * not possible, use pte_get_and_clear to obtain the old pte @@ -29,75 +35,19 @@ */ static inline void native_set_pte(pte_t *ptep, pte_t pte) { - ptep->pte_high = pte.pte_high; + WRITE_ONCE(ptep->pte_high, pte.pte_high); smp_wmb(); - ptep->pte_low = pte.pte_low; -} - -#define pmd_read_atomic pmd_read_atomic -/* - * pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with - * a "*pmdp" dereference done by GCC. Problem is, in certain places - * where pte_offset_map_lock() is called, concurrent page faults are - * allowed, if the mmap_lock is hold for reading. An example is mincore - * vs page faults vs MADV_DONTNEED. On the page fault side - * pmd_populate() rightfully does a set_64bit(), but if we're reading the - * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen - * because GCC will not read the 64-bit value of the pmd atomically. - * - * To fix this all places running pte_offset_map_lock() while holding the - * mmap_lock in read mode, shall read the pmdp pointer using this - * function to know if the pmd is null or not, and in turn to know if - * they can run pte_offset_map_lock() or pmd_trans_huge() or other pmd - * operations. - * - * Without THP if the mmap_lock is held for reading, the pmd can only - * transition from null to not null while pmd_read_atomic() runs. 
So - * we can always return atomic pmd values with this function. - * - * With THP if the mmap_lock is held for reading, the pmd can become - * trans_huge or none or point to a pte (and in turn become "stable") - * at any time under pmd_read_atomic(). We could read it truly - * atomically here with an atomic64_read() for the THP enabled case (and - * it would be a whole lot simpler), but to avoid using cmpxchg8b we - * only return an atomic pmdval if the low part of the pmdval is later - * found to be stable (i.e. pointing to a pte). We are also returning a - * 'none' (zero) pmdval if the low part of the pmd is zero. - * - * In some cases the high and low part of the pmdval returned may not be - * consistent if THP is enabled (the low part may point to previously - * mapped hugepage, while the high part may point to a more recently - * mapped hugepage), but pmd_none_or_trans_huge_or_clear_bad() only - * needs the low part of the pmd to be read atomically to decide if the - * pmd is unstable or not, with the only exception when the low part - * of the pmd is zero, in which case we return a 'none' pmd. - */ -static inline pmd_t pmd_read_atomic(pmd_t *pmdp) -{ - pmdval_t ret; - u32 *tmp = (u32 *)pmdp; - - ret = (pmdval_t) (*tmp); - if (ret) { - /* - * If the low part is null, we must not read the high part - * or we can end up with a partial pmd. - */ - smp_rmb(); - ret |= ((pmdval_t)*(tmp + 1)) << 32; - } - - return (pmd_t) { ret }; + WRITE_ONCE(ptep->pte_low, pte.pte_low); } static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) { - set_64bit((unsigned long long *)(ptep), native_pte_val(pte)); + pxx_xchg64(pte, ptep, native_pte_val(pte)); } static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { - set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); + pxx_xchg64(pmd, pmdp, native_pmd_val(pmd)); } static inline void native_set_pud(pud_t *pudp, pud_t pud) @@ -105,7 +55,7 @@ static inline void native_set_pud(pud_t *pudp, pud_t pud) #ifdef CONFIG_PAGE_TABLE_ISOLATION pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd); #endif - set_64bit((unsigned long long *)(pudp), native_pud_val(pud)); + pxx_xchg64(pud, pudp, native_pud_val(pud)); } /* @@ -116,17 +66,16 @@ static inline void native_set_pud(pud_t *pudp, pud_t pud) static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - ptep->pte_low = 0; + WRITE_ONCE(ptep->pte_low, 0); smp_wmb(); - ptep->pte_high = 0; + WRITE_ONCE(ptep->pte_high, 0); } -static inline void native_pmd_clear(pmd_t *pmd) +static inline void native_pmd_clear(pmd_t *pmdp) { - u32 *tmp = (u32 *)pmd; - *tmp = 0; + WRITE_ONCE(pmdp->pmd_low, 0); smp_wmb(); - *(tmp + 1) = 0; + WRITE_ONCE(pmdp->pmd_high, 0); } static inline void native_pud_clear(pud_t *pudp) @@ -149,41 +98,26 @@ static inline void pud_clear(pud_t *pudp) */ } + #ifdef CONFIG_SMP static inline pte_t native_ptep_get_and_clear(pte_t *ptep) { - pte_t res; - - res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0); - - return res; + return pxx_xchg64(pte, ptep, 0ULL); } -#else -#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) -#endif -union split_pmd { - struct { - u32 pmd_low; - u32 pmd_high; - }; - pmd_t pmd; -}; - -#ifdef CONFIG_SMP static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp) { - union split_pmd res, *orig = (union split_pmd *)pmdp; - - /* xchg acts as a barrier before setting of the high bits */ - res.pmd_low = xchg(&orig->pmd_low, 0); - res.pmd_high = orig->pmd_high; - orig->pmd_high = 0; + return 
pxx_xchg64(pmd, pmdp, 0ULL); +} - return res.pmd; +static inline pud_t native_pudp_get_and_clear(pud_t *pudp) +{ + return pxx_xchg64(pud, pudp, 0ULL); } #else +#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) +#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp) #endif #ifndef pmdp_establish @@ -199,53 +133,16 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, * anybody. */ if (!(pmd_val(pmd) & _PAGE_PRESENT)) { - union split_pmd old, new, *ptr; - - ptr = (union split_pmd *)pmdp; - - new.pmd = pmd; - /* xchg acts as a barrier before setting of the high bits */ - old.pmd_low = xchg(&ptr->pmd_low, new.pmd_low); - old.pmd_high = ptr->pmd_high; - ptr->pmd_high = new.pmd_high; - return old.pmd; - } - - do { - old = *pmdp; - } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd); - - return old; -} -#endif - -#ifdef CONFIG_SMP -union split_pud { - struct { - u32 pud_low; - u32 pud_high; - }; - pud_t pud; -}; - -static inline pud_t native_pudp_get_and_clear(pud_t *pudp) -{ - union split_pud res, *orig = (union split_pud *)pudp; + old.pmd_low = xchg(&pmdp->pmd_low, pmd.pmd_low); + old.pmd_high = READ_ONCE(pmdp->pmd_high); + WRITE_ONCE(pmdp->pmd_high, pmd.pmd_high); -#ifdef CONFIG_PAGE_TABLE_ISOLATION - pti_set_user_pgtbl(&pudp->p4d.pgd, __pgd(0)); -#endif - - /* xchg acts as a barrier before setting of the high bits */ - res.pud_low = xchg(&orig->pud_low, 0); - res.pud_high = orig->pud_high; - orig->pud_high = 0; + return old; + } - return res.pud; + return pxx_xchg64(pmd, pmdp, pmd.pmd); } -#else -#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp) #endif /* Encode and de-code a swap entry */ diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h index 56baf43befb4924924ff986b6dd3da2cbf53cd31..80911349519e8ef3cdd45db642ab6394458719d7 100644 --- a/arch/x86/include/asm/pgtable-3level_types.h +++ b/arch/x86/include/asm/pgtable-3level_types.h @@ -18,6 +18,13 @@ typedef union { }; pteval_t pte; } pte_t; + +typedef union { + struct { + unsigned long pmd_low, pmd_high; + }; + pmdval_t pmd; +} pmd_t; #endif /* !__ASSEMBLY__ */ #define SHARED_KERNEL_PMD (!static_cpu_has(X86_FEATURE_PTI)) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 286a71810f9e95a7048069a3fd9cc724d0c9c756..0564edd24ffb5cd58ad293f2e5f9dc6c0683d704 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -292,7 +292,23 @@ static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear) #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP static inline int pte_uffd_wp(pte_t pte) { - return pte_flags(pte) & _PAGE_UFFD_WP; + bool wp = pte_flags(pte) & _PAGE_UFFD_WP; + +#ifdef CONFIG_DEBUG_VM + /* + * Having write bit for wr-protect-marked present ptes is fatal, + * because it means the uffd-wp bit will be ignored and write will + * just go through. + * + * Use any chance of pgtable walking to verify this (e.g., when + * page swapped out or being migrated for all purposes). It means + * something is already wrong. Tell the admin even before the + * process crashes. We also nail it with wrong pgtable setup. 
+ */ + WARN_ON_ONCE(wp && pte_write(pte)); +#endif + + return wp; } static inline pte_t pte_mkuffd_wp(pte_t pte) diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 7c9c968a42efedcbbf83b4d9050743710c96d82a..7d4ad8907297c37600c742cca0a2013f93268858 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -47,15 +47,6 @@ do { \ #endif /* !__ASSEMBLY__ */ -/* - * kern_addr_valid() is (1) for FLATMEM and (0) for SPARSEMEM - */ -#ifdef CONFIG_FLATMEM -#define kern_addr_valid(addr) (1) -#else -#define kern_addr_valid(kaddr) (0) -#endif - /* * This is used to calculate the .brk reservation for initial pagetables. * Enough space is reserved to allocate pagetables sufficient to cover all diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index e479491da8d5164497f6684a87d25246202f0f1e..7929327abe009112b59ed62436d28af4929e1424 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -240,7 +240,6 @@ static inline void native_pgd_clear(pgd_t *pgd) #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) #define __swp_entry_to_pmd(x) ((pmd_t) { .pmd = (x).val }) -extern int kern_addr_valid(unsigned long addr); extern void cleanup_highmap(void); #define HAVE_ARCH_UNMAPPED_AREA diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 04f36063ad5468a9917247329046d34464b254d1..38bf837e3554449f21b2384d2b233483573e1565 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -19,6 +19,7 @@ typedef unsigned long pgdval_t; typedef unsigned long pgprotval_t; typedef struct { pteval_t pte; } pte_t; +typedef struct { pmdval_t pmd; } pmd_t; #ifdef CONFIG_X86_5LEVEL extern unsigned int __pgtable_l5_enabled; diff --git a/arch/x86/include/asm/pgtable_areas.h b/arch/x86/include/asm/pgtable_areas.h index d34cce1b995cf133b81058c4f53808880f0efe0a..4f056fb88174bb20036a53e809a9cb4759bd4dc3 100644 --- a/arch/x86/include/asm/pgtable_areas.h +++ b/arch/x86/include/asm/pgtable_areas.h @@ -11,6 +11,12 @@ #define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT) -#define CPU_ENTRY_AREA_MAP_SIZE (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE) +#ifdef CONFIG_X86_32 +#define CPU_ENTRY_AREA_MAP_SIZE (CPU_ENTRY_AREA_PER_CPU + \ + (CPU_ENTRY_AREA_SIZE * NR_CPUS) - \ + CPU_ENTRY_AREA_BASE) +#else +#define CPU_ENTRY_AREA_MAP_SIZE P4D_SIZE +#endif #endif /* _ASM_X86_PGTABLE_AREAS_H */ diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index aa174fed3a71cc8a841b7aaad77e43f429d31a0b..447d4bee25c48c7650b3adbed4606f54b9188743 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -361,11 +361,9 @@ static inline pudval_t native_pud_val(pud_t pud) #endif #if CONFIG_PGTABLE_LEVELS > 2 -typedef struct { pmdval_t pmd; } pmd_t; - static inline pmd_t native_make_pmd(pmdval_t val) { - return (pmd_t) { val }; + return (pmd_t) { .pmd = val }; } static inline pmdval_t native_pmd_val(pmd_t pmd) diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 5f6daea1ee24839756899a9041fa5b939cc838c5..2d13f25b1bd8f332b860f5bd48b6f47102ca0dae 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -4,11 +4,11 @@ #include #include +#include + #include #include -DECLARE_PER_CPU(int, __preempt_count); - /* We use the MSB mostly because its available */ #define 
PREEMPT_NEED_RESCHED 0x80000000 @@ -24,7 +24,7 @@ DECLARE_PER_CPU(int, __preempt_count); */ static __always_inline int preempt_count(void) { - return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED; + return raw_cpu_read_4(pcpu_hot.preempt_count) & ~PREEMPT_NEED_RESCHED; } static __always_inline void preempt_count_set(int pc) @@ -32,10 +32,10 @@ static __always_inline void preempt_count_set(int pc) int old, new; do { - old = raw_cpu_read_4(__preempt_count); + old = raw_cpu_read_4(pcpu_hot.preempt_count); new = (old & PREEMPT_NEED_RESCHED) | (pc & ~PREEMPT_NEED_RESCHED); - } while (raw_cpu_cmpxchg_4(__preempt_count, old, new) != old); + } while (raw_cpu_cmpxchg_4(pcpu_hot.preempt_count, old, new) != old); } /* @@ -44,7 +44,7 @@ static __always_inline void preempt_count_set(int pc) #define init_task_preempt_count(p) do { } while (0) #define init_idle_preempt_count(p, cpu) do { \ - per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \ + per_cpu(pcpu_hot.preempt_count, (cpu)) = PREEMPT_DISABLED; \ } while (0) /* @@ -58,17 +58,17 @@ static __always_inline void preempt_count_set(int pc) static __always_inline void set_preempt_need_resched(void) { - raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED); + raw_cpu_and_4(pcpu_hot.preempt_count, ~PREEMPT_NEED_RESCHED); } static __always_inline void clear_preempt_need_resched(void) { - raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED); + raw_cpu_or_4(pcpu_hot.preempt_count, PREEMPT_NEED_RESCHED); } static __always_inline bool test_preempt_need_resched(void) { - return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED); + return !(raw_cpu_read_4(pcpu_hot.preempt_count) & PREEMPT_NEED_RESCHED); } /* @@ -77,12 +77,12 @@ static __always_inline bool test_preempt_need_resched(void) static __always_inline void __preempt_count_add(int val) { - raw_cpu_add_4(__preempt_count, val); + raw_cpu_add_4(pcpu_hot.preempt_count, val); } static __always_inline void __preempt_count_sub(int val) { - raw_cpu_add_4(__preempt_count, -val); + raw_cpu_add_4(pcpu_hot.preempt_count, -val); } /* @@ -92,7 +92,8 @@ static __always_inline void __preempt_count_sub(int val) */ static __always_inline bool __preempt_count_dec_and_test(void) { - return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var])); + return GEN_UNARY_RMWcc("decl", pcpu_hot.preempt_count, e, + __percpu_arg([var])); } /* @@ -100,7 +101,7 @@ static __always_inline bool __preempt_count_dec_and_test(void) */ static __always_inline bool should_resched(int preempt_offset) { - return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); + return unlikely(raw_cpu_read_4(pcpu_hot.preempt_count) == preempt_offset); } #ifdef CONFIG_PREEMPTION diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h index 02c2cbda4a74ed83bfccc653df9b526be69676da..a7f3d9100adb64849fcd10d731ff461351604f03 100644 --- a/arch/x86/include/asm/processor-flags.h +++ b/arch/x86/include/asm/processor-flags.h @@ -35,7 +35,7 @@ */ #ifdef CONFIG_X86_64 /* Mask off the address space ID and SME encryption bits. 
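   A CR3 value packs the PCID into bits 11:0, the NOFLUSH hint into bit 63
   and the SME C-bit into the address bits, hence the __sme_clr() wrapper
   around the mask.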
*/ -#define CR3_ADDR_MASK __sme_clr(0x7FFFFFFFFFFFF000ull) +#define CR3_ADDR_MASK __sme_clr(PHYSICAL_PAGE_MASK) #define CR3_PCID_MASK 0xFFFull #define CR3_NOFLUSH BIT_ULL(63) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 6836c64b9819e589eedb08f5bd47d255ea95b05d..4e35c66edeb7dc028cccb9737769704d18fb2096 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -377,8 +377,6 @@ struct irq_stack { char stack[IRQ_STACK_SIZE]; } __aligned(IRQ_STACK_SIZE); -DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack); - #ifdef CONFIG_X86_64 struct fixed_percpu_data { /* @@ -401,8 +399,6 @@ static inline unsigned long cpu_kernelmode_gs_base(int cpu) return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu); } -DECLARE_PER_CPU(void *, hardirq_stack_ptr); -DECLARE_PER_CPU(bool, hardirq_stack_inuse); extern asmlinkage void ignore_sysret(void); /* Save actual FS/GS selectors and bases to current->thread */ @@ -411,8 +407,6 @@ void current_save_fsgs(void); #ifdef CONFIG_STACKPROTECTOR DECLARE_PER_CPU(unsigned long, __stack_chk_guard); #endif -DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr); -DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr); #endif /* !X86_64 */ struct perf_event; @@ -517,7 +511,7 @@ static __always_inline unsigned long current_top_of_stack(void) * and around vm86 mode and sp0 on x86_64 is special because of the * entry trampoline. */ - return this_cpu_read_stable(cpu_current_top_of_stack); + return this_cpu_read_stable(pcpu_hot.top_of_stack); } static __always_inline bool on_thread_stack(void) @@ -554,10 +548,9 @@ extern int sysenter_setup(void); /* Defined in head.S */ extern struct desc_ptr early_gdt_descr; -extern void switch_to_new_gdt(int); +extern void switch_gdt_and_percpu_base(int); extern void load_direct_gdt(int); extern void load_fixmap_gdt(int); -extern void load_percpu_segment(int); extern void cpu_init(void); extern void cpu_init_secondary(void); extern void cpu_init_exception_handling(void); diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h index dbb38a6b4dfb6315da4fb345cf82a3b28feeaf80..42b17cf10b10e31d17ae1a02b3c00d193e46e009 100644 --- a/arch/x86/include/asm/qspinlock_paravirt.h +++ b/arch/x86/include/asm/qspinlock_paravirt.h @@ -14,8 +14,6 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text"); #define __pv_queued_spin_unlock __pv_queued_spin_unlock -#define PV_UNLOCK "__raw_callee_save___pv_queued_spin_unlock" -#define PV_UNLOCK_SLOWPATH "__raw_callee_save___pv_queued_spin_unlock_slowpath" /* * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock @@ -37,32 +35,27 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text"); * rsi = lockval (second argument) * rdx = internal variable (set to 0) */ -asm (".pushsection .spinlock.text, \"ax\";" - ".globl " PV_UNLOCK ";" - ".type " PV_UNLOCK ", @function;" - ".align 4,0x90;" - PV_UNLOCK ": " - ASM_ENDBR - FRAME_BEGIN - "push %rdx;" - "mov $0x1,%eax;" - "xor %edx,%edx;" - LOCK_PREFIX "cmpxchg %dl,(%rdi);" - "cmp $0x1,%al;" - "jne .slowpath;" - "pop %rdx;" +#define PV_UNLOCK_ASM \ + FRAME_BEGIN \ + "push %rdx\n\t" \ + "mov $0x1,%eax\n\t" \ + "xor %edx,%edx\n\t" \ + LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t" \ + "cmp $0x1,%al\n\t" \ + "jne .slowpath\n\t" \ + "pop %rdx\n\t" \ + FRAME_END \ + ASM_RET \ + ".slowpath:\n\t" \ + "push %rsi\n\t" \ + "movzbl %al,%esi\n\t" \ + "call __raw_callee_save___pv_queued_spin_unlock_slowpath\n\t" \ 
+ "pop %rsi\n\t" \ + "pop %rdx\n\t" \ FRAME_END - ASM_RET - ".slowpath: " - "push %rsi;" - "movzbl %al,%esi;" - "call " PV_UNLOCK_SLOWPATH ";" - "pop %rsi;" - "pop %rdx;" - FRAME_END - ASM_RET - ".size " PV_UNLOCK ", .-" PV_UNLOCK ";" - ".popsection"); + +DEFINE_PARAVIRT_ASM(__raw_callee_save___pv_queued_spin_unlock, + PV_UNLOCK_ASM, .spinlock.text); #else /* CONFIG_64BIT */ diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h index b45c4d27fd46e5f129876f8dac416b0ee33bc0af..a5e89641bd2dac7e9fa5e1ab548369836640908a 100644 --- a/arch/x86/include/asm/set_memory.h +++ b/arch/x86/include/asm/set_memory.h @@ -6,6 +6,9 @@ #include #include +#define set_memory_rox set_memory_rox +int set_memory_rox(unsigned long addr, int numpages); + /* * The set_memory_* API can be used to change various attributes of a virtual * address range. The attributes include: diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index a73bced40e2419d0cec28c1fe8327db24342f1d1..b4dbb20dab1a11d509b4f702f4bde6a6269b4d18 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -3,10 +3,10 @@ #define _ASM_X86_SMP_H #ifndef __ASSEMBLY__ #include -#include -#include #include +#include +#include extern int smp_num_siblings; extern unsigned int num_processors; @@ -19,7 +19,6 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map); DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id); DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id); -DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid); DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid); @@ -150,11 +149,10 @@ __visible void smp_call_function_single_interrupt(struct pt_regs *r); /* * This function is needed by all SMP systems. It must _always_ be valid - * from the initial startup. We map APIC_BASE very early in page_setup(), - * so this is correct in the x86 case. + * from the initial startup. */ -#define raw_smp_processor_id() this_cpu_read(cpu_number) -#define __smp_processor_id() __this_cpu_read(cpu_number) +#define raw_smp_processor_id() this_cpu_read(pcpu_hot.cpu_number) +#define __smp_processor_id() __this_cpu_read(pcpu_hot.cpu_number) #ifdef CONFIG_X86_32 extern int safe_smp_processor_id(void); diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 0361626841bc00da38c9dd1713e6995914d29e5c..cb1ee53ad3b18900a6945b11bb4e267d5d77cd6e 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -5,6 +5,8 @@ #include #include +#include + /* * 32-bit intercept words in the VMCB Control Area, starting * at Byte offset 000h. @@ -161,7 +163,10 @@ struct __attribute__ ((__packed__)) vmcb_control_area { * Offset 0x3e0, 32 bytes reserved * for use by hypervisor/software. 
*/ - u8 reserved_sw[32]; + union { + struct hv_vmcb_enlightenments hv_enlightenments; + u8 reserved_sw[32]; + }; }; @@ -293,12 +298,13 @@ struct vmcb_save_area { struct vmcb_seg ldtr; struct vmcb_seg idtr; struct vmcb_seg tr; - u8 reserved_1[42]; + /* Reserved fields are named following their struct offset */ + u8 reserved_0xa0[42]; u8 vmpl; u8 cpl; - u8 reserved_2[4]; + u8 reserved_0xcc[4]; u64 efer; - u8 reserved_3[112]; + u8 reserved_0xd8[112]; u64 cr4; u64 cr3; u64 cr0; @@ -306,7 +312,7 @@ struct vmcb_save_area { u64 dr6; u64 rflags; u64 rip; - u8 reserved_4[88]; + u8 reserved_0x180[88]; u64 rsp; u64 s_cet; u64 ssp; @@ -321,14 +327,14 @@ struct vmcb_save_area { u64 sysenter_esp; u64 sysenter_eip; u64 cr2; - u8 reserved_5[32]; + u8 reserved_0x248[32]; u64 g_pat; u64 dbgctl; u64 br_from; u64 br_to; u64 last_excp_from; u64 last_excp_to; - u8 reserved_6[72]; + u8 reserved_0x298[72]; u32 spec_ctrl; /* Guest version of SPEC_CTRL at 0x2E0 */ } __packed; @@ -349,12 +355,12 @@ struct sev_es_save_area { u64 vmpl2_ssp; u64 vmpl3_ssp; u64 u_cet; - u8 reserved_1[2]; + u8 reserved_0xc8[2]; u8 vmpl; u8 cpl; - u8 reserved_2[4]; + u8 reserved_0xcc[4]; u64 efer; - u8 reserved_3[104]; + u8 reserved_0xd8[104]; u64 xss; u64 cr4; u64 cr3; @@ -371,7 +377,7 @@ struct sev_es_save_area { u64 dr1_addr_mask; u64 dr2_addr_mask; u64 dr3_addr_mask; - u8 reserved_4[24]; + u8 reserved_0x1c0[24]; u64 rsp; u64 s_cet; u64 ssp; @@ -386,21 +392,21 @@ struct sev_es_save_area { u64 sysenter_esp; u64 sysenter_eip; u64 cr2; - u8 reserved_5[32]; + u8 reserved_0x248[32]; u64 g_pat; u64 dbgctl; u64 br_from; u64 br_to; u64 last_excp_from; u64 last_excp_to; - u8 reserved_7[80]; + u8 reserved_0x298[80]; u32 pkru; - u8 reserved_8[20]; - u64 reserved_9; /* rax already available at 0x01f8 */ + u32 tsc_aux; + u8 reserved_0x2f0[24]; u64 rcx; u64 rdx; u64 rbx; - u64 reserved_10; /* rsp already available at 0x01d8 */ + u64 reserved_0x320; /* rsp already available at 0x01d8 */ u64 rbp; u64 rsi; u64 rdi; @@ -412,7 +418,7 @@ struct sev_es_save_area { u64 r13; u64 r14; u64 r15; - u8 reserved_11[16]; + u8 reserved_0x380[16]; u64 guest_exit_info_1; u64 guest_exit_info_2; u64 guest_exit_int_info; @@ -425,7 +431,7 @@ struct sev_es_save_area { u64 pcpu_id; u64 event_inj; u64 xcr0; - u8 reserved_12[16]; + u8 reserved_0x3f0[16]; /* Floating point area */ u64 x87_dp; @@ -443,23 +449,23 @@ struct sev_es_save_area { } __packed; struct ghcb_save_area { - u8 reserved_1[203]; + u8 reserved_0x0[203]; u8 cpl; - u8 reserved_2[116]; + u8 reserved_0xcc[116]; u64 xss; - u8 reserved_3[24]; + u8 reserved_0x148[24]; u64 dr7; - u8 reserved_4[16]; + u8 reserved_0x168[16]; u64 rip; - u8 reserved_5[88]; + u8 reserved_0x180[88]; u64 rsp; - u8 reserved_6[24]; + u8 reserved_0x1e0[24]; u64 rax; - u8 reserved_7[264]; + u8 reserved_0x200[264]; u64 rcx; u64 rdx; u64 rbx; - u8 reserved_8[8]; + u8 reserved_0x320[8]; u64 rbp; u64 rsi; u64 rdi; @@ -471,12 +477,12 @@ struct ghcb_save_area { u64 r13; u64 r14; u64 r15; - u8 reserved_9[16]; + u8 reserved_0x380[16]; u64 sw_exit_code; u64 sw_exit_info_1; u64 sw_exit_info_2; u64 sw_scratch; - u8 reserved_10[56]; + u8 reserved_0x3b0[56]; u64 xcr0; u8 valid_bitmap[16]; u64 x87_state_gpa; @@ -490,7 +496,7 @@ struct ghcb { u8 shared_buffer[GHCB_SHARED_BUF_SIZE]; - u8 reserved_1[10]; + u8 reserved_0xff0[10]; u16 protocol_version; /* negotiated SEV-ES/GHCB protocol version */ u32 ghcb_usage; } __packed; @@ -502,6 +508,9 @@ struct ghcb { #define EXPECTED_VMCB_CONTROL_AREA_SIZE 1024 #define EXPECTED_GHCB_SIZE PAGE_SIZE +#define 
BUILD_BUG_RESERVED_OFFSET(x, y) \ + ASSERT_STRUCT_OFFSET(struct x, reserved ## _ ## y, y) + static inline void __unused_size_checks(void) { BUILD_BUG_ON(sizeof(struct vmcb_save_area) != EXPECTED_VMCB_SAVE_AREA_SIZE); @@ -509,6 +518,39 @@ static inline void __unused_size_checks(void) BUILD_BUG_ON(sizeof(struct sev_es_save_area) != EXPECTED_SEV_ES_SAVE_AREA_SIZE); BUILD_BUG_ON(sizeof(struct vmcb_control_area) != EXPECTED_VMCB_CONTROL_AREA_SIZE); BUILD_BUG_ON(sizeof(struct ghcb) != EXPECTED_GHCB_SIZE); + + /* Check offsets of reserved fields */ + + BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0xa0); + BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0xcc); + BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0xd8); + BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x180); + BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x248); + BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x298); + + BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0xc8); + BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0xcc); + BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0xd8); + BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x1c0); + BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x248); + BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x298); + BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x2f0); + BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x320); + BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x380); + BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x3f0); + + BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x0); + BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0xcc); + BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x148); + BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x168); + BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x180); + BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x1e0); + BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x200); + BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x320); + BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x380); + BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x3b0); + + BUILD_BUG_RESERVED_OFFSET(ghcb, 0xff0); } struct vmcb { diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h index 1cc15528ce29bab81582b4d23c02ba46c9309e49..f4b87f08f5c50d6083977f22d0043c318def3002 100644 --- a/arch/x86/include/asm/text-patching.h +++ b/arch/x86/include/asm/text-patching.h @@ -45,6 +45,7 @@ extern void *text_poke(void *addr, const void *opcode, size_t len); extern void text_poke_sync(void); extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len); extern void *text_poke_copy(void *addr, const void *opcode, size_t len); +extern void *text_poke_copy_locked(void *addr, const void *opcode, size_t len, bool core_ok); extern void *text_poke_set(void *addr, int c, size_t len); extern int poke_int3_handler(struct pt_regs *regs); extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate); diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 46de10a809ecbd81aa30d28afd78ebcc266e97a5..e48deab8901d4ecb9fddaaf334247e27fb6c1762 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -53,14 +53,6 @@ /* Architectural interrupt line count. 
*/ #define KVM_NR_INTERRUPTS 256 -struct kvm_memory_alias { - __u32 slot; /* this has a different namespace than memory slots */ - __u32 flags; - __u64 guest_phys_addr; - __u64 memory_size; - __u64 target_phys_addr; -}; - /* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */ struct kvm_pic_state { __u8 last_irr; /* edge detection */ @@ -214,6 +206,8 @@ struct kvm_msr_list { struct kvm_msr_filter_range { #define KVM_MSR_FILTER_READ (1 << 0) #define KVM_MSR_FILTER_WRITE (1 << 1) +#define KVM_MSR_FILTER_RANGE_VALID_MASK (KVM_MSR_FILTER_READ | \ + KVM_MSR_FILTER_WRITE) __u32 flags; __u32 nmsrs; /* number of msrs in bitmap */ __u32 base; /* MSR index the bitmap starts at */ @@ -222,8 +216,11 @@ struct kvm_msr_filter_range { #define KVM_MSR_FILTER_MAX_RANGES 16 struct kvm_msr_filter { +#ifndef __KERNEL__ #define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0) +#endif #define KVM_MSR_FILTER_DEFAULT_DENY (1 << 0) +#define KVM_MSR_FILTER_VALID_MASK (KVM_MSR_FILTER_DEFAULT_DENY) __u32 flags; struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES]; }; diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index cceaafdd2d84a5725db58e260df05bed2f52ff0f..96d51bbc2bd420b95c31fd24f50a242a2c7be205 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -143,6 +143,8 @@ obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev.o obj-$(CONFIG_CFI_CLANG) += cfi.o +obj-$(CONFIG_CALL_THUNKS) += callthunks.o + ### # 64 bit specific files ifeq ($(CONFIG_X86_64),y) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index a9bea860e22ab0cf10a37eea51275949b4a03d99..7d8c3cbde3685688b1e87ecff1a4fe813c772ff4 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -116,6 +116,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len) extern s32 __retpoline_sites[], __retpoline_sites_end[]; extern s32 __return_sites[], __return_sites_end[]; +extern s32 __cfi_sites[], __cfi_sites_end[]; extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[]; extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; extern s32 __smp_locks[], __smp_locks_end[]; @@ -377,6 +378,56 @@ static int emit_indirect(int op, int reg, u8 *bytes) return i; } +static inline bool is_jcc32(struct insn *insn) +{ + /* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */ + return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80; +} + +static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes) +{ + u8 op = insn->opcode.bytes[0]; + int i = 0; + + /* + * Clang does 'weird' Jcc __x86_indirect_thunk_r11 conditional + * tail-calls. Deal with them. + */ + if (is_jcc32(insn)) { + bytes[i++] = op; + op = insn->opcode.bytes[1]; + goto clang_jcc; + } + + if (insn->length == 6) + bytes[i++] = 0x2e; /* CS-prefix */ + + switch (op) { + case CALL_INSN_OPCODE: + __text_gen_insn(bytes+i, op, addr+i, + __x86_indirect_call_thunk_array[reg], + CALL_INSN_SIZE); + i += CALL_INSN_SIZE; + break; + + case JMP32_INSN_OPCODE: +clang_jcc: + __text_gen_insn(bytes+i, op, addr+i, + __x86_indirect_jump_thunk_array[reg], + JMP32_INSN_SIZE); + i += JMP32_INSN_SIZE; + break; + + default: + WARN(1, "%pS %px %*ph\n", addr, addr, 6, addr); + return -1; + } + + WARN_ON_ONCE(i != insn->length); + + return i; +} + /* * Rewrite the compiler generated retpoline thunk calls. 
* @@ -409,8 +460,12 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes) BUG_ON(reg == 4); if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) && - !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) + !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { + if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH)) + return emit_call_track_retpoline(addr, insn, reg, bytes); + return -1; + } op = insn->opcode.bytes[0]; @@ -427,8 +482,7 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes) * [ NOP ] * 1: */ - /* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */ - if (op == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80) { + if (is_jcc32(insn)) { cc = insn->opcode.bytes[1] & 0xf; cc ^= 1; /* invert condition */ @@ -518,6 +572,11 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) } #ifdef CONFIG_RETHUNK + +#ifdef CONFIG_CALL_THUNKS +void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; +#endif + /* * Rewrite the compiler generated return thunk tail-calls. * @@ -533,14 +592,18 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes) { int i = 0; - if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) - return -1; + if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { + if (x86_return_thunk == __x86_return_thunk) + return -1; - bytes[i++] = RET_INSN_OPCODE; + i = JMP32_INSN_SIZE; + __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i); + } else { + bytes[i++] = RET_INSN_OPCODE; + } for (; i < insn->length;) bytes[i++] = INT3_INSN_OPCODE; - return i; } @@ -594,6 +657,28 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end) { } #ifdef CONFIG_X86_KERNEL_IBT +static void poison_endbr(void *addr, bool warn) +{ + u32 endbr, poison = gen_endbr_poison(); + + if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr))) + return; + + if (!is_endbr(endbr)) { + WARN_ON_ONCE(warn); + return; + } + + DPRINTK("ENDBR at: %pS (%px)", addr, addr); + + /* + * When we have IBT, the lack of ENDBR will trigger #CP + */ + DUMP_BYTES(((u8*)addr), 4, "%px: orig: ", addr); + DUMP_BYTES(((u8*)&poison), 4, "%px: repl: ", addr); + text_poke_early(addr, &poison, 4); +} + /* * Generated by: objtool --ibt */ @@ -602,31 +687,391 @@ void __init_or_module noinline apply_ibt_endbr(s32 *start, s32 *end) s32 *s; for (s = start; s < end; s++) { - u32 endbr, poison = gen_endbr_poison(); void *addr = (void *)s + *s; - if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr))) + poison_endbr(addr, true); + if (IS_ENABLED(CONFIG_FINEIBT)) + poison_endbr(addr - 16, false); + } +} + +#else + +void __init_or_module apply_ibt_endbr(s32 *start, s32 *end) { } + +#endif /* CONFIG_X86_KERNEL_IBT */ + +#ifdef CONFIG_FINEIBT + +enum cfi_mode { + CFI_DEFAULT, + CFI_OFF, + CFI_KCFI, + CFI_FINEIBT, +}; + +static enum cfi_mode cfi_mode __ro_after_init = CFI_DEFAULT; +static bool cfi_rand __ro_after_init = true; +static u32 cfi_seed __ro_after_init; + +/* + * Re-hash the CFI hash with a boot-time seed while making sure the result is + * not a valid ENDBR instruction. 
+ */ +static u32 cfi_rehash(u32 hash) +{ + hash ^= cfi_seed; + while (unlikely(is_endbr(hash) || is_endbr(-hash))) { + bool lsb = hash & 1; + hash >>= 1; + if (lsb) + hash ^= 0x80200003; + } + return hash; +} + +static __init int cfi_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + while (str) { + char *next = strchr(str, ','); + if (next) { + *next = 0; + next++; + } + + if (!strcmp(str, "auto")) { + cfi_mode = CFI_DEFAULT; + } else if (!strcmp(str, "off")) { + cfi_mode = CFI_OFF; + cfi_rand = false; + } else if (!strcmp(str, "kcfi")) { + cfi_mode = CFI_KCFI; + } else if (!strcmp(str, "fineibt")) { + cfi_mode = CFI_FINEIBT; + } else if (!strcmp(str, "norand")) { + cfi_rand = false; + } else { + pr_err("Ignoring unknown cfi option (%s).", str); + } + + str = next; + } + + return 0; +} +early_param("cfi", cfi_parse_cmdline); + +/* + * kCFI FineIBT + * + * __cfi_\func: __cfi_\func: + * movl $0x12345678,%eax // 5 endbr64 // 4 + * nop subl $0x12345678,%r10d // 7 + * nop jz 1f // 2 + * nop ud2 // 2 + * nop 1: nop // 1 + * nop + * nop + * nop + * nop + * nop + * nop + * nop + * + * + * caller: caller: + * movl $(-0x12345678),%r10d // 6 movl $0x12345678,%r10d // 6 + * addl $-15(%r11),%r10d // 4 sub $16,%r11 // 4 + * je 1f // 2 nop4 // 4 + * ud2 // 2 + * 1: call __x86_indirect_thunk_r11 // 5 call *%r11; nop2; // 5 + * + */ + +asm( ".pushsection .rodata \n" + "fineibt_preamble_start: \n" + " endbr64 \n" + " subl $0x12345678, %r10d \n" + " je fineibt_preamble_end \n" + " ud2 \n" + " nop \n" + "fineibt_preamble_end: \n" + ".popsection\n" +); + +extern u8 fineibt_preamble_start[]; +extern u8 fineibt_preamble_end[]; + +#define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start) +#define fineibt_preamble_hash 7 + +asm( ".pushsection .rodata \n" + "fineibt_caller_start: \n" + " movl $0x12345678, %r10d \n" + " sub $16, %r11 \n" + ASM_NOP4 + "fineibt_caller_end: \n" + ".popsection \n" +); + +extern u8 fineibt_caller_start[]; +extern u8 fineibt_caller_end[]; + +#define fineibt_caller_size (fineibt_caller_end - fineibt_caller_start) +#define fineibt_caller_hash 2 + +#define fineibt_caller_jmp (fineibt_caller_size - 2) + +static u32 decode_preamble_hash(void *addr) +{ + u8 *p = addr; + + /* b8 78 56 34 12 mov $0x12345678,%eax */ + if (p[0] == 0xb8) + return *(u32 *)(addr + 1); + + return 0; /* invalid hash value */ +} + +static u32 decode_caller_hash(void *addr) +{ + u8 *p = addr; + + /* 41 ba 78 56 34 12 mov $0x12345678,%r10d */ + if (p[0] == 0x41 && p[1] == 0xba) + return -*(u32 *)(addr + 2); + + /* e8 0c 78 56 34 12 jmp.d8 +12 */ + if (p[0] == JMP8_INSN_OPCODE && p[1] == fineibt_caller_jmp) + return -*(u32 *)(addr + 2); + + return 0; /* invalid hash value */ +} + +/* .retpoline_sites */ +static int cfi_disable_callers(s32 *start, s32 *end) +{ + /* + * Disable kCFI by patching in a JMP.d8, this leaves the hash immediate + * in tact for later usage. Also see decode_caller_hash() and + * cfi_rewrite_callers(). + */ + const u8 jmp[] = { JMP8_INSN_OPCODE, fineibt_caller_jmp }; + s32 *s; + + for (s = start; s < end; s++) { + void *addr = (void *)s + *s; + u32 hash; + + addr -= fineibt_caller_size; + hash = decode_caller_hash(addr); + if (!hash) /* nocfi callers */ continue; - if (WARN_ON_ONCE(!is_endbr(endbr))) + text_poke_early(addr, jmp, 2); + } + + return 0; +} + +static int cfi_enable_callers(s32 *start, s32 *end) +{ + /* + * Re-enable kCFI, undo what cfi_disable_callers() did. 
+ */ + const u8 mov[] = { 0x41, 0xba }; + s32 *s; + + for (s = start; s < end; s++) { + void *addr = (void *)s + *s; + u32 hash; + + addr -= fineibt_caller_size; + hash = decode_caller_hash(addr); + if (!hash) /* nocfi callers */ continue; - DPRINTK("ENDBR at: %pS (%px)", addr, addr); + text_poke_early(addr, mov, 2); + } - /* - * When we have IBT, the lack of ENDBR will trigger #CP - */ - DUMP_BYTES(((u8*)addr), 4, "%px: orig: ", addr); - DUMP_BYTES(((u8*)&poison), 4, "%px: repl: ", addr); - text_poke_early(addr, &poison, 4); + return 0; +} + +/* .cfi_sites */ +static int cfi_rand_preamble(s32 *start, s32 *end) +{ + s32 *s; + + for (s = start; s < end; s++) { + void *addr = (void *)s + *s; + u32 hash; + + hash = decode_preamble_hash(addr); + if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n", + addr, addr, 5, addr)) + return -EINVAL; + + hash = cfi_rehash(hash); + text_poke_early(addr + 1, &hash, 4); + } + + return 0; +} + +static int cfi_rewrite_preamble(s32 *start, s32 *end) +{ + s32 *s; + + for (s = start; s < end; s++) { + void *addr = (void *)s + *s; + u32 hash; + + hash = decode_preamble_hash(addr); + if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n", + addr, addr, 5, addr)) + return -EINVAL; + + text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size); + WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678); + text_poke_early(addr + fineibt_preamble_hash, &hash, 4); + } + + return 0; +} + +/* .retpoline_sites */ +static int cfi_rand_callers(s32 *start, s32 *end) +{ + s32 *s; + + for (s = start; s < end; s++) { + void *addr = (void *)s + *s; + u32 hash; + + addr -= fineibt_caller_size; + hash = decode_caller_hash(addr); + if (hash) { + hash = -cfi_rehash(hash); + text_poke_early(addr + 2, &hash, 4); + } + } + + return 0; +} + +static int cfi_rewrite_callers(s32 *start, s32 *end) +{ + s32 *s; + + for (s = start; s < end; s++) { + void *addr = (void *)s + *s; + u32 hash; + + addr -= fineibt_caller_size; + hash = decode_caller_hash(addr); + if (hash) { + text_poke_early(addr, fineibt_caller_start, fineibt_caller_size); + WARN_ON(*(u32 *)(addr + fineibt_caller_hash) != 0x12345678); + text_poke_early(addr + fineibt_caller_hash, &hash, 4); + } + /* rely on apply_retpolines() */ } + + return 0; +} + +static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, + s32 *start_cfi, s32 *end_cfi, bool builtin) +{ + int ret; + + if (WARN_ONCE(fineibt_preamble_size != 16, + "FineIBT preamble wrong size: %ld", fineibt_preamble_size)) + return; + + if (cfi_mode == CFI_DEFAULT) { + cfi_mode = CFI_KCFI; + if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT)) + cfi_mode = CFI_FINEIBT; + } + + /* + * Rewrite the callers to not use the __cfi_ stubs, such that we might + * rewrite them. This disables all CFI. If this succeeds but any of the + * later stages fails, we're without CFI. 
+ */ + ret = cfi_disable_callers(start_retpoline, end_retpoline); + if (ret) + goto err; + + if (cfi_rand) { + if (builtin) + cfi_seed = get_random_u32(); + + ret = cfi_rand_preamble(start_cfi, end_cfi); + if (ret) + goto err; + + ret = cfi_rand_callers(start_retpoline, end_retpoline); + if (ret) + goto err; + } + + switch (cfi_mode) { + case CFI_OFF: + if (builtin) + pr_info("Disabling CFI\n"); + return; + + case CFI_KCFI: + ret = cfi_enable_callers(start_retpoline, end_retpoline); + if (ret) + goto err; + + if (builtin) + pr_info("Using kCFI\n"); + return; + + case CFI_FINEIBT: + ret = cfi_rewrite_preamble(start_cfi, end_cfi); + if (ret) + goto err; + + ret = cfi_rewrite_callers(start_retpoline, end_retpoline); + if (ret) + goto err; + + if (builtin) + pr_info("Using FineIBT CFI\n"); + return; + + default: + break; + } + +err: + pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n"); } #else -void __init_or_module apply_ibt_endbr(s32 *start, s32 *end) { } +static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, + s32 *start_cfi, s32 *end_cfi, bool builtin) +{ +} -#endif /* CONFIG_X86_KERNEL_IBT */ +#endif + +void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, + s32 *start_cfi, s32 *end_cfi) +{ + return __apply_fineibt(start_retpoline, end_retpoline, + start_cfi, end_cfi, + /* .builtin = */ false); +} #ifdef CONFIG_SMP static void alternatives_smp_lock(const s32 *start, const s32 *end, @@ -934,6 +1379,9 @@ void __init alternative_instructions(void) */ apply_paravirt(__parainstructions, __parainstructions_end); + __apply_fineibt(__retpoline_sites, __retpoline_sites_end, + __cfi_sites, __cfi_sites_end, true); + /* * Rewrite the retpolines, must be done before alternatives since * those can rewrite the retpoline thunks. @@ -947,6 +1395,12 @@ void __init alternative_instructions(void) */ apply_alternatives(__alt_instructions, __alt_instructions_end); + /* + * Now all calls are established. Apply the call thunks if + * required. + */ + callthunks_patch_builtin_calls(); + apply_ibt_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end); #ifdef CONFIG_SMP @@ -1236,27 +1690,15 @@ void *text_poke_kgdb(void *addr, const void *opcode, size_t len) return __text_poke(text_poke_memcpy, addr, opcode, len); } -/** - * text_poke_copy - Copy instructions into (an unused part of) RX memory - * @addr: address to modify - * @opcode: source of the copy - * @len: length to copy, could be more than 2x PAGE_SIZE - * - * Not safe against concurrent execution; useful for JITs to dump - * new code blocks into unused regions of RX memory. Can be used in - * conjunction with synchronize_rcu_tasks() to wait for existing - * execution to quiesce after having made sure no existing functions - * pointers are live. 
- */ -void *text_poke_copy(void *addr, const void *opcode, size_t len) +void *text_poke_copy_locked(void *addr, const void *opcode, size_t len, + bool core_ok) { unsigned long start = (unsigned long)addr; size_t patched = 0; - if (WARN_ON_ONCE(core_kernel_text(start))) + if (WARN_ON_ONCE(!core_ok && core_kernel_text(start))) return NULL; - mutex_lock(&text_mutex); while (patched < len) { unsigned long ptr = start + patched; size_t s; @@ -1266,6 +1708,25 @@ void *text_poke_copy(void *addr, const void *opcode, size_t len) __text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s); patched += s; } + return addr; +} + +/** + * text_poke_copy - Copy instructions into (an unused part of) RX memory + * @addr: address to modify + * @opcode: source of the copy + * @len: length to copy, could be more than 2x PAGE_SIZE + * + * Not safe against concurrent execution; useful for JITs to dump + * new code blocks into unused regions of RX memory. Can be used in + * conjunction with synchronize_rcu_tasks() to wait for existing + * execution to quiesce after having made sure no existing functions + * pointers are live. + */ +void *text_poke_copy(void *addr, const void *opcode, size_t len) +{ + mutex_lock(&text_mutex); + addr = text_poke_copy_locked(addr, opcode, len, false); mutex_unlock(&text_mutex); return addr; } @@ -1681,11 +2142,6 @@ void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const voi { struct text_poke_loc *tp; - if (unlikely(system_state == SYSTEM_BOOTING)) { - text_poke_early(addr, opcode, len); - return; - } - text_poke_flush(addr); tp = &tp_vec[tp_vec_nr++]; @@ -1707,11 +2163,6 @@ void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void * { struct text_poke_loc tp; - if (unlikely(system_state == SYSTEM_BOOTING)) { - text_poke_early(addr, opcode, len); - return; - } - text_poke_loc_init(&tp, addr, opcode, len, emulate); text_poke_bp_batch(&tp, 1); } diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 19a0207e529fe33f6d340ae36a6f5fcbf60ea475..56a917df410d30b49ed3dedc709b32f8c34decfd 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -504,7 +504,7 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) } a = aper + iommu_size; - iommu_size -= round_up(a, PMD_PAGE_SIZE) - a; + iommu_size -= round_up(a, PMD_SIZE) - a; if (iommu_size < 64*1024*1024) { pr_warn("PCI-DMA: Warning: Small IOMMU %luMB." 
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 437308004ef2e4f1345947ce318c62bc56a6036f..82c783da16a863d9b0318174ef96b076866700c1 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -107,4 +107,9 @@ static void __used common(void) OFFSET(TSS_sp0, tss_struct, x86_tss.sp0); OFFSET(TSS_sp1, tss_struct, x86_tss.sp1); OFFSET(TSS_sp2, tss_struct, x86_tss.sp2); + OFFSET(X86_top_of_stack, pcpu_hot, top_of_stack); +#ifdef CONFIG_CALL_DEPTH_TRACKING + OFFSET(X86_call_depth, pcpu_hot, call_depth); +#endif + } diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 9b698215d2618e6b01e3112bc5ea1047447dc8a9..bb65371ea9df521e37cf6d0c27688a9fbf47af19 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -57,7 +57,7 @@ int main(void) BLANK(); #ifdef CONFIG_STACKPROTECTOR - DEFINE(stack_canary_offset, offsetof(struct fixed_percpu_data, stack_canary)); + OFFSET(FIXED_stack_canary, fixed_percpu_data, stack_canary); BLANK(); #endif return 0; diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c new file mode 100644 index 0000000000000000000000000000000000000000..7d2c75ec9a8cd9d145085f4b9007a8ce4136ede9 --- /dev/null +++ b/arch/x86/kernel/callthunks.c @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "callthunks: " fmt + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int __initdata_or_module debug_callthunks; + +#define prdbg(fmt, args...) \ +do { \ + if (debug_callthunks) \ + printk(KERN_DEBUG pr_fmt(fmt), ##args); \ +} while(0) + +static int __init debug_thunks(char *str) +{ + debug_callthunks = 1; + return 1; +} +__setup("debug-callthunks", debug_thunks); + +#ifdef CONFIG_CALL_THUNKS_DEBUG +DEFINE_PER_CPU(u64, __x86_call_count); +DEFINE_PER_CPU(u64, __x86_ret_count); +DEFINE_PER_CPU(u64, __x86_stuffs_count); +DEFINE_PER_CPU(u64, __x86_ctxsw_count); +EXPORT_SYMBOL_GPL(__x86_ctxsw_count); +EXPORT_SYMBOL_GPL(__x86_call_count); +#endif + +extern s32 __call_sites[], __call_sites_end[]; + +struct thunk_desc { + void *template; + unsigned int template_size; +}; + +struct core_text { + unsigned long base; + unsigned long end; + const char *name; +}; + +static bool thunks_initialized __ro_after_init; + +static const struct core_text builtin_coretext = { + .base = (unsigned long)_text, + .end = (unsigned long)_etext, + .name = "builtin", +}; + +asm ( + ".pushsection .rodata \n" + ".global skl_call_thunk_template \n" + "skl_call_thunk_template: \n" + __stringify(INCREMENT_CALL_DEPTH)" \n" + ".global skl_call_thunk_tail \n" + "skl_call_thunk_tail: \n" + ".popsection \n" +); + +extern u8 skl_call_thunk_template[]; +extern u8 skl_call_thunk_tail[]; + +#define SKL_TMPL_SIZE \ + ((unsigned int)(skl_call_thunk_tail - skl_call_thunk_template)) + +extern void error_entry(void); +extern void xen_error_entry(void); +extern void paranoid_entry(void); + +static inline bool within_coretext(const struct core_text *ct, void *addr) +{ + unsigned long p = (unsigned long)addr; + + return ct->base <= p && p < ct->end; +} + +static inline bool within_module_coretext(void *addr) +{ + bool ret = false; + +#ifdef CONFIG_MODULES + struct module *mod; + + preempt_disable(); + mod = __module_address((unsigned long)addr); + if (mod && within_module_core((unsigned long)addr, mod)) + ret = true; + preempt_enable(); +#endif + 
return ret; +} + +static bool is_coretext(const struct core_text *ct, void *addr) +{ + if (ct && within_coretext(ct, addr)) + return true; + if (within_coretext(&builtin_coretext, addr)) + return true; + return within_module_coretext(addr); +} + +static __init_or_module bool skip_addr(void *dest) +{ + if (dest == error_entry) + return true; + if (dest == paranoid_entry) + return true; + if (dest == xen_error_entry) + return true; + /* Does FILL_RSB... */ + if (dest == __switch_to_asm) + return true; + /* Accounts directly */ + if (dest == ret_from_fork) + return true; +#ifdef CONFIG_HOTPLUG_CPU + if (dest == start_cpu0) + return true; +#endif +#ifdef CONFIG_FUNCTION_TRACER + if (dest == __fentry__) + return true; +#endif +#ifdef CONFIG_KEXEC_CORE + if (dest >= (void *)relocate_kernel && + dest < (void*)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE) + return true; +#endif +#ifdef CONFIG_XEN + if (dest >= (void *)hypercall_page && + dest < (void*)hypercall_page + PAGE_SIZE) + return true; +#endif + return false; +} + +static __init_or_module void *call_get_dest(void *addr) +{ + struct insn insn; + void *dest; + int ret; + + ret = insn_decode_kernel(&insn, addr); + if (ret) + return ERR_PTR(ret); + + /* Patched out call? */ + if (insn.opcode.bytes[0] != CALL_INSN_OPCODE) + return NULL; + + dest = addr + insn.length + insn.immediate.value; + if (skip_addr(dest)) + return NULL; + return dest; +} + +static const u8 nops[] = { + 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, + 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, + 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, + 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, +}; + +static __init_or_module void *patch_dest(void *dest, bool direct) +{ + unsigned int tsize = SKL_TMPL_SIZE; + u8 *pad = dest - tsize; + + /* Already patched? 
*/ + if (!bcmp(pad, skl_call_thunk_template, tsize)) + return pad; + + /* Ensure there are nops */ + if (bcmp(pad, nops, tsize)) { + pr_warn_once("Invalid padding area for %pS\n", dest); + return NULL; + } + + if (direct) + memcpy(pad, skl_call_thunk_template, tsize); + else + text_poke_copy_locked(pad, skl_call_thunk_template, tsize, true); + return pad; +} + +static __init_or_module void patch_call(void *addr, const struct core_text *ct) +{ + void *pad, *dest; + u8 bytes[8]; + + if (!within_coretext(ct, addr)) + return; + + dest = call_get_dest(addr); + if (!dest || WARN_ON_ONCE(IS_ERR(dest))) + return; + + if (!is_coretext(ct, dest)) + return; + + pad = patch_dest(dest, within_coretext(ct, dest)); + if (!pad) + return; + + prdbg("Patch call at: %pS %px to %pS %px -> %px \n", addr, addr, + dest, dest, pad); + __text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE); + text_poke_early(addr, bytes, CALL_INSN_SIZE); +} + +static __init_or_module void +patch_call_sites(s32 *start, s32 *end, const struct core_text *ct) +{ + s32 *s; + + for (s = start; s < end; s++) + patch_call((void *)s + *s, ct); +} + +static __init_or_module void +patch_paravirt_call_sites(struct paravirt_patch_site *start, + struct paravirt_patch_site *end, + const struct core_text *ct) +{ + struct paravirt_patch_site *p; + + for (p = start; p < end; p++) + patch_call(p->instr, ct); +} + +static __init_or_module void +callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct) +{ + prdbg("Patching call sites %s\n", ct->name); + patch_call_sites(cs->call_start, cs->call_end, ct); + patch_paravirt_call_sites(cs->pv_start, cs->pv_end, ct); + prdbg("Patching call sites done%s\n", ct->name); +} + +void __init callthunks_patch_builtin_calls(void) +{ + struct callthunk_sites cs = { + .call_start = __call_sites, + .call_end = __call_sites_end, + .pv_start = __parainstructions, + .pv_end = __parainstructions_end + }; + + if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH)) + return; + + pr_info("Setting up call depth tracking\n"); + mutex_lock(&text_mutex); + callthunks_setup(&cs, &builtin_coretext); + static_call_force_reinit(); + thunks_initialized = true; + mutex_unlock(&text_mutex); +} + +void *callthunks_translate_call_dest(void *dest) +{ + void *target; + + lockdep_assert_held(&text_mutex); + + if (!thunks_initialized || skip_addr(dest)) + return dest; + + if (!is_coretext(NULL, dest)) + return dest; + + target = patch_dest(dest, false); + return target ? : dest; +} + +bool is_callthunk(void *addr) +{ + unsigned int tmpl_size = SKL_TMPL_SIZE; + void *tmpl = skl_call_thunk_template; + unsigned long dest; + + dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT); + if (!thunks_initialized || skip_addr((void *)dest)) + return false; + + return !bcmp((void *)(dest - tmpl_size), tmpl, tmpl_size); +} + +#ifdef CONFIG_BPF_JIT +int x86_call_depth_emit_accounting(u8 **pprog, void *func) +{ + unsigned int tmpl_size = SKL_TMPL_SIZE; + void *tmpl = skl_call_thunk_template; + + if (!thunks_initialized) + return 0; + + /* Is function call target a thunk? 
*/ + if (func && is_callthunk(func)) + return 0; + + memcpy(*pprog, tmpl, tmpl_size); + *pprog += tmpl_size; + return tmpl_size; +} +#endif + +#ifdef CONFIG_MODULES +void noinline callthunks_patch_module_calls(struct callthunk_sites *cs, + struct module *mod) +{ + struct core_text ct = { + .base = (unsigned long)mod->core_layout.base, + .end = (unsigned long)mod->core_layout.base + mod->core_layout.size, + .name = mod->name, + }; + + if (!thunks_initialized) + return; + + mutex_lock(&text_mutex); + callthunks_setup(cs, &ct); + mutex_unlock(&text_mutex); +} +#endif /* CONFIG_MODULES */ + +#if defined(CONFIG_CALL_THUNKS_DEBUG) && defined(CONFIG_DEBUG_FS) +static int callthunks_debug_show(struct seq_file *m, void *p) +{ + unsigned long cpu = (unsigned long)m->private; + + seq_printf(m, "C: %16llu R: %16llu S: %16llu X: %16llu\n,", + per_cpu(__x86_call_count, cpu), + per_cpu(__x86_ret_count, cpu), + per_cpu(__x86_stuffs_count, cpu), + per_cpu(__x86_ctxsw_count, cpu)); + return 0; +} + +static int callthunks_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, callthunks_debug_show, inode->i_private); +} + +static const struct file_operations dfs_ops = { + .open = callthunks_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init callthunks_debugfs_init(void) +{ + struct dentry *dir; + unsigned long cpu; + + dir = debugfs_create_dir("callthunks", NULL); + for_each_possible_cpu(cpu) { + void *arg = (void *)cpu; + char name [10]; + + sprintf(name, "cpu%lu", cpu); + debugfs_create_file(name, 0644, dir, arg, &dfs_ops); + } + return 0; +} +__initcall(callthunks_debugfs_init); +#endif diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index f10a921ee75658980b92d0d81238d9c16fb642ce..d7e3ceaf75c16637cf10db37afcae0b28ecf49b3 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -17,9 +17,6 @@ KMSAN_SANITIZE_common.o := n # As above, instrumenting secondary CPU boot code causes boot hangs. 
KCSAN_SANITIZE_common.o := n -# Make sure load_percpu_segment has no stackprotector -CFLAGS_common.o := -fno-stack-protector - obj-y := cacheinfo.o scattered.o topology.o obj-y += common.o obj-y += rdrand.o diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index a8a5033a804d6d8a0deb3f040854ad79b964a6ef..d970ddb0cc65bedf358c32321d202036cf792923 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -787,6 +787,7 @@ enum retbleed_mitigation { RETBLEED_MITIGATION_IBPB, RETBLEED_MITIGATION_IBRS, RETBLEED_MITIGATION_EIBRS, + RETBLEED_MITIGATION_STUFF, }; enum retbleed_mitigation_cmd { @@ -794,6 +795,7 @@ enum retbleed_mitigation_cmd { RETBLEED_CMD_AUTO, RETBLEED_CMD_UNRET, RETBLEED_CMD_IBPB, + RETBLEED_CMD_STUFF, }; static const char * const retbleed_strings[] = { @@ -802,6 +804,7 @@ static const char * const retbleed_strings[] = { [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB", [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS", [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS", + [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing", }; static enum retbleed_mitigation retbleed_mitigation __ro_after_init = @@ -831,8 +834,12 @@ static int __init retbleed_parse_cmdline(char *str) retbleed_cmd = RETBLEED_CMD_UNRET; } else if (!strcmp(str, "ibpb")) { retbleed_cmd = RETBLEED_CMD_IBPB; + } else if (!strcmp(str, "stuff")) { + retbleed_cmd = RETBLEED_CMD_STUFF; } else if (!strcmp(str, "nosmt")) { retbleed_nosmt = true; + } else if (!strcmp(str, "force")) { + setup_force_cpu_bug(X86_BUG_RETBLEED); } else { pr_err("Ignoring unknown retbleed option (%s).", str); } @@ -879,6 +886,21 @@ static void __init retbleed_select_mitigation(void) } break; + case RETBLEED_CMD_STUFF: + if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING) && + spectre_v2_enabled == SPECTRE_V2_RETPOLINE) { + retbleed_mitigation = RETBLEED_MITIGATION_STUFF; + + } else { + if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING)) + pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n"); + else + pr_err("WARNING: kernel not compiled with CALL_DEPTH_TRACKING.\n"); + + goto do_cmd_auto; + } + break; + do_cmd_auto: case RETBLEED_CMD_AUTO: default: @@ -916,6 +938,12 @@ do_cmd_auto: mitigate_smt = true; break; + case RETBLEED_MITIGATION_STUFF: + setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); + x86_set_skl_return_thunk(); + break; + default: break; } @@ -926,7 +954,7 @@ do_cmd_auto: /* * Let IBRS trump all on Intel without affecting the effects of the - * retbleed= cmdline option. 
+ * retbleed= cmdline option except for call depth based stuffing */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { switch (spectre_v2_enabled) { @@ -939,7 +967,8 @@ do_cmd_auto: retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; break; default: - pr_err(RETBLEED_INTEL_MSG); + if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) + pr_err(RETBLEED_INTEL_MSG); } } @@ -1413,6 +1442,7 @@ static void __init spectre_v2_select_mitigation(void) if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) && boot_cpu_has_bug(X86_BUG_RETBLEED) && retbleed_cmd != RETBLEED_CMD_OFF && + retbleed_cmd != RETBLEED_CMD_STUFF && boot_cpu_has(X86_FEATURE_IBRS) && boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { mode = SPECTRE_V2_IBRS; diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 62b83bc5b4b9233f9ebf51fbfa5ca91bdcb5a51f..9cfca3d7d0e207c518b7cadf192a0f1337f790d6 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -610,6 +610,7 @@ static __always_inline void setup_cet(struct cpuinfo_x86 *c) if (!ibt_selftest()) { pr_err("IBT selftest: Failed!\n"); + wrmsrl(MSR_IA32_S_CET, 0); setup_clear_cpu_cap(X86_FEATURE_IBT); return; } @@ -702,16 +703,6 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c) __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long)); __u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long)); -void load_percpu_segment(int cpu) -{ -#ifdef CONFIG_X86_32 - loadsegment(fs, __KERNEL_PERCPU); -#else - __loadsegment_simple(gs, 0); - wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu)); -#endif -} - #ifdef CONFIG_X86_32 /* The 32-bit entry code needs to find cpu_entry_area. */ DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); @@ -739,16 +730,45 @@ void load_fixmap_gdt(int cpu) } EXPORT_SYMBOL_GPL(load_fixmap_gdt); -/* - * Current gdt points %fs at the "master" per-cpu area: after this, - * it's on the real one. +/** + * switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base + * @cpu: The CPU number for which this is invoked + * + * Invoked during early boot to switch from early GDT and early per CPU to + * the direct GDT and the runtime per CPU area. On 32-bit the percpu base + * switch is implicit by loading the direct GDT. On 64bit this requires + * to update GSBASE. */ -void switch_to_new_gdt(int cpu) +void __init switch_gdt_and_percpu_base(int cpu) { - /* Load the original GDT */ load_direct_gdt(cpu); - /* Reload the per-cpu base */ - load_percpu_segment(cpu); + +#ifdef CONFIG_X86_64 + /* + * No need to load %gs. It is already correct. + * + * Writing %gs on 64bit would zero GSBASE which would make any per + * CPU operation up to the point of the wrmsrl() fault. + * + * Set GSBASE to the new offset. Until the wrmsrl() happens the + * early mapping is still valid. That means the GSBASE update will + * lose any prior per CPU data which was not copied over in + * setup_per_cpu_areas(). + * + * This works even with stackprotector enabled because the + * per CPU stack canary is 0 in both per CPU areas. + */ + wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu)); +#else + /* + * %fs is already set to __KERNEL_PERCPU, but after switching GDT + * it is required to load FS again so that the 'hidden' part is + * updated from the new GDT. Up to this point the early per CPU + * translation is active. Any content of the early per CPU data + * which was not copied over in setup_per_cpu_areas() is lost. 
+ */ + loadsegment(fs, __KERNEL_PERCPU); +#endif } static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; @@ -1993,27 +2013,18 @@ static __init int setup_clearcpuid(char *arg) } __setup("clearcpuid=", setup_clearcpuid); +DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = { + .current_task = &init_task, + .preempt_count = INIT_PREEMPT_COUNT, + .top_of_stack = TOP_OF_INIT_STACK, +}; +EXPORT_PER_CPU_SYMBOL(pcpu_hot); + #ifdef CONFIG_X86_64 DEFINE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __aligned(PAGE_SIZE) __visible; EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data); -/* - * The following percpu variables are hot. Align current_task to - * cacheline size such that they fall in the same cacheline. - */ -DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = - &init_task; -EXPORT_PER_CPU_SYMBOL(current_task); - -DEFINE_PER_CPU(void *, hardirq_stack_ptr); -DEFINE_PER_CPU(bool, hardirq_stack_inuse); - -DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; -EXPORT_PER_CPU_SYMBOL(__preempt_count); - -DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK; - static void wrmsrl_cstar(unsigned long val) { /* @@ -2064,20 +2075,6 @@ void syscall_init(void) #else /* CONFIG_X86_64 */ -DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; -EXPORT_PER_CPU_SYMBOL(current_task); -DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; -EXPORT_PER_CPU_SYMBOL(__preempt_count); - -/* - * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find - * the top of the kernel stack. Use an extra percpu variable to track the - * top of the kernel stack directly. - */ -DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = - (unsigned long)&init_thread_union + THREAD_SIZE; -EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack); - #ifdef CONFIG_STACKPROTECTOR DEFINE_PER_CPU(unsigned long, __stack_chk_guard); EXPORT_PER_CPU_SYMBOL(__stack_chk_guard); @@ -2248,12 +2245,6 @@ void cpu_init(void) boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE)) cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); - /* - * Initialize the per-CPU GDT with the boot GDT, - * and set up the GDT descriptor: - */ - switch_to_new_gdt(cpu); - if (IS_ENABLED(CONFIG_X86_64)) { loadsegment(fs, 0); memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index ba8d0763b36bb177627f401997a40592beae3644..524f8ff3e69c781c3bcd67c180c27b04faed8a81 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -1560,9 +1560,9 @@ static const struct file_operations pseudo_lock_dev_fops = { .mmap = pseudo_lock_dev_mmap, }; -static char *pseudo_lock_devnode(struct device *dev, umode_t *mode) +static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode) { - struct rdtgroup *rdtgrp; + const struct rdtgroup *rdtgrp; rdtgrp = dev_get_drvdata(dev); if (mode) diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c index 68f8b18d2278d75822bfb0d86156ba5c498d56ed..2a0e90fe2abce14f049fb3c3eb083e306f4bd94c 100644 --- a/arch/x86/kernel/cpu/sgx/encl.c +++ b/arch/x86/kernel/cpu/sgx/encl.c @@ -268,7 +268,7 @@ static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl, unsigned long addr, unsigned long vm_flags) { - unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC); + unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS; struct sgx_encl_page *entry; 
entry = xa_load(&encl->page_array, PFN_DOWN(addr)); @@ -502,7 +502,7 @@ static void sgx_vma_open(struct vm_area_struct *vma) int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start, unsigned long end, unsigned long vm_flags) { - unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC); + unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS; struct sgx_encl_page *page; unsigned long count = 0; int ret = 0; diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 6f7b8cc1bc9fa342a66ed021e38aaba4927f750d..621ba9c0f17a9d53cccd156b7bf633bdb0f9979b 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -139,7 +139,7 @@ static int cpuid_device_destroy(unsigned int cpu) return 0; } -static char *cpuid_devnode(struct device *dev, umode_t *mode) +static char *cpuid_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt)); } diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index 722fd712e1cf029adff9a73c73aad44992dbcef6..b4905d5173fd36d1df190a54725eced03f9170dd 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -37,7 +37,7 @@ const char *stack_type_name(enum stack_type type) static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info) { - unsigned long *begin = (unsigned long *)this_cpu_read(hardirq_stack_ptr); + unsigned long *begin = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr); unsigned long *end = begin + (THREAD_SIZE / sizeof(long)); /* @@ -62,7 +62,7 @@ static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info) static bool in_softirq_stack(unsigned long *stack, struct stack_info *info) { - unsigned long *begin = (unsigned long *)this_cpu_read(softirq_stack_ptr); + unsigned long *begin = (unsigned long *)this_cpu_read(pcpu_hot.softirq_stack_ptr); unsigned long *end = begin + (THREAD_SIZE / sizeof(long)); /* diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 6c5defd6569a3eb867b58c37fccb93dd33decaee..f05339fee7785b1da2cc2cec9713e06a1242dbe7 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -134,7 +134,7 @@ static __always_inline bool in_exception_stack(unsigned long *stack, struct stac static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info) { - unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr); + unsigned long *end = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr); unsigned long *begin; /* diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index bd165004776d93bf998c708cac2ec2567f9739a6..5e7ead52cfdb7ae0e092f1425bf2bcae3a8ba610 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -24,10 +24,10 @@ #include #include #include +#include #include -#include #include #include #include @@ -69,6 +69,10 @@ static const char *ftrace_nop_replace(void) static const char *ftrace_call_replace(unsigned long ip, unsigned long addr) { + /* + * No need to translate into a callthunk. The trampoline does + * the depth accounting itself. 
+ */ return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr); } @@ -217,7 +221,9 @@ void ftrace_replace_code(int enable) ret = ftrace_verify_code(rec->ip, old); if (ret) { + ftrace_expected = old; ftrace_bug(ret, rec); + ftrace_expected = NULL; return; } } @@ -317,7 +323,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) unsigned long size; unsigned long *ptr; void *trampoline; - void *ip; + void *ip, *dest; /* 48 8b 15 is movq (%rip), %rdx */ unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 }; unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE }; @@ -359,7 +365,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) ip = trampoline + size; if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) - __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, &__x86_return_thunk, JMP32_INSN_SIZE); + __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE); else memcpy(ip, retq, sizeof(retq)); @@ -404,20 +410,20 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) /* put in the call to the function */ mutex_lock(&text_mutex); call_offset -= start_offset; + /* + * No need to translate into a callthunk. The trampoline does + * the depth accounting before the call already. + */ + dest = ftrace_ops_get_func(ops); memcpy(trampoline + call_offset, - text_gen_insn(CALL_INSN_OPCODE, - trampoline + call_offset, - ftrace_ops_get_func(ops)), CALL_INSN_SIZE); + text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest), + CALL_INSN_SIZE); mutex_unlock(&text_mutex); /* ALLOC_TRAMP flags lets us know we created it */ ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP; - set_vm_flush_reset_perms(trampoline); - - if (likely(system_state != SYSTEM_BOOTING)) - set_memory_ro((unsigned long)trampoline, npages); - set_memory_x((unsigned long)trampoline, npages); + set_memory_rox((unsigned long)trampoline, npages); return (unsigned long)trampoline; fail: tramp_free(trampoline); diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index 2a4be92fd1444d02f084d65c21ee4faa54076009..1265ad519249c027cae82d8941d394b8dbc50795 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -3,8 +3,9 @@ * Copyright (C) 2014 Steven Rostedt, Red Hat Inc */ -#include #include +#include +#include #include #include #include @@ -131,16 +132,19 @@ .endm SYM_TYPED_FUNC_START(ftrace_stub) + CALL_DEPTH_ACCOUNT RET SYM_FUNC_END(ftrace_stub) SYM_TYPED_FUNC_START(ftrace_stub_graph) + CALL_DEPTH_ACCOUNT RET SYM_FUNC_END(ftrace_stub_graph) #ifdef CONFIG_DYNAMIC_FTRACE SYM_FUNC_START(__fentry__) + CALL_DEPTH_ACCOUNT RET SYM_FUNC_END(__fentry__) EXPORT_SYMBOL(__fentry__) @@ -149,6 +153,8 @@ SYM_FUNC_START(ftrace_caller) /* save_mcount_regs fills in first two parameters */ save_mcount_regs + CALL_DEPTH_ACCOUNT + /* Stack - skipping return address of ftrace_caller */ leaq MCOUNT_REG_SIZE+8(%rsp), %rcx movq %rcx, RSP(%rsp) @@ -164,6 +170,9 @@ SYM_INNER_LABEL(ftrace_caller_op_ptr, SYM_L_GLOBAL) /* Only ops with REGS flag set should have CS register set */ movq $0, CS(%rsp) + /* Account for the function call below */ + CALL_DEPTH_ACCOUNT + SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) ANNOTATE_NOENDBR call ftrace_stub @@ -193,6 +202,8 @@ SYM_FUNC_START(ftrace_regs_caller) save_mcount_regs 8 /* save_mcount_regs fills in first two parameters */ + CALL_DEPTH_ACCOUNT + SYM_INNER_LABEL(ftrace_regs_caller_op_ptr, SYM_L_GLOBAL) ANNOTATE_NOENDBR /* Load the ftrace_ops into the 3rd parameter */ @@ -223,6 +234,9 @@ SYM_INNER_LABEL(ftrace_regs_caller_op_ptr, 
SYM_L_GLOBAL) /* regs go into 4th parameter */ leaq (%rsp), %rcx + /* Account for the function call below */ + CALL_DEPTH_ACCOUNT + SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL) ANNOTATE_NOENDBR call ftrace_stub @@ -275,7 +289,20 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL) /* Restore flags */ popfq UNWIND_HINT_FUNC - RET + + /* + * The above left an extra return value on the stack; effectively + * doing a tail-call without using a register. This PUSH;RET + * pattern unbalances the RSB, inject a pointless CALL to rebalance. + */ + ANNOTATE_INTRA_FUNCTION_CALL + CALL .Ldo_rebalance + int3 +.Ldo_rebalance: + add $8, %rsp + ALTERNATIVE __stringify(RET), \ + __stringify(ANNOTATE_UNRET_SAFE; ret; int3), \ + X86_FEATURE_CALL_DEPTH SYM_FUNC_END(ftrace_regs_caller) STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller) @@ -284,6 +311,8 @@ STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller) #else /* ! CONFIG_DYNAMIC_FTRACE */ SYM_FUNC_START(__fentry__) + CALL_DEPTH_ACCOUNT + cmpq $ftrace_stub, ftrace_trace_function jnz trace RET @@ -337,6 +366,8 @@ SYM_CODE_START(return_to_handler) int3 .Ldo_rop: mov %rdi, (%rsp) - RET + ALTERNATIVE __stringify(RET), \ + __stringify(ANNOTATE_UNRET_SAFE; ret; int3), \ + X86_FEATURE_CALL_DEPTH SYM_CODE_END(return_to_handler) #endif diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 6a3cfaf6b72ad6351dadcb837459eedefe3834ba..387e4b12e823a5b472e41f7755f397494c9d26d8 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -203,7 +203,7 @@ unsigned long __head __startup_64(unsigned long physaddr, load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map); /* Is the address not 2M aligned? */ - if (load_delta & ~PMD_PAGE_MASK) + if (load_delta & ~PMD_MASK) for (;;); /* Include the SME encryption mask in the fixup value */ diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index d860d437631b649dd37476ff96195fadfbf0b2e5..222efd4a09bc8861b3871f4a175013c0730f8759 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -370,6 +370,7 @@ SYM_CODE_END(secondary_startup_64) * start_secondary() via .Ljump_to_C_code. 
*/ SYM_CODE_START(start_cpu0) + ANNOTATE_NOENDBR UNWIND_HINT_EMPTY movq initial_stack(%rip), %rsp jmp .Ljump_to_C_code diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 668a4a6533d923c7eddef457f3f2ebac0040a00b..bbb0f737aab1904e82a54f08e4ad41b90d435ef5 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -266,7 +266,7 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end) /* CPU entry erea is always used for CPU entry */ if (within_area(addr, end, CPU_ENTRY_AREA_BASE, - CPU_ENTRY_AREA_TOTAL_SIZE)) + CPU_ENTRY_AREA_MAP_SIZE)) return true; /* diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 01833ebf5e8e3582992a8860460bbe555923eb30..dc1049c01f9b9aa19c6f986e975d382502bce062 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -52,9 +52,6 @@ static inline int check_stack_overflow(void) { return 0; } static inline void print_stack_overflow(void) { } #endif -DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr); -DEFINE_PER_CPU(struct irq_stack *, softirq_stack_ptr); - static void call_on_stack(void *func, void *stack) { asm volatile("xchgl %%ebx,%%esp \n" @@ -77,7 +74,7 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) u32 *isp, *prev_esp, arg1; curstk = (struct irq_stack *) current_stack(); - irqstk = __this_cpu_read(hardirq_stack_ptr); + irqstk = __this_cpu_read(pcpu_hot.hardirq_stack_ptr); /* * this is where we switch to the IRQ stack. However, if we are @@ -115,7 +112,7 @@ int irq_init_percpu_irqstack(unsigned int cpu) int node = cpu_to_node(cpu); struct page *ph, *ps; - if (per_cpu(hardirq_stack_ptr, cpu)) + if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu)) return 0; ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER); @@ -127,8 +124,8 @@ int irq_init_percpu_irqstack(unsigned int cpu) return -ENOMEM; } - per_cpu(hardirq_stack_ptr, cpu) = page_address(ph); - per_cpu(softirq_stack_ptr, cpu) = page_address(ps); + per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = page_address(ph); + per_cpu(pcpu_hot.softirq_stack_ptr, cpu) = page_address(ps); return 0; } @@ -138,7 +135,7 @@ void do_softirq_own_stack(void) struct irq_stack *irqstk; u32 *isp, *prev_esp; - irqstk = __this_cpu_read(softirq_stack_ptr); + irqstk = __this_cpu_read(pcpu_hot.softirq_stack_ptr); /* build the stack frame on the softirq stack */ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk)); diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 1c0fb96b9e39069d4d246f32f5e5c782e8e7be38..fe0c859873d17ef472aa92aac419bc427073c6e9 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -50,7 +50,7 @@ static int map_irq_stack(unsigned int cpu) return -ENOMEM; /* Store actual TOS to avoid adjustment in the hotpath */ - per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8; + per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8; return 0; } #else @@ -63,14 +63,14 @@ static int map_irq_stack(unsigned int cpu) void *va = per_cpu_ptr(&irq_stack_backing_store, cpu); /* Store actual TOS to avoid adjustment in the hotpath */ - per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8; + per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8; return 0; } #endif int irq_init_percpu_irqstack(unsigned int cpu) { - if (per_cpu(hardirq_stack_ptr, cpu)) + if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu)) return 0; return map_irq_stack(cpu); } diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 
eb8bc82846b997031da4529d7009bbc737bee0a9..66299682b6b7e24305160f6282ff6c40f07f82d4 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -51,7 +52,6 @@ #include #include #include -#include #include #include "common.h" @@ -414,18 +414,11 @@ void *alloc_insn_page(void) if (!page) return NULL; - set_vm_flush_reset_perms(page); - /* - * First make the page read-only, and only then make it executable to - * prevent it from being W+X in between. - */ - set_memory_ro((unsigned long)page, 1); - /* * TODO: Once additional kernel code protection mechanisms are set, ensure * that the page was not maliciously altered and it is still zeroed. */ - set_memory_x((unsigned long)page, 1); + set_memory_rox((unsigned long)page, 1); return page; } diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index d4e48b4a438b25d7170890ca2c281d4d96013985..1cceac5984daa902f311a4001ba1a028ab6f91b3 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -349,7 +349,7 @@ static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val) static void kvm_guest_cpu_init(void) { if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) { - u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason)); + u64 pa; WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled)); @@ -798,19 +798,13 @@ extern bool __raw_callee_save___kvm_vcpu_is_preempted(long); * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and * restoring to/from the stack. */ -asm( -".pushsection .text;" -".global __raw_callee_save___kvm_vcpu_is_preempted;" -".type __raw_callee_save___kvm_vcpu_is_preempted, @function;" -"__raw_callee_save___kvm_vcpu_is_preempted:" -ASM_ENDBR -"movq __per_cpu_offset(,%rdi,8), %rax;" -"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);" -"setne %al;" -ASM_RET -".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;" -".popsection"); +#define PV_VCPU_PREEMPTED_ASM \ + "movq __per_cpu_offset(,%rdi,8), %rax\n\t" \ + "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t" \ + "setne %al\n\t" +DEFINE_PARAVIRT_ASM(__raw_callee_save___kvm_vcpu_is_preempted, + PV_VCPU_PREEMPTED_ASM, .text); #endif static void __init kvm_guest_init(void) diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index d85a6980e263d9fc54acc8cdf58b52f3cda46e8f..705fb2a41d7dd54b3fdc1737f8c117180c4b1439 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -74,10 +74,11 @@ void *module_alloc(unsigned long size) return NULL; p = __vmalloc_node_range(size, MODULE_ALIGN, - MODULES_VADDR + get_module_load_offset(), - MODULES_END, gfp_mask, - PAGE_KERNEL, VM_DEFER_KMEMLEAK, NUMA_NO_NODE, - __builtin_return_address(0)); + MODULES_VADDR + get_module_load_offset(), + MODULES_END, gfp_mask, PAGE_KERNEL, + VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK, + NUMA_NO_NODE, __builtin_return_address(0)); + if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) { vfree(p); return NULL; @@ -253,7 +254,8 @@ int module_finalize(const Elf_Ehdr *hdr, { const Elf_Shdr *s, *alt = NULL, *locks = NULL, *para = NULL, *orc = NULL, *orc_ip = NULL, - *retpolines = NULL, *returns = NULL, *ibt_endbr = NULL; + *retpolines = NULL, *returns = NULL, *ibt_endbr = NULL, + *calls = NULL, *cfi = NULL; char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { @@ -271,6 +273,10 @@ int module_finalize(const Elf_Ehdr 
*hdr, retpolines = s; if (!strcmp(".return_sites", secstrings + s->sh_name)) returns = s; + if (!strcmp(".call_sites", secstrings + s->sh_name)) + calls = s; + if (!strcmp(".cfi_sites", secstrings + s->sh_name)) + cfi = s; if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name)) ibt_endbr = s; } @@ -283,6 +289,22 @@ int module_finalize(const Elf_Ehdr *hdr, void *pseg = (void *)para->sh_addr; apply_paravirt(pseg, pseg + para->sh_size); } + if (retpolines || cfi) { + void *rseg = NULL, *cseg = NULL; + unsigned int rsize = 0, csize = 0; + + if (retpolines) { + rseg = (void *)retpolines->sh_addr; + rsize = retpolines->sh_size; + } + + if (cfi) { + cseg = (void *)cfi->sh_addr; + csize = cfi->sh_size; + } + + apply_fineibt(rseg, rseg + rsize, cseg, cseg + csize); + } if (retpolines) { void *rseg = (void *)retpolines->sh_addr; apply_retpolines(rseg, rseg + retpolines->sh_size); @@ -296,6 +318,21 @@ int module_finalize(const Elf_Ehdr *hdr, void *aseg = (void *)alt->sh_addr; apply_alternatives(aseg, aseg + alt->sh_size); } + if (calls || para) { + struct callthunk_sites cs = {}; + + if (calls) { + cs.call_start = (void *)calls->sh_addr; + cs.call_end = (void *)calls->sh_addr + calls->sh_size; + } + + if (para) { + cs.pv_start = (void *)para->sh_addr; + cs.pv_end = (void *)para->sh_addr + para->sh_size; + } + + callthunks_patch_module_calls(&cs, me); + } if (ibt_endbr) { void *iseg = (void *)ibt_endbr->sh_addr; apply_ibt_endbr(iseg, iseg + ibt_endbr->sh_size); diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index ed8ac6bcbafb2fd8b18af132622199628295eb46..7087513117861e8f3e6f21a27240525ccb0b27fd 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -250,7 +250,7 @@ static int msr_device_destroy(unsigned int cpu) return 0; } -static char *msr_devnode(struct device *dev, umode_t *mode) +static char *msr_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt)); } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 7ca2d46c08cc9efc7130388da9ea1d8ecedcf94e..327757afb0276c877ec7c01af6570f90221c5385 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -37,27 +37,10 @@ * nop stub, which must not clobber anything *including the stack* to * avoid confusing the entry prologues. */ -extern void _paravirt_nop(void); -asm (".pushsection .entry.text, \"ax\"\n" - ".global _paravirt_nop\n" - "_paravirt_nop:\n\t" - ASM_ENDBR - ASM_RET - ".size _paravirt_nop, . - _paravirt_nop\n\t" - ".type _paravirt_nop, @function\n\t" - ".popsection"); +DEFINE_PARAVIRT_ASM(_paravirt_nop, "", .entry.text); /* stub always returning 0. */ -asm (".pushsection .entry.text, \"ax\"\n" - ".global paravirt_ret0\n" - "paravirt_ret0:\n\t" - ASM_ENDBR - "xor %" _ASM_AX ", %" _ASM_AX ";\n\t" - ASM_RET - ".size paravirt_ret0, . - paravirt_ret0\n\t" - ".type paravirt_ret0, @function\n\t" - ".popsection"); - +DEFINE_PARAVIRT_ASM(paravirt_ret0, "xor %eax,%eax", .entry.text); void __init default_banner(void) { diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 2f314b170c9f0737d6ee673ed380d894a6978911..470c128759eab3c82b05539f13e204354452cdc1 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -191,13 +191,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) arch_end_context_switch(next_p); /* - * Reload esp0 and cpu_current_top_of_stack. This changes + * Reload esp0 and pcpu_hot.top_of_stack. This changes * current_thread_info(). 
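
The renames running through irq_32.c, irq_64.c and the two process_*.c files are all one change: the hottest per-CPU variables (current_task, top_of_stack, cpu_number, the IRQ stack pointers) are collected into a single struct pcpu_hot so they share a cache line. A userspace caricature of the layout, with demo_* names invented here and a plain array index standing in for the kernel's %gs-relative this_cpu accessors:

#include <stdint.h>

#define DEMO_NR_CPUS 64

struct demo_task;                       /* opaque stand-in for task_struct */

struct demo_pcpu_hot {
    struct demo_task *current_task;
    uintptr_t top_of_stack;
    void *hardirq_stack_ptr;
    int cpu_number;
} __attribute__((aligned(64)));         /* one cache line per CPU */

static struct demo_pcpu_hot demo_hot[DEMO_NR_CPUS];

/* What the __switch_to() hunks now do: touch one struct, one line. */
static void demo_switch_to(int cpu, struct demo_task *next, uintptr_t stack_top)
{
    demo_hot[cpu].current_task = next;
    demo_hot[cpu].top_of_stack = stack_top;
}
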
Refresh the SYSENTER configuration in * case prev or next is vm86. */ update_task_stack(next_p); refresh_sysenter_cs(next); - this_cpu_write(cpu_current_top_of_stack, + this_cpu_write(pcpu_hot.top_of_stack, (unsigned long)task_stack_page(next_p) + THREAD_SIZE); @@ -207,7 +207,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) if (prev->gs | next->gs) loadsegment(gs, next->gs); - this_cpu_write(current_task, next_p); + raw_cpu_write(pcpu_hot.current_task, next_p); switch_fpu_finish(); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index e2f469175be8a78a7004abf4d3571cdd6337b5eb..4e34b3b68ebdc96a76c9bf265415ecac3a5314a8 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -563,7 +563,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) int cpu = smp_processor_id(); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && - this_cpu_read(hardirq_stack_inuse)); + this_cpu_read(pcpu_hot.hardirq_stack_inuse)); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) switch_fpu_prepare(prev_fpu, cpu); @@ -617,8 +617,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* * Switch the PDA and FPU contexts. */ - this_cpu_write(current_task, next_p); - this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p)); + raw_cpu_write(pcpu_hot.current_task, next_p); + raw_cpu_write(pcpu_hot.top_of_stack, task_top_of_stack(next_p)); switch_fpu_finish(); diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index 4809c0dc4eb0ceb64b8dbab19cf5cc9525352609..4a73351f87f88bdf5924e33b6b7bb92c2367e95e 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -41,6 +41,7 @@ .text .align PAGE_SIZE .code64 +SYM_CODE_START_NOALIGN(relocate_range) SYM_CODE_START_NOALIGN(relocate_kernel) UNWIND_HINT_EMPTY ANNOTATE_NOENDBR @@ -312,5 +313,5 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages) int3 SYM_CODE_END(swap_pages) - .globl kexec_control_code_size -.set kexec_control_code_size, . - relocate_kernel + .skip KEXEC_CONTROL_CODE_MAX_SIZE - (. - relocate_kernel), 0xcc +SYM_CODE_END(relocate_range); diff --git a/arch/x86/kernel/resource.c b/arch/x86/kernel/resource.c index bba1abd05bfeb1d94144ad2593048e7ddfbe74c0..79bc8a97a083cdaa42b4bf1fa5e9e5b50dd3e8c2 100644 --- a/arch/x86/kernel/resource.c +++ b/arch/x86/kernel/resource.c @@ -42,8 +42,16 @@ static void remove_e820_regions(struct resource *avail) resource_clip(avail, e820_start, e820_end); if (orig.start != avail->start || orig.end != avail->end) { - pr_info("clipped %pR to %pR for e820 entry [mem %#010Lx-%#010Lx]\n", - &orig, avail, e820_start, e820_end); + pr_info("resource: avoiding allocation from e820 entry [mem %#010Lx-%#010Lx]\n", + e820_start, e820_end); + if (avail->end > avail->start) + /* + * Use %pa instead of %pR because "avail" + * is typically IORESOURCE_UNSET, so %pR + * shows the size instead of addresses. 
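
remove_e820_regions() above leans on resource_clip() to trim the available window against each e820 entry; the interval arithmetic underneath looks roughly like the following, where demo_clip_range() is an invented, simplified single-sided version (the real helper also copes with an entry that splits the window in two):

#include <stdbool.h>
#include <stdint.h>

/* Shrink [*start, *end] so it no longer overlaps [clip_start, clip_end]. */
static bool demo_clip_range(uint64_t *start, uint64_t *end,
                            uint64_t clip_start, uint64_t clip_end)
{
    if (clip_end < *start || clip_start > *end)
        return false;                   /* no overlap, nothing clipped */

    if (clip_start <= *start)
        *start = clip_end + 1;          /* entry covers the low end */
    else
        *end = clip_start - 1;          /* keep only the low remainder */

    return true;
}

When an entry swallows the whole window, *start lands past *end; that empty-range outcome is exactly what the new avail->end > avail->start guard in the hunk above filters before printing the remainder.
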
+ */ + pr_info("resource: remaining [mem %pa-%pa] available\n", + &avail->start, &avail->end); orig = *avail; } } diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index b26123c90b4fd3dca51f43aeaae46cbfde526295..c242dc47e9cb92a6918592b3a58b0e2a2ba8e26b 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -23,9 +23,6 @@ #include #include -DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number); -EXPORT_PER_CPU_SYMBOL(cpu_number); - #ifdef CONFIG_X86_64 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load) #else @@ -172,7 +169,7 @@ void __init setup_per_cpu_areas(void) for_each_possible_cpu(cpu) { per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); - per_cpu(cpu_number, cpu) = cpu; + per_cpu(pcpu_hot.cpu_number, cpu) = cpu; setup_percpu_segment(cpu); /* * Copy data used in early init routines from the @@ -211,7 +208,7 @@ void __init setup_per_cpu_areas(void) * area. Reload any changed state for the boot CPU. */ if (!cpu) - switch_to_new_gdt(cpu); + switch_gdt_and_percpu_base(cpu); } /* indicate the early static arrays will soon be gone */ diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index c295f0e008a3438b13d135566b6d2054c0792c42..55cad72715d991bd18bf0dfb7aa3bd757f4474f7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1048,7 +1048,7 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle) /* Just in case we booted with a single CPU. */ alternatives_enable_smp(); - per_cpu(current_task, cpu) = idle; + per_cpu(pcpu_hot.current_task, cpu) = idle; cpu_init_stack_canary(cpu, idle); /* Initialize the interrupt stack(s) */ @@ -1058,7 +1058,7 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle) #ifdef CONFIG_X86_32 /* Stack for startup_32 can be just as for start_secondary onwards */ - per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle); + per_cpu(pcpu_hot.top_of_stack, cpu) = task_top_of_stack(idle); #else initial_gs = per_cpu_offset(cpu); #endif @@ -1453,7 +1453,11 @@ void arch_thaw_secondary_cpus_end(void) void __init native_smp_prepare_boot_cpu(void) { int me = smp_processor_id(); - switch_to_new_gdt(me); + + /* SMP handles this from setup_per_cpu_areas() */ + if (!IS_ENABLED(CONFIG_SMP)) + switch_gdt_and_percpu_base(me); + /* already set me in cpu_online_mask in boot_cpu_init() */ cpumask_set_cpu(me, cpu_callout_mask); cpu_set_state_online(me); diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c index aaaba85d6d7ff01e596f8b77c42ac72c707b0f0d..2ebc338980bcdfe5a276ac79fbbe2a90ecc6815c 100644 --- a/arch/x86/kernel/static_call.c +++ b/arch/x86/kernel/static_call.c @@ -34,6 +34,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, switch (type) { case CALL: + func = callthunks_translate_call_dest(func); code = text_gen_insn(CALL_INSN_OPCODE, insn, func); if (func == &__static_call_return0) { emulate = code; @@ -52,7 +53,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, case RET: if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) - code = text_gen_insn(JMP32_INSN_OPCODE, insn, &__x86_return_thunk); + code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk); else code = &retinsn; break; diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index d1e1679f32cf42c72fe423529f4081d29c37ffb8..d317dc3d06a3a3f018f83e0fedfe0c898017e0d5 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -858,7 +858,7 @@ 
DEFINE_IDTENTRY_RAW(exc_int3) */ asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs) { - struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1; + struct pt_regs *regs = (struct pt_regs *)this_cpu_read(pcpu_hot.top_of_stack) - 1; if (regs != eregs) *regs = *eregs; return regs; @@ -876,7 +876,7 @@ asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *r * trust it and switch to the current kernel stack */ if (ip_within_syscall_gap(regs)) { - sp = this_cpu_read(cpu_current_top_of_stack); + sp = this_cpu_read(pcpu_hot.top_of_stack); goto sync; } diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index c059820dfaeaf6af5167051e084a9dc8253f01a4..cdf6c6060170008d2e2bf18d6ba7d2703776dc29 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -136,6 +136,21 @@ static struct orc_entry null_orc_entry = { .type = UNWIND_HINT_TYPE_CALL }; +#ifdef CONFIG_CALL_THUNKS +static struct orc_entry *orc_callthunk_find(unsigned long ip) +{ + if (!is_callthunk((void *)ip)) + return NULL; + + return &null_orc_entry; +} +#else +static struct orc_entry *orc_callthunk_find(unsigned long ip) +{ + return NULL; +} +#endif + /* Fake frame pointer entry -- used as a fallback for generated code */ static struct orc_entry orc_fp_entry = { .type = UNWIND_HINT_TYPE_CALL, @@ -189,7 +204,11 @@ static struct orc_entry *orc_find(unsigned long ip) if (orc) return orc; - return orc_ftrace_find(ip); + orc = orc_ftrace_find(ip); + if (orc) + return orc; + + return orc_callthunk_find(ip); } #ifdef CONFIG_MODULES diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 15f29053cec46e462aa01c84c2b1f33e7c89ceaf..2e0ee14229bffce61924e7d1b689864f728eda5b 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -132,18 +132,19 @@ SECTIONS CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT - ALIGN_ENTRY_TEXT_BEGIN - ENTRY_TEXT - ALIGN_ENTRY_TEXT_END SOFTIRQENTRY_TEXT - STATIC_CALL_TEXT - *(.gnu.warning) - #ifdef CONFIG_RETPOLINE __indirect_thunk_start = .; *(.text.__x86.*) __indirect_thunk_end = .; #endif + STATIC_CALL_TEXT + + ALIGN_ENTRY_TEXT_BEGIN + ENTRY_TEXT + ALIGN_ENTRY_TEXT_END + *(.gnu.warning) + } :text =0xcccc /* End of text section, which should occupy whole number of pages */ @@ -290,6 +291,13 @@ SECTIONS *(.return_sites) __return_sites_end = .; } + + . = ALIGN(8); + .call_sites : AT(ADDR(.call_sites) - LOAD_OFFSET) { + __call_sites = .; + *(.call_sites) + __call_sites_end = .; + } #endif #ifdef CONFIG_X86_KERNEL_IBT @@ -301,6 +309,15 @@ SECTIONS } #endif +#ifdef CONFIG_FINEIBT + . = ALIGN(8); + .cfi_sites : AT(ADDR(.cfi_sites) - LOAD_OFFSET) { + __cfi_sites = .; + *(.cfi_sites) + __cfi_sites_end = .; + } +#endif + /* * struct alt_inst entries. From the header (alternative.h): * "Alternative instructions for different CPU types or capabilities" @@ -493,11 +510,3 @@ INIT_PER_CPU(irq_stack_backing_store); #endif #endif /* CONFIG_X86_64 */ - -#ifdef CONFIG_KEXEC_CORE -#include - -. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, - "kexec control code size is too big"); -#endif - diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 67be7f217e37bdad646c288ee3a3b18ceba5939b..fbeaa9ddef5985f193afccc58a0dc36c1eeb0103 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -118,6 +118,17 @@ config KVM_AMD_SEV Provides support for launching Encrypted VMs (SEV) and Encrypted VMs with Encrypted State (SEV-ES) on AMD processors. 
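
The KVM_SMM entry that follows makes SMM emulation compile-time optional, and the Makefile hunk further down only links smm.o when it is set (kvm-$(CONFIG_KVM_SMM) += smm.o). The usual C-side companion to such a bool symbol is a header that provides static inline stubs for the disabled case so call sites stay #ifdef-free; a sketch, with demo_smm_inject() an invented name rather than the real smm.h interface:

#include <errno.h>

struct kvm_vcpu;                        /* opaque for this sketch */

#ifdef CONFIG_KVM_SMM
int demo_smm_inject(struct kvm_vcpu *vcpu);     /* real version lives in smm.c */
#else
static inline int demo_smm_inject(struct kvm_vcpu *vcpu)
{
    (void)vcpu;
    return -ENOTTY;                     /* SMM emulation compiled out */
}
#endif
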
+config KVM_SMM + bool "System Management Mode emulation" + default y + depends on KVM + help + Provides support for KVM to emulate System Management Mode (SMM) + in virtual machines. This can be used by the virtual machine + firmware to implement UEFI secure boot. + + If unsure, say Y. + config KVM_XEN bool "Support for Xen hypercall interface" depends on KVM diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index f453a0f96e243e806bdc8b96757dd8a22757573b..80e3fe184d17e64984d2f2d4adba00ec9d5ad57e 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -20,12 +20,14 @@ endif kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o kvm-$(CONFIG_KVM_XEN) += xen.o +kvm-$(CONFIG_KVM_SMM) += smm.o kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \ - vmx/evmcs.o vmx/nested.o vmx/posted_intr.o + vmx/hyperv.o vmx/nested.o vmx/posted_intr.o kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o -kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o +kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \ + svm/sev.o svm/hyperv.o ifdef CONFIG_HYPERV kvm-amd-y += svm/svm_onhyperv.o diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index c92c49a0b35b048f6874ee6115bfcc2ddba48563..b14653b61470c2720267006f7e7166ebac159613 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -62,10 +62,16 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted) * This one is tied to SSB in the user API, and not * visible in /proc/cpuinfo. */ -#define KVM_X86_FEATURE_PSFD (13*32+28) /* Predictive Store Forwarding Disable */ +#define KVM_X86_FEATURE_AMD_PSFD (13*32+28) /* Predictive Store Forwarding Disable */ #define F feature_bit -#define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0) + +/* Scattered Flag - For features that are scattered by cpufeatures.h. */ +#define SF(name) \ +({ \ + BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES); \ + (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0); \ +}) /* * Magic value used by KVM when querying userspace-provided CPUID entries and @@ -543,9 +549,9 @@ static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf) } static __always_inline -void kvm_cpu_cap_init_scattered(enum kvm_only_cpuid_leafs leaf, u32 mask) +void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask) { - /* Use kvm_cpu_cap_mask for non-scattered leafs. */ + /* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */ BUILD_BUG_ON(leaf < NCAPINTS); kvm_cpu_caps[leaf] = mask; @@ -555,7 +561,7 @@ void kvm_cpu_cap_init_scattered(enum kvm_only_cpuid_leafs leaf, u32 mask) static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask) { - /* Use kvm_cpu_cap_init_scattered for scattered leafs. */ + /* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. 
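
The rename to KVM_X86_FEATURE_AMD_PSFD above keeps the usual encoding: (13*32+28) is word 13, bit 28, and F()/__feature_bit() reduce that to a single bit inside the word's 32-bit capability mask. The scheme in miniature, with DEMO_* positions invented for illustration:

#include <stdint.h>

#define DEMO_FEATURE_A (13 * 32 + 4)    /* word 13, bit 4 */
#define DEMO_FEATURE_B (13 * 32 + 28)   /* word 13, bit 28 */

/* Bit within the word: keep only the low five bits of the position. */
#define DEMO_F(feat) (UINT32_C(1) << ((feat) & 31))

static const uint32_t demo_word13_caps = DEMO_F(DEMO_FEATURE_A) |
                                         DEMO_F(DEMO_FEATURE_B);

The SF() change in the same hunk adds a BUILD_BUG_ON so that a KVM-only (out-of-range) feature index can never be handed to a macro that consults boot_cpu_has() on the real feature arrays.
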
*/ BUILD_BUG_ON(leaf >= NCAPINTS); kvm_cpu_caps[leaf] &= mask; @@ -657,14 +663,19 @@ void kvm_set_cpu_caps(void) kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD); kvm_cpu_cap_mask(CPUID_7_1_EAX, - F(AVX_VNNI) | F(AVX512_BF16) + F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) | F(AMX_FP16) | + F(AVX_IFMA) + ); + + kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX, + F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) ); kvm_cpu_cap_mask(CPUID_D_1_EAX, F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd ); - kvm_cpu_cap_init_scattered(CPUID_12_EAX, + kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX, SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA) ); @@ -694,7 +705,7 @@ void kvm_set_cpu_caps(void) F(CLZERO) | F(XSAVEERPTR) | F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) | - __feature_bit(KVM_X86_FEATURE_PSFD) + __feature_bit(KVM_X86_FEATURE_AMD_PSFD) ); /* @@ -913,9 +924,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) goto out; cpuid_entry_override(entry, CPUID_7_1_EAX); + cpuid_entry_override(entry, CPUID_7_1_EDX); entry->ebx = 0; entry->ecx = 0; - entry->edx = 0; } break; case 0xa: { /* Architectural Performance Monitoring */ @@ -1220,8 +1231,12 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) * Other defined bits are for MSRs that KVM does not expose: * EAX 3 SPCL, SMM page configuration lock * EAX 13 PCMSR, Prefetch control MSR + * + * KVM doesn't support SMM_CTL. + * EAX 9 SMM_CTL MSR is not supported */ entry->eax &= BIT(0) | BIT(2) | BIT(6); + entry->eax |= BIT(9); if (static_cpu_has(X86_FEATURE_LFENCE_RDTSC)) entry->eax |= BIT(2); if (!static_cpu_has_bug(X86_BUG_NULL_SEG)) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 4a43261d25a2aabc9dc3e2a00d09cd102151f8e0..5cc3efa0e21c17632de33946955bd9a9a6a72238 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -242,37 +242,6 @@ enum x86_transfer_type { X86_TRANSFER_TASK_SWITCH, }; -static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr) -{ - if (KVM_EMULATOR_BUG_ON(nr >= NR_EMULATOR_GPRS, ctxt)) - nr &= NR_EMULATOR_GPRS - 1; - - if (!(ctxt->regs_valid & (1 << nr))) { - ctxt->regs_valid |= 1 << nr; - ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr); - } - return ctxt->_regs[nr]; -} - -static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr) -{ - if (KVM_EMULATOR_BUG_ON(nr >= NR_EMULATOR_GPRS, ctxt)) - nr &= NR_EMULATOR_GPRS - 1; - - BUILD_BUG_ON(sizeof(ctxt->regs_dirty) * BITS_PER_BYTE < NR_EMULATOR_GPRS); - BUILD_BUG_ON(sizeof(ctxt->regs_valid) * BITS_PER_BYTE < NR_EMULATOR_GPRS); - - ctxt->regs_valid |= 1 << nr; - ctxt->regs_dirty |= 1 << nr; - return &ctxt->_regs[nr]; -} - -static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr) -{ - reg_read(ctxt, nr); - return reg_write(ctxt, nr); -} - static void writeback_registers(struct x86_emulate_ctxt *ctxt) { unsigned long dirty = ctxt->regs_dirty; @@ -2338,335 +2307,15 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt) return rc; } -static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt) -{ -#ifdef CONFIG_X86_64 - return ctxt->ops->guest_has_long_mode(ctxt); -#else - return false; -#endif -} - -static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags) -{ - desc->g = (flags >> 23) & 1; - desc->d = (flags >> 22) & 1; - desc->l = (flags >> 21) & 1; - desc->avl = (flags >> 20) & 1; - desc->p = (flags >> 15) & 1; - desc->dpl = (flags >> 13) & 3; - desc->s = (flags >> 12) & 1; - desc->type = (flags 
>> 8) & 15; -} - -static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate, - int n) -{ - struct desc_struct desc; - int offset; - u16 selector; - - selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4); - - if (n < 3) - offset = 0x7f84 + n * 12; - else - offset = 0x7f2c + (n - 3) * 12; - - set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8)); - set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4)); - rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset)); - ctxt->ops->set_segment(ctxt, selector, &desc, 0, n); - return X86EMUL_CONTINUE; -} - -#ifdef CONFIG_X86_64 -static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate, - int n) -{ - struct desc_struct desc; - int offset; - u16 selector; - u32 base3; - - offset = 0x7e00 + n * 16; - - selector = GET_SMSTATE(u16, smstate, offset); - rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8); - set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4)); - set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8)); - base3 = GET_SMSTATE(u32, smstate, offset + 12); - - ctxt->ops->set_segment(ctxt, selector, &desc, base3, n); - return X86EMUL_CONTINUE; -} -#endif - -static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, - u64 cr0, u64 cr3, u64 cr4) -{ - int bad; - u64 pcid; - - /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */ - pcid = 0; - if (cr4 & X86_CR4_PCIDE) { - pcid = cr3 & 0xfff; - cr3 &= ~0xfff; - } - - bad = ctxt->ops->set_cr(ctxt, 3, cr3); - if (bad) - return X86EMUL_UNHANDLEABLE; - - /* - * First enable PAE, long mode needs it before CR0.PG = 1 is set. - * Then enable protected mode. However, PCID cannot be enabled - * if EFER.LMA=0, so set it separately. - */ - bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); - if (bad) - return X86EMUL_UNHANDLEABLE; - - bad = ctxt->ops->set_cr(ctxt, 0, cr0); - if (bad) - return X86EMUL_UNHANDLEABLE; - - if (cr4 & X86_CR4_PCIDE) { - bad = ctxt->ops->set_cr(ctxt, 4, cr4); - if (bad) - return X86EMUL_UNHANDLEABLE; - if (pcid) { - bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid); - if (bad) - return X86EMUL_UNHANDLEABLE; - } - - } - - return X86EMUL_CONTINUE; -} - -static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, - const char *smstate) -{ - struct desc_struct desc; - struct desc_ptr dt; - u16 selector; - u32 val, cr0, cr3, cr4; - int i; - - cr0 = GET_SMSTATE(u32, smstate, 0x7ffc); - cr3 = GET_SMSTATE(u32, smstate, 0x7ff8); - ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED; - ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0); - - for (i = 0; i < 8; i++) - *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4); - - val = GET_SMSTATE(u32, smstate, 0x7fcc); - - if (ctxt->ops->set_dr(ctxt, 6, val)) - return X86EMUL_UNHANDLEABLE; - - val = GET_SMSTATE(u32, smstate, 0x7fc8); - - if (ctxt->ops->set_dr(ctxt, 7, val)) - return X86EMUL_UNHANDLEABLE; - - selector = GET_SMSTATE(u32, smstate, 0x7fc4); - set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64)); - set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60)); - rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c)); - ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR); - - selector = GET_SMSTATE(u32, smstate, 0x7fc0); - set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80)); - set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c)); - rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78)); - ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR); - - dt.address = GET_SMSTATE(u32, 
smstate, 0x7f74); - dt.size = GET_SMSTATE(u32, smstate, 0x7f70); - ctxt->ops->set_gdt(ctxt, &dt); - - dt.address = GET_SMSTATE(u32, smstate, 0x7f58); - dt.size = GET_SMSTATE(u32, smstate, 0x7f54); - ctxt->ops->set_idt(ctxt, &dt); - - for (i = 0; i < 6; i++) { - int r = rsm_load_seg_32(ctxt, smstate, i); - if (r != X86EMUL_CONTINUE) - return r; - } - - cr4 = GET_SMSTATE(u32, smstate, 0x7f14); - - ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8)); - - return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); -} - -#ifdef CONFIG_X86_64 -static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, - const char *smstate) -{ - struct desc_struct desc; - struct desc_ptr dt; - u64 val, cr0, cr3, cr4; - u32 base3; - u16 selector; - int i, r; - - for (i = 0; i < 16; i++) - *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8); - - ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78); - ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED; - - val = GET_SMSTATE(u64, smstate, 0x7f68); - - if (ctxt->ops->set_dr(ctxt, 6, val)) - return X86EMUL_UNHANDLEABLE; - - val = GET_SMSTATE(u64, smstate, 0x7f60); - - if (ctxt->ops->set_dr(ctxt, 7, val)) - return X86EMUL_UNHANDLEABLE; - - cr0 = GET_SMSTATE(u64, smstate, 0x7f58); - cr3 = GET_SMSTATE(u64, smstate, 0x7f50); - cr4 = GET_SMSTATE(u64, smstate, 0x7f48); - ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00)); - val = GET_SMSTATE(u64, smstate, 0x7ed0); - - if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA)) - return X86EMUL_UNHANDLEABLE; - - selector = GET_SMSTATE(u32, smstate, 0x7e90); - rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8); - set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94)); - set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98)); - base3 = GET_SMSTATE(u32, smstate, 0x7e9c); - ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR); - - dt.size = GET_SMSTATE(u32, smstate, 0x7e84); - dt.address = GET_SMSTATE(u64, smstate, 0x7e88); - ctxt->ops->set_idt(ctxt, &dt); - - selector = GET_SMSTATE(u32, smstate, 0x7e70); - rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8); - set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74)); - set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78)); - base3 = GET_SMSTATE(u32, smstate, 0x7e7c); - ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR); - - dt.size = GET_SMSTATE(u32, smstate, 0x7e64); - dt.address = GET_SMSTATE(u64, smstate, 0x7e68); - ctxt->ops->set_gdt(ctxt, &dt); - - r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); - if (r != X86EMUL_CONTINUE) - return r; - - for (i = 0; i < 6; i++) { - r = rsm_load_seg_64(ctxt, smstate, i); - if (r != X86EMUL_CONTINUE) - return r; - } - - return X86EMUL_CONTINUE; -} -#endif - static int em_rsm(struct x86_emulate_ctxt *ctxt) { - unsigned long cr0, cr4, efer; - char buf[512]; - u64 smbase; - int ret; - if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0) return emulate_ud(ctxt); - smbase = ctxt->ops->get_smbase(ctxt); - - ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf)); - if (ret != X86EMUL_CONTINUE) - return X86EMUL_UNHANDLEABLE; - - if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) - ctxt->ops->set_nmi_mask(ctxt, false); - - ctxt->ops->exiting_smm(ctxt); - - /* - * Get back to real mode, to prepare a safe state in which to load - * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU - * supports long mode. 
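
Everything being deleted from em_rsm() and its rsm_load_* helpers here reads fixed offsets out of the 512-byte SMRAM state-save image through GET_SMSTATE() (the logic moves to the new smm.c rather than disappearing). Stripped of the macro layer, each access is just a typed load from a byte buffer at a spec-defined offset; a hypothetical standalone equivalent:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Read a u32 field (host byte order, as on x86) from the state-save image. */
static uint32_t demo_get_smstate_u32(const char *smstate, size_t off)
{
    uint32_t v;

    memcpy(&v, smstate + off, sizeof(v));       /* safe for any alignment */
    return v;
}
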
- */ - if (emulator_has_longmode(ctxt)) { - struct desc_struct cs_desc; - - /* Zero CR4.PCIDE before CR0.PG. */ - cr4 = ctxt->ops->get_cr(ctxt, 4); - if (cr4 & X86_CR4_PCIDE) - ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); - - /* A 32-bit code segment is required to clear EFER.LMA. */ - memset(&cs_desc, 0, sizeof(cs_desc)); - cs_desc.type = 0xb; - cs_desc.s = cs_desc.g = cs_desc.p = 1; - ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS); - } - - /* For the 64-bit case, this will clear EFER.LMA. */ - cr0 = ctxt->ops->get_cr(ctxt, 0); - if (cr0 & X86_CR0_PE) - ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE)); - - if (emulator_has_longmode(ctxt)) { - /* Clear CR4.PAE before clearing EFER.LME. */ - cr4 = ctxt->ops->get_cr(ctxt, 4); - if (cr4 & X86_CR4_PAE) - ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE); - - /* And finally go back to 32-bit mode. */ - efer = 0; - ctxt->ops->set_msr(ctxt, MSR_EFER, efer); - } - - /* - * Give leave_smm() a chance to make ISA-specific changes to the vCPU - * state (e.g. enter guest mode) before loading state from the SMM - * state-save area. - */ - if (ctxt->ops->leave_smm(ctxt, buf)) - goto emulate_shutdown; - -#ifdef CONFIG_X86_64 - if (emulator_has_longmode(ctxt)) - ret = rsm_load_state_64(ctxt, buf); - else -#endif - ret = rsm_load_state_32(ctxt, buf); - - if (ret != X86EMUL_CONTINUE) - goto emulate_shutdown; + if (ctxt->ops->leave_smm(ctxt)) + ctxt->ops->triple_fault(ctxt); - /* - * Note, the ctxt->ops callbacks are responsible for handling side - * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID - * runtime updates, etc... If that changes, e.g. this flow is moved - * out of the emulator to make it look more like enter_smm(), then - * those side effects need to be explicitly handled for both success - * and shutdown. 
- */ return emulator_recalc_and_set_mode(ctxt); - -emulate_shutdown: - ctxt->ops->triple_fault(ctxt); - return X86EMUL_CONTINUE; } static void diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 0adf4a437e85f6f7e67b7b281ebc2484b680ee2f..2c7f2a26421e825097982d800a03d09435e9917a 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -23,22 +23,25 @@ #include "ioapic.h" #include "cpuid.h" #include "hyperv.h" +#include "mmu.h" #include "xen.h" #include #include #include #include +#include #include #include +#include #include #include "trace.h" #include "irq.h" #include "fpu.h" -#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64) +#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, HV_VCPUS_PER_SPARSE_BANK) static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer, bool vcpu_kick); @@ -897,13 +900,15 @@ bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled); -bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu, - struct hv_vp_assist_page *assist_page) +int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu) { - if (!kvm_hv_assist_page_enabled(vcpu)) - return false; - return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, - assist_page, sizeof(*assist_page)); + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + + if (!hv_vcpu || !kvm_hv_assist_page_enabled(vcpu)) + return -EFAULT; + + return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, + &hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page)); } EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page); @@ -954,6 +959,11 @@ int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu) hv_vcpu->vp_index = vcpu->vcpu_idx; + for (i = 0; i < HV_NR_TLB_FLUSH_FIFOS; i++) { + INIT_KFIFO(hv_vcpu->tlb_flush_fifo[i].entries); + spin_lock_init(&hv_vcpu->tlb_flush_fifo[i].write_lock); + } + return 0; } @@ -1736,6 +1746,28 @@ static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks, } } +static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_banks[]) +{ + int valid_bit_nr = vp_id / HV_VCPUS_PER_SPARSE_BANK; + unsigned long sbank; + + if (!test_bit(valid_bit_nr, (unsigned long *)&valid_bank_mask)) + return false; + + /* + * The index into the sparse bank is the number of preceding bits in + * the valid mask. Optimize for VMs with <64 vCPUs by skipping the + * fancy math if there can't possibly be preceding bits. + */ + if (valid_bit_nr) + sbank = hweight64(valid_bank_mask & GENMASK_ULL(valid_bit_nr - 1, 0)); + else + sbank = 0; + + return test_bit(vp_id % HV_VCPUS_PER_SPARSE_BANK, + (unsigned long *)&sparse_banks[sbank]); +} + struct kvm_hv_hcall { u64 param; u64 ingpa; @@ -1749,57 +1781,173 @@ struct kvm_hv_hcall { sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS]; }; -static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc, - int consumed_xmm_halves, - u64 *sparse_banks, gpa_t offset) -{ - u16 var_cnt; - int i; - - if (hc->var_cnt > 64) - return -EINVAL; - /* Ignore banks that cannot possibly contain a legal VP index. */ - var_cnt = min_t(u16, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS); +static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc, + u16 orig_cnt, u16 cnt_cap, u64 *data, + int consumed_xmm_halves, gpa_t offset) +{ + /* + * Preserve the original count when ignoring entries via a "cap", KVM + * still needs to validate the guest input (though the non-XMM path + * punts on the checks). 
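
The fast-hypercall path below pulls its payload out of saved XMM registers, two 64-bit halves per register, skipping however many halves were already spent on the fixed parameters. The indexing in isolation, with the demo_* type invented here:

#include <stdint.h>

struct demo_sse128 {
    uint64_t lo, hi;                    /* one 128-bit XMM register */
};

static void demo_read_xmm(const struct demo_sse128 *xmm, int consumed_halves,
                          uint64_t *data, int cnt)
{
    for (int i = 0; i < cnt; i++) {
        int j = i + consumed_halves;    /* absolute half index */

        data[i] = (j & 1) ? xmm[j / 2].hi : xmm[j / 2].lo;
    }
}
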
+ */ + u16 cnt = min(orig_cnt, cnt_cap); + int i, j; if (hc->fast) { /* * Each XMM holds two sparse banks, but do not count halves that * have already been consumed for hypercall parameters. */ - if (hc->var_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - consumed_xmm_halves) + if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - consumed_xmm_halves) return HV_STATUS_INVALID_HYPERCALL_INPUT; - for (i = 0; i < var_cnt; i++) { - int j = i + consumed_xmm_halves; + + for (i = 0; i < cnt; i++) { + j = i + consumed_xmm_halves; if (j % 2) - sparse_banks[i] = sse128_hi(hc->xmm[j / 2]); + data[i] = sse128_hi(hc->xmm[j / 2]); else - sparse_banks[i] = sse128_lo(hc->xmm[j / 2]); + data[i] = sse128_lo(hc->xmm[j / 2]); } return 0; } - return kvm_read_guest(kvm, hc->ingpa + offset, sparse_banks, - var_cnt * sizeof(*sparse_banks)); + return kvm_read_guest(kvm, hc->ingpa + offset, data, + cnt * sizeof(*data)); +} + +static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc, + u64 *sparse_banks, int consumed_xmm_halves, + gpa_t offset) +{ + if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS) + return -EINVAL; + + /* Cap var_cnt to ignore banks that cannot contain a legal VP index. */ + return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS, + sparse_banks, consumed_xmm_halves, offset); +} + +static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[], + int consumed_xmm_halves, gpa_t offset) +{ + return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, + entries, consumed_xmm_halves, offset); +} + +static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu, + struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo, + u64 *entries, int count) +{ + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + u64 flush_all_entry = KVM_HV_TLB_FLUSHALL_ENTRY; + + if (!hv_vcpu) + return; + + spin_lock(&tlb_flush_fifo->write_lock); + + /* + * All entries should fit on the fifo leaving one free for 'flush all' + * entry in case another request comes in. In case there's not enough + * space, just put 'flush all' entry there. + */ + if (count && entries && count < kfifo_avail(&tlb_flush_fifo->entries)) { + WARN_ON(kfifo_in(&tlb_flush_fifo->entries, entries, count) != count); + goto out_unlock; + } + + /* + * Note: full fifo always contains 'flush all' entry, no need to check the + * return value. + */ + kfifo_in(&tlb_flush_fifo->entries, &flush_all_entry, 1); + +out_unlock: + spin_unlock(&tlb_flush_fifo->write_lock); +} + +int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + u64 entries[KVM_HV_TLB_FLUSH_FIFO_SIZE]; + int i, j, count; + gva_t gva; + + if (!tdp_enabled || !hv_vcpu) + return -EINVAL; + + tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu)); + + count = kfifo_out(&tlb_flush_fifo->entries, entries, KVM_HV_TLB_FLUSH_FIFO_SIZE); + + for (i = 0; i < count; i++) { + if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY) + goto out_flush_all; + + /* + * Lower 12 bits of 'address' encode the number of additional + * pages to flush. + */ + gva = entries[i] & PAGE_MASK; + for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++) + static_call(kvm_x86_flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE); + + ++vcpu->stat.tlb_flush; + } + return 0; + +out_flush_all: + kfifo_reset_out(&tlb_flush_fifo->entries); + + /* Fall back to full flush. 
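
hv_tlb_flush_enqueue() above degrades gracefully rather than dropping work: if the requested entries do not all fit while keeping one slot spare, it queues a single "flush everything" sentinel instead. The policy in a freestanding sketch (DEMO_* names and the array-backed queue are stand-ins for the kernel's kfifo):

#include <stddef.h>
#include <stdint.h>

#define DEMO_FIFO_SIZE 16
#define DEMO_FLUSH_ALL (~UINT64_C(0))   /* sentinel, like KVM_HV_TLB_FLUSHALL_ENTRY */

struct demo_fifo {
    uint64_t entries[DEMO_FIFO_SIZE];
    size_t count;
};

static void demo_enqueue(struct demo_fifo *f, const uint64_t *e, size_t n)
{
    if (n && e && f->count + n < DEMO_FIFO_SIZE) {      /* leave one slot free */
        for (size_t i = 0; i < n; i++)
            f->entries[f->count++] = e[i];
        return;
    }

    if (f->count < DEMO_FIFO_SIZE)      /* a full queue already holds the sentinel */
        f->entries[f->count++] = DEMO_FLUSH_ALL;
}

The consumer side (kvm_hv_vcpu_flush_tlb() above) mirrors this: hitting the sentinel abandons the precise list and falls back to a full TLB flush.
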
*/ + return -ENOSPC; } static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) { + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + u64 *sparse_banks = hv_vcpu->sparse_banks; struct kvm *kvm = vcpu->kvm; struct hv_tlb_flush_ex flush_ex; struct hv_tlb_flush flush; DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS); + struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo; + /* + * Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE' + * entries on the TLB flush fifo. The last entry, however, needs to be + * always left free for 'flush all' entry which gets placed when + * there is not enough space to put all the requested entries. + */ + u64 __tlb_flush_entries[KVM_HV_TLB_FLUSH_FIFO_SIZE - 1]; + u64 *tlb_flush_entries; u64 valid_bank_mask; - u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; + struct kvm_vcpu *v; + unsigned long i; bool all_cpus; + int consumed_xmm_halves = 0; + gpa_t data_offset; /* - * The Hyper-V TLFS doesn't allow more than 64 sparse banks, e.g. the - * valid mask is a u64. Fail the build if KVM's max allowed number of - * vCPUs (>4096) would exceed this limit, KVM will additional changes - * for Hyper-V support to avoid setting the guest up to fail. + * The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS + * sparse banks. Fail the build if KVM's max allowed number of + * vCPUs (>4096) exceeds this limit. */ - BUILD_BUG_ON(KVM_HV_MAX_SPARSE_VCPU_SET_BITS > 64); + BUILD_BUG_ON(KVM_HV_MAX_SPARSE_VCPU_SET_BITS > HV_MAX_SPARSE_VCPU_BANKS); + + /* + * 'Slow' hypercall's first parameter is the address in guest's memory + * where hypercall parameters are placed. This is either a GPA or a + * nested GPA when KVM is handling the call from L2 ('direct' TLB + * flush). Translate the address here so the memory can be uniformly + * read with kvm_read_guest(). 
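
The sparse-bank test shared by the flush and IPI paths (hv_is_vp_in_sparse_set(), added earlier in this file) relies on banks being packed: only banks with their valid bit set occupy a slot, so a bank's index into sparse_banks[] is the popcount of the valid bits below it. A standalone version using the GCC/Clang popcount builtin, demo names invented:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_VCPUS_PER_BANK 64

static bool demo_vp_in_sparse_set(uint32_t vp, uint64_t valid_bank_mask,
                                  const uint64_t *sparse_banks)
{
    uint32_t bank = vp / DEMO_VCPUS_PER_BANK;
    int slot;

    if (bank >= 64 || !(valid_bank_mask & (UINT64_C(1) << bank)))
        return false;                   /* bank not present at all */

    /* Slot = number of valid banks strictly below ours. */
    slot = __builtin_popcountll(valid_bank_mask & ((UINT64_C(1) << bank) - 1));

    return (sparse_banks[slot] >> (vp % DEMO_VCPUS_PER_BANK)) & 1;
}
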
+ */ + if (!hc->fast && is_guest_mode(vcpu)) { + hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa, 0, NULL); + if (unlikely(hc->ingpa == INVALID_GPA)) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + } if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST || hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) { @@ -1807,14 +1955,17 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) flush.address_space = hc->ingpa; flush.flags = hc->outgpa; flush.processor_mask = sse128_lo(hc->xmm[0]); + consumed_xmm_halves = 1; } else { if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush, sizeof(flush)))) return HV_STATUS_INVALID_HYPERCALL_INPUT; + data_offset = sizeof(flush); } trace_kvm_hv_flush_tlb(flush.processor_mask, - flush.address_space, flush.flags); + flush.address_space, flush.flags, + is_guest_mode(vcpu)); valid_bank_mask = BIT_ULL(0); sparse_banks[0] = flush.processor_mask; @@ -1834,16 +1985,18 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) flush_ex.flags = hc->outgpa; memcpy(&flush_ex.hv_vp_set, &hc->xmm[0], sizeof(hc->xmm[0])); + consumed_xmm_halves = 2; } else { if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex, sizeof(flush_ex)))) return HV_STATUS_INVALID_HYPERCALL_INPUT; + data_offset = sizeof(flush_ex); } trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask, flush_ex.hv_vp_set.format, flush_ex.address_space, - flush_ex.flags); + flush_ex.flags, is_guest_mode(vcpu)); valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask; all_cpus = flush_ex.hv_vp_set.format != @@ -1852,29 +2005,95 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) if (hc->var_cnt != hweight64(valid_bank_mask)) return HV_STATUS_INVALID_HYPERCALL_INPUT; - if (all_cpus) - goto do_flush; + if (!all_cpus) { + if (!hc->var_cnt) + goto ret_success; - if (!hc->var_cnt) - goto ret_success; + if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks, + consumed_xmm_halves, data_offset)) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + } - if (kvm_get_sparse_vp_set(kvm, hc, 2, sparse_banks, - offsetof(struct hv_tlb_flush_ex, - hv_vp_set.bank_contents))) + /* + * Hyper-V TLFS doesn't explicitly forbid non-empty sparse vCPU + * banks (and, thus, non-zero 'var_cnt') for the 'all vCPUs' + * case (HV_GENERIC_SET_ALL). Always adjust data_offset and + * consumed_xmm_halves to make sure TLB flush entries are read + * from the correct offset. + */ + data_offset += hc->var_cnt * sizeof(sparse_banks[0]); + consumed_xmm_halves += hc->var_cnt; + } + + if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE || + hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX || + hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) { + tlb_flush_entries = NULL; + } else { + if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries, + consumed_xmm_halves, data_offset)) return HV_STATUS_INVALID_HYPERCALL_INPUT; + tlb_flush_entries = __tlb_flush_entries; } -do_flush: /* * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't * analyze it here, flush TLB regardless of the specified address space. 
*/ - if (all_cpus) { - kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH_GUEST); - } else { + if (all_cpus && !is_guest_mode(vcpu)) { + kvm_for_each_vcpu(i, v, kvm) { + tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false); + hv_tlb_flush_enqueue(v, tlb_flush_fifo, + tlb_flush_entries, hc->rep_cnt); + } + + kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH); + } else if (!is_guest_mode(vcpu)) { sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask); - kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST, vcpu_mask); + for_each_set_bit(i, vcpu_mask, KVM_MAX_VCPUS) { + v = kvm_get_vcpu(kvm, i); + if (!v) + continue; + tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false); + hv_tlb_flush_enqueue(v, tlb_flush_fifo, + tlb_flush_entries, hc->rep_cnt); + } + + kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask); + } else { + struct kvm_vcpu_hv *hv_v; + + bitmap_zero(vcpu_mask, KVM_MAX_VCPUS); + + kvm_for_each_vcpu(i, v, kvm) { + hv_v = to_hv_vcpu(v); + + /* + * The following check races with nested vCPUs entering/exiting + * and/or migrating between L1's vCPUs, however the only case when + * KVM *must* flush the TLB is when the target L2 vCPU keeps + * running on the same L1 vCPU from the moment of the request until + * kvm_hv_flush_tlb() returns. TLB is fully flushed in all other + * cases, e.g. when the target L2 vCPU migrates to a different L1 + * vCPU or when the corresponding L1 vCPU temporarily switches to a + * different L2 vCPU while the request is being processed. + */ + if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id) + continue; + + if (!all_cpus && + !hv_is_vp_in_sparse_set(hv_v->nested.vp_id, valid_bank_mask, + sparse_banks)) + continue; + + __set_bit(i, vcpu_mask); + tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, true); + hv_tlb_flush_enqueue(v, tlb_flush_fifo, + tlb_flush_entries, hc->rep_cnt); + } + + kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask); } ret_success: @@ -1883,8 +2102,8 @@ ret_success: ((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET); } -static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector, - unsigned long *vcpu_bitmap) +static void kvm_hv_send_ipi_to_many(struct kvm *kvm, u32 vector, + u64 *sparse_banks, u64 valid_bank_mask) { struct kvm_lapic_irq irq = { .delivery_mode = APIC_DM_FIXED, @@ -1894,7 +2113,9 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector, unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { - if (vcpu_bitmap && !test_bit(i, vcpu_bitmap)) + if (sparse_banks && + !hv_is_vp_in_sparse_set(kvm_hv_get_vpindex(vcpu), + valid_bank_mask, sparse_banks)) continue; /* We fail only when APIC is disabled */ @@ -1904,12 +2125,12 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector, static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) { + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + u64 *sparse_banks = hv_vcpu->sparse_banks; struct kvm *kvm = vcpu->kvm; struct hv_send_ipi_ex send_ipi_ex; struct hv_send_ipi send_ipi; - DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS); u64 valid_bank_mask; - u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; u32 vector; bool all_cpus; @@ -1959,7 +2180,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) if (!hc->var_cnt) goto ret_success; - if (kvm_get_sparse_vp_set(kvm, hc, 1, sparse_banks, + if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks, 1, offsetof(struct hv_send_ipi_ex, vp_set.bank_contents))) return HV_STATUS_INVALID_HYPERCALL_INPUT; @@ -1969,13 +2190,10 @@ check_and_send_ipi: if ((vector <
HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR)) return HV_STATUS_INVALID_HYPERCALL_INPUT; - if (all_cpus) { - kvm_send_ipi_to_many(kvm, vector, NULL); - } else { - sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask); - - kvm_send_ipi_to_many(kvm, vector, vcpu_mask); - } + if (all_cpus) + kvm_hv_send_ipi_to_many(kvm, vector, NULL, 0); + else + kvm_hv_send_ipi_to_many(kvm, vector, sparse_banks, valid_bank_mask); ret_success: return HV_STATUS_SUCCESS; @@ -2062,10 +2280,25 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result) { + u32 tlb_lock_count = 0; + int ret; + + if (hv_result_success(result) && is_guest_mode(vcpu) && + kvm_hv_is_tlb_flush_hcall(vcpu) && + kvm_read_guest(vcpu->kvm, to_hv_vcpu(vcpu)->nested.pa_page_gpa, + &tlb_lock_count, sizeof(tlb_lock_count))) + result = HV_STATUS_INVALID_HYPERCALL_INPUT; + trace_kvm_hv_hypercall_done(result); kvm_hv_hypercall_set_result(vcpu, result); ++vcpu->stat.hypercalls; - return kvm_skip_emulated_instruction(vcpu); + + ret = kvm_skip_emulated_instruction(vcpu); + + if (tlb_lock_count) + kvm_x86_ops.nested_ops->hv_inject_synthetic_vmexit_post_tlb_flush(vcpu); + + return ret; } static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) @@ -2502,6 +2735,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, ent->ebx |= HV_DEBUGGING; ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE; ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE; + ent->edx |= HV_FEATURE_EXT_GVA_RANGES_FLUSH; /* * Direct Synthetic timers only make sense with in-kernel @@ -2545,6 +2779,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, case HYPERV_CPUID_NESTED_FEATURES: ent->eax = evmcs_ver; + ent->eax |= HV_X64_NESTED_DIRECT_FLUSH; ent->eax |= HV_X64_NESTED_MSR_BITMAP; ent->ebx |= HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL; break; diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index 1030b1b50552373666a59d371e99202389abfab0..9f96414a31c52d2a514582422855d5149d1ebfc7 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -22,6 +22,7 @@ #define __ARCH_X86_KVM_HYPERV_H__ #include +#include "x86.h" /* "Hv#1" signature */ #define HYPERV_CPUID_SIGNATURE_EAX 0x31237648 @@ -107,8 +108,7 @@ int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages); void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu); bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu); -bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu, - struct hv_vp_assist_page *assist_page); +int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu); static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu, int timer_index) @@ -151,4 +151,64 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args); int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries); +static inline struct kvm_vcpu_hv_tlb_flush_fifo *kvm_hv_get_tlb_flush_fifo(struct kvm_vcpu *vcpu, + bool is_guest_mode) +{ + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + int i = is_guest_mode ? 
HV_L2_TLB_FLUSH_FIFO : + HV_L1_TLB_FLUSH_FIFO; + + return &hv_vcpu->tlb_flush_fifo[i]; +} + +static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo; + + if (!to_hv_vcpu(vcpu) || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu)) + return; + + tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu)); + + kfifo_reset_out(&tlb_flush_fifo->entries); +} + +static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + + return hv_vcpu && + (hv_vcpu->cpuid_cache.nested_eax & HV_X64_NESTED_DIRECT_FLUSH); +} + +static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + u16 code; + + if (!hv_vcpu) + return false; + + code = is_64_bit_hypercall(vcpu) ? kvm_rcx_read(vcpu) : + kvm_rax_read(vcpu); + + return (code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE || + code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST || + code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX || + code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX); +} + +static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu) +{ + if (!to_hv_vcpu(vcpu)) + return 0; + + if (!kvm_hv_assist_page_enabled(vcpu)) + return 0; + + return kvm_hv_get_assist_page(vcpu); +} + +int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu); + #endif diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index f371f1292ca3e0dcd3cf05132ce4fbf7153949ba..a70952eca9058196e1bd0b49f2c10be76637dd2b 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -31,7 +31,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) return r; } -EXPORT_SYMBOL(kvm_cpu_has_pending_timer); /* * check if there is a pending userspace external interrupt @@ -150,7 +149,6 @@ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu) if (kvm_xen_timer_enabled(vcpu)) kvm_xen_inject_timer_irqs(vcpu); } -EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs); void __kvm_migrate_timers(struct kvm_vcpu *vcpu) { @@ -165,3 +163,8 @@ bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args) return resample ? irqchip_kernel(kvm) : irqchip_in_kernel(kvm); } + +bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) +{ + return irqchip_in_kernel(kvm); +} diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 3febc342360cc7524b6c1665ee7621a1d0e1228e..c09174f73a344f79d61967ed2e2e5ca55c4d5e9b 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -200,9 +200,4 @@ static inline bool is_guest_mode(struct kvm_vcpu *vcpu) return vcpu->arch.hflags & HF_GUEST_MASK; } -static inline bool is_smm(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.hflags & HF_SMM_MASK; -} - #endif diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index 89246446d6aa9d5254a603a29163f7aa62547896..2d9662be8333781c8851cd779869b680bd39b1b1 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -116,16 +116,6 @@ struct x86_emulate_ops { unsigned int bytes, struct x86_exception *fault, bool system); - /* - * read_phys: Read bytes of standard (non-emulated/special) memory. - * Used for descriptor reading. - * @addr: [IN ] Physical address from which to read. - * @val: [OUT] Value read from memory. - * @bytes: [IN ] Number of bytes to read from memory. - */ - int (*read_phys)(struct x86_emulate_ctxt *ctxt, unsigned long addr, - void *val, unsigned int bytes); - /* * write_std: Write bytes of standard (non-emulated/special) memory. * Used for descriptor writing. 
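
The kvm_emulate.h hunks that follow move the emulator's GPR cache helpers (reg_read(), reg_write(), reg_rmw()) into the header so the new smm.c can share them. The cache is simply a shadow array plus two bitmasks: valid says a slot mirrors the vCPU register, dirty says it must be written back. A compact userspace sketch of the writeback half, demo_* names invented:

#include <stdint.h>

#define DEMO_NGPRS 16

struct demo_regcache {
    uint64_t vals[DEMO_NGPRS];
    uint32_t valid;                     /* bit n: vals[n] was fetched */
    uint32_t dirty;                     /* bit n: vals[n] needs writeback */
};

/* Counterpart of writeback_registers(): push back only what was written. */
static void demo_writeback(struct demo_regcache *c,
                           void (*write_gpr)(unsigned int nr, uint64_t val))
{
    uint32_t dirty = c->dirty;

    for (unsigned int nr = 0; dirty; dirty >>= 1, nr++)
        if (dirty & 1)
            write_gpr(nr, c->vals[nr]);

    c->dirty = 0;
}
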
@@ -209,11 +199,8 @@ struct x86_emulate_ops { int (*cpl)(struct x86_emulate_ctxt *ctxt); void (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest); int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); - u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt); - void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase); int (*set_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data); int (*get_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata); - int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data); int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata); int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc); int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata); @@ -234,8 +221,7 @@ struct x86_emulate_ops { void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked); unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt); - void (*exiting_smm)(struct x86_emulate_ctxt *ctxt); - int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const char *smstate); + int (*leave_smm)(struct x86_emulate_ctxt *ctxt); void (*triple_fault)(struct x86_emulate_ctxt *ctxt); int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr); }; @@ -292,7 +278,6 @@ enum x86emul_mode { /* These match some of the HF_* flags defined in kvm_host.h */ #define X86EMUL_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */ #define X86EMUL_SMM_MASK (1 << 6) -#define X86EMUL_SMM_INSIDE_NMI_MASK (1 << 7) /* * fastop functions are declared as taking a never-defined fastop parameter, @@ -526,4 +511,35 @@ void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt); void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt); bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt); +static inline ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr) +{ + if (KVM_EMULATOR_BUG_ON(nr >= NR_EMULATOR_GPRS, ctxt)) + nr &= NR_EMULATOR_GPRS - 1; + + if (!(ctxt->regs_valid & (1 << nr))) { + ctxt->regs_valid |= 1 << nr; + ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr); + } + return ctxt->_regs[nr]; +} + +static inline ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr) +{ + if (KVM_EMULATOR_BUG_ON(nr >= NR_EMULATOR_GPRS, ctxt)) + nr &= NR_EMULATOR_GPRS - 1; + + BUILD_BUG_ON(sizeof(ctxt->regs_dirty) * BITS_PER_BYTE < NR_EMULATOR_GPRS); + BUILD_BUG_ON(sizeof(ctxt->regs_valid) * BITS_PER_BYTE < NR_EMULATOR_GPRS); + + ctxt->regs_valid |= 1 << nr; + ctxt->regs_dirty |= 1 << nr; + return &ctxt->_regs[nr]; +} + +static inline ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr) +{ + reg_read(ctxt, nr); + return reg_write(ctxt, nr); +} + #endif /* _ASM_X86_KVM_X86_EMULATE_H */ diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index d7639d126e6c7ae255f6bce48634ebe1102a5b84..4efdb4a4d72c6084f67f00ce8ae06292f68532e6 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -42,6 +42,7 @@ #include "x86.h" #include "cpuid.h" #include "hyperv.h" +#include "smm.h" #ifndef CONFIG_X86_64 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y)) @@ -159,7 +160,6 @@ bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu) && !(kvm_mwait_in_guest(vcpu->kvm) || kvm_can_post_timer_interrupt(vcpu)); } -EXPORT_SYMBOL_GPL(kvm_can_use_hv_timer); static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu) { @@ -1170,9 +1170,10 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, break; case APIC_DM_SMI: - result = 1; - kvm_make_request(KVM_REQ_SMI, vcpu); - kvm_vcpu_kick(vcpu); + if 
(!kvm_inject_smi(vcpu)) { + kvm_vcpu_kick(vcpu); + result = 1; + } break; case APIC_DM_NMI: @@ -1912,7 +1913,6 @@ bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu) return vcpu->arch.apic->lapic_timer.hv_timer_in_use; } -EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use); static void cancel_hv_timer(struct kvm_lapic *apic) { @@ -2430,7 +2430,6 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu) apic->isr_count = count_vectors(apic->regs + APIC_ISR); } } -EXPORT_SYMBOL_GPL(kvm_apic_update_apicv); void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) { @@ -2722,8 +2721,6 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR); __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32); } - } else { - kvm_lapic_xapic_id_updated(vcpu->arch.apic); } return 0; @@ -2759,6 +2756,9 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) } memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s)); + if (!apic_x2apic_mode(apic)) + kvm_lapic_xapic_id_updated(apic); + atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); kvm_recalculate_apic_map(vcpu->kvm); kvm_apic_set_version(vcpu); diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index a5ac4a5a517920ed8dff0ba1f14cde110606832d..28e3769066e215332197df09381ec67252a67a5d 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -7,7 +7,7 @@ #include #include "hyperv.h" -#include "kvm_cache_regs.h" +#include "smm.h" #define KVM_APIC_INIT 0 #define KVM_APIC_SIPI 1 diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index b6f96d47e596d1018987755a2f96a8a9305fda0c..835426254e768c1e7f50aab3f84867bd75906f56 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -22,6 +22,7 @@ #include "tdp_mmu.h" #include "x86.h" #include "kvm_cache_regs.h" +#include "smm.h" #include "kvm_emulate.h" #include "cpuid.h" #include "spte.h" @@ -802,15 +803,31 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) kvm_flush_remote_tlbs_with_address(kvm, gfn, 1); } -void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) +void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) { - if (sp->lpage_disallowed) + /* + * If it's possible to replace the shadow page with an NX huge page, + * i.e. if the shadow page is the only thing currently preventing KVM + * from using a huge page, add the shadow page to the list of "to be + * zapped for NX recovery" pages. Note, the shadow page can already be + * on the list if KVM is reusing an existing shadow page, i.e. if KVM + * links a shadow page at multiple points. 
+ */ + if (!list_empty(&sp->possible_nx_huge_page_link)) return; ++kvm->stat.nx_lpage_splits; - list_add_tail(&sp->lpage_disallowed_link, - &kvm->arch.lpage_disallowed_mmu_pages); - sp->lpage_disallowed = true; + list_add_tail(&sp->possible_nx_huge_page_link, + &kvm->arch.possible_nx_huge_pages); +} + +static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp, + bool nx_huge_page_possible) +{ + sp->nx_huge_page_disallowed = true; + + if (nx_huge_page_possible) + track_possible_nx_huge_page(kvm, sp); } static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) @@ -830,11 +847,20 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) kvm_mmu_gfn_allow_lpage(slot, gfn); } -void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) +void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) { + if (list_empty(&sp->possible_nx_huge_page_link)) + return; + --kvm->stat.nx_lpage_splits; - sp->lpage_disallowed = false; - list_del(&sp->lpage_disallowed_link); + list_del_init(&sp->possible_nx_huge_page_link); +} + +static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + sp->nx_huge_page_disallowed = false; + + untrack_possible_nx_huge_page(kvm, sp); } static struct kvm_memory_slot * @@ -1645,7 +1671,7 @@ static int is_empty_shadow_page(u64 *spt) u64 *pos; u64 *end; - for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++) + for (pos = spt, end = pos + SPTE_ENT_PER_PAGE; pos != end; pos++) if (is_shadow_present_pte(*pos)) { printk(KERN_ERR "%s: %p %llx\n", __func__, pos, *pos); @@ -1793,7 +1819,7 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp, continue; } - child = to_shadow_page(ent & SPTE_BASE_ADDR_MASK); + child = spte_to_child_sp(ent); if (child->unsync_children) { if (mmu_pages_add(pvec, child, i)) @@ -1894,7 +1920,7 @@ static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) if (sp->role.invalid) return true; - /* TDP MMU pages due not use the MMU generation. */ + /* TDP MMU pages do not use the MMU generation. */ return !sp->tdp_mmu_page && unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); } @@ -2129,6 +2155,8 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm, set_page_private(virt_to_page(sp->spt), (unsigned long)sp); + INIT_LIST_HEAD(&sp->possible_nx_huge_page_link); + /* * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages() * depends on valid pages being added to the head of the list. See @@ -2350,7 +2378,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, * so we should update the spte at this point to get * a new sp with the correct access. 
*/ - child = to_shadow_page(*sptep & SPTE_BASE_ADDR_MASK); + child = spte_to_child_sp(*sptep); if (child->role.access == direct_access) return; @@ -2371,7 +2399,7 @@ static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, if (is_last_spte(pte, sp->role.level)) { drop_spte(kvm, spte); } else { - child = to_shadow_page(pte & SPTE_BASE_ADDR_MASK); + child = spte_to_child_sp(pte); drop_parent_pte(child, spte); /* @@ -2487,8 +2515,8 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm, zapped_root = !is_obsolete_sp(kvm, sp); } - if (sp->lpage_disallowed) - unaccount_huge_nx_page(kvm, sp); + if (sp->nx_huge_page_disallowed) + unaccount_nx_huge_page(kvm, sp); sp->role.invalid = 1; @@ -2811,7 +2839,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, struct kvm_mmu_page *child; u64 pte = *sptep; - child = to_shadow_page(pte & SPTE_BASE_ADDR_MASK); + child = spte_to_child_sp(pte); drop_parent_pte(child, sptep); flush = true; } else if (pfn != spte_to_pfn(*sptep)) { @@ -3085,7 +3113,8 @@ void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_ if (cur_level > PG_LEVEL_4K && cur_level == fault->goal_level && is_shadow_present_pte(spte) && - !is_large_pte(spte)) { + !is_large_pte(spte) && + spte_to_child_sp(spte)->nx_huge_page_disallowed) { /* * A small SPTE exists for this pfn, but FNAME(fetch) * and __direct_map would like to create a large PTE @@ -3127,9 +3156,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) continue; link_shadow_page(vcpu, it.sptep, sp); - if (fault->is_tdp && fault->huge_page_disallowed && - fault->req_level >= it.level) - account_huge_nx_page(vcpu->kvm, sp); + if (fault->huge_page_disallowed) + account_nx_huge_page(vcpu->kvm, sp, + fault->req_level >= it.level); } if (WARN_ON_ONCE(it.level != fault->goal_level)) @@ -3149,8 +3178,13 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct * send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk); } -static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) +static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) { + if (is_sigpending_pfn(pfn)) { + kvm_handle_signal_exit(vcpu); + return -EINTR; + } + /* * Do not cache the mmio info caused by writing the readonly gfn * into the spte otherwise read access on readonly gfn also can @@ -3172,7 +3206,7 @@ static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fau { /* The pfn is invalid, report the error! */ if (unlikely(is_error_pfn(fault->pfn))) - return kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn); + return kvm_handle_error_pfn(vcpu, fault->gfn, fault->pfn); if (unlikely(!fault->slot)) { gva_t gva = fault->is_tdp ? 0 : fault->addr; @@ -3423,7 +3457,11 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, if (!VALID_PAGE(*root_hpa)) return; - sp = to_shadow_page(*root_hpa & SPTE_BASE_ADDR_MASK); + /* + * The "root" may be a special root, e.g. a PAE entry, treat it as a + * SPTE to ensure any non-PA bits are dropped. 
+ */ + sp = spte_to_child_sp(*root_hpa); if (WARN_ON(!sp)) return; @@ -3908,8 +3946,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) hpa_t root = vcpu->arch.mmu->pae_root[i]; if (IS_VALID_PAE_ROOT(root)) { - root &= SPTE_BASE_ADDR_MASK; - sp = to_shadow_page(root); + sp = spte_to_child_sp(root); mmu_sync_children(vcpu, sp, true); } } @@ -4170,7 +4207,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) } async = false; - fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, &async, + fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, &async, fault->write, &fault->map_writable, &fault->hva); if (!async) @@ -4187,7 +4224,12 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) } } - fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL, + /* + * Allow gup to bail on pending non-fatal signals when it's also allowed + * to wait for IO. Note, gup always bails if it is unable to quickly + * get a page and a fatal signal, i.e. SIGKILL, is pending. + */ + fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, true, NULL, fault->write, &fault->map_writable, &fault->hva); return RET_PF_CONTINUE; @@ -5972,7 +6014,7 @@ int kvm_mmu_init_vm(struct kvm *kvm) INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); - INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages); + INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages); spin_lock_init(&kvm->arch.mmu_unsync_pages_lock); r = kvm_mmu_init_tdp_mmu(kvm); @@ -6657,7 +6699,7 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) kvm_mmu_zap_all_fast(kvm); mutex_unlock(&kvm->slots_lock); - wake_up_process(kvm->arch.nx_lpage_recovery_thread); + wake_up_process(kvm->arch.nx_huge_page_recovery_thread); } mutex_unlock(&kvm_lock); } @@ -6789,7 +6831,7 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel mutex_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) - wake_up_process(kvm->arch.nx_lpage_recovery_thread); + wake_up_process(kvm->arch.nx_huge_page_recovery_thread); mutex_unlock(&kvm_lock); } @@ -6797,9 +6839,10 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel return err; } -static void kvm_recover_nx_lpages(struct kvm *kvm) +static void kvm_recover_nx_huge_pages(struct kvm *kvm) { unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits; + struct kvm_memory_slot *slot; int rcu_idx; struct kvm_mmu_page *sp; unsigned int ratio; @@ -6820,24 +6863,55 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) ratio = READ_ONCE(nx_huge_pages_recovery_ratio); to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0; for ( ; to_zap; --to_zap) { - if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) + if (list_empty(&kvm->arch.possible_nx_huge_pages)) break; /* * We use a separate list instead of just using active_mmu_pages - * because the number of lpage_disallowed pages is expected to - * be relatively small compared to the total. + * because the number of shadow pages that be replaced with an + * NX huge page is expected to be relatively small compared to + * the total number of shadow pages. And because the TDP MMU + * doesn't use active_mmu_pages. 
*/ - sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages, + sp = list_first_entry(&kvm->arch.possible_nx_huge_pages, struct kvm_mmu_page, - lpage_disallowed_link); - WARN_ON_ONCE(!sp->lpage_disallowed); - if (is_tdp_mmu_page(sp)) { + possible_nx_huge_page_link); + WARN_ON_ONCE(!sp->nx_huge_page_disallowed); + WARN_ON_ONCE(!sp->role.direct); + + /* + * Unaccount and do not attempt to recover any NX Huge Pages + * that are being dirty tracked, as they would just be faulted + * back in as 4KiB pages. The NX Huge Pages in this slot will be + * recovered, along with all the other huge pages in the slot, + * when dirty logging is disabled. + * + * Since gfn_to_memslot() is relatively expensive, it helps to + * skip it if it the test cannot possibly return true. On the + * other hand, if any memslot has logging enabled, chances are + * good that all of them do, in which case unaccount_nx_huge_page() + * is much cheaper than zapping the page. + * + * If a memslot update is in progress, reading an incorrect value + * of kvm->nr_memslots_dirty_logging is not a problem: if it is + * becoming zero, gfn_to_memslot() will be done unnecessarily; if + * it is becoming nonzero, the page will be zapped unnecessarily. + * Either way, this only affects efficiency in racy situations, + * and not correctness. + */ + slot = NULL; + if (atomic_read(&kvm->nr_memslots_dirty_logging)) { + slot = gfn_to_memslot(kvm, sp->gfn); + WARN_ON_ONCE(!slot); + } + + if (slot && kvm_slot_dirty_track_enabled(slot)) + unaccount_nx_huge_page(kvm, sp); + else if (is_tdp_mmu_page(sp)) flush |= kvm_tdp_mmu_zap_sp(kvm, sp); - } else { + else kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); - WARN_ON_ONCE(sp->lpage_disallowed); - } + WARN_ON_ONCE(sp->nx_huge_page_disallowed); if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); @@ -6857,7 +6931,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) srcu_read_unlock(&kvm->srcu, rcu_idx); } -static long get_nx_lpage_recovery_timeout(u64 start_time) +static long get_nx_huge_page_recovery_timeout(u64 start_time) { bool enabled; uint period; @@ -6868,19 +6942,19 @@ static long get_nx_lpage_recovery_timeout(u64 start_time) : MAX_SCHEDULE_TIMEOUT; } -static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data) +static int kvm_nx_huge_page_recovery_worker(struct kvm *kvm, uintptr_t data) { u64 start_time; long remaining_time; while (true) { start_time = get_jiffies_64(); - remaining_time = get_nx_lpage_recovery_timeout(start_time); + remaining_time = get_nx_huge_page_recovery_timeout(start_time); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop() && remaining_time > 0) { schedule_timeout(remaining_time); - remaining_time = get_nx_lpage_recovery_timeout(start_time); + remaining_time = get_nx_huge_page_recovery_timeout(start_time); set_current_state(TASK_INTERRUPTIBLE); } @@ -6889,7 +6963,7 @@ static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data) if (kthread_should_stop()) return 0; - kvm_recover_nx_lpages(kvm); + kvm_recover_nx_huge_pages(kvm); } } @@ -6897,17 +6971,17 @@ int kvm_mmu_post_init_vm(struct kvm *kvm) { int err; - err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0, + err = kvm_vm_create_worker_thread(kvm, kvm_nx_huge_page_recovery_worker, 0, "kvm-nx-lpage-recovery", - &kvm->arch.nx_lpage_recovery_thread); + &kvm->arch.nx_huge_page_recovery_thread); if (!err) - kthread_unpark(kvm->arch.nx_lpage_recovery_thread); + 
kthread_unpark(kvm->arch.nx_huge_page_recovery_thread); return err; } void kvm_mmu_pre_destroy_vm(struct kvm *kvm) { - if (kvm->arch.nx_lpage_recovery_thread) - kthread_stop(kvm->arch.nx_lpage_recovery_thread); + if (kvm->arch.nx_huge_page_recovery_thread) + kthread_stop(kvm->arch.nx_huge_page_recovery_thread); } diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 582def531d4d9e12d2c281973d63f034a7f0f222..dbaf6755c5a71c009d99c797747f441190d215cf 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -57,7 +57,13 @@ struct kvm_mmu_page { bool tdp_mmu_page; bool unsync; u8 mmu_valid_gen; - bool lpage_disallowed; /* Can't be replaced by an equiv large page */ + + /* + * The shadow page can't be replaced by an equivalent huge page + * because it is being used to map an executable page in the guest + * and the NX huge page mitigation is enabled. + */ + bool nx_huge_page_disallowed; /* * The following two entries are used to key the shadow page in the @@ -100,7 +106,14 @@ struct kvm_mmu_page { }; }; - struct list_head lpage_disallowed_link; + /* + * Tracks shadow pages that, if zapped, would allow KVM to create an NX + * huge page. A shadow page will have nx_huge_page_disallowed set but + * not be on the list if a huge page is disallowed for other reasons, + * e.g. because KVM is shadowing a PTE at the same gfn, the memslot + * isn't properly aligned, etc... + */ + struct list_head possible_nx_huge_page_link; #ifdef CONFIG_X86_32 /* * Used out of the mmu-lock to avoid reading spte values while an @@ -120,18 +133,6 @@ struct kvm_mmu_page { extern struct kmem_cache *mmu_page_header_cache; -static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page) -{ - struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); - - return (struct kvm_mmu_page *)page_private(page); -} - -static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep) -{ - return to_shadow_page(__pa(sptep)); -} - static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role) { return role.smm ? 
1 : 0; @@ -315,7 +316,7 @@ void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_ void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); -void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp); -void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp); +void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp); +void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp); #endif /* __KVM_X86_MMU_INTERNAL_H */ diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 5ab5f94dcb6fdb3c95140dbc31bee11595665832..0f645507205579032d5dc11bdee646e798f16aae 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -713,9 +713,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, continue; link_shadow_page(vcpu, it.sptep, sp); - if (fault->huge_page_disallowed && - fault->req_level >= it.level) - account_huge_nx_page(vcpu->kvm, sp); + if (fault->huge_page_disallowed) + account_nx_huge_page(vcpu->kvm, sp, + fault->req_level >= it.level); } if (WARN_ON_ONCE(it.level != fault->goal_level)) diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index 2e08b2a45361299fe071de8f766533b2d8dcecb6..c0fd7e049b4e4b5406e0dbbfbcfa79b3078bd774 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -161,6 +161,18 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, if (!prefetch) spte |= spte_shadow_accessed_mask(spte); + /* + * For simplicity, enforce the NX huge page mitigation even if not + * strictly necessary. KVM could ignore the mitigation if paging is + * disabled in the guest, as the guest doesn't have an page tables to + * abuse. But to safely ignore the mitigation, KVM would have to + * ensure a new MMU is loaded (or all shadow pages zapped) when CR0.PG + * is toggled on, and that's a net negative for performance when TDP is + * enabled. When TDP is disabled, KVM will always switch to a new MMU + * when CR0.PG is toggled, but leveraging that to ignore the mitigation + * would tie make_spte() further to vCPU/MMU state, and add complexity + * just to optimize a mode that is anything but performance critical. + */ if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) && is_nx_huge_page_enabled(vcpu->kvm)) { pte_access &= ~ACC_EXEC_MASK; diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 7670c13ce251b6e08698eef477317b932e5bfdb7..1f03701b943a1cac91fe7dac6a5f3514c5b9d6dc 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -188,7 +188,7 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask; * should not modify the SPTE. * * Use a semi-arbitrary value that doesn't set RWX bits, i.e. is not-present on - * bot AMD and Intel CPUs, and doesn't set PFN bits, i.e. doesn't create a L1TF + * both AMD and Intel CPUs, and doesn't set PFN bits, i.e. doesn't create a L1TF * vulnerability. Use only low bits to avoid 64-bit immediates. * * Only used by the TDP MMU. 
@@ -219,6 +219,23 @@ static inline int spte_index(u64 *sptep) */ extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask; +static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page) +{ + struct page *page = pfn_to_page((shadow_page) >> PAGE_SHIFT); + + return (struct kvm_mmu_page *)page_private(page); +} + +static inline struct kvm_mmu_page *spte_to_child_sp(u64 spte) +{ + return to_shadow_page(spte & SPTE_BASE_ADDR_MASK); +} + +static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep) +{ + return to_shadow_page(__pa(sptep)); +} + static inline bool is_mmio_spte(u64 spte) { return (spte & shadow_mmio_mask) == shadow_mmio_value && diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 672f0432d7777fca9414da2bb6fc6cc13d28220b..771210ce518112f5d8446d9204e4d0eeec9ee241 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -29,7 +29,6 @@ int kvm_mmu_init_tdp_mmu(struct kvm *kvm) kvm->arch.tdp_mmu_enabled = true; INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots); spin_lock_init(&kvm->arch.tdp_mmu_pages_lock); - INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages); kvm->arch.tdp_mmu_zap_wq = wq; return 1; } @@ -54,7 +53,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) /* Also waits for any queued work items. */ destroy_workqueue(kvm->arch.tdp_mmu_zap_wq); - WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages)); + WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages)); WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots)); /* @@ -284,6 +283,8 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu) static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep, gfn_t gfn, union kvm_mmu_page_role role) { + INIT_LIST_HEAD(&sp->possible_nx_huge_page_link); + set_page_private(virt_to_page(sp->spt), (unsigned long)sp); sp->role = role; @@ -375,11 +376,13 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn, static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) { kvm_account_pgtable_pages((void *)sp->spt, +1); + atomic64_inc(&kvm->arch.tdp_mmu_pages); } static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) { kvm_account_pgtable_pages((void *)sp->spt, -1); + atomic64_dec(&kvm->arch.tdp_mmu_pages); } /** @@ -395,14 +398,17 @@ static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp, bool shared) { tdp_unaccount_mmu_page(kvm, sp); + + if (!sp->nx_huge_page_disallowed) + return; + if (shared) spin_lock(&kvm->arch.tdp_mmu_pages_lock); else lockdep_assert_held_write(&kvm->mmu_lock); - list_del(&sp->link); - if (sp->lpage_disallowed) - unaccount_huge_nx_page(kvm, sp); + sp->nx_huge_page_disallowed = false; + untrack_possible_nx_huge_page(kvm, sp); if (shared) spin_unlock(&kvm->arch.tdp_mmu_pages_lock); @@ -1116,16 +1122,13 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, * @kvm: kvm instance * @iter: a tdp_iter instance currently on the SPTE that should be set * @sp: The new TDP page table to install. - * @account_nx: True if this page table is being installed to split a - * non-executable huge page. * @shared: This operation is running under the MMU lock in read mode. * * Returns: 0 if the new page table was installed. Non-0 if the page table * could not be installed (e.g. the atomic compare-exchange failed). 
*/ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, - struct kvm_mmu_page *sp, bool account_nx, - bool shared) + struct kvm_mmu_page *sp, bool shared) { u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled()); int ret = 0; @@ -1138,16 +1141,14 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, tdp_mmu_set_spte(kvm, iter, spte); } - spin_lock(&kvm->arch.tdp_mmu_pages_lock); - list_add(&sp->link, &kvm->arch.tdp_mmu_pages); - if (account_nx) - account_huge_nx_page(kvm, sp); - spin_unlock(&kvm->arch.tdp_mmu_pages_lock); tdp_account_mmu_page(kvm, sp); return 0; } +static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, + struct kvm_mmu_page *sp, bool shared); + /* * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing * page tables and SPTEs to translate the faulting guest physical address. @@ -1155,9 +1156,10 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { struct kvm_mmu *mmu = vcpu->arch.mmu; + struct kvm *kvm = vcpu->kvm; struct tdp_iter iter; struct kvm_mmu_page *sp; - int ret; + int ret = RET_PF_RETRY; kvm_mmu_hugepage_adjust(vcpu, fault); @@ -1166,6 +1168,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) rcu_read_lock(); tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) { + int r; + if (fault->nx_huge_page_workaround_enabled) disallowed_hugepage_adjust(fault, iter.old_spte, iter.level); @@ -1173,57 +1177,52 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) break; /* - * If there is an SPTE mapping a large page at a higher level - * than the target, that SPTE must be cleared and replaced - * with a non-leaf SPTE. + * If SPTE has been frozen by another thread, just give up and + * retry, avoiding unnecessary page table allocation and free. */ - if (is_shadow_present_pte(iter.old_spte) && - is_large_pte(iter.old_spte)) { - if (tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter)) - break; + if (is_removed_spte(iter.old_spte)) + goto retry; - /* - * The iter must explicitly re-read the spte here - * because the new value informs the !present - * path below. - */ - iter.old_spte = kvm_tdp_mmu_read_spte(iter.sptep); - } + /* Step down into the lower level page table if it exists. */ + if (is_shadow_present_pte(iter.old_spte) && + !is_large_pte(iter.old_spte)) + continue; - if (!is_shadow_present_pte(iter.old_spte)) { - bool account_nx = fault->huge_page_disallowed && - fault->req_level >= iter.level; + /* + * The SPTE is either non-present or points to a huge page that + * needs to be split. + */ + sp = tdp_mmu_alloc_sp(vcpu); + tdp_mmu_init_child_sp(sp, &iter); - /* - * If SPTE has been frozen by another thread, just - * give up and retry, avoiding unnecessary page table - * allocation and free. - */ - if (is_removed_spte(iter.old_spte)) - break; + sp->nx_huge_page_disallowed = fault->huge_page_disallowed; - sp = tdp_mmu_alloc_sp(vcpu); - tdp_mmu_init_child_sp(sp, &iter); + if (is_shadow_present_pte(iter.old_spte)) + r = tdp_mmu_split_huge_page(kvm, &iter, sp, true); + else + r = tdp_mmu_link_sp(kvm, &iter, sp, true); - if (tdp_mmu_link_sp(vcpu->kvm, &iter, sp, account_nx, true)) { - tdp_mmu_free_sp(sp); - break; - } + /* + * Also force the guest to retry the access if the upper level SPTEs + * aren't in place. 
+ */ + if (r) { + tdp_mmu_free_sp(sp); + goto retry; } - } - /* - * Force the guest to retry the access if the upper level SPTEs aren't - * in place, or if the target leaf SPTE is frozen by another CPU. - */ - if (iter.level != fault->goal_level || is_removed_spte(iter.old_spte)) { - rcu_read_unlock(); - return RET_PF_RETRY; + if (fault->huge_page_disallowed && + fault->req_level >= iter.level) { + spin_lock(&kvm->arch.tdp_mmu_pages_lock); + track_possible_nx_huge_page(kvm, sp); + spin_unlock(&kvm->arch.tdp_mmu_pages_lock); + } } ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter); - rcu_read_unlock(); +retry: + rcu_read_unlock(); return ret; } @@ -1472,6 +1471,7 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm, return sp; } +/* Note, the caller is responsible for initializing @sp. */ static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, struct kvm_mmu_page *sp, bool shared) { @@ -1479,8 +1479,6 @@ static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, const int level = iter->level; int ret, i; - tdp_mmu_init_child_sp(sp, iter); - /* * No need for atomics when writing to sp->spt since the page table has * not been linked in yet and thus is not reachable from any other CPU. @@ -1496,7 +1494,7 @@ static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, * correctness standpoint since the translation will be the same either * way. */ - ret = tdp_mmu_link_sp(kvm, iter, sp, false, shared); + ret = tdp_mmu_link_sp(kvm, iter, sp, shared); if (ret) goto out; @@ -1556,6 +1554,8 @@ retry: continue; } + tdp_mmu_init_child_sp(sp, &iter); + if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared)) goto retry; diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index c163f7cc23ca54aa93d466447d9aaf7ab176bf41..d3714200b932afacd7a3e5273e6361c28dd86f55 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -5,6 +5,8 @@ #include +#include "spte.h" + hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu); __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index de1fd73697365c18e27157205c0c7a39e4650127..684393c2210523d917675f6709fa7ab1dd5fd8ed 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -101,10 +101,6 @@ static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi) struct kvm_pmu *pmu = pmc_to_pmu(pmc); bool skip_pmi = false; - /* Ignore counters that have been reprogrammed already. */ - if (test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) - return; - if (pmc->perf_event && pmc->perf_event->attr.precise_ip) { if (!in_pmi) { /* @@ -122,7 +118,6 @@ static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi) } else { __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); } - kvm_make_request(KVM_REQ_PMU, pmc->vcpu); if (!pmc->intr || skip_pmi) return; @@ -147,12 +142,22 @@ static void kvm_perf_overflow(struct perf_event *perf_event, { struct kvm_pmc *pmc = perf_event->overflow_handler_context; + /* + * Ignore overflow events for counters that are scheduled to be + * reprogrammed, e.g. if a PMI for the previous event races with KVM's + * handling of a related guest WRMSR. 
+ */ + if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi)) + return; + __kvm_perf_overflow(pmc, true); + + kvm_make_request(KVM_REQ_PMU, pmc->vcpu); } -static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, - u64 config, bool exclude_user, - bool exclude_kernel, bool intr) +static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config, + bool exclude_user, bool exclude_kernel, + bool intr) { struct kvm_pmu *pmu = pmc_to_pmu(pmc); struct perf_event *event; @@ -204,14 +209,14 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, if (IS_ERR(event)) { pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n", PTR_ERR(event), pmc->idx); - return; + return PTR_ERR(event); } pmc->perf_event = event; pmc_to_pmu(pmc)->event_count++; - clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi); pmc->is_paused = false; pmc->intr = intr || pebs; + return 0; } static void pmc_pause_counter(struct kvm_pmc *pmc) @@ -245,7 +250,6 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc) perf_event_enable(pmc->perf_event); pmc->is_paused = false; - clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi); return true; } @@ -293,7 +297,7 @@ out: return allow_event; } -void reprogram_counter(struct kvm_pmc *pmc) +static void reprogram_counter(struct kvm_pmc *pmc) { struct kvm_pmu *pmu = pmc_to_pmu(pmc); u64 eventsel = pmc->eventsel; @@ -303,10 +307,13 @@ void reprogram_counter(struct kvm_pmc *pmc) pmc_pause_counter(pmc); if (!pmc_speculative_in_use(pmc) || !pmc_is_enabled(pmc)) - return; + goto reprogram_complete; if (!check_pmu_event_filter(pmc)) - return; + goto reprogram_complete; + + if (pmc->counter < pmc->prev_counter) + __kvm_perf_overflow(pmc, false); if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL) printk_once("kvm pmu: pin control bit is ignored\n"); @@ -324,18 +331,29 @@ void reprogram_counter(struct kvm_pmc *pmc) } if (pmc->current_config == new_config && pmc_resume_counter(pmc)) - return; + goto reprogram_complete; pmc_release_perf_event(pmc); pmc->current_config = new_config; - pmc_reprogram_counter(pmc, PERF_TYPE_RAW, - (eventsel & pmu->raw_event_mask), - !(eventsel & ARCH_PERFMON_EVENTSEL_USR), - !(eventsel & ARCH_PERFMON_EVENTSEL_OS), - eventsel & ARCH_PERFMON_EVENTSEL_INT); + + /* + * If reprogramming fails, e.g. due to contention, leave the counter's + * regprogram bit set, i.e. opportunistically try again on the next PMU + * refresh. Don't make a new request as doing so can stall the guest + * if reprogramming repeatedly fails. 
+ */ + if (pmc_reprogram_counter(pmc, PERF_TYPE_RAW, + (eventsel & pmu->raw_event_mask), + !(eventsel & ARCH_PERFMON_EVENTSEL_USR), + !(eventsel & ARCH_PERFMON_EVENTSEL_OS), + eventsel & ARCH_PERFMON_EVENTSEL_INT)) + return; + +reprogram_complete: + clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi); + pmc->prev_counter = 0; } -EXPORT_SYMBOL_GPL(reprogram_counter); void kvm_pmu_handle_event(struct kvm_vcpu *vcpu) { @@ -345,10 +363,11 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu) for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) { struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit); - if (unlikely(!pmc || !pmc->perf_event)) { + if (unlikely(!pmc)) { clear_bit(bit, pmu->reprogram_pmi); continue; } + reprogram_counter(pmc); } @@ -522,14 +541,9 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu) static void kvm_pmu_incr_counter(struct kvm_pmc *pmc) { - u64 prev_count; - - prev_count = pmc->counter; + pmc->prev_counter = pmc->counter; pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc); - - reprogram_counter(pmc); - if (pmc->counter < prev_count) - __kvm_perf_overflow(pmc, false); + kvm_pmu_request_counter_reprogam(pmc); } static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc, @@ -542,12 +556,15 @@ static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc, static inline bool cpl_is_matched(struct kvm_pmc *pmc) { bool select_os, select_user; - u64 config = pmc->current_config; + u64 config; if (pmc_is_gp(pmc)) { + config = pmc->eventsel; select_os = config & ARCH_PERFMON_EVENTSEL_OS; select_user = config & ARCH_PERFMON_EVENTSEL_USR; } else { + config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl, + pmc->idx - INTEL_PMC_IDX_FIXED); select_os = config & 0x1; select_user = config & 0x2; } @@ -577,6 +594,8 @@ EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event); int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp) { struct kvm_pmu_event_filter tmp, *filter; + struct kvm_vcpu *vcpu; + unsigned long i; size_t size; int r; @@ -613,9 +632,18 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp) mutex_lock(&kvm->lock); filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter, mutex_is_locked(&kvm->lock)); + synchronize_srcu_expedited(&kvm->srcu); + + BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) > + sizeof(((struct kvm_pmu *)0)->__reprogram_pmi)); + + kvm_for_each_vcpu(i, vcpu, kvm) + atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull); + + kvm_make_all_cpus_request(kvm, KVM_REQ_PMU); + mutex_unlock(&kvm->lock); - synchronize_srcu_expedited(&kvm->srcu); r = 0; cleanup: kfree(filter); diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 5cc5721f260bfea18fd8fd754f6aded0a9334309..85ff3c0588bac01e0dee3edd607c33d4aca347ba 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -183,7 +183,11 @@ static inline void kvm_init_pmu_capability(void) KVM_PMC_MAX_FIXED); } -void reprogram_counter(struct kvm_pmc *pmc); +static inline void kvm_pmu_request_counter_reprogam(struct kvm_pmc *pmc) +{ + set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi); + kvm_make_request(KVM_REQ_PMU, pmc->vcpu); +} void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu); void kvm_pmu_handle_event(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index 4e5b8444f161c3b01b30c2b58d9aa695f0000b4e..042d0aca3c92b238602d93d0158e043bcd2b4447 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -7,17 +7,30 @@ #include /* - * Hardware-defined CPUID 
leafs that are scattered in the kernel, but need to - * be directly used by KVM. Note, these word values conflict with the kernel's - * "bug" caps, but KVM doesn't use those. + * Hardware-defined CPUID leafs that are either scattered by the kernel or are + * unknown to the kernel, but need to be directly used by KVM. Note, these + * word values conflict with the kernel's "bug" caps, but KVM doesn't use those. */ enum kvm_only_cpuid_leafs { CPUID_12_EAX = NCAPINTS, + CPUID_7_1_EDX, NR_KVM_CPU_CAPS, NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS, }; +/* + * Define a KVM-only feature flag. + * + * For features that are scattered by cpufeatures.h, __feature_translate() also + * needs to be updated to translate the kernel-defined feature into the + * KVM-defined feature. + * + * For features that are 100% KVM-only, i.e. not defined by cpufeatures.h, + * forego the intermediate KVM_X86_FEATURE and directly define X86_FEATURE_* so + * that X86_FEATURE_* can be used in KVM. No __feature_translate() handling is + * needed in this case. + */ #define KVM_X86_FEATURE(w, f) ((w)*32 + (f)) /* Intel-defined SGX sub-features, CPUID level 0x12 (EAX). */ @@ -25,6 +38,11 @@ enum kvm_only_cpuid_leafs { #define KVM_X86_FEATURE_SGX2 KVM_X86_FEATURE(CPUID_12_EAX, 1) #define KVM_X86_FEATURE_SGX_EDECCSSA KVM_X86_FEATURE(CPUID_12_EAX, 11) +/* Intel-defined sub-features, CPUID level 0x00000007:1 (EDX) */ +#define X86_FEATURE_AVX_VNNI_INT8 KVM_X86_FEATURE(CPUID_7_1_EDX, 4) +#define X86_FEATURE_AVX_NE_CONVERT KVM_X86_FEATURE(CPUID_7_1_EDX, 5) +#define X86_FEATURE_PREFETCHITI KVM_X86_FEATURE(CPUID_7_1_EDX, 14) + struct cpuid_reg { u32 function; u32 index; @@ -49,6 +67,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_7_1_EAX] = { 7, 1, CPUID_EAX}, [CPUID_12_EAX] = {0x00000012, 0, CPUID_EAX}, [CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX}, + [CPUID_7_1_EDX] = { 7, 1, CPUID_EDX}, }; /* diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c new file mode 100644 index 0000000000000000000000000000000000000000..a9c1c2af8d94c21406cb18916bfea52c0f8fc647 --- /dev/null +++ b/arch/x86/kvm/smm.c @@ -0,0 +1,649 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include "x86.h" +#include "kvm_cache_regs.h" +#include "kvm_emulate.h" +#include "smm.h" +#include "cpuid.h" +#include "trace.h" + +#define CHECK_SMRAM32_OFFSET(field, offset) \ + ASSERT_STRUCT_OFFSET(struct kvm_smram_state_32, field, offset - 0xFE00) + +#define CHECK_SMRAM64_OFFSET(field, offset) \ + ASSERT_STRUCT_OFFSET(struct kvm_smram_state_64, field, offset - 0xFE00) + +static void check_smram_offsets(void) +{ + /* 32 bit SMRAM image */ + CHECK_SMRAM32_OFFSET(reserved1, 0xFE00); + CHECK_SMRAM32_OFFSET(smbase, 0xFEF8); + CHECK_SMRAM32_OFFSET(smm_revision, 0xFEFC); + CHECK_SMRAM32_OFFSET(io_inst_restart, 0xFF00); + CHECK_SMRAM32_OFFSET(auto_hlt_restart, 0xFF02); + CHECK_SMRAM32_OFFSET(io_restart_rdi, 0xFF04); + CHECK_SMRAM32_OFFSET(io_restart_rcx, 0xFF08); + CHECK_SMRAM32_OFFSET(io_restart_rsi, 0xFF0C); + CHECK_SMRAM32_OFFSET(io_restart_rip, 0xFF10); + CHECK_SMRAM32_OFFSET(cr4, 0xFF14); + CHECK_SMRAM32_OFFSET(reserved2, 0xFF18); + CHECK_SMRAM32_OFFSET(int_shadow, 0xFF1A); + CHECK_SMRAM32_OFFSET(reserved3, 0xFF1B); + CHECK_SMRAM32_OFFSET(ds, 0xFF2C); + CHECK_SMRAM32_OFFSET(fs, 0xFF38); + CHECK_SMRAM32_OFFSET(gs, 0xFF44); + CHECK_SMRAM32_OFFSET(idtr, 0xFF50); + CHECK_SMRAM32_OFFSET(tr, 0xFF5C); + CHECK_SMRAM32_OFFSET(gdtr, 0xFF6C); + CHECK_SMRAM32_OFFSET(ldtr, 0xFF78); + CHECK_SMRAM32_OFFSET(es, 0xFF84); + CHECK_SMRAM32_OFFSET(cs, 0xFF90); + 
CHECK_SMRAM32_OFFSET(ss, 0xFF9C); + CHECK_SMRAM32_OFFSET(es_sel, 0xFFA8); + CHECK_SMRAM32_OFFSET(cs_sel, 0xFFAC); + CHECK_SMRAM32_OFFSET(ss_sel, 0xFFB0); + CHECK_SMRAM32_OFFSET(ds_sel, 0xFFB4); + CHECK_SMRAM32_OFFSET(fs_sel, 0xFFB8); + CHECK_SMRAM32_OFFSET(gs_sel, 0xFFBC); + CHECK_SMRAM32_OFFSET(ldtr_sel, 0xFFC0); + CHECK_SMRAM32_OFFSET(tr_sel, 0xFFC4); + CHECK_SMRAM32_OFFSET(dr7, 0xFFC8); + CHECK_SMRAM32_OFFSET(dr6, 0xFFCC); + CHECK_SMRAM32_OFFSET(gprs, 0xFFD0); + CHECK_SMRAM32_OFFSET(eip, 0xFFF0); + CHECK_SMRAM32_OFFSET(eflags, 0xFFF4); + CHECK_SMRAM32_OFFSET(cr3, 0xFFF8); + CHECK_SMRAM32_OFFSET(cr0, 0xFFFC); + + /* 64 bit SMRAM image */ + CHECK_SMRAM64_OFFSET(es, 0xFE00); + CHECK_SMRAM64_OFFSET(cs, 0xFE10); + CHECK_SMRAM64_OFFSET(ss, 0xFE20); + CHECK_SMRAM64_OFFSET(ds, 0xFE30); + CHECK_SMRAM64_OFFSET(fs, 0xFE40); + CHECK_SMRAM64_OFFSET(gs, 0xFE50); + CHECK_SMRAM64_OFFSET(gdtr, 0xFE60); + CHECK_SMRAM64_OFFSET(ldtr, 0xFE70); + CHECK_SMRAM64_OFFSET(idtr, 0xFE80); + CHECK_SMRAM64_OFFSET(tr, 0xFE90); + CHECK_SMRAM64_OFFSET(io_restart_rip, 0xFEA0); + CHECK_SMRAM64_OFFSET(io_restart_rcx, 0xFEA8); + CHECK_SMRAM64_OFFSET(io_restart_rsi, 0xFEB0); + CHECK_SMRAM64_OFFSET(io_restart_rdi, 0xFEB8); + CHECK_SMRAM64_OFFSET(io_restart_dword, 0xFEC0); + CHECK_SMRAM64_OFFSET(reserved1, 0xFEC4); + CHECK_SMRAM64_OFFSET(io_inst_restart, 0xFEC8); + CHECK_SMRAM64_OFFSET(auto_hlt_restart, 0xFEC9); + CHECK_SMRAM64_OFFSET(amd_nmi_mask, 0xFECA); + CHECK_SMRAM64_OFFSET(int_shadow, 0xFECB); + CHECK_SMRAM64_OFFSET(reserved2, 0xFECC); + CHECK_SMRAM64_OFFSET(efer, 0xFED0); + CHECK_SMRAM64_OFFSET(svm_guest_flag, 0xFED8); + CHECK_SMRAM64_OFFSET(svm_guest_vmcb_gpa, 0xFEE0); + CHECK_SMRAM64_OFFSET(svm_guest_virtual_int, 0xFEE8); + CHECK_SMRAM64_OFFSET(reserved3, 0xFEF0); + CHECK_SMRAM64_OFFSET(smm_revison, 0xFEFC); + CHECK_SMRAM64_OFFSET(smbase, 0xFF00); + CHECK_SMRAM64_OFFSET(reserved4, 0xFF04); + CHECK_SMRAM64_OFFSET(ssp, 0xFF18); + CHECK_SMRAM64_OFFSET(svm_guest_pat, 0xFF20); + CHECK_SMRAM64_OFFSET(svm_host_efer, 0xFF28); + CHECK_SMRAM64_OFFSET(svm_host_cr4, 0xFF30); + CHECK_SMRAM64_OFFSET(svm_host_cr3, 0xFF38); + CHECK_SMRAM64_OFFSET(svm_host_cr0, 0xFF40); + CHECK_SMRAM64_OFFSET(cr4, 0xFF48); + CHECK_SMRAM64_OFFSET(cr3, 0xFF50); + CHECK_SMRAM64_OFFSET(cr0, 0xFF58); + CHECK_SMRAM64_OFFSET(dr7, 0xFF60); + CHECK_SMRAM64_OFFSET(dr6, 0xFF68); + CHECK_SMRAM64_OFFSET(rflags, 0xFF70); + CHECK_SMRAM64_OFFSET(rip, 0xFF78); + CHECK_SMRAM64_OFFSET(gprs, 0xFF80); + + BUILD_BUG_ON(sizeof(union kvm_smram) != 512); +} + +#undef CHECK_SMRAM64_OFFSET +#undef CHECK_SMRAM32_OFFSET + + +void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm) +{ + BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK); + + trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm); + + if (entering_smm) { + vcpu->arch.hflags |= HF_SMM_MASK; + } else { + vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK); + + /* Process a latched INIT or SMI, if any. 
*/ + kvm_make_request(KVM_REQ_EVENT, vcpu); + + /* + * Even if KVM_SET_SREGS2 loaded PDPTRs out of band, + * on SMM exit we still need to reload them from + * guest memory + */ + vcpu->arch.pdptrs_from_userspace = false; + } + + kvm_mmu_reset_context(vcpu); +} + +void process_smi(struct kvm_vcpu *vcpu) +{ + vcpu->arch.smi_pending = true; + kvm_make_request(KVM_REQ_EVENT, vcpu); +} + +static u32 enter_smm_get_segment_flags(struct kvm_segment *seg) +{ + u32 flags = 0; + flags |= seg->g << 23; + flags |= seg->db << 22; + flags |= seg->l << 21; + flags |= seg->avl << 20; + flags |= seg->present << 15; + flags |= seg->dpl << 13; + flags |= seg->s << 12; + flags |= seg->type << 8; + return flags; +} + +static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, + struct kvm_smm_seg_state_32 *state, + u32 *selector, int n) +{ + struct kvm_segment seg; + + kvm_get_segment(vcpu, &seg, n); + *selector = seg.selector; + state->base = seg.base; + state->limit = seg.limit; + state->flags = enter_smm_get_segment_flags(&seg); +} + +#ifdef CONFIG_X86_64 +static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, + struct kvm_smm_seg_state_64 *state, + int n) +{ + struct kvm_segment seg; + + kvm_get_segment(vcpu, &seg, n); + state->selector = seg.selector; + state->attributes = enter_smm_get_segment_flags(&seg) >> 8; + state->limit = seg.limit; + state->base = seg.base; +} +#endif + +static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, + struct kvm_smram_state_32 *smram) +{ + struct desc_ptr dt; + unsigned long val; + int i; + + smram->cr0 = kvm_read_cr0(vcpu); + smram->cr3 = kvm_read_cr3(vcpu); + smram->eflags = kvm_get_rflags(vcpu); + smram->eip = kvm_rip_read(vcpu); + + for (i = 0; i < 8; i++) + smram->gprs[i] = kvm_register_read_raw(vcpu, i); + + kvm_get_dr(vcpu, 6, &val); + smram->dr6 = (u32)val; + kvm_get_dr(vcpu, 7, &val); + smram->dr7 = (u32)val; + + enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR); + enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR); + + static_call(kvm_x86_get_gdt)(vcpu, &dt); + smram->gdtr.base = dt.address; + smram->gdtr.limit = dt.size; + + static_call(kvm_x86_get_idt)(vcpu, &dt); + smram->idtr.base = dt.address; + smram->idtr.limit = dt.size; + + enter_smm_save_seg_32(vcpu, &smram->es, &smram->es_sel, VCPU_SREG_ES); + enter_smm_save_seg_32(vcpu, &smram->cs, &smram->cs_sel, VCPU_SREG_CS); + enter_smm_save_seg_32(vcpu, &smram->ss, &smram->ss_sel, VCPU_SREG_SS); + + enter_smm_save_seg_32(vcpu, &smram->ds, &smram->ds_sel, VCPU_SREG_DS); + enter_smm_save_seg_32(vcpu, &smram->fs, &smram->fs_sel, VCPU_SREG_FS); + enter_smm_save_seg_32(vcpu, &smram->gs, &smram->gs_sel, VCPU_SREG_GS); + + smram->cr4 = kvm_read_cr4(vcpu); + smram->smm_revision = 0x00020000; + smram->smbase = vcpu->arch.smbase; + + smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); +} + +#ifdef CONFIG_X86_64 +static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, + struct kvm_smram_state_64 *smram) +{ + struct desc_ptr dt; + unsigned long val; + int i; + + for (i = 0; i < 16; i++) + smram->gprs[15 - i] = kvm_register_read_raw(vcpu, i); + + smram->rip = kvm_rip_read(vcpu); + smram->rflags = kvm_get_rflags(vcpu); + + + kvm_get_dr(vcpu, 6, &val); + smram->dr6 = val; + kvm_get_dr(vcpu, 7, &val); + smram->dr7 = val; + + smram->cr0 = kvm_read_cr0(vcpu); + smram->cr3 = kvm_read_cr3(vcpu); + smram->cr4 = kvm_read_cr4(vcpu); + + smram->smbase = vcpu->arch.smbase; + smram->smm_revison = 0x00020064; + + smram->efer = vcpu->arch.efer; + + 
enter_smm_save_seg_64(vcpu, &smram->tr, VCPU_SREG_TR); + + static_call(kvm_x86_get_idt)(vcpu, &dt); + smram->idtr.limit = dt.size; + smram->idtr.base = dt.address; + + enter_smm_save_seg_64(vcpu, &smram->ldtr, VCPU_SREG_LDTR); + + static_call(kvm_x86_get_gdt)(vcpu, &dt); + smram->gdtr.limit = dt.size; + smram->gdtr.base = dt.address; + + enter_smm_save_seg_64(vcpu, &smram->es, VCPU_SREG_ES); + enter_smm_save_seg_64(vcpu, &smram->cs, VCPU_SREG_CS); + enter_smm_save_seg_64(vcpu, &smram->ss, VCPU_SREG_SS); + enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS); + enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS); + enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS); + + smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); +} +#endif + +void enter_smm(struct kvm_vcpu *vcpu) +{ + struct kvm_segment cs, ds; + struct desc_ptr dt; + unsigned long cr0; + union kvm_smram smram; + + check_smram_offsets(); + + memset(smram.bytes, 0, sizeof(smram.bytes)); + +#ifdef CONFIG_X86_64 + if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) + enter_smm_save_state_64(vcpu, &smram.smram64); + else +#endif + enter_smm_save_state_32(vcpu, &smram.smram32); + + /* + * Give enter_smm() a chance to make ISA-specific changes to the vCPU + * state (e.g. leave guest mode) after we've saved the state into the + * SMM state-save area. + * + * Kill the VM in the unlikely case of failure, because the VM + * can be in undefined state in this case. + */ + if (static_call(kvm_x86_enter_smm)(vcpu, &smram)) + goto error; + + kvm_smm_changed(vcpu, true); + + if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram))) + goto error; + + if (static_call(kvm_x86_get_nmi_mask)(vcpu)) + vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; + else + static_call(kvm_x86_set_nmi_mask)(vcpu, true); + + kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); + kvm_rip_write(vcpu, 0x8000); + + static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0); + + cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); + static_call(kvm_x86_set_cr0)(vcpu, cr0); + vcpu->arch.cr0 = cr0; + + static_call(kvm_x86_set_cr4)(vcpu, 0); + + /* Undocumented: IDT limit is set to zero on entry to SMM. 
*/ + dt.address = dt.size = 0; + static_call(kvm_x86_set_idt)(vcpu, &dt); + + if (WARN_ON_ONCE(kvm_set_dr(vcpu, 7, DR7_FIXED_1))) + goto error; + + cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; + cs.base = vcpu->arch.smbase; + + ds.selector = 0; + ds.base = 0; + + cs.limit = ds.limit = 0xffffffff; + cs.type = ds.type = 0x3; + cs.dpl = ds.dpl = 0; + cs.db = ds.db = 0; + cs.s = ds.s = 1; + cs.l = ds.l = 0; + cs.g = ds.g = 1; + cs.avl = ds.avl = 0; + cs.present = ds.present = 1; + cs.unusable = ds.unusable = 0; + cs.padding = ds.padding = 0; + + kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); + kvm_set_segment(vcpu, &ds, VCPU_SREG_DS); + kvm_set_segment(vcpu, &ds, VCPU_SREG_ES); + kvm_set_segment(vcpu, &ds, VCPU_SREG_FS); + kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); + kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); + +#ifdef CONFIG_X86_64 + if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) + if (static_call(kvm_x86_set_efer)(vcpu, 0)) + goto error; +#endif + + kvm_update_cpuid_runtime(vcpu); + kvm_mmu_reset_context(vcpu); + return; +error: + kvm_vm_dead(vcpu->kvm); +} + +static void rsm_set_desc_flags(struct kvm_segment *desc, u32 flags) +{ + desc->g = (flags >> 23) & 1; + desc->db = (flags >> 22) & 1; + desc->l = (flags >> 21) & 1; + desc->avl = (flags >> 20) & 1; + desc->present = (flags >> 15) & 1; + desc->dpl = (flags >> 13) & 3; + desc->s = (flags >> 12) & 1; + desc->type = (flags >> 8) & 15; + + desc->unusable = !desc->present; + desc->padding = 0; +} + +static int rsm_load_seg_32(struct kvm_vcpu *vcpu, + const struct kvm_smm_seg_state_32 *state, + u16 selector, int n) +{ + struct kvm_segment desc; + + desc.selector = selector; + desc.base = state->base; + desc.limit = state->limit; + rsm_set_desc_flags(&desc, state->flags); + kvm_set_segment(vcpu, &desc, n); + return X86EMUL_CONTINUE; +} + +#ifdef CONFIG_X86_64 + +static int rsm_load_seg_64(struct kvm_vcpu *vcpu, + const struct kvm_smm_seg_state_64 *state, + int n) +{ + struct kvm_segment desc; + + desc.selector = state->selector; + rsm_set_desc_flags(&desc, state->attributes << 8); + desc.limit = state->limit; + desc.base = state->base; + kvm_set_segment(vcpu, &desc, n); + return X86EMUL_CONTINUE; +} +#endif + +static int rsm_enter_protected_mode(struct kvm_vcpu *vcpu, + u64 cr0, u64 cr3, u64 cr4) +{ + int bad; + u64 pcid; + + /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */ + pcid = 0; + if (cr4 & X86_CR4_PCIDE) { + pcid = cr3 & 0xfff; + cr3 &= ~0xfff; + } + + bad = kvm_set_cr3(vcpu, cr3); + if (bad) + return X86EMUL_UNHANDLEABLE; + + /* + * First enable PAE, long mode needs it before CR0.PG = 1 is set. + * Then enable protected mode. However, PCID cannot be enabled + * if EFER.LMA=0, so set it separately. 
+ */ + bad = kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE); + if (bad) + return X86EMUL_UNHANDLEABLE; + + bad = kvm_set_cr0(vcpu, cr0); + if (bad) + return X86EMUL_UNHANDLEABLE; + + if (cr4 & X86_CR4_PCIDE) { + bad = kvm_set_cr4(vcpu, cr4); + if (bad) + return X86EMUL_UNHANDLEABLE; + if (pcid) { + bad = kvm_set_cr3(vcpu, cr3 | pcid); + if (bad) + return X86EMUL_UNHANDLEABLE; + } + + } + + return X86EMUL_CONTINUE; +} + +static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, + const struct kvm_smram_state_32 *smstate) +{ + struct kvm_vcpu *vcpu = ctxt->vcpu; + struct desc_ptr dt; + int i, r; + + ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED; + ctxt->_eip = smstate->eip; + + for (i = 0; i < 8; i++) + *reg_write(ctxt, i) = smstate->gprs[i]; + + if (kvm_set_dr(vcpu, 6, smstate->dr6)) + return X86EMUL_UNHANDLEABLE; + if (kvm_set_dr(vcpu, 7, smstate->dr7)) + return X86EMUL_UNHANDLEABLE; + + rsm_load_seg_32(vcpu, &smstate->tr, smstate->tr_sel, VCPU_SREG_TR); + rsm_load_seg_32(vcpu, &smstate->ldtr, smstate->ldtr_sel, VCPU_SREG_LDTR); + + dt.address = smstate->gdtr.base; + dt.size = smstate->gdtr.limit; + static_call(kvm_x86_set_gdt)(vcpu, &dt); + + dt.address = smstate->idtr.base; + dt.size = smstate->idtr.limit; + static_call(kvm_x86_set_idt)(vcpu, &dt); + + rsm_load_seg_32(vcpu, &smstate->es, smstate->es_sel, VCPU_SREG_ES); + rsm_load_seg_32(vcpu, &smstate->cs, smstate->cs_sel, VCPU_SREG_CS); + rsm_load_seg_32(vcpu, &smstate->ss, smstate->ss_sel, VCPU_SREG_SS); + + rsm_load_seg_32(vcpu, &smstate->ds, smstate->ds_sel, VCPU_SREG_DS); + rsm_load_seg_32(vcpu, &smstate->fs, smstate->fs_sel, VCPU_SREG_FS); + rsm_load_seg_32(vcpu, &smstate->gs, smstate->gs_sel, VCPU_SREG_GS); + + vcpu->arch.smbase = smstate->smbase; + + r = rsm_enter_protected_mode(vcpu, smstate->cr0, + smstate->cr3, smstate->cr4); + + if (r != X86EMUL_CONTINUE) + return r; + + static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0); + ctxt->interruptibility = (u8)smstate->int_shadow; + + return r; +} + +#ifdef CONFIG_X86_64 +static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, + const struct kvm_smram_state_64 *smstate) +{ + struct kvm_vcpu *vcpu = ctxt->vcpu; + struct desc_ptr dt; + int i, r; + + for (i = 0; i < 16; i++) + *reg_write(ctxt, i) = smstate->gprs[15 - i]; + + ctxt->_eip = smstate->rip; + ctxt->eflags = smstate->rflags | X86_EFLAGS_FIXED; + + if (kvm_set_dr(vcpu, 6, smstate->dr6)) + return X86EMUL_UNHANDLEABLE; + if (kvm_set_dr(vcpu, 7, smstate->dr7)) + return X86EMUL_UNHANDLEABLE; + + vcpu->arch.smbase = smstate->smbase; + + if (kvm_set_msr(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA)) + return X86EMUL_UNHANDLEABLE; + + rsm_load_seg_64(vcpu, &smstate->tr, VCPU_SREG_TR); + + dt.size = smstate->idtr.limit; + dt.address = smstate->idtr.base; + static_call(kvm_x86_set_idt)(vcpu, &dt); + + rsm_load_seg_64(vcpu, &smstate->ldtr, VCPU_SREG_LDTR); + + dt.size = smstate->gdtr.limit; + dt.address = smstate->gdtr.base; + static_call(kvm_x86_set_gdt)(vcpu, &dt); + + r = rsm_enter_protected_mode(vcpu, smstate->cr0, smstate->cr3, smstate->cr4); + if (r != X86EMUL_CONTINUE) + return r; + + rsm_load_seg_64(vcpu, &smstate->es, VCPU_SREG_ES); + rsm_load_seg_64(vcpu, &smstate->cs, VCPU_SREG_CS); + rsm_load_seg_64(vcpu, &smstate->ss, VCPU_SREG_SS); + rsm_load_seg_64(vcpu, &smstate->ds, VCPU_SREG_DS); + rsm_load_seg_64(vcpu, &smstate->fs, VCPU_SREG_FS); + rsm_load_seg_64(vcpu, &smstate->gs, VCPU_SREG_GS); + + static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0); + ctxt->interruptibility = (u8)smstate->int_shadow; + + return X86EMUL_CONTINUE; 
+} +#endif + +int emulator_leave_smm(struct x86_emulate_ctxt *ctxt) +{ + struct kvm_vcpu *vcpu = ctxt->vcpu; + unsigned long cr0; + union kvm_smram smram; + u64 smbase; + int ret; + + smbase = vcpu->arch.smbase; + + ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, smram.bytes, sizeof(smram)); + if (ret < 0) + return X86EMUL_UNHANDLEABLE; + + if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0) + static_call(kvm_x86_set_nmi_mask)(vcpu, false); + + kvm_smm_changed(vcpu, false); + + /* + * Get back to real mode, to prepare a safe state in which to load + * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU + * supports long mode. + */ +#ifdef CONFIG_X86_64 + if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) { + struct kvm_segment cs_desc; + unsigned long cr4; + + /* Zero CR4.PCIDE before CR0.PG. */ + cr4 = kvm_read_cr4(vcpu); + if (cr4 & X86_CR4_PCIDE) + kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE); + + /* A 32-bit code segment is required to clear EFER.LMA. */ + memset(&cs_desc, 0, sizeof(cs_desc)); + cs_desc.type = 0xb; + cs_desc.s = cs_desc.g = cs_desc.present = 1; + kvm_set_segment(vcpu, &cs_desc, VCPU_SREG_CS); + } +#endif + + /* For the 64-bit case, this will clear EFER.LMA. */ + cr0 = kvm_read_cr0(vcpu); + if (cr0 & X86_CR0_PE) + kvm_set_cr0(vcpu, cr0 & ~(X86_CR0_PG | X86_CR0_PE)); + +#ifdef CONFIG_X86_64 + if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) { + unsigned long cr4, efer; + + /* Clear CR4.PAE before clearing EFER.LME. */ + cr4 = kvm_read_cr4(vcpu); + if (cr4 & X86_CR4_PAE) + kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PAE); + + /* And finally go back to 32-bit mode. */ + efer = 0; + kvm_set_msr(vcpu, MSR_EFER, efer); + } +#endif + + /* + * Give leave_smm() a chance to make ISA-specific changes to the vCPU + * state (e.g. enter guest mode) before loading state from the SMM + * state-save area. + */ + if (static_call(kvm_x86_leave_smm)(vcpu, &smram)) + return X86EMUL_UNHANDLEABLE; + +#ifdef CONFIG_X86_64 + if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) + return rsm_load_state_64(ctxt, &smram.smram64); + else +#endif + return rsm_load_state_32(ctxt, &smram.smram32); +} diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h new file mode 100644 index 0000000000000000000000000000000000000000..a1cf2ac5bd78d57214e0ac0416be5b1257fb1432 --- /dev/null +++ b/arch/x86/kvm/smm.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ASM_KVM_SMM_H +#define ASM_KVM_SMM_H + +#include + +#ifdef CONFIG_KVM_SMM + + +/* + * 32 bit KVM's emulated SMM layout. Based on Intel P6 layout + * (https://www.sandpile.org/x86/smm.htm). 
+ */ + +struct kvm_smm_seg_state_32 { + u32 flags; + u32 limit; + u32 base; +} __packed; + +struct kvm_smram_state_32 { + u32 reserved1[62]; + u32 smbase; + u32 smm_revision; + u16 io_inst_restart; + u16 auto_hlt_restart; + u32 io_restart_rdi; + u32 io_restart_rcx; + u32 io_restart_rsi; + u32 io_restart_rip; + u32 cr4; + + /* A20M#, CPL, shutdown and other reserved/undocumented fields */ + u16 reserved2; + u8 int_shadow; /* KVM extension */ + u8 reserved3[17]; + + struct kvm_smm_seg_state_32 ds; + struct kvm_smm_seg_state_32 fs; + struct kvm_smm_seg_state_32 gs; + struct kvm_smm_seg_state_32 idtr; /* IDTR has only base and limit */ + struct kvm_smm_seg_state_32 tr; + u32 reserved; + struct kvm_smm_seg_state_32 gdtr; /* GDTR has only base and limit */ + struct kvm_smm_seg_state_32 ldtr; + struct kvm_smm_seg_state_32 es; + struct kvm_smm_seg_state_32 cs; + struct kvm_smm_seg_state_32 ss; + + u32 es_sel; + u32 cs_sel; + u32 ss_sel; + u32 ds_sel; + u32 fs_sel; + u32 gs_sel; + u32 ldtr_sel; + u32 tr_sel; + + u32 dr7; + u32 dr6; + u32 gprs[8]; /* GPRS in the "natural" X86 order (EAX/ECX/EDX.../EDI) */ + u32 eip; + u32 eflags; + u32 cr3; + u32 cr0; +} __packed; + + +/* 64 bit KVM's emulated SMM layout. Based on AMD64 layout */ + +struct kvm_smm_seg_state_64 { + u16 selector; + u16 attributes; + u32 limit; + u64 base; +}; + +struct kvm_smram_state_64 { + + struct kvm_smm_seg_state_64 es; + struct kvm_smm_seg_state_64 cs; + struct kvm_smm_seg_state_64 ss; + struct kvm_smm_seg_state_64 ds; + struct kvm_smm_seg_state_64 fs; + struct kvm_smm_seg_state_64 gs; + struct kvm_smm_seg_state_64 gdtr; /* GDTR has only base and limit*/ + struct kvm_smm_seg_state_64 ldtr; + struct kvm_smm_seg_state_64 idtr; /* IDTR has only base and limit*/ + struct kvm_smm_seg_state_64 tr; + + /* I/O restart and auto halt restart are not implemented by KVM */ + u64 io_restart_rip; + u64 io_restart_rcx; + u64 io_restart_rsi; + u64 io_restart_rdi; + u32 io_restart_dword; + u32 reserved1; + u8 io_inst_restart; + u8 auto_hlt_restart; + u8 amd_nmi_mask; /* Documented in AMD BKDG as NMI mask, not used by KVM */ + u8 int_shadow; + u32 reserved2; + + u64 efer; + + /* + * Two fields below are implemented on AMD only, to store + * SVM guest vmcb address if the #SMI was received while in the guest mode. + */ + u64 svm_guest_flag; + u64 svm_guest_vmcb_gpa; + u64 svm_guest_virtual_int; /* unknown purpose, not implemented */ + + u32 reserved3[3]; + u32 smm_revison; + u32 smbase; + u32 reserved4[5]; + + /* ssp and svm_* fields below are not implemented by KVM */ + u64 ssp; + u64 svm_guest_pat; + u64 svm_host_efer; + u64 svm_host_cr4; + u64 svm_host_cr3; + u64 svm_host_cr0; + + u64 cr4; + u64 cr3; + u64 cr0; + u64 dr7; + u64 dr6; + u64 rflags; + u64 rip; + u64 gprs[16]; /* GPRS in a reversed "natural" X86 order (R15/R14/../RCX/RAX.) 
*/ +}; + +union kvm_smram { + struct kvm_smram_state_64 smram64; + struct kvm_smram_state_32 smram32; + u8 bytes[512]; +}; + +static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) +{ + kvm_make_request(KVM_REQ_SMI, vcpu); + return 0; +} + +static inline bool is_smm(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.hflags & HF_SMM_MASK; +} + +void kvm_smm_changed(struct kvm_vcpu *vcpu, bool in_smm); +void enter_smm(struct kvm_vcpu *vcpu); +int emulator_leave_smm(struct x86_emulate_ctxt *ctxt); +void process_smi(struct kvm_vcpu *vcpu); +#else +static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) { return -ENOTTY; } +static inline bool is_smm(struct kvm_vcpu *vcpu) { return false; } + +/* + * emulator_leave_smm is used as a function pointer, so the + * stub is defined in x86.c. + */ +#endif + +#endif diff --git a/arch/x86/kvm/svm/hyperv.c b/arch/x86/kvm/svm/hyperv.c new file mode 100644 index 0000000000000000000000000000000000000000..088f6429b24cef4de2dd1b7b97e3c657fc2d2f40 --- /dev/null +++ b/arch/x86/kvm/svm/hyperv.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD SVM specific code for Hyper-V on KVM. + * + * Copyright 2022 Red Hat, Inc. and/or its affiliates. + */ +#include "hyperv.h" + +void svm_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + svm->vmcb->control.exit_code = HV_SVM_EXITCODE_ENL; + svm->vmcb->control.exit_code_hi = 0; + svm->vmcb->control.exit_info_1 = HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH; + svm->vmcb->control.exit_info_2 = 0; + nested_svm_vmexit(svm); +} diff --git a/arch/x86/kvm/svm/hyperv.h b/arch/x86/kvm/svm/hyperv.h index 7d6d97968fb981902ae401ffeff87b9406f34382..02f4784b5d446b2f0a86604b589c85981b13e5d1 100644 --- a/arch/x86/kvm/svm/hyperv.h +++ b/arch/x86/kvm/svm/hyperv.h @@ -9,27 +9,37 @@ #include #include "../hyperv.h" +#include "svm.h" -/* - * Hyper-V uses the software reserved 32 bytes in VMCB - * control area to expose SVM enlightenments to guests. 
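
Editorial aside, not part of the patch: these layouts are guest-visible ABI, so the field offsets are load-bearing. A sketch of the kind of compile-time self-check that pins them down, assuming the Intel P6 convention cited above in which the 32-bit save area starts at SMBASE + 0xFE00 (the series performs exhaustive checks of this sort in smm.c):

	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	/* Hypothetical helper: 'field' must sit at SMRAM offset 'offset'. */
	#define CHECK_SMRAM32_OFFSET(field, offset) \
		BUILD_BUG_ON(offsetof(struct kvm_smram_state_32, field) != \
			     (offset) - 0xFE00)

	static inline void smram_layout_selftest(void)
	{
		CHECK_SMRAM32_OFFSET(smbase,       0xFEF8); /* 62 reserved dwords */
		CHECK_SMRAM32_OFFSET(smm_revision, 0xFEFC);
		CHECK_SMRAM32_OFFSET(eip,          0xFFF0);
		CHECK_SMRAM32_OFFSET(eflags,       0xFFF4);
		CHECK_SMRAM32_OFFSET(cr3,          0xFFF8);
		CHECK_SMRAM32_OFFSET(cr0,          0xFFFC);
		BUILD_BUG_ON(sizeof(union kvm_smram) != 512);
	}

The same idea appears later in this series as ASSERT_STRUCT_OFFSET in vmcs12.h.
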
- */ -struct hv_enlightenments { - struct __packed hv_enlightenments_control { - u32 nested_flush_hypercall:1; - u32 msr_bitmap:1; - u32 enlightened_npt_tlb: 1; - u32 reserved:29; - } __packed hv_enlightenments_control; - u32 hv_vp_id; - u64 hv_vm_id; - u64 partition_assist_page; - u64 reserved; -} __packed; +static inline void nested_svm_hv_update_vm_vp_ids(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); -/* - * Hyper-V uses the software reserved clean bit in VMCB - */ -#define VMCB_HV_NESTED_ENLIGHTENMENTS VMCB_SW + if (!hv_vcpu) + return; + + hv_vcpu->nested.pa_page_gpa = hve->partition_assist_page; + hv_vcpu->nested.vm_id = hve->hv_vm_id; + hv_vcpu->nested.vp_id = hve->hv_vp_id; +} + +static inline bool nested_svm_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + + if (!hv_vcpu) + return false; + + if (!hve->hv_enlightenments_control.nested_flush_hypercall) + return false; + + return hv_vcpu->vp_assist_page.nested_control.features.directhypercall; +} + +void svm_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu); #endif /* __ARCH_X86_KVM_SVM_HYPERV_H__ */ diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 995bc0f907591745677aa6950d0a78620ddc1228..bc9cd7086fa972a8c9dd0600f7316278da4029ec 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -25,6 +25,7 @@ #include "trace.h" #include "mmu.h" #include "x86.h" +#include "smm.h" #include "cpuid.h" #include "lapic.h" #include "svm.h" @@ -149,8 +150,12 @@ void recalc_intercepts(struct vcpu_svm *svm) vmcb_clr_intercept(c, INTERCEPT_VINTR); } - /* We don't want to see VMMCALLs from a nested guest */ - vmcb_clr_intercept(c, INTERCEPT_VMMCALL); + /* + * We want to see VMMCALLs from a nested guest only when Hyper-V L2 TLB + * flush feature is enabled. 
+ */ + if (!nested_svm_l2_tlb_flush_enabled(&svm->vcpu)) + vmcb_clr_intercept(c, INTERCEPT_VMMCALL); for (i = 0; i < MAX_INTERCEPT; i++) c->intercepts[i] |= g->intercepts[i]; @@ -179,8 +184,7 @@ void recalc_intercepts(struct vcpu_svm *svm) */ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) { - struct hv_enlightenments *hve = - (struct hv_enlightenments *)svm->nested.ctl.reserved_sw; + struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments; int i; /* @@ -194,7 +198,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) if (!svm->nested.force_msr_bitmap_recalc && kvm_hv_hypercall_enabled(&svm->vcpu) && hve->hv_enlightenments_control.msr_bitmap && - (svm->nested.ctl.clean & BIT(VMCB_HV_NESTED_ENLIGHTENMENTS))) + (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS))) goto set_msrpm_base_pa; if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) @@ -369,8 +373,8 @@ void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu, /* Hyper-V extensions (Enlightened VMCB) */ if (kvm_hv_hypercall_enabled(vcpu)) { to->clean = from->clean; - memcpy(to->reserved_sw, from->reserved_sw, - sizeof(struct hv_enlightenments)); + memcpy(&to->hv_enlightenments, &from->hv_enlightenments, + sizeof(to->hv_enlightenments)); } } @@ -473,6 +477,15 @@ static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm, static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu) { + /* + * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or + * L2's VP_ID upon request from the guest. Make sure we check for + * pending entries in the right FIFO upon L1/L2 transition as these + * requests are put by other vCPUs asynchronously. + */ + if (to_hv_vcpu(vcpu) && npt_enabled) + kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu); + /* * TODO: optimize unconditional TLB flush/MMU sync. 
A partial list of * things to fix before this can be conditional: @@ -800,6 +813,8 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, if (kvm_vcpu_apicv_active(vcpu)) kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); + nested_svm_hv_update_vm_vp_ids(vcpu); + return 0; } @@ -822,6 +837,13 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) return 1; } + /* This fails when VP assist page is enabled but the supplied GPA is bogus */ + ret = kvm_hv_verify_vp_assist(vcpu); + if (ret) { + kvm_inject_gp(vcpu, 0); + return ret; + } + vmcb12_gpa = svm->vmcb->save.rax; ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map); if (ret == -EINVAL) { @@ -1383,6 +1405,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu) return 0; } +#ifdef CONFIG_KVM_SMM if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) { if (block_nested_events) return -EBUSY; @@ -1391,6 +1414,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu) nested_svm_simple_vmexit(svm, SVM_EXIT_SMI); return 0; } +#endif if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) { if (block_nested_events) @@ -1417,6 +1441,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu) int nested_svm_exit_special(struct vcpu_svm *svm) { u32 exit_code = svm->vmcb->control.exit_code; + struct kvm_vcpu *vcpu = &svm->vcpu; switch (exit_code) { case SVM_EXIT_INTR: @@ -1435,6 +1460,13 @@ int nested_svm_exit_special(struct vcpu_svm *svm) return NESTED_EXIT_HOST; break; } + case SVM_EXIT_VMMCALL: + /* Hyper-V L2 TLB flush hypercall is handled by L0 */ + if (guest_hv_cpuid_has_l2_tlb_flush(vcpu) && + nested_svm_l2_tlb_flush_enabled(vcpu) && + kvm_hv_is_tlb_flush_hcall(vcpu)) + return NESTED_EXIT_HOST; + break; default: break; } @@ -1485,7 +1517,7 @@ static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst, dst->virt_ext = from->virt_ext; dst->pause_filter_count = from->pause_filter_count; dst->pause_filter_thresh = from->pause_filter_thresh; - /* 'clean' and 'reserved_sw' are not changed by KVM */ + /* 'clean' and 'hv_enlightenments' are not changed by KVM */ } static int svm_get_nested_state(struct kvm_vcpu *vcpu, @@ -1715,6 +1747,9 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu) return false; } + if (kvm_hv_verify_vp_assist(vcpu)) + return false; + return true; } @@ -1726,4 +1761,5 @@ struct kvm_x86_nested_ops svm_nested_ops = { .get_nested_state_pages = svm_get_nested_state_pages, .get_state = svm_get_nested_state, .set_state = svm_set_nested_state, + .hv_inject_synthetic_vmexit_post_tlb_flush = svm_hv_inject_synthetic_vmexit_post_tlb_flush, }; diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index 9d65cd095691be1aad51b745eb4c7d5dba992dfe..0e313fbae0556bab9a16f6246fbcab2e6e639085 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -159,7 +159,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) data &= ~pmu->reserved_bits; if (data != pmc->eventsel) { pmc->eventsel = data; - reprogram_counter(pmc); + kvm_pmu_request_counter_reprogam(pmc); } return 0; } @@ -212,7 +212,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu) struct kvm_pmc *pmc = &pmu->gp_counters[i]; pmc_stop_counter(pmc); - pmc->counter = pmc->eventsel = 0; + pmc->counter = pmc->prev_counter = pmc->eventsel = 0; } } diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index efaaef2b7ae11b280da39abf027107ed242fd1b2..86d6897f480684c272a3026e6e9efd7f83cc8464 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -465,9 +465,9 @@ static void 
sev_clflush_pages(struct page *pages[], unsigned long npages) return; for (i = 0; i < npages; i++) { - page_virtual = kmap_atomic(pages[i]); + page_virtual = kmap_local_page(pages[i]); clflush_cache_range(page_virtual, PAGE_SIZE); - kunmap_atomic(page_virtual); + kunmap_local(page_virtual); cond_resched(); } } @@ -2648,7 +2648,7 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) ghcb_scratch_beg = control->ghcb_gpa + offsetof(struct ghcb, shared_buffer); ghcb_scratch_end = control->ghcb_gpa + - offsetof(struct ghcb, reserved_1); + offsetof(struct ghcb, reserved_0xff0); /* * If the scratch area begins within the GHCB, it must be diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index ce362e88a5676cf1deac281fed528a010e594291..9a194aa1a75a498d69f64bfafdca887e6c2e16bf 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -6,6 +6,7 @@ #include "mmu.h" #include "kvm_cache_regs.h" #include "x86.h" +#include "smm.h" #include "cpuid.h" #include "pmu.h" @@ -2708,8 +2709,6 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr) if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC)) msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE; break; - case MSR_IA32_PERF_CAPABILITIES: - return 0; default: return KVM_MSR_RET_INVALID; } @@ -3723,6 +3722,13 @@ static void svm_flush_tlb_current(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); + /* + * Unlike VMX, SVM doesn't provide a way to flush only NPT TLB entries. + * A TLB flush for the current ASID flushes both "host" and "guest" TLB + * entries, and thus is a superset of Hyper-V's fine grained flushing. + */ + kvm_hv_vcpu_purge_flush_tlb(vcpu); + /* * Flush only the current ASID even if the TLB flush was invoked via * kvm_flush_remote_tlbs(). Although flushing remote TLBs requires all @@ -3889,8 +3895,14 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu) static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) { - if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && - to_svm(vcpu)->vmcb->control.exit_info_1) + struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; + + /* + * Note, the next RIP must be provided as SRCU isn't held, i.e. KVM + * can't read guest memory (dereference memslots) to decode the WRMSR. + */ + if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 && + nrips && control->next_rip) return handle_fastpath_set_msr_irqoff(vcpu); return EXIT_FASTPATH_NONE; @@ -4102,6 +4114,8 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) case MSR_IA32_VMX_BASIC ... 
MSR_IA32_VMX_VMFUNC: return false; case MSR_IA32_SMBASE: + if (!IS_ENABLED(CONFIG_KVM_SMM)) + return false; /* SEV-ES guests do not support SMM, so report false */ if (kvm && sev_es_guest(kvm)) return false; @@ -4358,6 +4372,7 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu) vcpu->arch.mcg_cap &= 0x1ff; } +#ifdef CONFIG_KVM_SMM bool svm_smi_blocked(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -4385,7 +4400,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) return 1; } -static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) +static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram) { struct vcpu_svm *svm = to_svm(vcpu); struct kvm_host_map map_save; @@ -4394,10 +4409,16 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) if (!is_guest_mode(vcpu)) return 0; - /* FED8h - SVM Guest */ - put_smstate(u64, smstate, 0x7ed8, 1); - /* FEE0h - SVM Guest VMCB Physical Address */ - put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa); + /* + * 32-bit SMRAM format doesn't preserve EFER and SVM state. Userspace is + * responsible for ensuring nested SVM and SMIs are mutually exclusive. + */ + + if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) + return 1; + + smram->smram64.svm_guest_flag = 1; + smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa; svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; @@ -4419,8 +4440,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) * that, see svm_prepare_switch_to_guest()) which must be * preserved. */ - if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), - &map_save) == -EINVAL) + if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save)) return 1; BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400); @@ -4432,34 +4452,33 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) return 0; } -static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) +static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) { struct vcpu_svm *svm = to_svm(vcpu); struct kvm_host_map map, map_save; - u64 saved_efer, vmcb12_gpa; struct vmcb *vmcb12; int ret; + const struct kvm_smram_state_64 *smram64 = &smram->smram64; + if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) return 0; /* Non-zero if SMI arrived while vCPU was in guest mode. 
*/ - if (!GET_SMSTATE(u64, smstate, 0x7ed8)) + if (!smram64->svm_guest_flag) return 0; if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM)) return 1; - saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0); - if (!(saved_efer & EFER_SVME)) + if (!(smram64->efer & EFER_SVME)) return 1; - vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0); - if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL) + if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map)) return 1; ret = 1; - if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL) + if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save)) goto unmap_map; if (svm_allocate_nested(svm)) @@ -4481,7 +4500,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) vmcb12 = map.hva; nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); - ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false); + ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false); if (ret) goto unmap_save; @@ -4507,6 +4526,7 @@ static void svm_enable_smi_window(struct kvm_vcpu *vcpu) /* We must be in SMM; RSM will cause a vmexit anyway. */ } } +#endif static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type, void *insn, int insn_len) @@ -4782,10 +4802,12 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .pi_update_irte = avic_pi_update_irte, .setup_mce = svm_setup_mce, +#ifdef CONFIG_KVM_SMM .smi_allowed = svm_smi_allowed, .enter_smm = svm_enter_smm, .leave_smm = svm_leave_smm, .enable_smi_window = svm_enable_smi_window, +#endif .mem_enc_ioctl = sev_mem_enc_ioctl, .mem_enc_register_region = sev_mem_enc_register_region, @@ -4851,6 +4873,7 @@ static __init void svm_set_cpu_caps(void) { kvm_set_cpu_caps(); + kvm_caps.supported_perf_cap = 0; kvm_caps.supported_xss = 0; /* CPUID 0x80000001 and 0x8000000A (SVM features) */ diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 199a2ecef1cec611d8c903ae9a525589ac30ea25..4826e6cc611bf1305c55c93c52f2364bf40d4506 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -151,7 +151,10 @@ struct vmcb_ctrl_area_cached { u64 nested_cr3; u64 virt_ext; u32 clean; - u8 reserved_sw[32]; + union { + struct hv_vmcb_enlightenments hv_enlightenments; + u8 reserved_sw[32]; + }; }; struct svm_nested_state { diff --git a/arch/x86/kvm/svm/svm_onhyperv.c b/arch/x86/kvm/svm/svm_onhyperv.c index 8cdc62c74a964e4369d0d1a4e09c60cde740095b..26a89d0da93e46ba33f2b36164c2e1f751243fa3 100644 --- a/arch/x86/kvm/svm/svm_onhyperv.c +++ b/arch/x86/kvm/svm/svm_onhyperv.c @@ -14,9 +14,9 @@ #include "kvm_onhyperv.h" #include "svm_onhyperv.h" -int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu) +int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu) { - struct hv_enlightenments *hve; + struct hv_vmcb_enlightenments *hve; struct hv_partition_assist_pg **p_hv_pa_pg = &to_kvm_hv(vcpu->kvm)->hv_pa_pg; @@ -26,13 +26,13 @@ int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu) if (!*p_hv_pa_pg) return -ENOMEM; - hve = (struct hv_enlightenments *)to_svm(vcpu)->vmcb->control.reserved_sw; + hve = &to_svm(vcpu)->vmcb->control.hv_enlightenments; hve->partition_assist_page = __pa(*p_hv_pa_pg); hve->hv_vm_id = (unsigned long)vcpu->kvm; if (!hve->hv_enlightenments_control.nested_flush_hypercall) { hve->hv_enlightenments_control.nested_flush_hypercall = 1; - vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS); + vmcb_mark_dirty(to_svm(vcpu)->vmcb, HV_VMCB_NESTED_ENLIGHTENMENTS); } 
return 0; diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h index e2fc593804657303fb9251d87fd07c9605848922..45faf84476cecf5d468ea4937c3e2d147874605b 100644 --- a/arch/x86/kvm/svm/svm_onhyperv.h +++ b/arch/x86/kvm/svm/svm_onhyperv.h @@ -13,12 +13,14 @@ static struct kvm_x86_ops svm_x86_ops; -int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu); +int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu); static inline void svm_hv_init_vmcb(struct vmcb *vmcb) { - struct hv_enlightenments *hve = - (struct hv_enlightenments *)vmcb->control.reserved_sw; + struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments; + + BUILD_BUG_ON(sizeof(vmcb->control.hv_enlightenments) != + sizeof(vmcb->control.reserved_sw)); if (npt_enabled && ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) @@ -51,8 +53,8 @@ static inline void svm_hv_hardware_setup(void) vp_ap->nested_control.features.directhypercall = 1; } - svm_x86_ops.enable_direct_tlbflush = - svm_hv_enable_direct_tlbflush; + svm_x86_ops.enable_l2_tlb_flush = + svm_hv_enable_l2_tlb_flush; } } @@ -60,23 +62,20 @@ static inline void svm_hv_vmcb_dirty_nested_enlightenments( struct kvm_vcpu *vcpu) { struct vmcb *vmcb = to_svm(vcpu)->vmcb; - struct hv_enlightenments *hve = - (struct hv_enlightenments *)vmcb->control.reserved_sw; + struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments; if (hve->hv_enlightenments_control.msr_bitmap) - vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS); + vmcb_mark_dirty(vmcb, HV_VMCB_NESTED_ENLIGHTENMENTS); } -static inline void svm_hv_update_vp_id(struct vmcb *vmcb, - struct kvm_vcpu *vcpu) +static inline void svm_hv_update_vp_id(struct vmcb *vmcb, struct kvm_vcpu *vcpu) { - struct hv_enlightenments *hve = - (struct hv_enlightenments *)vmcb->control.reserved_sw; + struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments; u32 vp_index = kvm_hv_get_vpindex(vcpu); if (hve->hv_vp_id != vp_index) { hve->hv_vp_id = vp_index; - vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS); + vmcb_mark_dirty(vmcb, HV_VMCB_NESTED_ENLIGHTENMENTS); } } #else diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 34367dc203f217dd9f87e45831ef9ce0ecc871b3..8e8295e774f0f3aa2b08a58948535f4caff2d4ac 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include #include +#include #include #include #include diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index bc25589ad588676974770919234b67f9ce198c43..83843379813ee3ef8cca33d1986ef61ea6e1ff9b 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -113,12 +113,13 @@ TRACE_EVENT(kvm_hv_hypercall_done, * Tracepoint for Xen hypercall. 
*/ TRACE_EVENT(kvm_xen_hypercall, - TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, - unsigned long a2, unsigned long a3, unsigned long a4, - unsigned long a5), - TP_ARGS(nr, a0, a1, a2, a3, a4, a5), + TP_PROTO(u8 cpl, unsigned long nr, + unsigned long a0, unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, unsigned long a5), + TP_ARGS(cpl, nr, a0, a1, a2, a3, a4, a5), TP_STRUCT__entry( + __field(u8, cpl) __field(unsigned long, nr) __field(unsigned long, a0) __field(unsigned long, a1) @@ -129,6 +130,7 @@ TRACE_EVENT(kvm_xen_hypercall, ), TP_fast_assign( + __entry->cpl = cpl; __entry->nr = nr; __entry->a0 = a0; __entry->a1 = a1; @@ -138,8 +140,9 @@ TRACE_EVENT(kvm_xen_hypercall, __entry->a4 = a5; ), - TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 %lx", - __entry->nr, __entry->a0, __entry->a1, __entry->a2, + TP_printk("cpl %d nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 %lx", + __entry->cpl, __entry->nr, + __entry->a0, __entry->a1, __entry->a2, __entry->a3, __entry->a4, __entry->a5) ); @@ -1547,38 +1550,41 @@ TRACE_EVENT(kvm_hv_timer_state, * Tracepoint for kvm_hv_flush_tlb. */ TRACE_EVENT(kvm_hv_flush_tlb, - TP_PROTO(u64 processor_mask, u64 address_space, u64 flags), - TP_ARGS(processor_mask, address_space, flags), + TP_PROTO(u64 processor_mask, u64 address_space, u64 flags, bool guest_mode), + TP_ARGS(processor_mask, address_space, flags, guest_mode), TP_STRUCT__entry( __field(u64, processor_mask) __field(u64, address_space) __field(u64, flags) + __field(bool, guest_mode) ), TP_fast_assign( __entry->processor_mask = processor_mask; __entry->address_space = address_space; __entry->flags = flags; + __entry->guest_mode = guest_mode; ), - TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx", + TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx %s", __entry->processor_mask, __entry->address_space, - __entry->flags) + __entry->flags, __entry->guest_mode ? "(L2)" : "") ); /* * Tracepoint for kvm_hv_flush_tlb_ex. */ TRACE_EVENT(kvm_hv_flush_tlb_ex, - TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags), - TP_ARGS(valid_bank_mask, format, address_space, flags), + TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags, bool guest_mode), + TP_ARGS(valid_bank_mask, format, address_space, flags, guest_mode), TP_STRUCT__entry( __field(u64, valid_bank_mask) __field(u64, format) __field(u64, address_space) __field(u64, flags) + __field(bool, guest_mode) ), TP_fast_assign( @@ -1586,12 +1592,14 @@ TRACE_EVENT(kvm_hv_flush_tlb_ex, __entry->format = format; __entry->address_space = address_space; __entry->flags = flags; + __entry->guest_mode = guest_mode; ), TP_printk("valid_bank_mask 0x%llx format 0x%llx " - "address_space 0x%llx flags 0x%llx", + "address_space 0x%llx flags 0x%llx %s", __entry->valid_bank_mask, __entry->format, - __entry->address_space, __entry->flags) + __entry->address_space, __entry->flags, + __entry->guest_mode ? 
"(L2)" : "") ); /* diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 07254314f3dd5b4412b97c0d0b42aa4de52ac835..cd2ac9536c99810ba25658d7ac273ff2a63c53f3 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -395,30 +395,6 @@ static inline bool vmx_pebs_supported(void) return boot_cpu_has(X86_FEATURE_PEBS) && kvm_pmu_cap.pebs_ept; } -static inline u64 vmx_get_perf_capabilities(void) -{ - u64 perf_cap = PMU_CAP_FW_WRITES; - struct x86_pmu_lbr lbr; - u64 host_perf_cap = 0; - - if (!enable_pmu) - return 0; - - if (boot_cpu_has(X86_FEATURE_PDCM)) - rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap); - - if (x86_perf_get_lbr(&lbr) >= 0 && lbr.nr) - perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT; - - if (vmx_pebs_supported()) { - perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK; - if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4) - perf_cap &= ~PERF_CAP_PEBS_BASELINE; - } - - return perf_cap; -} - static inline bool cpu_has_notify_vmexit(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/hyperv.c similarity index 95% rename from arch/x86/kvm/vmx/evmcs.c rename to arch/x86/kvm/vmx/hyperv.c index d8b23c96d627234e67f50d3bad7eceef444291a5..ae03d1fe03552174ff6673edffd8980f123d3cda 100644 --- a/arch/x86/kvm/vmx/evmcs.c +++ b/arch/x86/kvm/vmx/hyperv.c @@ -3,9 +3,9 @@ #include #include -#include "../hyperv.h" #include "../cpuid.h" -#include "evmcs.h" +#include "hyperv.h" +#include "nested.h" #include "vmcs.h" #include "vmx.h" #include "trace.h" @@ -322,24 +322,17 @@ const struct evmcs_field vmcs_field_to_evmcs_1[] = { }; const unsigned int nr_evmcs_1_fields = ARRAY_SIZE(vmcs_field_to_evmcs_1); -bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa) +u64 nested_get_evmptr(struct kvm_vcpu *vcpu) { - struct hv_vp_assist_page assist_page; - - *evmcs_gpa = -1ull; - - if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page))) - return false; - - if (unlikely(!assist_page.enlighten_vmentry)) - return false; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); - if (unlikely(!evmptr_is_valid(assist_page.current_nested_vmcs))) - return false; + if (unlikely(kvm_hv_get_assist_page(vcpu))) + return EVMPTR_INVALID; - *evmcs_gpa = assist_page.current_nested_vmcs; + if (unlikely(!hv_vcpu->vp_assist_page.enlighten_vmentry)) + return EVMPTR_INVALID; - return true; + return hv_vcpu->vp_assist_page.current_nested_vmcs; } uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu) @@ -507,3 +500,23 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu, return 0; } + +bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; + + if (!hv_vcpu || !evmcs) + return false; + + if (!evmcs->hv_enlightenments_control.nested_flush_hypercall) + return false; + + return hv_vcpu->vp_assist_page.nested_control.features.directhypercall; +} + +void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu) +{ + nested_vmx_vmexit(vcpu, HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH, 0, 0); +} diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/hyperv.h similarity index 95% rename from arch/x86/kvm/vmx/evmcs.h rename to arch/x86/kvm/vmx/hyperv.h index 6f746ef3c0386ce277ef520d9a1746d0728a4ad8..571e7929d14e7b83058228291158f0e96933bf68 100644 --- a/arch/x86/kvm/vmx/evmcs.h +++ b/arch/x86/kvm/vmx/hyperv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ 
-#ifndef __KVM_X86_VMX_EVMCS_H -#define __KVM_X86_VMX_EVMCS_H +#ifndef __KVM_X86_VMX_HYPERV_H +#define __KVM_X86_VMX_HYPERV_H #include @@ -8,6 +8,8 @@ #include #include +#include "../hyperv.h" + #include "capabilities.h" #include "vmcs.h" #include "vmcs12.h" @@ -235,11 +237,13 @@ enum nested_evmptrld_status { EVMPTRLD_ERROR, }; -bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa); +u64 nested_get_evmptr(struct kvm_vcpu *vcpu); uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu); int nested_enable_evmcs(struct kvm_vcpu *vcpu, uint16_t *vmcs_version); void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); int nested_evmcs_check_controls(struct vmcs12 *vmcs12); +bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu); +void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu); -#endif /* __KVM_X86_VMX_EVMCS_H */ +#endif /* __KVM_X86_VMX_HYPERV_H */ diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 5b0d4859e4b783b9cd60212e1d8d95e4e51c1bf1..b6f4411b613e9105d8cca1a989ee8f3a4a2a7197 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -7,7 +7,6 @@ #include #include "cpuid.h" -#include "evmcs.h" #include "hyperv.h" #include "mmu.h" #include "nested.h" @@ -16,6 +15,7 @@ #include "trace.h" #include "vmx.h" #include "x86.h" +#include "smm.h" static bool __read_mostly enable_shadow_vmcs = 1; module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); @@ -225,6 +225,7 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) static inline void nested_release_evmcs(struct kvm_vcpu *vcpu) { + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu); if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { @@ -233,6 +234,12 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu) } vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID; + + if (hv_vcpu) { + hv_vcpu->nested.pa_page_gpa = INVALID_GPA; + hv_vcpu->nested.vm_id = 0; + hv_vcpu->nested.vp_id = 0; + } } static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx, @@ -1125,6 +1132,15 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu, { struct vcpu_vmx *vmx = to_vmx(vcpu); + /* + * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or + * L2's VP_ID upon request from the guest. Make sure we check for + * pending entries in the right FIFO upon L1/L2 transition as these + * requests are put by other vCPUs asynchronously. + */ + if (to_hv_vcpu(vcpu) && enable_ept) + kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu); + /* * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. 
it's a @@ -1557,11 +1573,19 @@ static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields { struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu); /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */ vmcs12->tpr_threshold = evmcs->tpr_threshold; vmcs12->guest_rip = evmcs->guest_rip; + if (unlikely(!(hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL))) { + hv_vcpu->nested.pa_page_gpa = evmcs->partition_assist_page; + hv_vcpu->nested.vm_id = evmcs->hv_vm_id; + hv_vcpu->nested.vp_id = evmcs->hv_vp_id; + } + if (unlikely(!(hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) { vmcs12->guest_rsp = evmcs->guest_rsp; @@ -1977,7 +2001,8 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld( if (likely(!guest_cpuid_has_evmcs(vcpu))) return EVMPTRLD_DISABLED; - if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) { + evmcs_gpa = nested_get_evmptr(vcpu); + if (!evmptr_is_valid(evmcs_gpa)) { nested_release_evmcs(vcpu); return EVMPTRLD_DISABLED; } @@ -2563,12 +2588,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, nested_ept_init_mmu_context(vcpu); /* - * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those - * bits which we consider mandatory enabled. - * The CR0_READ_SHADOW is what L2 should have expected to read given - * the specifications by L1; It's not enough to take - * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we - * have more bits than L1 expected. + * Override the CR0/CR4 read shadows after setting the effective guest + * CR0/CR4. The common helpers also set the shadows, but they don't + * account for vmcs12's cr0/4_guest_host_mask. */ vmx_set_cr0(vcpu, vmcs12->guest_cr0); vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); @@ -3251,6 +3273,12 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu) { + /* + * Note: nested_get_evmcs_page() also updates 'vp_assist_page' copy + * in 'struct kvm_vcpu_hv' in case eVMCS is in use, this is mandatory + * to make nested_evmcs_l2_tlb_flush_enabled() work correctly post + * migration. + */ if (!nested_get_evmcs_page(vcpu)) { pr_debug_ratelimited("%s: enlightened vmptrld failed\n", __func__); @@ -4767,6 +4795,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, vmx_switch_vmcs(vcpu, &vmx->vmcs01); + /* + * If IBRS is advertised to the vCPU, KVM must flush the indirect + * branch predictors when transitioning from L2 to L1, as L1 expects + * hardware (KVM in this case) to provide separate predictor modes. + * Bare metal isolates VMX root (host) from VMX non-root (guest), but + * doesn't isolate different VMCSs, i.e. in this case, doesn't provide + * separate modes for L2 vs L1. + */ + if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + indirect_branch_prediction_barrier(); + /* Update any VMCS fields that might have changed while L2 ran */ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); @@ -5100,24 +5139,35 @@ static int handle_vmxon(struct kvm_vcpu *vcpu) | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; /* - * Note, KVM cannot rely on hardware to perform the CR0/CR4 #UD checks - * that have higher priority than VM-Exit (see Intel SDM's pseudocode - * for VMXON), as KVM must load valid CR0/CR4 values into hardware while - * running the guest, i.e. 
KVM needs to check the _guest_ values.
+	 * Manually check CR4.VMXE; KVM must force CR4.VMXE=1 to enter
+	 * the guest and so cannot rely on hardware to perform the check,
+	 * which has higher priority than VM-Exit (see Intel SDM's pseudocode
+	 * for VMXON).
 	 *
-	 * Rely on hardware for the other two pre-VM-Exit checks, !VM86 and
-	 * !COMPATIBILITY modes.  KVM may run the guest in VM86 to emulate Real
-	 * Mode, but KVM will never take the guest out of those modes.
+	 * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86
+	 * and !COMPATIBILITY modes.  For an unrestricted guest, KVM doesn't
+	 * force any of the relevant guest state.  For a restricted guest, KVM
+	 * does force CR0.PE=1, but only to also force VM86 in order to emulate
+	 * Real Mode, and so there's no need to check CR0.PE manually.
 	 */
-	if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) ||
-	    !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) {
+	if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
 
 	/*
-	 * CPL=0 and all other checks that are lower priority than VM-Exit must
-	 * be checked manually.
+	 * The CPL is checked for "not in VMX operation" and for "in VMX root",
+	 * and has higher priority than the VM-Fail due to being post-VMXON,
+	 * i.e. VMXON #GPs outside of VMX non-root if CPL!=0.  In VMX non-root,
+	 * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits
+	 * from L2 to L1, i.e. there's no need to check for the vCPU being in
+	 * VMX non-root.
+	 *
+	 * Forwarding the VM-Exit unconditionally, i.e. without performing the
+	 * #UD checks (see above), is functionally ok because KVM doesn't allow
+	 * L1 to run L2 without CR4.VMXE=1, and because KVM never modifies L2's
+	 * CR0 or CR4, i.e. it's L2's responsibility to emulate #UDs that are
+	 * missed by hardware due to shadowing CR0 and/or CR4.
 	 */
 	if (vmx_get_cpl(vcpu)) {
 		kvm_inject_gp(vcpu, 0);
@@ -5127,6 +5177,17 @@ static int handle_vmxon(struct kvm_vcpu *vcpu)
 	if (vmx->nested.vmxon)
 		return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
 
+	/*
+	 * Invalid CR0/CR4 generates #GP.  These checks are performed if and
+	 * only if the vCPU isn't already in VMX operation, i.e. effectively
+	 * have lower priority than the VM-Fail above.
+	 */
+	if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) ||
+	    !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) {
+		kvm_inject_gp(vcpu, 0);
+		return 1;
+	}
+
 	if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) !=
 			VMXON_NEEDED_FEATURES) {
 		kvm_inject_gp(vcpu, 0);
@@ -5206,7 +5267,6 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 zero = 0;
 	gpa_t vmptr;
-	u64 evmcs_gpa;
 	int r;
 
 	if (!nested_vmx_check_permission(vcpu))
@@ -5232,7 +5292,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	 * vmx->nested.hv_evmcs but this shouldn't be a problem.
 	 */
 	if (likely(!guest_cpuid_has_evmcs(vcpu) ||
-		   !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
+		   !evmptr_is_valid(nested_get_evmptr(vcpu)))) {
 		if (vmptr == vmx->nested.current_vmptr)
 			nested_release_vmcs12(vcpu);
 
@@ -6129,6 +6189,11 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
 		 * Handle L2's bus locks in L0 directly.
*/ return true; + case EXIT_REASON_VMCALL: + /* Hyper-V L2 TLB flush hypercall is handled by L0 */ + return guest_hv_cpuid_has_l2_tlb_flush(vcpu) && + nested_evmcs_l2_tlb_flush_enabled(vcpu) && + kvm_hv_is_tlb_flush_hcall(vcpu); default: break; } @@ -6980,4 +7045,5 @@ struct kvm_x86_nested_ops vmx_nested_ops = { .write_log_dirty = nested_vmx_write_pml_buffer, .enable_evmcs = nested_enable_evmcs, .get_evmcs_version = nested_get_evmcs_version, + .hv_inject_synthetic_vmexit_post_tlb_flush = vmx_hv_inject_synthetic_vmexit_post_tlb_flush, }; diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h index 6312c9541c3cd3bc324fdf630099fed1a1134be9..96952263b029bfed932ec13190c545843b6acff0 100644 --- a/arch/x86/kvm/vmx/nested.h +++ b/arch/x86/kvm/vmx/nested.h @@ -79,9 +79,10 @@ static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu) } /* - * Return the cr0 value that a nested guest would read. This is a combination - * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by - * its hypervisor (cr0_read_shadow). + * Return the cr0/4 value that a nested guest would read. This is a combination + * of L1's "real" cr0 used to run the guest (guest_cr0), and the bits shadowed + * by the L1 hypervisor (cr0_read_shadow). KVM must emulate CPU behavior as + * the value+mask loaded into vmcs02 may not match the vmcs12 fields. */ static inline unsigned long nested_read_cr0(struct vmcs12 *fields) { diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 10b33da9bd05852060b405aeb09498225f671a1f..e5cec07ca8d9574d86793806675f3a26ab23767c 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -52,7 +52,7 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i); __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use); - reprogram_counter(pmc); + kvm_pmu_request_counter_reprogam(pmc); } } @@ -76,7 +76,7 @@ static void reprogram_counters(struct kvm_pmu *pmu, u64 diff) for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) { pmc = intel_pmc_idx_to_pmc(pmu, bit); if (pmc) - reprogram_counter(pmc); + kvm_pmu_request_counter_reprogam(pmc); } } @@ -477,7 +477,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) reserved_bits ^= HSW_IN_TX_CHECKPOINTED; if (!(data & reserved_bits)) { pmc->eventsel = data; - reprogram_counter(pmc); + kvm_pmu_request_counter_reprogam(pmc); return 0; } } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) @@ -631,7 +631,6 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu) pmu->fixed_counters[i].current_config = 0; } - vcpu->arch.perf_capabilities = vmx_get_perf_capabilities(); lbr_desc->records.nr = 0; lbr_desc->event = NULL; lbr_desc->msr_passthrough = false; @@ -647,14 +646,14 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu) pmc = &pmu->gp_counters[i]; pmc_stop_counter(pmc); - pmc->counter = pmc->eventsel = 0; + pmc->counter = pmc->prev_counter = pmc->eventsel = 0; } for (i = 0; i < KVM_PMC_MAX_FIXED; i++) { pmc = &pmu->fixed_counters[i]; pmc_stop_counter(pmc); - pmc->counter = 0; + pmc->counter = pmc->prev_counter = 0; } pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0; diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c index 8f95c7c0143359cc617641eddbf422018d77cf63..b12da2a6dec95644941c67200c242fb2982b0742 100644 --- a/arch/x86/kvm/vmx/sgx.c +++ b/arch/x86/kvm/vmx/sgx.c @@ -182,8 +182,10 @@ static int __handle_encls_ecreate(struct kvm_vcpu *vcpu, /* Enforce 
CPUID restriction on max enclave size. */ max_size_log2 = (attributes & SGX_ATTR_MODE64BIT) ? sgx_12_0->edx >> 8 : sgx_12_0->edx; - if (size >= BIT_ULL(max_size_log2)) + if (size >= BIT_ULL(max_size_log2)) { kvm_inject_gp(vcpu, 0); + return 1; + } /* * sgx_virt_ecreate() returns: diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h index 746129ddd5ae02b0148130122728fd490a87ff91..01936013428b5c0a7e9e5cbd0fafe1d38770e4d0 100644 --- a/arch/x86/kvm/vmx/vmcs12.h +++ b/arch/x86/kvm/vmx/vmcs12.h @@ -208,9 +208,8 @@ struct __packed vmcs12 { /* * For save/restore compatibility, the vmcs12 field offsets must not change. */ -#define CHECK_OFFSET(field, loc) \ - BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc), \ - "Offset of " #field " in struct vmcs12 has changed.") +#define CHECK_OFFSET(field, loc) \ + ASSERT_STRUCT_OFFSET(struct vmcs12, field, loc) static inline void vmx_check_vmcs12_offsets(void) { diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index 0b5db4de4d09e568cc01b6005748f97104d02ec8..766c6b3ef5ed95f5d4546cd4254aec8d275de2a7 100644 --- a/arch/x86/kvm/vmx/vmenter.S +++ b/arch/x86/kvm/vmx/vmenter.S @@ -269,6 +269,7 @@ SYM_FUNC_END(__vmx_vcpu_run) .section .text, "ax" +#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT /** * vmread_error_trampoline - Trampoline from inline asm to vmread_error() * @field: VMCS field encoding that failed @@ -317,6 +318,7 @@ SYM_FUNC_START(vmread_error_trampoline) RET SYM_FUNC_END(vmread_error_trampoline) +#endif SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff) /* diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 63247c57c72cc6ef206ef29f4b3d0fb117e9d93f..fe5615fd8295c4f615f5fe44785f24617b042a0d 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -51,7 +51,6 @@ #include "capabilities.h" #include "cpuid.h" -#include "evmcs.h" #include "hyperv.h" #include "kvm_onhyperv.h" #include "irq.h" @@ -66,6 +65,7 @@ #include "vmcs12.h" #include "vmx.h" #include "x86.h" +#include "smm.h" MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); @@ -526,7 +526,7 @@ static unsigned long host_idt_base; static bool __read_mostly enlightened_vmcs = true; module_param(enlightened_vmcs, bool, 0444); -static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu) +static int hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu) { struct hv_enlightened_vmcs *evmcs; struct hv_partition_assist_pg **p_hv_pa_pg = @@ -858,7 +858,7 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx) * to change it directly without causing a vmexit. In that case read * it after vmexit and store it in vmx->spec_ctrl. */ - if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))) + if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL)) flags |= VMX_RUN_SAVE_SPEC_CTRL; return flags; @@ -1348,8 +1348,10 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, /* * No indirect branch prediction barrier needed when switching - * the active VMCS within a guest, e.g. on nested VM-Enter. - * The L1 VMM can protect itself with retpolines, IBPB or IBRS. + * the active VMCS within a vCPU, unless IBRS is advertised to + * the vCPU. To minimize the number of IBPBs executed, KVM + * performs IBPB on nested VM-Exit (a single nested transition + * may switch the active VMCS multiple times). 
*/ if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev)) indirect_branch_prediction_barrier(); @@ -1834,12 +1836,42 @@ bool nested_vmx_allowed(struct kvm_vcpu *vcpu) return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX); } -static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, - uint64_t val) +/* + * Userspace is allowed to set any supported IA32_FEATURE_CONTROL regardless of + * guest CPUID. Note, KVM allows userspace to set "VMX in SMX" to maintain + * backwards compatibility even though KVM doesn't support emulating SMX. And + * because userspace set "VMX in SMX", the guest must also be allowed to set it, + * e.g. if the MSR is left unlocked and the guest does a RMW operation. + */ +#define KVM_SUPPORTED_FEATURE_CONTROL (FEAT_CTL_LOCKED | \ + FEAT_CTL_VMX_ENABLED_INSIDE_SMX | \ + FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX | \ + FEAT_CTL_SGX_LC_ENABLED | \ + FEAT_CTL_SGX_ENABLED | \ + FEAT_CTL_LMCE_ENABLED) + +static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx, + struct msr_data *msr) { - uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits; + uint64_t valid_bits; + + /* + * Ensure KVM_SUPPORTED_FEATURE_CONTROL is updated when new bits are + * exposed to the guest. + */ + WARN_ON_ONCE(vmx->msr_ia32_feature_control_valid_bits & + ~KVM_SUPPORTED_FEATURE_CONTROL); + + if (!msr->host_initiated && + (vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED)) + return false; + + if (msr->host_initiated) + valid_bits = KVM_SUPPORTED_FEATURE_CONTROL; + else + valid_bits = vmx->msr_ia32_feature_control_valid_bits; - return !(val & ~valid_bits); + return !(msr->data & ~valid_bits); } static int vmx_get_msr_feature(struct kvm_msr_entry *msr) @@ -1849,9 +1881,6 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr) if (!nested) return 1; return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data); - case MSR_IA32_PERF_CAPABILITIES: - msr->data = vmx_get_perf_capabilities(); - return 0; default: return KVM_MSR_RET_INVALID; } @@ -2029,7 +2058,7 @@ static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))) debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT; - if ((vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT) && + if ((kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT) && (host_initiated || intel_pmu_lbr_is_enabled(vcpu))) debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI; @@ -2241,10 +2270,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vcpu->arch.mcg_ext_ctl = data; break; case MSR_IA32_FEAT_CTL: - if (!vmx_feature_control_msr_valid(vcpu, data) || - (to_vmx(vcpu)->msr_ia32_feature_control & - FEAT_CTL_LOCKED && !msr_info->host_initiated)) + if (!is_vmx_feature_control_msr_valid(vmx, msr_info)) return 1; + vmx->msr_ia32_feature_control = data; if (msr_info->host_initiated && data == 0) vmx_leave_nested(vcpu); @@ -2342,14 +2370,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; if (data & PMU_CAP_LBR_FMT) { if ((data & PMU_CAP_LBR_FMT) != - (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT)) + (kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT)) return 1; if (!cpuid_model_is_consistent(vcpu)) return 1; } if (data & PERF_CAP_PEBS_FORMAT) { if ((data & PERF_CAP_PEBS_MASK) != - (vmx_get_perf_capabilities() & PERF_CAP_PEBS_MASK)) + (kvm_caps.supported_perf_cap & PERF_CAP_PEBS_MASK)) return 1; if (!guest_cpuid_has(vcpu, X86_FEATURE_DS)) return 1; @@ -6844,6 +6872,8 @@ static bool vmx_has_emulated_msr(struct 
kvm *kvm, u32 index) { switch (index) { case MSR_IA32_SMBASE: + if (!IS_ENABLED(CONFIG_KVM_SMM)) + return false; /* * We cannot do SMM unless we can run the guest in big * real mode. @@ -7669,6 +7699,31 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vmx_update_exception_bitmap(vcpu); } +static u64 vmx_get_perf_capabilities(void) +{ + u64 perf_cap = PMU_CAP_FW_WRITES; + struct x86_pmu_lbr lbr; + u64 host_perf_cap = 0; + + if (!enable_pmu) + return 0; + + if (boot_cpu_has(X86_FEATURE_PDCM)) + rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap); + + x86_perf_get_lbr(&lbr); + if (lbr.nr) + perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT; + + if (vmx_pebs_supported()) { + perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK; + if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4) + perf_cap &= ~PERF_CAP_PEBS_BASELINE; + } + + return perf_cap; +} + static __init void vmx_set_cpu_caps(void) { kvm_set_cpu_caps(); @@ -7691,6 +7746,7 @@ static __init void vmx_set_cpu_caps(void) if (!enable_pmu) kvm_cpu_cap_clear(X86_FEATURE_PDCM); + kvm_caps.supported_perf_cap = vmx_get_perf_capabilities(); if (!enable_sgx) { kvm_cpu_cap_clear(X86_FEATURE_SGX); @@ -7906,6 +7962,7 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu) ~FEAT_CTL_LMCE_ENABLED; } +#ifdef CONFIG_KVM_SMM static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) { /* we need a nested vmexit to enter SMM, postpone if run is pending */ @@ -7914,7 +7971,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) return !is_smm(vcpu); } -static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate) +static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -7935,7 +7992,7 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate) return 0; } -static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) +static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) { struct vcpu_vmx *vmx = to_vmx(vcpu); int ret; @@ -7960,6 +8017,7 @@ static void vmx_enable_smi_window(struct kvm_vcpu *vcpu) { /* RSM will cause a vmexit anyway. 
*/ } +#endif static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu) { @@ -8127,10 +8185,12 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .setup_mce = vmx_setup_mce, +#ifdef CONFIG_KVM_SMM .smi_allowed = vmx_smi_allowed, .enter_smm = vmx_enter_smm, .leave_smm = vmx_leave_smm, .enable_smi_window = vmx_enable_smi_window, +#endif .can_emulate_instruction = vmx_can_emulate_instruction, .apic_init_signal_blocked = vmx_apic_init_signal_blocked, @@ -8490,8 +8550,8 @@ static int __init vmx_init(void) } if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) - vmx_x86_ops.enable_direct_tlbflush - = hv_enable_direct_tlbflush; + vmx_x86_ops.enable_l2_tlb_flush + = hv_enable_l2_tlb_flush; } else { enlightened_vmcs = false; diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h index ec268df83ed67988f76a007ad65e9d77aa5d7a3f..842dc898c972876067bc8de27f5eef3cf506a7c9 100644 --- a/arch/x86/kvm/vmx/vmx_ops.h +++ b/arch/x86/kvm/vmx/vmx_ops.h @@ -6,19 +6,33 @@ #include -#include "evmcs.h" +#include "hyperv.h" #include "vmcs.h" #include "../x86.h" void vmread_error(unsigned long field, bool fault); -__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field, - bool fault); void vmwrite_error(unsigned long field, unsigned long value); void vmclear_error(struct vmcs *vmcs, u64 phys_addr); void vmptrld_error(struct vmcs *vmcs, u64 phys_addr); void invvpid_error(unsigned long ext, u16 vpid, gva_t gva); void invept_error(unsigned long ext, u64 eptp, gpa_t gpa); +#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT +/* + * The VMREAD error trampoline _always_ uses the stack to pass parameters, even + * for 64-bit targets. Preserving all registers allows the VMREAD inline asm + * blob to avoid clobbering GPRs, which in turn allows the compiler to better + * optimize sequences of VMREADs. + * + * Declare the trampoline as an opaque label as it's not safe to call from C + * code; there is no way to tell the compiler to pass params on the stack for + * 64-bit targets. + * + * void vmread_error_trampoline(unsigned long field, bool fault); + */ +extern unsigned long vmread_error_trampoline; +#endif + static __always_inline void vmcs_check16(unsigned long field) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 69227f77b201d7d9d89a77bdefc2d44f338e3ba8..312aea1854ae6b8c8b4a64884ffaf9b9605c6e29 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -30,6 +30,7 @@ #include "hyperv.h" #include "lapic.h" #include "xen.h" +#include "smm.h" #include #include @@ -119,8 +120,6 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS; static void update_cr8_intercept(struct kvm_vcpu *vcpu); static void process_nmi(struct kvm_vcpu *vcpu); -static void process_smi(struct kvm_vcpu *vcpu); -static void enter_smm(struct kvm_vcpu *vcpu); static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); static void store_regs(struct kvm_vcpu *vcpu); static int sync_regs(struct kvm_vcpu *vcpu); @@ -464,7 +463,6 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) { return vcpu->arch.apic_base; } -EXPORT_SYMBOL_GPL(kvm_get_apic_base); enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu) { @@ -492,7 +490,6 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) kvm_recalculate_apic_map(vcpu->kvm); return 0; } -EXPORT_SYMBOL_GPL(kvm_set_apic_base); /* * Handle a fault on a hardware virtualization (VMX or SVM) instruction. 
@@ -783,7 +780,6 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, fault->address); } -EXPORT_SYMBOL_GPL(kvm_inject_page_fault); void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) @@ -812,7 +808,6 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu) atomic_inc(&vcpu->arch.nmi_queued); kvm_make_request(KVM_REQ_NMI, vcpu); } -EXPORT_SYMBOL_GPL(kvm_inject_nmi); void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { @@ -837,7 +832,6 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) kvm_queue_exception_e(vcpu, GP_VECTOR, 0); return false; } -EXPORT_SYMBOL_GPL(kvm_require_cpl); bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr) { @@ -1654,6 +1648,9 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr) case MSR_IA32_ARCH_CAPABILITIES: msr->data = kvm_get_arch_capabilities(); break; + case MSR_IA32_PERF_CAPABILITIES: + msr->data = kvm_caps.supported_perf_cap; + break; case MSR_IA32_UCODE_REV: rdmsrl_safe(msr->index, &msr->data); break; @@ -2067,7 +2064,6 @@ int kvm_emulate_as_nop(struct kvm_vcpu *vcpu) { return kvm_skip_emulated_instruction(vcpu); } -EXPORT_SYMBOL_GPL(kvm_emulate_as_nop); int kvm_emulate_invd(struct kvm_vcpu *vcpu) { @@ -2315,13 +2311,11 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time, kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); /* we verify if the enable bit is set... */ - if (system_time & 1) { - kvm_gpc_activate(vcpu->kvm, &vcpu->arch.pv_time, vcpu, - KVM_HOST_USES_PFN, system_time & ~1ULL, + if (system_time & 1) + kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL, sizeof(struct pvclock_vcpu_time_info)); - } else { - kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time); - } + else + kvm_gpc_deactivate(&vcpu->arch.pv_time); return; } @@ -2513,7 +2507,6 @@ u64 kvm_scale_tsc(u64 tsc, u64 ratio) return _tsc; } -EXPORT_SYMBOL_GPL(kvm_scale_tsc); static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) { @@ -2972,6 +2965,22 @@ static void kvm_update_masterclock(struct kvm *kvm) kvm_end_pvclock_update(kvm); } +/* + * Use the kernel's tsc_khz directly if the TSC is constant, otherwise use KVM's + * per-CPU value (which may be zero if a CPU is going offline). Note, tsc_khz + * can change during boot even if the TSC is constant, as it's possible for KVM + * to be loaded before TSC calibration completes. Ideally, KVM would get a + * notification when calibration completes, but practically speaking calibration + * will complete before userspace is alive enough to create VMs. + */ +static unsigned long get_cpu_tsc_khz(void) +{ + if (static_cpu_has(X86_FEATURE_CONSTANT_TSC)) + return tsc_khz; + else + return __this_cpu_read(cpu_tsc_khz); +} + /* Called within read_seqcount_begin/retry for kvm->pvclock_sc. 
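
Editorial aside, not part of the patch: the pvclock hunks in this file also pick up the reworked gfn_to_pfn cache API, where kvm_gpc_activate(), kvm_gpc_deactivate(), kvm_gpc_check() and kvm_gpc_refresh() operate on the cache itself rather than (kvm, gpc, gpa) triples. The read-side idiom, distilled from kvm_setup_guest_pvclock() just below:

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, len)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Re-establish the mapping; give up if the GPA can't be cached. */
		if (kvm_gpc_refresh(gpc, len))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}
	/* gpc->khva may now be dereferenced for 'len' bytes under the lock. */
	read_unlock_irqrestore(&gpc->lock, flags);

Here 'len' stands for the caller's required span, e.g. offset + sizeof(*guest_hv_clock) in the pvclock case.
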
*/ static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) { @@ -2982,7 +2991,8 @@ static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) get_cpu(); data->flags = 0; - if (ka->use_master_clock && __this_cpu_read(cpu_tsc_khz)) { + if (ka->use_master_clock && + (static_cpu_has(X86_FEATURE_CONSTANT_TSC) || __this_cpu_read(cpu_tsc_khz))) { #ifdef CONFIG_X86_64 struct timespec64 ts; @@ -2996,7 +3006,7 @@ static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) data->flags |= KVM_CLOCK_TSC_STABLE; hv_clock.tsc_timestamp = ka->master_cycle_now; hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; - kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, + kvm_get_time_scale(NSEC_PER_SEC, get_cpu_tsc_khz() * 1000LL, &hv_clock.tsc_shift, &hv_clock.tsc_to_system_mul); data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc); @@ -3035,12 +3045,10 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v, unsigned long flags; read_lock_irqsave(&gpc->lock, flags); - while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, - offset + sizeof(*guest_hv_clock))) { + while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) { read_unlock_irqrestore(&gpc->lock, flags); - if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, - offset + sizeof(*guest_hv_clock))) + if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock))) return; read_lock_irqsave(&gpc->lock, flags); @@ -3106,7 +3114,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) /* Keep irq disabled to prevent changes to the clock */ local_irq_save(flags); - tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz); + tgt_tsc_khz = get_cpu_tsc_khz(); if (unlikely(tgt_tsc_khz == 0)) { local_irq_restore(flags); kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); @@ -3389,7 +3397,7 @@ static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data) static void kvmclock_reset(struct kvm_vcpu *vcpu) { - kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time); + kvm_gpc_deactivate(&vcpu->arch.pv_time); vcpu->arch.time = 0; } @@ -3397,6 +3405,9 @@ static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu) { ++vcpu->stat.tlb_flush; static_call(kvm_x86_flush_tlb_all)(vcpu); + + /* Flushing all ASIDs flushes the current ASID... */ + kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); } static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) @@ -3415,6 +3426,12 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) } static_call(kvm_x86_flush_tlb_guest)(vcpu); + + /* + * Flushing all "guest" TLB is always a superset of Hyper-V's fine + * grained flushing. 
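The two comments added in the flush paths encode a subsumption rule: flushing all ASIDs covers the current ASID, and a full guest flush covers any pending fine-grained Hyper-V flush, so the broader operation may clear the narrower pending request. A minimal sketch of that rule with invented request flags (the REQ_* names are illustrative, not KVM's):

```c
#include <stdint.h>

#define REQ_FLUSH_ALL      (1u << 0)	/* hypothetical request bits */
#define REQ_FLUSH_CURRENT  (1u << 1)

static void service_tlb_requests(uint32_t *reqs)
{
	if (*reqs & REQ_FLUSH_ALL) {
		/* ...flush every ASID... */
		*reqs &= ~(REQ_FLUSH_ALL | REQ_FLUSH_CURRENT);	/* superset */
	} else if (*reqs & REQ_FLUSH_CURRENT) {
		/* ...flush only the active ASID... */
		*reqs &= ~REQ_FLUSH_CURRENT;
	}
}
```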
+ */ + kvm_hv_vcpu_purge_flush_tlb(vcpu); } @@ -3566,20 +3583,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; vcpu->arch.arch_capabilities = data; break; - case MSR_IA32_PERF_CAPABILITIES: { - struct kvm_msr_entry msr_ent = {.index = msr, .data = 0}; - + case MSR_IA32_PERF_CAPABILITIES: if (!msr_info->host_initiated) return 1; - if (kvm_get_msr_feature(&msr_ent)) - return 1; - if (data & ~msr_ent.data) + if (data & ~kvm_caps.supported_perf_cap) return 1; vcpu->arch.perf_capabilities = data; kvm_pmu_refresh(vcpu); return 0; - } case MSR_EFER: return set_efer(vcpu, msr_info); case MSR_K7_HWCR: @@ -3651,7 +3663,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; } case MSR_IA32_SMBASE: - if (!msr_info->host_initiated) + if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated) return 1; vcpu->arch.smbase = data; break; @@ -4067,7 +4079,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = vcpu->arch.ia32_misc_enable_msr; break; case MSR_IA32_SMBASE: - if (!msr_info->host_initiated) + if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated) return 1; msr_info->data = vcpu->arch.smbase; break; @@ -4425,7 +4437,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL | KVM_XEN_HVM_CONFIG_EVTCHN_SEND; if (sched_info_on()) - r |= KVM_XEN_HVM_CONFIG_RUNSTATE; + r |= KVM_XEN_HVM_CONFIG_RUNSTATE | + KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG; break; #endif case KVM_CAP_SYNC_REGS: @@ -4441,6 +4454,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r |= KVM_X86_DISABLE_EXITS_MWAIT; break; case KVM_CAP_X86_SMM: + if (!IS_ENABLED(CONFIG_KVM_SMM)) + break; + /* SMBASE is usually relocated above 1M on modern chipsets, * and SMM handlers might indeed rely on 4G segment limits, * so do not report SMM to be available if real mode is @@ -4481,7 +4497,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; break; case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: - r = kvm_x86_ops.enable_direct_tlbflush != NULL; + r = kvm_x86_ops.enable_l2_tlb_flush != NULL; break; case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; @@ -4897,13 +4913,6 @@ static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) return 0; } -static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu) -{ - kvm_make_request(KVM_REQ_SMI, vcpu); - - return 0; -} - static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, struct kvm_tpr_access_ctl *tac) { @@ -5039,8 +5048,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, process_nmi(vcpu); +#ifdef CONFIG_KVM_SMM if (kvm_check_request(KVM_REQ_SMI, vcpu)) process_smi(vcpu); +#endif /* * KVM's ABI only allows for one exception to be migrated. Luckily, @@ -5068,16 +5079,15 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, ex->pending && ex->has_payload) kvm_deliver_exception_payload(vcpu, ex); + memset(events, 0, sizeof(*events)); + /* * The API doesn't provide the instruction length for software * exceptions, so don't report them. As long as the guest RIP * isn't advanced, we should expect to encounter the exception * again. 
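The MSR_IA32_PERF_CAPABILITIES rewrite above replaces a round trip through kvm_get_msr_feature() with a direct mask check against the cached kvm_caps.supported_perf_cap. A hedged stand-alone sketch of the pattern (names and types simplified from the kernel's):

```c
#include <stdint.h>

/*
 * Returns non-zero to signal a #GP, mirroring kvm_set_msr_common()'s
 * convention; 'supported' plays the role of kvm_caps.supported_perf_cap.
 */
static int set_perf_capabilities(uint64_t *vcpu_perf_cap, uint64_t data,
				 uint64_t supported)
{
	if (data & ~supported)
		return 1;	/* reject any bit the host doesn't support */
	*vcpu_perf_cap = data;
	return 0;
}
```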
*/ - if (kvm_exception_is_soft(ex->vector)) { - events->exception.injected = 0; - events->exception.pending = 0; - } else { + if (!kvm_exception_is_soft(ex->vector)) { events->exception.injected = ex->injected; events->exception.pending = ex->pending; /* @@ -5097,20 +5107,20 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, events->interrupt.injected = vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; - events->interrupt.soft = 0; events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending != 0; events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); - events->nmi.pad = 0; - events->sipi_vector = 0; /* never valid when reporting to user space */ + /* events->sipi_vector is never valid when reporting to user space */ +#ifdef CONFIG_KVM_SMM events->smi.smm = is_smm(vcpu); events->smi.pending = vcpu->arch.smi_pending; events->smi.smm_inside_nmi = !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); +#endif events->smi.latched_init = kvm_lapic_latched_init(vcpu); events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING @@ -5122,12 +5132,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu); events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT; } - - memset(&events->reserved, 0, sizeof(events->reserved)); } -static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm); - static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { @@ -5200,6 +5206,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, vcpu->arch.apic->sipi_vector = events->sipi_vector; if (events->flags & KVM_VCPUEVENT_VALID_SMM) { +#ifdef CONFIG_KVM_SMM if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { kvm_leave_nested(vcpu); kvm_smm_changed(vcpu, events->smi.smm); @@ -5214,6 +5221,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; } +#else + if (events->smi.smm || events->smi.pending || + events->smi.smm_inside_nmi) + return -EINVAL; +#endif + if (lapic_in_kernel(vcpu)) { if (events->smi.latched_init) set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); @@ -5497,10 +5510,10 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, } return r; case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: - if (!kvm_x86_ops.enable_direct_tlbflush) + if (!kvm_x86_ops.enable_l2_tlb_flush) return -ENOTTY; - return static_call(kvm_x86_enable_direct_tlbflush)(vcpu); + return static_call(kvm_x86_enable_l2_tlb_flush)(vcpu); case KVM_CAP_HYPERV_ENFORCE_CPUID: return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); @@ -5580,7 +5593,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, break; } case KVM_SMI: { - r = kvm_vcpu_ioctl_smi(vcpu); + r = kvm_inject_smi(vcpu); break; } case KVM_SET_CPUID: { @@ -6239,9 +6252,7 @@ split_irqchip_unlock: break; case KVM_CAP_X86_USER_SPACE_MSR: r = -EINVAL; - if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL | - KVM_MSR_EXIT_REASON_UNKNOWN | - KVM_MSR_EXIT_REASON_FILTER)) + if (cap->args[0] & ~KVM_MSR_EXIT_REASON_VALID_MASK) break; kvm->arch.user_space_msr_mask = cap->args[0]; r = 0; @@ -6418,7 +6429,7 @@ static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter, if (!user_range->nmsrs) return 0; - if (user_range->flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE)) + if (user_range->flags & 
~KVM_MSR_FILTER_RANGE_VALID_MASK) return -EINVAL; if (!user_range->flags) @@ -6452,7 +6463,7 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, int r = 0; u32 i; - if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY) + if (filter->flags & ~KVM_MSR_FILTER_VALID_MASK) return -EINVAL; for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) @@ -7125,8 +7136,8 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) return handled; } -static void kvm_set_segment(struct kvm_vcpu *vcpu, - struct kvm_segment *var, int seg) +void kvm_set_segment(struct kvm_vcpu *vcpu, + struct kvm_segment *var, int seg) { static_call(kvm_x86_set_segment)(vcpu, var, seg); } @@ -7162,16 +7173,6 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, } EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); - gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, - struct x86_exception *exception) -{ - struct kvm_mmu *mmu = vcpu->arch.walk_mmu; - - u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; - access |= PFERR_FETCH_MASK; - return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); -} - gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { @@ -7284,15 +7285,6 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt, return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } -static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, - unsigned long addr, void *val, unsigned int bytes) -{ - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes); - - return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE; -} - static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, u64 access, struct x86_exception *exception) @@ -8084,26 +8076,6 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); } -static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, - u32 msr_index, u64 data) -{ - return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data); -} - -static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt) -{ - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - - return vcpu->arch.smbase; -} - -static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase) -{ - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - - vcpu->arch.smbase = smbase; -} - static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, u32 pmc) { @@ -8178,18 +8150,13 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) return emul_to_vcpu(ctxt)->arch.hflags; } -static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt) +#ifndef CONFIG_KVM_SMM +static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt) { - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - - kvm_smm_changed(vcpu, false); -} - -static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt, - const char *smstate) -{ - return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate); + WARN_ON_ONCE(1); + return X86EMUL_UNHANDLEABLE; } +#endif static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt) { @@ -8215,7 +8182,6 @@ static const struct x86_emulate_ops emulate_ops = { .write_gpr = emulator_write_gpr, .read_std = emulator_read_std, .write_std = emulator_write_std, - .read_phys = kvm_read_guest_phys_system, .fetch = kvm_fetch_guest_virt, .read_emulated = emulator_read_emulated, .write_emulated = emulator_write_emulated, @@ -8235,11 +8201,8 @@ static const struct 
x86_emulate_ops emulate_ops = { .cpl = emulator_get_cpl, .get_dr = emulator_get_dr, .set_dr = emulator_set_dr, - .get_smbase = emulator_get_smbase, - .set_smbase = emulator_set_smbase, .set_msr_with_filter = emulator_set_msr_with_filter, .get_msr_with_filter = emulator_get_msr_with_filter, - .set_msr = emulator_set_msr, .get_msr = emulator_get_msr, .check_pmc = emulator_check_pmc, .read_pmc = emulator_read_pmc, @@ -8254,7 +8217,6 @@ static const struct x86_emulate_ops emulate_ops = { .guest_has_rdpid = emulator_guest_has_rdpid, .set_nmi_mask = emulator_set_nmi_mask, .get_hflags = emulator_get_hflags, - .exiting_smm = emulator_exiting_smm, .leave_smm = emulator_leave_smm, .triple_fault = emulator_triple_fault, .set_xcr = emulator_set_xcr, @@ -8327,8 +8289,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) cs_db ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK); - BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK); - BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK); ctxt->interruptibility = 0; ctxt->have_exception = false; @@ -8587,29 +8547,6 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt, static int complete_emulated_mmio(struct kvm_vcpu *vcpu); static int complete_emulated_pio(struct kvm_vcpu *vcpu); -static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm) -{ - trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm); - - if (entering_smm) { - vcpu->arch.hflags |= HF_SMM_MASK; - } else { - vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK); - - /* Process a latched INIT or SMI, if any. */ - kvm_make_request(KVM_REQ_EVENT, vcpu); - - /* - * Even if KVM_SET_SREGS2 loaded PDPTRs out of band, - * on SMM exit we still need to reload them from - * guest memory - */ - vcpu->arch.pdptrs_from_userspace = false; - } - - kvm_mmu_reset_context(vcpu); -} - static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, unsigned long *db) { @@ -8841,7 +8778,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, write_fault_to_spt, emulation_type)) return 1; - if (ctxt->have_exception) { + + if (ctxt->have_exception && + !(emulation_type & EMULTYPE_SKIP)) { /* * #UD should result in just EMULATION_FAILED, and trap-like * exception should not be encountered during decode. 
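The tightened check above can be read in isolation: with EMULTYPE_SKIP only the decoder has run, so an exception staged during decode must not be delivered from this path. A tiny illustrative predicate (the flag value here is made up):

```c
#include <stdbool.h>

#define EMULTYPE_SKIP (1 << 2)	/* illustrative value only */

static bool should_inject_decode_exception(bool have_exception,
					   int emulation_type)
{
	return have_exception && !(emulation_type & EMULTYPE_SKIP);
}
```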
@@ -9105,9 +9044,11 @@ static void tsc_khz_changed(void *data) struct cpufreq_freqs *freq = data; unsigned long khz = 0; + WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_CONSTANT_TSC)); + if (data) khz = freq->new; - else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) + else khz = cpufreq_quick_get(raw_smp_processor_id()); if (!khz) khz = tsc_khz; @@ -9128,8 +9069,10 @@ static void kvm_hyperv_tsc_notifier(void) hyperv_stop_tsc_emulation(); /* TSC frequency always matches when on Hyper-V */ - for_each_present_cpu(cpu) - per_cpu(cpu_tsc_khz, cpu) = tsc_khz; + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { + for_each_present_cpu(cpu) + per_cpu(cpu_tsc_khz, cpu) = tsc_khz; + } kvm_caps.max_guest_tsc_khz = tsc_khz; list_for_each_entry(kvm, &vm_list, vm_list) { @@ -9266,10 +9209,10 @@ static void kvm_timer_init(void) } cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); - } - cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online", - kvmclock_cpu_online, kvmclock_cpu_down_prep); + cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online", + kvmclock_cpu_online, kvmclock_cpu_down_prep); + } } #ifdef CONFIG_X86_64 @@ -9429,10 +9372,11 @@ void kvm_arch_exit(void) #endif kvm_lapic_exit(); - if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); - cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); + cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); + } #ifdef CONFIG_X86_64 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); irq_work_sync(&pvclock_irq_work); @@ -9999,6 +9943,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu, * in order to make progress and get back here for another iteration. * The kvm_x86_ops hooks communicate this by returning -EBUSY. */ +#ifdef CONFIG_KVM_SMM if (vcpu->arch.smi_pending) { r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY; if (r < 0) @@ -10011,6 +9956,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu, } else static_call(kvm_x86_enable_smi_window)(vcpu); } +#endif if (vcpu->arch.nmi_pending) { r = can_inject ? 
static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; @@ -10086,246 +10032,6 @@ static void process_nmi(struct kvm_vcpu *vcpu) kvm_make_request(KVM_REQ_EVENT, vcpu); } -static u32 enter_smm_get_segment_flags(struct kvm_segment *seg) -{ - u32 flags = 0; - flags |= seg->g << 23; - flags |= seg->db << 22; - flags |= seg->l << 21; - flags |= seg->avl << 20; - flags |= seg->present << 15; - flags |= seg->dpl << 13; - flags |= seg->s << 12; - flags |= seg->type << 8; - return flags; -} - -static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n) -{ - struct kvm_segment seg; - int offset; - - kvm_get_segment(vcpu, &seg, n); - put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector); - - if (n < 3) - offset = 0x7f84 + n * 12; - else - offset = 0x7f2c + (n - 3) * 12; - - put_smstate(u32, buf, offset + 8, seg.base); - put_smstate(u32, buf, offset + 4, seg.limit); - put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg)); -} - -#ifdef CONFIG_X86_64 -static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n) -{ - struct kvm_segment seg; - int offset; - u16 flags; - - kvm_get_segment(vcpu, &seg, n); - offset = 0x7e00 + n * 16; - - flags = enter_smm_get_segment_flags(&seg) >> 8; - put_smstate(u16, buf, offset, seg.selector); - put_smstate(u16, buf, offset + 2, flags); - put_smstate(u32, buf, offset + 4, seg.limit); - put_smstate(u64, buf, offset + 8, seg.base); -} -#endif - -static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf) -{ - struct desc_ptr dt; - struct kvm_segment seg; - unsigned long val; - int i; - - put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu)); - put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu)); - put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu)); - put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu)); - - for (i = 0; i < 8; i++) - put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i)); - - kvm_get_dr(vcpu, 6, &val); - put_smstate(u32, buf, 0x7fcc, (u32)val); - kvm_get_dr(vcpu, 7, &val); - put_smstate(u32, buf, 0x7fc8, (u32)val); - - kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); - put_smstate(u32, buf, 0x7fc4, seg.selector); - put_smstate(u32, buf, 0x7f64, seg.base); - put_smstate(u32, buf, 0x7f60, seg.limit); - put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg)); - - kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); - put_smstate(u32, buf, 0x7fc0, seg.selector); - put_smstate(u32, buf, 0x7f80, seg.base); - put_smstate(u32, buf, 0x7f7c, seg.limit); - put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg)); - - static_call(kvm_x86_get_gdt)(vcpu, &dt); - put_smstate(u32, buf, 0x7f74, dt.address); - put_smstate(u32, buf, 0x7f70, dt.size); - - static_call(kvm_x86_get_idt)(vcpu, &dt); - put_smstate(u32, buf, 0x7f58, dt.address); - put_smstate(u32, buf, 0x7f54, dt.size); - - for (i = 0; i < 6; i++) - enter_smm_save_seg_32(vcpu, buf, i); - - put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu)); - - /* revision id */ - put_smstate(u32, buf, 0x7efc, 0x00020000); - put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); -} - -#ifdef CONFIG_X86_64 -static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) -{ - struct desc_ptr dt; - struct kvm_segment seg; - unsigned long val; - int i; - - for (i = 0; i < 16; i++) - put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i)); - - put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu)); - put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu)); - - kvm_get_dr(vcpu, 6, &val); - put_smstate(u64, buf, 0x7f68, val); - kvm_get_dr(vcpu, 7, &val); - put_smstate(u64, buf, 
0x7f60, val); - - put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu)); - put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu)); - put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu)); - - put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); - - /* revision id */ - put_smstate(u32, buf, 0x7efc, 0x00020064); - - put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); - - kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); - put_smstate(u16, buf, 0x7e90, seg.selector); - put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8); - put_smstate(u32, buf, 0x7e94, seg.limit); - put_smstate(u64, buf, 0x7e98, seg.base); - - static_call(kvm_x86_get_idt)(vcpu, &dt); - put_smstate(u32, buf, 0x7e84, dt.size); - put_smstate(u64, buf, 0x7e88, dt.address); - - kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); - put_smstate(u16, buf, 0x7e70, seg.selector); - put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8); - put_smstate(u32, buf, 0x7e74, seg.limit); - put_smstate(u64, buf, 0x7e78, seg.base); - - static_call(kvm_x86_get_gdt)(vcpu, &dt); - put_smstate(u32, buf, 0x7e64, dt.size); - put_smstate(u64, buf, 0x7e68, dt.address); - - for (i = 0; i < 6; i++) - enter_smm_save_seg_64(vcpu, buf, i); -} -#endif - -static void enter_smm(struct kvm_vcpu *vcpu) -{ - struct kvm_segment cs, ds; - struct desc_ptr dt; - unsigned long cr0; - char buf[512]; - - memset(buf, 0, 512); -#ifdef CONFIG_X86_64 - if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) - enter_smm_save_state_64(vcpu, buf); - else -#endif - enter_smm_save_state_32(vcpu, buf); - - /* - * Give enter_smm() a chance to make ISA-specific changes to the vCPU - * state (e.g. leave guest mode) after we've saved the state into the - * SMM state-save area. - */ - static_call(kvm_x86_enter_smm)(vcpu, buf); - - kvm_smm_changed(vcpu, true); - kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); - - if (static_call(kvm_x86_get_nmi_mask)(vcpu)) - vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; - else - static_call(kvm_x86_set_nmi_mask)(vcpu, true); - - kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); - kvm_rip_write(vcpu, 0x8000); - - cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); - static_call(kvm_x86_set_cr0)(vcpu, cr0); - vcpu->arch.cr0 = cr0; - - static_call(kvm_x86_set_cr4)(vcpu, 0); - - /* Undocumented: IDT limit is set to zero on entry to SMM. 
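All of this SMM save-state code is being deleted from x86.c (the new #include "smm.h" earlier in the patch shows where it moves). The idiom it relies on: every register is serialized at a fixed architectural offset inside a 512-byte SMRAM state-save image, which the removed code writes back at smbase + 0xfe00. A hedged user-space sketch of that idiom, using the 32-bit EIP slot (0x7ff0) visible above; the -0x7e00 rebase is an assumption consistent with the smbase + 0xfe00 write:

```c
#include <stdint.h>
#include <string.h>

/* Write 'val' at a fixed SMRAM offset inside the 512-byte image. */
#define PUT_SMSTATE(type, buf, offset, val) do {			\
	type __val = (val);						\
	memcpy((buf) + (offset) - 0x7e00, &__val, sizeof(__val));	\
} while (0)

static void save_eip_32(char *buf /* 512 bytes, SMRAM 0x7e00..0x7fff */,
			uint32_t eip)
{
	PUT_SMSTATE(uint32_t, buf, 0x7ff0, eip);	/* EIP slot, 32-bit layout */
}
```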
*/ - dt.address = dt.size = 0; - static_call(kvm_x86_set_idt)(vcpu, &dt); - - kvm_set_dr(vcpu, 7, DR7_FIXED_1); - - cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; - cs.base = vcpu->arch.smbase; - - ds.selector = 0; - ds.base = 0; - - cs.limit = ds.limit = 0xffffffff; - cs.type = ds.type = 0x3; - cs.dpl = ds.dpl = 0; - cs.db = ds.db = 0; - cs.s = ds.s = 1; - cs.l = ds.l = 0; - cs.g = ds.g = 1; - cs.avl = ds.avl = 0; - cs.present = ds.present = 1; - cs.unusable = ds.unusable = 0; - cs.padding = ds.padding = 0; - - kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); - kvm_set_segment(vcpu, &ds, VCPU_SREG_DS); - kvm_set_segment(vcpu, &ds, VCPU_SREG_ES); - kvm_set_segment(vcpu, &ds, VCPU_SREG_FS); - kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); - kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); - -#ifdef CONFIG_X86_64 - if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) - static_call(kvm_x86_set_efer)(vcpu, 0); -#endif - - kvm_update_cpuid_runtime(vcpu); - kvm_mmu_reset_context(vcpu); -} - -static void process_smi(struct kvm_vcpu *vcpu) -{ - vcpu->arch.smi_pending = true; - kvm_make_request(KVM_REQ_EVENT, vcpu); -} - void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, unsigned long *vcpu_bitmap) { @@ -10516,20 +10222,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) bool req_immediate_exit = false; - /* Forbid vmenter if vcpu dirty ring is soft-full */ - if (unlikely(vcpu->kvm->dirty_ring_size && - kvm_dirty_ring_soft_full(&vcpu->dirty_ring))) { - vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL; - trace_kvm_dirty_ring_exit(vcpu); - r = 0; - goto out; - } - if (kvm_request_pending(vcpu)) { if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) { r = -EIO; goto out; } + + if (kvm_dirty_ring_check_request(vcpu)) { + r = 0; + goto out; + } + if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { r = 0; @@ -10553,14 +10256,27 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) kvm_mmu_sync_roots(vcpu); if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu)) kvm_mmu_load_pgd(vcpu); - if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { + + /* + * Note, the order matters here, as flushing "all" TLB entries + * also flushes the "current" TLB entries, i.e. servicing the + * flush "all" will clear any request to flush "current". + */ + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) kvm_vcpu_flush_tlb_all(vcpu); - /* Flushing all ASIDs flushes the current ASID... */ - kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); - } kvm_service_local_tlb_flush_requests(vcpu); + /* + * Fall back to a "full" guest flush if Hyper-V's precise + * flushing fails. Note, Hyper-V's flushing is per-vCPU, but + * the flushes are considered "remote" and not "local" because + * the requests can be initiated from other vCPUs. 
+ */ + if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) && + kvm_hv_vcpu_flush_tlb(vcpu)) + kvm_vcpu_flush_tlb_guest(vcpu); + if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; r = 0; @@ -10585,8 +10301,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) } if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) record_steal_time(vcpu); +#ifdef CONFIG_KVM_SMM if (kvm_check_request(KVM_REQ_SMI, vcpu)) process_smi(vcpu); +#endif if (kvm_check_request(KVM_REQ_NMI, vcpu)) process_nmi(vcpu); if (kvm_check_request(KVM_REQ_PMU, vcpu)) @@ -11834,7 +11552,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu->arch.regs_avail = ~0; vcpu->arch.regs_dirty = ~0; - kvm_gpc_init(&vcpu->arch.pv_time); + kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN); if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; @@ -11900,6 +11618,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; kvm_async_pf_hash_reset(vcpu); + + vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap; kvm_pmu_init(vcpu); vcpu->arch.pending_external_vector = -1; @@ -12334,7 +12054,6 @@ bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) { return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; } -EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp); bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) { @@ -12909,10 +12628,12 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) static_call(kvm_x86_nmi_allowed)(vcpu, false))) return true; +#ifdef CONFIG_KVM_SMM if (kvm_test_request(KVM_REQ_SMI, vcpu) || (vcpu->arch.smi_pending && static_call(kvm_x86_smi_allowed)(vcpu, false))) return true; +#endif if (kvm_arch_interrupt_allowed(vcpu) && (kvm_cpu_has_interrupt(vcpu) || @@ -12953,7 +12674,9 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) return true; if (kvm_test_request(KVM_REQ_NMI, vcpu) || +#ifdef CONFIG_KVM_SMM kvm_test_request(KVM_REQ_SMI, vcpu) || +#endif kvm_test_request(KVM_REQ_EVENT, vcpu)) return true; diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 829d3134c1eb08d290aba61cf1ecf292b4ae9466..9de72586f4065e6a1db1af334710d6c2127412c5 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -27,6 +27,7 @@ struct kvm_caps { u64 supported_mce_cap; u64 supported_xcr0; u64 supported_xss; + u64 supported_perf_cap; }; void kvm_spurious_fault(void); diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index f3098c0e386a8a6a8eadf50f993ec2a00cfe2317..d7af402402484bb008f253213a86c186224a34c1 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -42,13 +42,12 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn) int idx = srcu_read_lock(&kvm->srcu); if (gfn == GPA_INVALID) { - kvm_gpc_deactivate(kvm, gpc); + kvm_gpc_deactivate(gpc); goto out; } do { - ret = kvm_gpc_activate(kvm, gpc, NULL, KVM_HOST_USES_PFN, gpa, - PAGE_SIZE); + ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE); if (ret) goto out; @@ -170,112 +169,45 @@ static void kvm_xen_init_timer(struct kvm_vcpu *vcpu) vcpu->arch.xen.timer.function = xen_timer_callback; } -static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state) +static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic) { struct kvm_vcpu_xen *vx = &v->arch.xen; - u64 now = get_kvmclock_ns(v->kvm); - u64 delta_ns = now - vx->runstate_entry_time; - u64 run_delay = current->sched_info.run_delay; - - if (unlikely(!vx->runstate_entry_time)) - vx->current_runstate = RUNSTATE_offline; - - /* - * Time waiting for the 
scheduler isn't "stolen" if the - * vCPU wasn't running anyway. - */ - if (vx->current_runstate == RUNSTATE_running) { - u64 steal_ns = run_delay - vx->last_steal; - - delta_ns -= steal_ns; - - vx->runstate_times[RUNSTATE_runnable] += steal_ns; - } - vx->last_steal = run_delay; - - vx->runstate_times[vx->current_runstate] += delta_ns; - vx->current_runstate = state; - vx->runstate_entry_time = now; -} - -void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state) -{ - struct kvm_vcpu_xen *vx = &v->arch.xen; - struct gfn_to_pfn_cache *gpc = &vx->runstate_cache; - uint64_t *user_times; + struct gfn_to_pfn_cache *gpc1 = &vx->runstate_cache; + struct gfn_to_pfn_cache *gpc2 = &vx->runstate2_cache; + size_t user_len, user_len1, user_len2; + struct vcpu_runstate_info rs; unsigned long flags; - size_t user_len; - int *user_state; - - kvm_xen_update_runstate(v, state); - - if (!vx->runstate_cache.active) - return; - - if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) - user_len = sizeof(struct vcpu_runstate_info); - else - user_len = sizeof(struct compat_vcpu_runstate_info); - - read_lock_irqsave(&gpc->lock, flags); - while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, - user_len)) { - read_unlock_irqrestore(&gpc->lock, flags); - - /* When invoked from kvm_sched_out() we cannot sleep */ - if (state == RUNSTATE_runnable) - return; - - if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, user_len)) - return; - - read_lock_irqsave(&gpc->lock, flags); - } + size_t times_ofs; + uint8_t *update_bit = NULL; + uint64_t entry_time; + uint64_t *rs_times; + int *rs_state; /* * The only difference between 32-bit and 64-bit versions of the - * runstate struct us the alignment of uint64_t in 32-bit, which + * runstate struct is the alignment of uint64_t in 32-bit, which * means that the 64-bit version has an additional 4 bytes of - * padding after the first field 'state'. - * - * So we use 'int __user *user_state' to point to the state field, - * and 'uint64_t __user *user_times' for runstate_entry_time. So - * the actual array of time[] in each state starts at user_times[1]. + * padding after the first field 'state'. Let's be really really + * paranoid about that, and matching it with our internal data + * structures that we memcpy into it... */ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0); BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0); BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c); #ifdef CONFIG_X86_64 + /* + * The 64-bit structure has 4 bytes of padding before 'state_entry_time' + * so each subsequent field is shifted by 4, and it's 4 bytes longer. + */ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) != offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4); BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) != offsetof(struct compat_vcpu_runstate_info, time) + 4); + BUILD_BUG_ON(sizeof(struct vcpu_runstate_info) != 0x2c + 4); #endif - - user_state = gpc->khva; - - if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) - user_times = gpc->khva + offsetof(struct vcpu_runstate_info, - state_entry_time); - else - user_times = gpc->khva + offsetof(struct compat_vcpu_runstate_info, - state_entry_time); - /* - * First write the updated state_entry_time at the appropriate - * location determined by 'offset'. 
- */ - BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) != - sizeof(user_times[0])); - BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) != - sizeof(user_times[0])); - - user_times[0] = vx->runstate_entry_time | XEN_RUNSTATE_UPDATE; - smp_wmb(); - - /* - * Next, write the new runstate. This is in the *same* place - * for 32-bit and 64-bit guests, asserted here for paranoia. + * The state field is in the same place at the start of both structs, + * and is the same size (int) as vx->current_runstate. */ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != offsetof(struct compat_vcpu_runstate_info, state)); @@ -284,34 +216,238 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state) BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) != sizeof(vx->current_runstate)); - *user_state = vx->current_runstate; + /* + * The state_entry_time field is 64 bits in both versions, and the + * XEN_RUNSTATE_UPDATE flag is in the top bit, which given that x86 + * is little-endian means that it's in the last *byte* of the word. + * That detail is important later. + */ + BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) != + sizeof(uint64_t)); + BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) != + sizeof(uint64_t)); + BUILD_BUG_ON((XEN_RUNSTATE_UPDATE >> 56) != 0x80); /* - * Write the actual runstate times immediately after the - * runstate_entry_time. + * The time array is four 64-bit quantities in both versions, matching + * the vx->runstate_times and immediately following state_entry_time. */ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) != - offsetof(struct vcpu_runstate_info, time) - sizeof(u64)); + offsetof(struct vcpu_runstate_info, time) - sizeof(uint64_t)); BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) != - offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64)); + offsetof(struct compat_vcpu_runstate_info, time) - sizeof(uint64_t)); BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) != sizeof_field(struct compat_vcpu_runstate_info, time)); BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) != sizeof(vx->runstate_times)); - memcpy(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times)); - smp_wmb(); + if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) { + user_len = sizeof(struct vcpu_runstate_info); + times_ofs = offsetof(struct vcpu_runstate_info, + state_entry_time); + } else { + user_len = sizeof(struct compat_vcpu_runstate_info); + times_ofs = offsetof(struct compat_vcpu_runstate_info, + state_entry_time); + } + + /* + * There are basically no alignment constraints. The guest can set it + * up so it crosses from one page to the next, and at arbitrary byte + * alignment (and the 32-bit ABI doesn't align the 64-bit integers + * anyway, even if the overall struct had been 64-bit aligned). + */ + if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) { + user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK); + user_len2 = user_len - user_len1; + } else { + user_len1 = user_len; + user_len2 = 0; + } + BUG_ON(user_len1 + user_len2 != user_len); + + retry: + /* + * Attempt to obtain the GPC lock on *both* (if there are two) + * gfn_to_pfn caches that cover the region. 
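Because the guest may place the runstate area at any byte alignment, the struct can straddle a page boundary; the user_len1/user_len2 computation above splits it at that boundary. The arithmetic in isolation, as a stand-alone sketch:

```c
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096ull
#define PAGE_MASK (~(PAGE_SIZE - 1))

static void split_at_page(uint64_t gpa, size_t len, size_t *len1, size_t *len2)
{
	if ((gpa & ~PAGE_MASK) + len >= PAGE_SIZE) {
		*len1 = PAGE_SIZE - (gpa & ~PAGE_MASK);	/* bytes in first page */
		*len2 = len - *len1;			/* remainder in second */
	} else {
		*len1 = len;
		*len2 = 0;
	}
}
```

For example, a 0x2c-byte compat struct at a GPA ending in 0xfe8 gives len1 = 0x18 in the first page and len2 = 0x14 in the next.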
+ */ + read_lock_irqsave(&gpc1->lock, flags); + while (!kvm_gpc_check(gpc1, user_len1)) { + read_unlock_irqrestore(&gpc1->lock, flags); + + /* When invoked from kvm_sched_out() we cannot sleep */ + if (atomic) + return; + + if (kvm_gpc_refresh(gpc1, user_len1)) + return; + + read_lock_irqsave(&gpc1->lock, flags); + } + + if (likely(!user_len2)) { + /* + * Set up three pointers directly to the runstate_info + * struct in the guest (via the GPC). + * + * • @rs_state → state field + * • @rs_times → state_entry_time field. + * • @update_bit → last byte of state_entry_time, which + * contains the XEN_RUNSTATE_UPDATE bit. + */ + rs_state = gpc1->khva; + rs_times = gpc1->khva + times_ofs; + if (v->kvm->arch.xen.runstate_update_flag) + update_bit = ((void *)(&rs_times[1])) - 1; + } else { + /* + * The guest's runstate_info is split across two pages and we + * need to hold and validate both GPCs simultaneously. We can + * declare a lock ordering GPC1 > GPC2 because nothing else + * takes them more than one at a time. + */ + read_lock(&gpc2->lock); + + if (!kvm_gpc_check(gpc2, user_len2)) { + read_unlock(&gpc2->lock); + read_unlock_irqrestore(&gpc1->lock, flags); + + /* When invoked from kvm_sched_out() we cannot sleep */ + if (atomic) + return; + + /* + * Use kvm_gpc_activate() here because if the runstate + * area was configured in 32-bit mode and only extends + * to the second page now because the guest changed to + * 64-bit mode, the second GPC won't have been set up. + */ + if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1, + user_len2)) + return; + + /* + * We dropped the lock on GPC1 so we have to go all the + * way back and revalidate that too. + */ + goto retry; + } + + /* + * In this case, the runstate_info struct will be assembled on + * the kernel stack (compat or not as appropriate) and will + * be copied to GPC1/GPC2 with a dual memcpy. Set up the three + * rs pointers accordingly. + */ + rs_times = &rs.state_entry_time; + + /* + * The rs_state pointer points to the start of what we'll + * copy to the guest, which in the case of a compat guest + * is the 32-bit field that the compiler thinks is padding. + */ + rs_state = ((void *)rs_times) - times_ofs; + + /* + * The update_bit is still directly in the guest memory, + * via one GPC or the other. + */ + if (v->kvm->arch.xen.runstate_update_flag) { + if (user_len1 >= times_ofs + sizeof(uint64_t)) + update_bit = gpc1->khva + times_ofs + + sizeof(uint64_t) - 1; + else + update_bit = gpc2->khva + times_ofs + + sizeof(uint64_t) - 1 - user_len1; + } + +#ifdef CONFIG_X86_64 + /* + * Don't leak kernel memory through the padding in the 64-bit + * version of the struct. + */ + memset(&rs, 0, offsetof(struct vcpu_runstate_info, state_entry_time)); +#endif + } + + /* + * First, set the XEN_RUNSTATE_UPDATE bit in the top bit of the + * state_entry_time field, directly in the guest. We need to set + * that (and write-barrier) before writing to the rest of the + * structure, and clear it last. Just as Xen does, we address the + * single *byte* in which it resides because it might be in a + * different cache line to the rest of the 64-bit word, due to + * the (lack of) alignment constraints. + */ + entry_time = vx->runstate_entry_time; + if (update_bit) { + entry_time |= XEN_RUNSTATE_UPDATE; + *update_bit = (vx->runstate_entry_time | XEN_RUNSTATE_UPDATE) >> 56; + smp_wmb(); + } /* - * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's - * runstate_entry_time field. 
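The single-byte trick used for @update_bit: XEN_RUNSTATE_UPDATE is bit 63 of a little-endian 64-bit field, so it lives in bit 7 of the field's last byte and can be set or cleared with one byte store, which is exactly what the (XEN_RUNSTATE_UPDATE >> 56) != 0x80 assertion pins down. In miniature:

```c
#include <stdint.h>

#define XEN_RUNSTATE_UPDATE (1ull << 63)

/* 'byte' points at the last byte of the little-endian state_entry_time. */
static void write_update_flag(uint8_t *byte, uint64_t entry_time, int set)
{
	if (set)
		entry_time |= XEN_RUNSTATE_UPDATE;
	else
		entry_time &= ~XEN_RUNSTATE_UPDATE;

	*byte = entry_time >> 56;	/* only the top byte changes */
}
```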
+ * Now assemble the actual structure, either on our kernel stack + * or directly in the guest according to how the rs_state and + * rs_times pointers were set up above. */ - user_times[0] &= ~XEN_RUNSTATE_UPDATE; + *rs_state = vx->current_runstate; + rs_times[0] = entry_time; + memcpy(rs_times + 1, vx->runstate_times, sizeof(vx->runstate_times)); + + /* For the split case, we have to then copy it to the guest. */ + if (user_len2) { + memcpy(gpc1->khva, rs_state, user_len1); + memcpy(gpc2->khva, ((void *)rs_state) + user_len1, user_len2); + } smp_wmb(); - read_unlock_irqrestore(&gpc->lock, flags); + /* Finally, clear the XEN_RUNSTATE_UPDATE bit. */ + if (update_bit) { + entry_time &= ~XEN_RUNSTATE_UPDATE; + *update_bit = entry_time >> 56; + smp_wmb(); + } - mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); + if (user_len2) + read_unlock(&gpc2->lock); + + read_unlock_irqrestore(&gpc1->lock, flags); + + mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT); + if (user_len2) + mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT); +} + +void kvm_xen_update_runstate(struct kvm_vcpu *v, int state) +{ + struct kvm_vcpu_xen *vx = &v->arch.xen; + u64 now = get_kvmclock_ns(v->kvm); + u64 delta_ns = now - vx->runstate_entry_time; + u64 run_delay = current->sched_info.run_delay; + + if (unlikely(!vx->runstate_entry_time)) + vx->current_runstate = RUNSTATE_offline; + + /* + * Time waiting for the scheduler isn't "stolen" if the + * vCPU wasn't running anyway. + */ + if (vx->current_runstate == RUNSTATE_running) { + u64 steal_ns = run_delay - vx->last_steal; + + delta_ns -= steal_ns; + + vx->runstate_times[RUNSTATE_runnable] += steal_ns; + } + vx->last_steal = run_delay; + + vx->runstate_times[vx->current_runstate] += delta_ns; + vx->current_runstate = state; + vx->runstate_entry_time = now; + + if (vx->runstate_cache.active) + kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable); } static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v) @@ -352,12 +488,10 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v) * little more honest about it. */ read_lock_irqsave(&gpc->lock, flags); - while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, - sizeof(struct vcpu_info))) { + while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) { read_unlock_irqrestore(&gpc->lock, flags); - if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, - sizeof(struct vcpu_info))) + if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) return; read_lock_irqsave(&gpc->lock, flags); @@ -417,8 +551,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v) sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending)); read_lock_irqsave(&gpc->lock, flags); - while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, - sizeof(struct vcpu_info))) { + while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) { read_unlock_irqrestore(&gpc->lock, flags); /* @@ -432,8 +565,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v) if (in_atomic() || !task_is_running(current)) return 1; - if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, - sizeof(struct vcpu_info))) { + if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) { /* * If this failed, userspace has screwed up the * vcpu_info mapping. No interrupts for you. 
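The relocated kvm_xen_update_runstate() below is a small accounting state machine: time spent waiting on the host scheduler is billed to RUNSTATE_runnable as stolen time, and deducted from the elapsed delta only when the vCPU was actually in RUNSTATE_running. A condensed, self-contained restatement of that logic:

```c
#include <stdint.h>

enum { RS_running, RS_runnable, RS_blocked, RS_offline };

struct rs_acct {
	int cur;
	uint64_t entry_time, last_steal, times[4];
};

static void rs_update(struct rs_acct *a, uint64_t now, uint64_t run_delay,
		      int new_state)
{
	uint64_t delta_ns = now - a->entry_time;

	if (a->cur == RS_running) {
		uint64_t steal_ns = run_delay - a->last_steal;

		delta_ns -= steal_ns;		/* don't double-count it */
		a->times[RS_runnable] += steal_ns;
	}
	a->last_steal = run_delay;

	a->times[a->cur] += delta_ns;
	a->cur = new_state;
	a->entry_time = now;
}
```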
@@ -493,6 +625,17 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 		r = 0;
 		break;
 
+	case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
+		if (!sched_info_on()) {
+			r = -EOPNOTSUPP;
+			break;
+		}
+		mutex_lock(&kvm->lock);
+		kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag;
+		mutex_unlock(&kvm->lock);
+		r = 0;
+		break;
+
 	default:
 		break;
 	}
@@ -530,6 +673,15 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 		r = 0;
 		break;
 
+	case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
+		if (!sched_info_on()) {
+			r = -EOPNOTSUPP;
+			break;
+		}
+		data->u.runstate_update_flag = kvm->arch.xen.runstate_update_flag;
+		r = 0;
+		break;
+
 	default:
 		break;
 	}
@@ -554,15 +706,13 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 			     offsetof(struct compat_vcpu_info, time));
 
 		if (data->u.gpa == GPA_INVALID) {
-			kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
+			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
 			r = 0;
 			break;
 		}
 
-		r = kvm_gpc_activate(vcpu->kvm,
-				     &vcpu->arch.xen.vcpu_info_cache, NULL,
-				     KVM_HOST_USES_PFN, data->u.gpa,
-				     sizeof(struct vcpu_info));
+		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
+				     data->u.gpa, sizeof(struct vcpu_info));
 		if (!r)
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
@@ -570,37 +720,65 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 
 	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
 		if (data->u.gpa == GPA_INVALID) {
-			kvm_gpc_deactivate(vcpu->kvm,
-					   &vcpu->arch.xen.vcpu_time_info_cache);
+			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
 			r = 0;
 			break;
 		}
 
-		r = kvm_gpc_activate(vcpu->kvm,
-				     &vcpu->arch.xen.vcpu_time_info_cache,
-				     NULL, KVM_HOST_USES_PFN, data->u.gpa,
+		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_time_info_cache,
+				     data->u.gpa,
 				     sizeof(struct pvclock_vcpu_time_info));
 		if (!r)
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 		break;
 
-	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
+	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR: {
+		size_t sz, sz1, sz2;
+
 		if (!sched_info_on()) {
 			r = -EOPNOTSUPP;
 			break;
 		}
 		if (data->u.gpa == GPA_INVALID) {
-			kvm_gpc_deactivate(vcpu->kvm,
-					   &vcpu->arch.xen.runstate_cache);
 			r = 0;
+		deactivate_out:
+			kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
+			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
 			break;
 		}
 
-		r = kvm_gpc_activate(vcpu->kvm, &vcpu->arch.xen.runstate_cache,
-				     NULL, KVM_HOST_USES_PFN, data->u.gpa,
-				     sizeof(struct vcpu_runstate_info));
-		break;
+		/*
+		 * If the guest switches to 64-bit mode after setting the runstate
+		 * address, that's actually OK. kvm_xen_update_runstate_guest()
+		 * will cope.
+		 */
+		if (IS_ENABLED(CONFIG_64BIT) && vcpu->kvm->arch.xen.long_mode)
+			sz = sizeof(struct vcpu_runstate_info);
+		else
+			sz = sizeof(struct compat_vcpu_runstate_info);
+
+		/* How much fits in the (first) page?
*/ + sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK); + r = kvm_gpc_activate(&vcpu->arch.xen.runstate_cache, + data->u.gpa, sz1); + if (r) + goto deactivate_out; + + /* Either map the second page, or deactivate the second GPC */ + if (sz1 >= sz) { + kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache); + } else { + sz2 = sz - sz1; + BUG_ON((data->u.gpa + sz1) & ~PAGE_MASK); + r = kvm_gpc_activate(&vcpu->arch.xen.runstate2_cache, + data->u.gpa + sz1, sz2); + if (r) + goto deactivate_out; + } + kvm_xen_update_runstate_guest(vcpu, false); + break; + } case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT: if (!sched_info_on()) { r = -EOPNOTSUPP; @@ -693,6 +871,8 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) if (data->u.runstate.state <= RUNSTATE_offline) kvm_xen_update_runstate(vcpu, data->u.runstate.state); + else if (vcpu->arch.xen.runstate_cache.active) + kvm_xen_update_runstate_guest(vcpu, false); r = 0; break; @@ -972,9 +1152,9 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports, bool ret = true; int idx, i; - read_lock_irqsave(&gpc->lock, flags); idx = srcu_read_lock(&kvm->srcu); - if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) + read_lock_irqsave(&gpc->lock, flags); + if (!kvm_gpc_check(gpc, PAGE_SIZE)) goto out_rcu; ret = false; @@ -994,8 +1174,8 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports, } out_rcu: - srcu_read_unlock(&kvm->srcu, idx); read_unlock_irqrestore(&gpc->lock, flags); + srcu_read_unlock(&kvm->srcu, idx); return ret; } @@ -1008,20 +1188,45 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode, evtchn_port_t port, *ports; gpa_t gpa; - if (!longmode || !lapic_in_kernel(vcpu) || + if (!lapic_in_kernel(vcpu) || !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND)) return false; idx = srcu_read_lock(&vcpu->kvm->srcu); gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL); srcu_read_unlock(&vcpu->kvm->srcu, idx); - - if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &sched_poll, - sizeof(sched_poll))) { + if (!gpa) { *r = -EFAULT; return true; } + if (IS_ENABLED(CONFIG_64BIT) && !longmode) { + struct compat_sched_poll sp32; + + /* Sanity check that the compat struct definition is correct */ + BUILD_BUG_ON(sizeof(sp32) != 16); + + if (kvm_vcpu_read_guest(vcpu, gpa, &sp32, sizeof(sp32))) { + *r = -EFAULT; + return true; + } + + /* + * This is a 32-bit pointer to an array of evtchn_port_t which + * are uint32_t, so once it's converted no further compat + * handling is needed. 
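The compat path in kvm_xen_schedop_poll() above only has to widen one field: in the 32-bit ABI 'ports' is a uint32_t guest virtual address, so after zero-extension the rest of the handler is shared. A sketch with simplified types ('ports' kept as a plain integer rather than a pointer, struct names invented):

```c
#include <stdint.h>

struct compat_sched_poll_x { uint32_t ports; uint32_t nr_ports; uint64_t timeout; };
struct sched_poll_x        { uint64_t ports; uint32_t nr_ports; uint64_t timeout; };

_Static_assert(sizeof(struct compat_sched_poll_x) == 16, "matches the BUILD_BUG_ON");

static void widen_sched_poll(const struct compat_sched_poll_x *sp32,
			     struct sched_poll_x *sp)
{
	sp->ports    = sp32->ports;	/* zero-extend the 32-bit guest VA */
	sp->nr_ports = sp32->nr_ports;
	sp->timeout  = sp32->timeout;
}
```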
+ */ + sched_poll.ports = (void *)(unsigned long)(sp32.ports); + sched_poll.nr_ports = sp32.nr_ports; + sched_poll.timeout = sp32.timeout; + } else { + if (kvm_vcpu_read_guest(vcpu, gpa, &sched_poll, + sizeof(sched_poll))) { + *r = -EFAULT; + return true; + } + } + if (unlikely(sched_poll.nr_ports > 1)) { /* Xen (unofficially) limits number of pollers to 128 */ if (sched_poll.nr_ports > 128) { @@ -1256,7 +1461,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu) } #endif cpl = static_call(kvm_x86_get_cpl)(vcpu); - trace_kvm_xen_hypercall(input, params[0], params[1], params[2], + trace_kvm_xen_hypercall(cpl, input, params[0], params[1], params[2], params[3], params[4], params[5]); /* @@ -1371,7 +1576,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm) idx = srcu_read_lock(&kvm->srcu); read_lock_irqsave(&gpc->lock, flags); - if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) + if (!kvm_gpc_check(gpc, PAGE_SIZE)) goto out_rcu; if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { @@ -1405,7 +1610,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm) gpc = &vcpu->arch.xen.vcpu_info_cache; read_lock_irqsave(&gpc->lock, flags); - if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) { + if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) { /* * Could not access the vcpu_info. Set the bit in-kernel * and prod the vCPU to deliver it for itself. @@ -1503,7 +1708,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm) break; idx = srcu_read_lock(&kvm->srcu); - rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE); + rc = kvm_gpc_refresh(gpc, PAGE_SIZE); srcu_read_unlock(&kvm->srcu, idx); } while(!rc); @@ -1833,9 +2038,14 @@ void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu) timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0); - kvm_gpc_init(&vcpu->arch.xen.runstate_cache); - kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache); - kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache); + kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL, + KVM_HOST_USES_PFN); + kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL, + KVM_HOST_USES_PFN); + kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL, + KVM_HOST_USES_PFN); + kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL, + KVM_HOST_USES_PFN); } void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu) @@ -1843,9 +2053,10 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu) if (kvm_xen_timer_enabled(vcpu)) kvm_xen_stop_timer(vcpu); - kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.runstate_cache); - kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache); - kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_time_info_cache); + kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache); + kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache); + kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache); + kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache); del_timer_sync(&vcpu->arch.xen.poll_timer); } @@ -1853,7 +2064,7 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu) void kvm_xen_init_vm(struct kvm *kvm) { idr_init(&kvm->arch.xen.evtchn_ports); - kvm_gpc_init(&kvm->arch.xen.shinfo_cache); + kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN); } void kvm_xen_destroy_vm(struct kvm *kvm) @@ -1861,7 +2072,7 @@ void kvm_xen_destroy_vm(struct kvm *kvm) struct evtchnfd *evtchnfd; int i; - kvm_gpc_deactivate(kvm, &kvm->arch.xen.shinfo_cache); + kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache); 
idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) { if (!evtchnfd->deliver.port.port) diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index 532a535a9e99f43c4cfadc1fbbb1192630225bf8..ea33d80a0c51f811278541b428ba93ccb203a718 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -143,11 +143,11 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu); #include #include -void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state); +void kvm_xen_update_runstate(struct kvm_vcpu *vcpu, int state); static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu) { - kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running); + kvm_xen_update_runstate(vcpu, RUNSTATE_running); } static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu) @@ -162,7 +162,7 @@ static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu) if (WARN_ON_ONCE(!vcpu->preempted)) return; - kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable); + kvm_xen_update_runstate(vcpu, RUNSTATE_runnable); } /* 32-bit compatibility definitions, also used natively in 32-bit build */ @@ -207,4 +207,11 @@ struct compat_vcpu_runstate_info { uint64_t time[4]; } __attribute__((packed)); +struct compat_sched_poll { + /* This is actually a guest virtual address which points to ports. */ + uint32_t ports; + unsigned int nr_ports; + uint64_t timeout; +}; + #endif /* __ARCH_X86_KVM_XEN_H__ */ diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c index 1e3de0769b8104198a6cc69c0bf8a4b10aea06c6..b5a6d83106bc2d1789d55910add79cd1a8c90236 100644 --- a/arch/x86/lib/error-inject.c +++ b/arch/x86/lib/error-inject.c @@ -11,6 +11,7 @@ asm( ".text\n" ".type just_return_func, @function\n" ".globl just_return_func\n" + ASM_FUNC_ALIGN "just_return_func:\n" ANNOTATE_NOENDBR ASM_RET diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index b7dfd60243b75c65f79e39a1486163a5d7f21ca6..32125224fccaadfb45863679bca5c5e7cba85009 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S @@ -47,8 +47,6 @@ SYM_FUNC_START(__put_user_1) LOAD_TASK_SIZE_MINUS_N(0) cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user -SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL) - ENDBR ASM_STAC 1: movb %al,(%_ASM_CX) xor %ecx,%ecx @@ -56,54 +54,87 @@ SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL) RET SYM_FUNC_END(__put_user_1) EXPORT_SYMBOL(__put_user_1) + +SYM_FUNC_START(__put_user_nocheck_1) + ENDBR + ASM_STAC +2: movb %al,(%_ASM_CX) + xor %ecx,%ecx + ASM_CLAC + RET +SYM_FUNC_END(__put_user_nocheck_1) EXPORT_SYMBOL(__put_user_nocheck_1) SYM_FUNC_START(__put_user_2) LOAD_TASK_SIZE_MINUS_N(1) cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user -SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL) - ENDBR ASM_STAC -2: movw %ax,(%_ASM_CX) +3: movw %ax,(%_ASM_CX) xor %ecx,%ecx ASM_CLAC RET SYM_FUNC_END(__put_user_2) EXPORT_SYMBOL(__put_user_2) + +SYM_FUNC_START(__put_user_nocheck_2) + ENDBR + ASM_STAC +4: movw %ax,(%_ASM_CX) + xor %ecx,%ecx + ASM_CLAC + RET +SYM_FUNC_END(__put_user_nocheck_2) EXPORT_SYMBOL(__put_user_nocheck_2) SYM_FUNC_START(__put_user_4) LOAD_TASK_SIZE_MINUS_N(3) cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user -SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL) - ENDBR ASM_STAC -3: movl %eax,(%_ASM_CX) +5: movl %eax,(%_ASM_CX) xor %ecx,%ecx ASM_CLAC RET SYM_FUNC_END(__put_user_4) EXPORT_SYMBOL(__put_user_4) + +SYM_FUNC_START(__put_user_nocheck_4) + ENDBR + ASM_STAC +6: movl %eax,(%_ASM_CX) + xor %ecx,%ecx + ASM_CLAC + RET +SYM_FUNC_END(__put_user_nocheck_4) EXPORT_SYMBOL(__put_user_nocheck_4) 
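The putuser.S rework below unshares the checked and nocheck entry points; previously the nocheck label jumped into the middle of the checked body. Functionally the relationship is simple. A C analogue under invented names, with TASK_SIZE_MAX standing in for LOAD_TASK_SIZE_MINUS_N() and the STAC/CLAC and exception-table machinery elided:

```c
#include <stdint.h>

#define TASK_SIZE_MAX 0x00007ffffffff000ull	/* illustrative x86-64 value */
#define EFAULT 14

/* Caller has already done access_ok(); just perform the store. */
static int put_user_nocheck_8(uint64_t val, uint64_t *uaddr)
{
	*uaddr = val;
	return 0;
}

static int put_user_8(uint64_t val, uint64_t *uaddr)
{
	if ((uintptr_t)uaddr >= TASK_SIZE_MAX - 7)	/* cmp + jae .Lbad_put_user */
		return -EFAULT;
	return put_user_nocheck_8(val, uaddr);
}
```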
SYM_FUNC_START(__put_user_8) LOAD_TASK_SIZE_MINUS_N(7) cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user -SYM_INNER_LABEL(__put_user_nocheck_8, SYM_L_GLOBAL) - ENDBR ASM_STAC -4: mov %_ASM_AX,(%_ASM_CX) +7: mov %_ASM_AX,(%_ASM_CX) #ifdef CONFIG_X86_32 -5: movl %edx,4(%_ASM_CX) +8: movl %edx,4(%_ASM_CX) #endif xor %ecx,%ecx ASM_CLAC RET SYM_FUNC_END(__put_user_8) EXPORT_SYMBOL(__put_user_8) + +SYM_FUNC_START(__put_user_nocheck_8) + ENDBR + ASM_STAC +9: mov %_ASM_AX,(%_ASM_CX) +#ifdef CONFIG_X86_32 +10: movl %edx,4(%_ASM_CX) +#endif + xor %ecx,%ecx + ASM_CLAC + RET +SYM_FUNC_END(__put_user_nocheck_8) EXPORT_SYMBOL(__put_user_nocheck_8) SYM_CODE_START_LOCAL(.Lbad_put_user_clac) @@ -117,6 +148,11 @@ SYM_CODE_END(.Lbad_put_user_clac) _ASM_EXTABLE_UA(2b, .Lbad_put_user_clac) _ASM_EXTABLE_UA(3b, .Lbad_put_user_clac) _ASM_EXTABLE_UA(4b, .Lbad_put_user_clac) -#ifdef CONFIG_X86_32 _ASM_EXTABLE_UA(5b, .Lbad_put_user_clac) + _ASM_EXTABLE_UA(6b, .Lbad_put_user_clac) + _ASM_EXTABLE_UA(7b, .Lbad_put_user_clac) + _ASM_EXTABLE_UA(9b, .Lbad_put_user_clac) +#ifdef CONFIG_X86_32 + _ASM_EXTABLE_UA(8b, .Lbad_put_user_clac) + _ASM_EXTABLE_UA(10b, .Lbad_put_user_clac) #endif diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 073289a55f8495cb3f175d97d847485b05eb7a8b..5f61c65322bea68c5164d01afcba0ccad805407d 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -5,24 +5,27 @@ #include #include #include +#include #include #include #include +#include #include .section .text.__x86.indirect_thunk -.macro RETPOLINE reg + +.macro POLINE reg ANNOTATE_INTRA_FUNCTION_CALL call .Ldo_rop_\@ -.Lspec_trap_\@: - UNWIND_HINT_EMPTY - pause - lfence - jmp .Lspec_trap_\@ + int3 .Ldo_rop_\@: mov %\reg, (%_ASM_SP) UNWIND_HINT_FUNC +.endm + +.macro RETPOLINE reg + POLINE \reg RET .endm @@ -52,7 +55,6 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL) */ #define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym) -#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg) .align RETPOLINE_THUNK_SIZE SYM_CODE_START(__x86_indirect_thunk_array) @@ -64,10 +66,65 @@ SYM_CODE_START(__x86_indirect_thunk_array) .align RETPOLINE_THUNK_SIZE SYM_CODE_END(__x86_indirect_thunk_array) -#define GEN(reg) EXPORT_THUNK(reg) +#define GEN(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg) +#include +#undef GEN + +#ifdef CONFIG_CALL_DEPTH_TRACKING +.macro CALL_THUNK reg + .align RETPOLINE_THUNK_SIZE + +SYM_INNER_LABEL(__x86_indirect_call_thunk_\reg, SYM_L_GLOBAL) + UNWIND_HINT_EMPTY + ANNOTATE_NOENDBR + + CALL_DEPTH_ACCOUNT + POLINE \reg + ANNOTATE_UNRET_SAFE + ret + int3 +.endm + + .align RETPOLINE_THUNK_SIZE +SYM_CODE_START(__x86_indirect_call_thunk_array) + +#define GEN(reg) CALL_THUNK reg #include #undef GEN + .align RETPOLINE_THUNK_SIZE +SYM_CODE_END(__x86_indirect_call_thunk_array) + +#define GEN(reg) __EXPORT_THUNK(__x86_indirect_call_thunk_ ## reg) +#include +#undef GEN + +.macro JUMP_THUNK reg + .align RETPOLINE_THUNK_SIZE + +SYM_INNER_LABEL(__x86_indirect_jump_thunk_\reg, SYM_L_GLOBAL) + UNWIND_HINT_EMPTY + ANNOTATE_NOENDBR + POLINE \reg + ANNOTATE_UNRET_SAFE + ret + int3 +.endm + + .align RETPOLINE_THUNK_SIZE +SYM_CODE_START(__x86_indirect_jump_thunk_array) + +#define GEN(reg) JUMP_THUNK reg +#include +#undef GEN + + .align RETPOLINE_THUNK_SIZE +SYM_CODE_END(__x86_indirect_jump_thunk_array) + +#define GEN(reg) __EXPORT_THUNK(__x86_indirect_jump_thunk_ ## reg) +#include +#undef GEN +#endif /* * This function name is magical and is used by -mfunction-return=thunk-extern * for the compiler to 
generate JMPs to it. @@ -140,3 +197,37 @@ __EXPORT_THUNK(zen_untrain_ret) EXPORT_SYMBOL(__x86_return_thunk) #endif /* CONFIG_RETHUNK */ + +#ifdef CONFIG_CALL_DEPTH_TRACKING + + .align 64 +SYM_FUNC_START(__x86_return_skl) + ANNOTATE_NOENDBR + /* + * Keep the hotpath in a 16byte I-fetch for the non-debug + * case. + */ + CALL_THUNKS_DEBUG_INC_RETS + shlq $5, PER_CPU_VAR(pcpu_hot + X86_call_depth) + jz 1f + ANNOTATE_UNRET_SAFE + ret + int3 +1: + CALL_THUNKS_DEBUG_INC_STUFFS + .rept 16 + ANNOTATE_INTRA_FUNCTION_CALL + call 2f + int3 +2: + .endr + add $(8*16), %rsp + + CREDIT_CALL_DEPTH + + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_FUNC_END(__x86_return_skl) + +#endif /* CONFIG_CALL_DEPTH_TRACKING */ diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c index 42cd96e7d733efddf3c420d5ae01877dc4c0cc8f..7316a822425992efec14fb5598d42f32eac43f6c 100644 --- a/arch/x86/mm/cpu_entry_area.c +++ b/arch/x86/mm/cpu_entry_area.c @@ -9,22 +9,60 @@ #include #include #include +#include static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage); #ifdef CONFIG_X86_64 static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks); DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks); -#endif -#ifdef CONFIG_X86_32 +static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset); + +static __always_inline unsigned int cea_offset(unsigned int cpu) +{ + return per_cpu(_cea_offset, cpu); +} + +static __init void init_cea_offsets(void) +{ + unsigned int max_cea; + unsigned int i, j; + + max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE; + + /* O(sodding terrible) */ + for_each_possible_cpu(i) { + unsigned int cea; + +again: + cea = get_random_u32_below(max_cea); + + for_each_possible_cpu(j) { + if (cea_offset(j) == cea) + goto again; + + if (i == j) + break; + } + + per_cpu(_cea_offset, i) = cea; + } +} +#else /* !X86_64 */ DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack); + +static __always_inline unsigned int cea_offset(unsigned int cpu) +{ + return cpu; +} +static inline void init_cea_offsets(void) { } #endif /* Is called from entry code, so must be noinstr */ noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu) { - unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE; + unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE; BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); return (struct cpu_entry_area *) va; @@ -148,6 +186,9 @@ static void __init setup_cpu_entry_area(unsigned int cpu) pgprot_t tss_prot = PAGE_KERNEL; #endif + kasan_populate_shadow_for_vaddr(cea, CPU_ENTRY_AREA_SIZE, + early_cpu_to_node(cpu)); + cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot); cea_map_percpu_pages(&cea->entry_stack_page, @@ -201,7 +242,6 @@ static __init void setup_cpu_entry_area_ptes(void) /* The +1 is for the readonly IDT: */ BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE); - BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE); BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK); start = CPU_ENTRY_AREA_BASE; @@ -217,6 +257,8 @@ void __init setup_cpu_entry_areas(void) { unsigned int cpu; + init_cea_offsets(); + setup_cpu_entry_area_ptes(); for_each_possible_cpu(cpu) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 9121bc1b9453ab3236150e98855be90b91888f64..d3987359d44140ae67c21bdc0798c210dc10fcf1 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -801,7 +801,7 @@ void __init poking_init(void) spinlock_t 
*ptl; pte_t *ptep; - poking_mm = copy_init_mm(); + poking_mm = mm_alloc(); BUG_ON(!poking_mm); /* diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 3f040c6e5d13adec988114a570b52e600853d328..a190aae8ceaf70c4070c216348a307a2d28535c9 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1416,47 +1416,6 @@ void mark_rodata_ro(void) debug_checkwx(); } -int kern_addr_valid(unsigned long addr) -{ - unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT; - pgd_t *pgd; - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - - if (above != 0 && above != -1UL) - return 0; - - pgd = pgd_offset_k(addr); - if (pgd_none(*pgd)) - return 0; - - p4d = p4d_offset(pgd, addr); - if (!p4d_present(*p4d)) - return 0; - - pud = pud_offset(p4d, addr); - if (!pud_present(*pud)) - return 0; - - if (pud_large(*pud)) - return pfn_valid(pud_pfn(*pud)); - - pmd = pmd_offset(pud, addr); - if (!pmd_present(*pmd)) - return 0; - - if (pmd_large(*pmd)) - return pfn_valid(pmd_pfn(*pmd)); - - pte = pte_offset_kernel(pmd, addr); - if (pte_none(*pte)) - return 0; - - return pfn_valid(pte_pfn(*pte)); -} - /* * Block size is the minimum amount of memory which can be hotplugged or * hotremoved. It must be power of two and must be equal or larger than @@ -1533,72 +1492,44 @@ static long __meminitdata addr_start, addr_end; static void __meminitdata *p_start, *p_end; static int __meminitdata node_start; -static int __meminit vmemmap_populate_hugepages(unsigned long start, - unsigned long end, int node, struct vmem_altmap *altmap) +void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node, + unsigned long addr, unsigned long next) { - unsigned long addr; - unsigned long next; - pgd_t *pgd; - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; - - for (addr = start; addr < end; addr = next) { - next = pmd_addr_end(addr, end); - - pgd = vmemmap_pgd_populate(addr, node); - if (!pgd) - return -ENOMEM; - - p4d = vmemmap_p4d_populate(pgd, addr, node); - if (!p4d) - return -ENOMEM; - - pud = vmemmap_pud_populate(p4d, addr, node); - if (!pud) - return -ENOMEM; - - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) { - void *p; - - p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap); - if (p) { - pte_t entry; - - entry = pfn_pte(__pa(p) >> PAGE_SHIFT, - PAGE_KERNEL_LARGE); - set_pmd(pmd, __pmd(pte_val(entry))); + pte_t entry; + + entry = pfn_pte(__pa(p) >> PAGE_SHIFT, + PAGE_KERNEL_LARGE); + set_pmd(pmd, __pmd(pte_val(entry))); + + /* check to see if we have contiguous blocks */ + if (p_end != p || node_start != node) { + if (p_start) + pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n", + addr_start, addr_end-1, p_start, p_end-1, node_start); + addr_start = addr; + node_start = node; + p_start = p; + } - /* check to see if we have contiguous blocks */ - if (p_end != p || node_start != node) { - if (p_start) - pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n", - addr_start, addr_end-1, p_start, p_end-1, node_start); - addr_start = addr; - node_start = node; - p_start = p; - } + addr_end = addr + PMD_SIZE; + p_end = p + PMD_SIZE; - addr_end = addr + PMD_SIZE; - p_end = p + PMD_SIZE; + if (!IS_ALIGNED(addr, PMD_SIZE) || + !IS_ALIGNED(next, PMD_SIZE)) + vmemmap_use_new_sub_pmd(addr, next); +} - if (!IS_ALIGNED(addr, PMD_SIZE) || - !IS_ALIGNED(next, PMD_SIZE)) - vmemmap_use_new_sub_pmd(addr, next); +int __meminit vmemmap_check_pmd(pmd_t *pmd, int node, + unsigned long addr, unsigned long next) +{ + int large = pmd_large(*pmd); - continue; - } else if (altmap) - return -ENOMEM; /* no fallback */ - } else if (pmd_large(*pmd)) { 
- vmemmap_verify((pte_t *)pmd, node, addr, next); - vmemmap_use_sub_pmd(addr, next); - continue; - } - if (vmemmap_populate_basepages(addr, next, node, NULL)) - return -ENOMEM; + if (pmd_large(*pmd)) { + vmemmap_verify((pte_t *)pmd, node, addr, next); + vmemmap_use_sub_pmd(addr, next); } - return 0; + + return large; } int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index e7b9b464a82f1d0798284a34ecc943db316e7095..0302491d799d1b2227826eab5a01f76403e75edc 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -316,10 +316,33 @@ void __init kasan_early_init(void) kasan_map_early_shadow(init_top_pgt); } +static unsigned long kasan_mem_to_shadow_align_down(unsigned long va) +{ + unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va); + + return round_down(shadow, PAGE_SIZE); +} + +static unsigned long kasan_mem_to_shadow_align_up(unsigned long va) +{ + unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va); + + return round_up(shadow, PAGE_SIZE); +} + +void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid) +{ + unsigned long shadow_start, shadow_end; + + shadow_start = kasan_mem_to_shadow_align_down((unsigned long)va); + shadow_end = kasan_mem_to_shadow_align_up((unsigned long)va + size); + kasan_populate_shadow(shadow_start, shadow_end, nid); +} + void __init kasan_init(void) { + unsigned long shadow_cea_begin, shadow_cea_per_cpu_begin, shadow_cea_end; int i; - void *shadow_cpu_entry_begin, *shadow_cpu_entry_end; memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt)); @@ -360,16 +383,10 @@ void __init kasan_init(void) map_range(&pfn_mapped[i]); } - shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE; - shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); - shadow_cpu_entry_begin = (void *)round_down( - (unsigned long)shadow_cpu_entry_begin, PAGE_SIZE); - - shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE + - CPU_ENTRY_AREA_MAP_SIZE); - shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); - shadow_cpu_entry_end = (void *)round_up( - (unsigned long)shadow_cpu_entry_end, PAGE_SIZE); + shadow_cea_begin = kasan_mem_to_shadow_align_down(CPU_ENTRY_AREA_BASE); + shadow_cea_per_cpu_begin = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_PER_CPU); + shadow_cea_end = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_BASE + + CPU_ENTRY_AREA_MAP_SIZE); kasan_populate_early_shadow( kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), @@ -391,12 +408,18 @@ void __init kasan_init(void) kasan_populate_early_shadow( kasan_mem_to_shadow((void *)VMALLOC_END + 1), - shadow_cpu_entry_begin); + (void *)shadow_cea_begin); - kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, - (unsigned long)shadow_cpu_entry_end, 0); + /* + * Populate the shadow for the shared portion of the CPU entry area. + * Shadows for the per-CPU areas are mapped on-demand, as each CPU's + * area is randomly placed somewhere in the 512GiB range and mapping + * the entire 512GiB range is prohibitively expensive. 
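+ *
+ * (Illustrative arithmetic, assuming generic KASAN's 1/8 shadow scale:
+ * shadowing the full 512GiB window up front would cost 512/8 = 64GiB of
+ * shadow mappings, versus only the small shared portion populated here.)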
+ */ + kasan_populate_shadow(shadow_cea_begin, + shadow_cea_per_cpu_begin, 0); - kasan_populate_early_shadow(shadow_cpu_entry_end, + kasan_populate_early_shadow((void *)shadow_cea_end, kasan_mem_to_shadow((void *)__START_KERNEL_map)); kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index d3efbc5b344982658393b4bca3c744d5977318ec..9f82019179e1188d7d4df4b716ba361fc83ca63d 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c @@ -62,7 +62,13 @@ struct kmmio_context { int active; }; -static DEFINE_SPINLOCK(kmmio_lock); +/* + * The kmmio_lock is taken in int3 context, which is treated as NMI context. + * This causes lockdep to complain about it being in both NMI and normal + * context. Hide it from lockdep, as it should not have any other locks + * taken under it, and this is only enabled for debugging mmio anyway. + */ +static arch_spinlock_t kmmio_lock = __ARCH_SPIN_LOCK_UNLOCKED; /* Protected by kmmio_lock */ unsigned int kmmio_count; @@ -240,15 +246,14 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr) page_base &= page_level_mask(l); /* - * Preemption is now disabled to prevent process switch during - * single stepping. We can only handle one active kmmio trace + * Hold the RCU read lock over single stepping to avoid looking + * up the probe and kmmio_fault_page again. The rcu_read_lock_sched() + * also disables preemption and prevents process switch during + * the single stepping. We can only handle one active kmmio trace * per cpu, so ensure that we finish it before something else - * gets to run. We also hold the RCU read lock over single - * stepping to avoid looking up the probe and kmmio_fault_page - * again. + * gets to run. */ - preempt_disable(); - rcu_read_lock(); + rcu_read_lock_sched_notrace(); faultpage = get_kmmio_fault_page(page_base); if (!faultpage) { @@ -317,8 +322,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr) return 1; /* fault handled */ no_kmmio: - rcu_read_unlock(); - preempt_enable_no_resched(); + rcu_read_unlock_sched_notrace(); return ret; } @@ -346,10 +350,10 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs) ctx->probe->post_handler(ctx->probe, condition, regs); /* Prevent racing against release_kmmio_fault_page(). */ - spin_lock(&kmmio_lock); + arch_spin_lock(&kmmio_lock); if (ctx->fpage->count) arm_kmmio_fault_page(ctx->fpage); - spin_unlock(&kmmio_lock); + arch_spin_unlock(&kmmio_lock); regs->flags &= ~X86_EFLAGS_TF; regs->flags |= ctx->saved_flags; @@ -357,8 +361,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs) /* These were acquired in kmmio_handler(). */ ctx->active--; BUG_ON(ctx->active); - rcu_read_unlock(); - preempt_enable_no_resched(); + rcu_read_unlock_sched_notrace(); /* * if somebody else is singlestepping across a probe point, flags @@ -440,7 +443,8 @@ int register_kmmio_probe(struct kmmio_probe *p) unsigned int l; pte_t *pte; - spin_lock_irqsave(&kmmio_lock, flags); + local_irq_save(flags); + arch_spin_lock(&kmmio_lock); if (get_kmmio_probe(addr)) { ret = -EEXIST; goto out; @@ -460,7 +464,9 @@ int register_kmmio_probe(struct kmmio_probe *p) size += page_level_size(l); } out: - spin_unlock_irqrestore(&kmmio_lock, flags); + arch_spin_unlock(&kmmio_lock); + local_irq_restore(flags); + /* * XXX: What should I do here?
* Here was a call to global_flush_tlb(), but it does not exist @@ -494,7 +500,8 @@ static void remove_kmmio_fault_pages(struct rcu_head *head) struct kmmio_fault_page **prevp = &dr->release_list; unsigned long flags; - spin_lock_irqsave(&kmmio_lock, flags); + local_irq_save(flags); + arch_spin_lock(&kmmio_lock); while (f) { if (!f->count) { list_del_rcu(&f->list); @@ -506,7 +513,8 @@ static void remove_kmmio_fault_pages(struct rcu_head *head) } f = *prevp; } - spin_unlock_irqrestore(&kmmio_lock, flags); + arch_spin_unlock(&kmmio_lock); + local_irq_restore(flags); /* This is the real RCU destroy call. */ call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages); @@ -540,14 +548,16 @@ void unregister_kmmio_probe(struct kmmio_probe *p) if (!pte) return; - spin_lock_irqsave(&kmmio_lock, flags); + local_irq_save(flags); + arch_spin_lock(&kmmio_lock); while (size < size_lim) { release_kmmio_fault_page(addr + size, &release_list); size += page_level_size(l); } list_del_rcu(&p->list); kmmio_count--; - spin_unlock_irqrestore(&kmmio_lock, flags); + arch_spin_unlock(&kmmio_lock); + local_irq_restore(flags); if (!release_list) return; diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S index 9de3d900bc927142b5ec1688551464c26aa60851..e25288ee33c2df72755451175269775e8e1481a8 100644 --- a/arch/x86/mm/mem_encrypt_boot.S +++ b/arch/x86/mm/mem_encrypt_boot.S @@ -26,7 +26,7 @@ SYM_FUNC_START(sme_encrypt_execute) * RCX - virtual address of the encryption workarea, including: * - stack page (PAGE_SIZE) * - encryption routine page (PAGE_SIZE) - * - intermediate copy buffer (PMD_PAGE_SIZE) + * - intermediate copy buffer (PMD_SIZE) * R8 - physical address of the pagetables to use for encryption */ @@ -123,7 +123,7 @@ SYM_FUNC_START(__enc_copy) wbinvd /* Invalidate any cache entries */ /* Copy/encrypt up to 2MB at a time */ - movq $PMD_PAGE_SIZE, %r12 + movq $PMD_SIZE, %r12 1: cmpq %r12, %r9 jnb 2f diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index f415498d3175cf7f67eef6f20c72a5f247a24a2f..88cccd65029dba414912656310c3e72313a4aa4b 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -93,7 +93,7 @@ struct sme_populate_pgd_data { * section is 2MB aligned to allow for simple pagetable setup using only * PMD entries (see vmlinux.lds.S). 
*/ -static char sme_workarea[2 * PMD_PAGE_SIZE] __section(".init.scratch"); +static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch"); static char sme_cmdline_arg[] __initdata = "mem_encrypt"; static char sme_cmdline_on[] __initdata = "on"; @@ -198,8 +198,8 @@ static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd) while (ppd->vaddr < ppd->vaddr_end) { sme_populate_pgd_large(ppd); - ppd->vaddr += PMD_PAGE_SIZE; - ppd->paddr += PMD_PAGE_SIZE; + ppd->vaddr += PMD_SIZE; + ppd->paddr += PMD_SIZE; } } @@ -225,11 +225,11 @@ static void __init __sme_map_range(struct sme_populate_pgd_data *ppd, vaddr_end = ppd->vaddr_end; /* If start is not 2MB aligned, create PTE entries */ - ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE); + ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE); __sme_map_range_pte(ppd); /* Create PMD entries */ - ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK; + ppd->vaddr_end = vaddr_end & PMD_MASK; __sme_map_range_pmd(ppd); /* If end is not 2MB aligned, create PTE entries */ @@ -325,7 +325,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp) /* Physical addresses gives us the identity mapped virtual addresses */ kernel_start = __pa_symbol(_text); - kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE); + kernel_end = ALIGN(__pa_symbol(_end), PMD_SIZE); kernel_len = kernel_end - kernel_start; initrd_start = 0; @@ -355,12 +355,12 @@ void __init sme_encrypt_kernel(struct boot_params *bp) * executable encryption area size: * stack page (PAGE_SIZE) * encryption routine page (PAGE_SIZE) - * intermediate copy buffer (PMD_PAGE_SIZE) + * intermediate copy buffer (PMD_SIZE) * pagetable structures for the encryption of the kernel * pagetable structures for workarea (in case not currently mapped) */ execute_start = workarea_start; - execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE; + execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE; execute_len = execute_end - execute_start; /* @@ -383,7 +383,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp) * before it is mapped. */ workarea_len = execute_len + pgtable_area_len; - workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE); + workarea_end = ALIGN(workarea_start + workarea_len, PMD_SIZE); /* * Set the address to the start of where newly created pagetable diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index ef34ba21aa92be96811af6806627047ce22dace4..356758b7d4b4766bef29dc899d6505f85b40157d 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -220,6 +220,23 @@ within_inclusive(unsigned long addr, unsigned long start, unsigned long end) #ifdef CONFIG_X86_64 +/* + * The kernel image is mapped into two places in the virtual address space + * (addresses without KASLR, of course): + * + * 1. The kernel direct map (0xffff880000000000) + * 2. The "high kernel map" (0xffffffff81000000) + * + * We actually execute out of #2. If we get the address of a kernel symbol, it + * points to #2, but almost all physical-to-virtual translations point to #1. + * + * This is so that we can have both a directmap of all physical memory *and* + * take full advantage of the limited (s32) immediate addressing range (2G) + * of x86_64. + * + * See Documentation/x86/x86_64/mm.rst for more detail.
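+ *
+ * (Worked example, illustrative and without KASLR: _text has physical
+ * address 0x01000000 by default, so it is reachable both as
+ * 0xffff880001000000 via mapping #1 and as 0xffffffff81000000 via
+ * mapping #2; only the latter fits a sign-extended 32-bit immediate.)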
+ */ + static inline unsigned long highmap_start_pfn(void) { return __pa_symbol(_text) >> PAGE_SHIFT; @@ -605,10 +622,6 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star { unsigned long end; - /* Kernel text is rw at boot up */ - if (system_state == SYSTEM_BOOTING) - return new; - /* * 32-bit has some unfixable W+X issues, like EFI code * and writeable data being in the same page. Disable @@ -765,11 +778,11 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr) switch (level) { case PG_LEVEL_1G: phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT; - offset = virt_addr & ~PUD_PAGE_MASK; + offset = virt_addr & ~PUD_MASK; break; case PG_LEVEL_2M: phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT; - offset = virt_addr & ~PMD_PAGE_MASK; + offset = virt_addr & ~PMD_MASK; break; default: phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; @@ -1059,7 +1072,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, case PG_LEVEL_1G: ref_prot = pud_pgprot(*(pud_t *)kpte); ref_pfn = pud_pfn(*(pud_t *)kpte); - pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT; + pfninc = PMD_SIZE >> PAGE_SHIFT; lpaddr = address & PUD_MASK; lpinc = PMD_SIZE; /* @@ -1646,8 +1659,11 @@ repeat: return err; } -static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias); +static int __change_page_attr_set_clr(struct cpa_data *cpa, int primary); +/* + * Check the directmap and "high kernel map" 'aliases'. + */ static int cpa_process_alias(struct cpa_data *cpa) { struct cpa_data alias_cpa; @@ -1671,6 +1687,12 @@ static int cpa_process_alias(struct cpa_data *cpa) alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); alias_cpa.curpage = 0; + /* Directmap always has NX set, do not modify. */ + if (__supported_pte_mask & _PAGE_NX) { + alias_cpa.mask_clr.pgprot &= ~_PAGE_NX; + alias_cpa.mask_set.pgprot &= ~_PAGE_NX; + } + cpa->force_flush_all = 1; ret = __change_page_attr_set_clr(&alias_cpa, 0); @@ -1693,6 +1715,15 @@ static int cpa_process_alias(struct cpa_data *cpa) alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); alias_cpa.curpage = 0; + /* + * [_text, _brk_end) also covers data, do not modify NX except + * in cases where the highmap is the primary target. + */ + if (__supported_pte_mask & _PAGE_NX) { + alias_cpa.mask_clr.pgprot &= ~_PAGE_NX; + alias_cpa.mask_set.pgprot &= ~_PAGE_NX; + } + cpa->force_flush_all = 1; /* * The high mapping range is imprecise, so ignore the @@ -1705,12 +1736,19 @@ static int cpa_process_alias(struct cpa_data *cpa) return 0; } -static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) +static int __change_page_attr_set_clr(struct cpa_data *cpa, int primary) { unsigned long numpages = cpa->numpages; unsigned long rempages = numpages; int ret = 0; + /* + * No changes, easy! 
+ */ + if (!(pgprot_val(cpa->mask_set) | pgprot_val(cpa->mask_clr)) && + !cpa->force_split) + return ret; + while (rempages) { /* * Store the remaining nr of pages for the large page @@ -1723,13 +1761,13 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) if (!debug_pagealloc_enabled()) spin_lock(&cpa_lock); - ret = __change_page_attr(cpa, checkalias); + ret = __change_page_attr(cpa, primary); if (!debug_pagealloc_enabled()) spin_unlock(&cpa_lock); if (ret) goto out; - if (checkalias) { + if (primary && !(cpa->flags & CPA_NO_CHECK_ALIAS)) { ret = cpa_process_alias(cpa); if (ret) goto out; @@ -1757,7 +1795,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, struct page **pages) { struct cpa_data cpa; - int ret, cache, checkalias; + int ret, cache; memset(&cpa, 0, sizeof(cpa)); @@ -1803,20 +1841,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, cpa.numpages = numpages; cpa.mask_set = mask_set; cpa.mask_clr = mask_clr; - cpa.flags = 0; + cpa.flags = in_flag; cpa.curpage = 0; cpa.force_split = force_split; - if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY)) - cpa.flags |= in_flag; - - /* No alias checking for _NX bit modifications */ - checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX; - /* Has caller explicitly disabled alias checking? */ - if (in_flag & CPA_NO_CHECK_ALIAS) - checkalias = 0; - - ret = __change_page_attr_set_clr(&cpa, checkalias); + ret = __change_page_attr_set_clr(&cpa, 1); /* * Check whether we really changed something: @@ -2047,6 +2076,16 @@ int set_memory_ro(unsigned long addr, int numpages) return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0); } +int set_memory_rox(unsigned long addr, int numpages) +{ + pgprot_t clr = __pgprot(_PAGE_RW); + + if (__supported_pte_mask & _PAGE_NX) + clr.pgprot |= _PAGE_NX; + + return change_page_attr_clear(&addr, numpages, clr, 0); +} + int set_memory_rw(unsigned long addr, int numpages) { return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0); @@ -2059,11 +2098,9 @@ int set_memory_np(unsigned long addr, int numpages) int set_memory_np_noalias(unsigned long addr, int numpages) { - int cpa_flags = CPA_NO_CHECK_ALIAS; - return change_page_attr_set_clr(&addr, numpages, __pgprot(0), __pgprot(_PAGE_PRESENT), 0, - cpa_flags, NULL); + CPA_NO_CHECK_ALIAS, NULL); } int set_memory_4k(unsigned long addr, int numpages) @@ -2280,7 +2317,7 @@ static int __set_pages_p(struct page *page, int numpages) .numpages = numpages, .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW), .mask_clr = __pgprot(0), - .flags = 0}; + .flags = CPA_NO_CHECK_ALIAS }; /* * No alias checking needed for setting present flag. otherwise, @@ -2288,7 +2325,7 @@ static int __set_pages_p(struct page *page, int numpages) * mappings (this adds to complexity if we want to do this from * atomic context especially). Let's keep it simple! */ - return __change_page_attr_set_clr(&cpa, 0); + return __change_page_attr_set_clr(&cpa, 1); } static int __set_pages_np(struct page *page, int numpages) @@ -2299,7 +2336,7 @@ static int __set_pages_np(struct page *page, int numpages) .numpages = numpages, .mask_set = __pgprot(0), .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), - .flags = 0}; + .flags = CPA_NO_CHECK_ALIAS }; /* * No alias checking needed for setting not present flag. otherwise, @@ -2307,7 +2344,7 @@ static int __set_pages_np(struct page *page, int numpages) * mappings (this adds to complexity if we want to do this from * atomic context especially). Let's keep it simple! 
*/ - return __change_page_attr_set_clr(&cpa, 0); + return __change_page_attr_set_clr(&cpa, 1); } int set_direct_map_invalid_noflush(struct page *page) @@ -2378,7 +2415,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, .numpages = numpages, .mask_set = __pgprot(0), .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)), - .flags = 0, + .flags = CPA_NO_CHECK_ALIAS, }; WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP"); @@ -2391,7 +2428,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags); - retval = __change_page_attr_set_clr(&cpa, 0); + retval = __change_page_attr_set_clr(&cpa, 1); __flush_tlb_all(); out: @@ -2421,12 +2458,12 @@ int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address, .numpages = numpages, .mask_set = __pgprot(0), .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), - .flags = 0, + .flags = CPA_NO_CHECK_ALIAS, }; WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP"); - retval = __change_page_attr_set_clr(&cpa, 0); + retval = __change_page_attr_set_clr(&cpa, 1); __flush_tlb_all(); return retval; diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 8525f2876fb409101e525bd3f43997fa66180c32..e4f499eb0f295beda06263181a0247e0d76eaac7 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -299,9 +299,6 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) pud_t *pud; int i; - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */ - return; - p4d = p4d_offset(pgd, 0); pud = pud_offset(p4d, 0); @@ -434,10 +431,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm) mm->pgd = pgd; - if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0) + if (sizeof(pmds) != 0 && + preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0) goto out_free_pgd; - if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0) + if (sizeof(u_pmds) != 0 && + preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0) goto out_free_pmds; if (paravirt_pgd_alloc(mm) != 0) @@ -451,17 +450,22 @@ pgd_t *pgd_alloc(struct mm_struct *mm) spin_lock(&pgd_lock); pgd_ctor(mm, pgd); - pgd_prepopulate_pmd(mm, pgd, pmds); - pgd_prepopulate_user_pmd(mm, pgd, u_pmds); + if (sizeof(pmds) != 0) + pgd_prepopulate_pmd(mm, pgd, pmds); + + if (sizeof(u_pmds) != 0) + pgd_prepopulate_user_pmd(mm, pgd, u_pmds); spin_unlock(&pgd_lock); return pgd; out_free_user_pmds: - free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS); + if (sizeof(u_pmds) != 0) + free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS); out_free_pmds: - free_pmds(mm, pmds, PREALLOCATED_PMDS); + if (sizeof(pmds) != 0) + free_pmds(mm, pmds, PREALLOCATED_PMDS); out_free_pgd: _pgd_free(pgd); out: diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index ffe3b3a087feaaa31bb0ca6b30dab8caf84857fe..78414c6d1b5ed1c245d13767907649b40f88b993 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -592,7 +592,7 @@ static void pti_set_kernel_image_nonglobal(void) * of the image. */ unsigned long start = PFN_ALIGN(_text); - unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE); + unsigned long end = ALIGN((unsigned long)_end, PMD_SIZE); /* * This clears _PAGE_GLOBAL from the entire kernel image. 
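The PMD_PAGE_SIZE/PMD_PAGE_MASK conversions running through the hunks above are a pure rename: the x86-private macros duplicated the generic page-table constants. For reference, the generic definitions the code now relies on reduce to the following (values shown for x86_64, where PMD_SHIFT is 21):

#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)	/* 2 MiB */
#define PMD_MASK	(~(PMD_SIZE - 1))		/* round down to a 2 MiB boundary */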
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 36ffe67ad6e55e23b34ff7ee0ca75be8b22aacb3..b808be77635ed3731fd111d4c7031891783c00a7 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -340,6 +341,13 @@ static int emit_call(u8 **pprog, void *func, void *ip) return emit_patch(pprog, func, ip, 0xE8); } +static int emit_rsb_call(u8 **pprog, void *func, void *ip) +{ + OPTIMIZER_HIDE_VAR(func); + x86_call_depth_emit_accounting(pprog, func); + return emit_patch(pprog, func, ip, 0xE8); +} + static int emit_jump(u8 **pprog, void *func, void *ip) { return emit_patch(pprog, func, ip, 0xE9); @@ -417,7 +425,10 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip) EMIT2(0xFF, 0xE0 + reg); } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { OPTIMIZER_HIDE_VAR(reg); - emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip); + if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH)) + emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip); + else + emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip); } else { EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */ if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS)) @@ -432,7 +443,7 @@ static void emit_return(u8 **pprog, u8 *ip) u8 *prog = *pprog; if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { - emit_jump(&prog, &__x86_return_thunk, ip); + emit_jump(&prog, x86_return_thunk, ip); } else { EMIT1(0xC3); /* ret */ if (IS_ENABLED(CONFIG_SLS)) @@ -1514,19 +1525,26 @@ st: if (is_imm8(insn->off)) break; /* call */ - case BPF_JMP | BPF_CALL: + case BPF_JMP | BPF_CALL: { + int offs; + func = (u8 *) __bpf_call_base + imm32; if (tail_call_reachable) { /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ EMIT3_off32(0x48, 0x8B, 0x85, -round_up(bpf_prog->aux->stack_depth, 8) - 8); - if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7)) + if (!imm32) return -EINVAL; + offs = 7 + x86_call_depth_emit_accounting(&prog, func); } else { - if (!imm32 || emit_call(&prog, func, image + addrs[i - 1])) + if (!imm32) return -EINVAL; + offs = x86_call_depth_emit_accounting(&prog, func); } + if (emit_call(&prog, func, image + addrs[i - 1] + offs)) + return -EINVAL; break; + } case BPF_JMP | BPF_TAIL_CALL: if (imm32) @@ -1917,7 +1935,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, /* arg2: lea rsi, [rbp - ctx_cookie_off] */ EMIT4(0x48, 0x8D, 0x75, -run_ctx_off); - if (emit_call(&prog, bpf_trampoline_enter(p), prog)) + if (emit_rsb_call(&prog, bpf_trampoline_enter(p), prog)) return -EINVAL; /* remember prog start time returned by __bpf_prog_enter */ emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); @@ -1938,7 +1956,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, (long) p->insnsi >> 32, (u32) (long) p->insnsi); /* call JITed bpf program or interpreter */ - if (emit_call(&prog, p->bpf_func, prog)) + if (emit_rsb_call(&prog, p->bpf_func, prog)) return -EINVAL; /* @@ -1962,7 +1980,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); /* arg3: lea rdx, [rbp - run_ctx_off] */ EMIT4(0x48, 0x8D, 0x55, -run_ctx_off); - if (emit_call(&prog, bpf_trampoline_exit(p), prog)) + if (emit_rsb_call(&prog, bpf_trampoline_exit(p), prog)) return -EINVAL; *pprog = prog; @@ -2184,6 +2202,11 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i prog = image; EMIT_ENDBR(); + /* + * This is the 
direct-call trampoline, as such it needs accounting + * for the __fentry__ call. + */ + x86_call_depth_emit_accounting(&prog, NULL); EMIT1(0x55); /* push rbp */ EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */ @@ -2210,7 +2233,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i if (flags & BPF_TRAMP_F_CALL_ORIG) { /* arg1: mov rdi, im */ emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); - if (emit_call(&prog, __bpf_tramp_enter, prog)) { + if (emit_rsb_call(&prog, __bpf_tramp_enter, prog)) { ret = -EINVAL; goto cleanup; } @@ -2242,7 +2265,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i EMIT2(0xff, 0xd0); /* call *rax */ } else { /* call original function */ - if (emit_call(&prog, orig_call, prog)) { + if (emit_rsb_call(&prog, orig_call, prog)) { ret = -EINVAL; goto cleanup; } @@ -2286,7 +2309,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i im->ip_epilogue = prog; /* arg1: mov rdi, im */ emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); - if (emit_call(&prog, __bpf_tramp_exit, prog)) { + if (emit_rsb_call(&prog, __bpf_tramp_exit, prog)) { ret = -EINVAL; goto cleanup; } diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 2f82480fd430571895ce194243f37f04b8010071..ea2eb2ec90e2bc96368090bb0478a2c8f591df0e 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -1,4 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 + +#define pr_fmt(fmt) "PCI: " fmt + #include #include #include @@ -37,15 +40,15 @@ static int __init set_nouse_crs(const struct dmi_system_id *id) static int __init set_ignore_seg(const struct dmi_system_id *id) { - printk(KERN_INFO "PCI: %s detected: ignoring ACPI _SEG\n", id->ident); + pr_info("%s detected: ignoring ACPI _SEG\n", id->ident); pci_ignore_seg = true; return 0; } static int __init set_no_e820(const struct dmi_system_id *id) { - printk(KERN_INFO "PCI: %s detected: not clipping E820 regions from _CRS\n", - id->ident); + pr_info("%s detected: not clipping E820 regions from _CRS\n", + id->ident); pci_use_e820 = false; return 0; } @@ -231,10 +234,9 @@ void __init pci_acpi_crs_quirks(void) else if (pci_probe & PCI_USE__CRS) pci_use_crs = true; - printk(KERN_INFO "PCI: %s host bridge windows from ACPI; " - "if necessary, use \"pci=%s\" and report a bug\n", - pci_use_crs ? "Using" : "Ignoring", - pci_use_crs ? "nocrs" : "use_crs"); + pr_info("%s host bridge windows from ACPI; if necessary, use \"pci=%s\" and report a bug\n", + pci_use_crs ? "Using" : "Ignoring", + pci_use_crs ? "nocrs" : "use_crs"); /* "pci=use_e820"/"pci=no_e820" on the kernel cmdline takes precedence */ if (pci_probe & PCI_NO_E820) @@ -242,19 +244,17 @@ void __init pci_acpi_crs_quirks(void) else if (pci_probe & PCI_USE_E820) pci_use_e820 = true; - printk(KERN_INFO "PCI: %s E820 reservations for host bridge windows\n", - pci_use_e820 ? "Using" : "Ignoring"); + pr_info("%s E820 reservations for host bridge windows\n", + pci_use_e820 ? 
"Using" : "Ignoring"); if (pci_probe & (PCI_NO_E820 | PCI_USE_E820)) - printk(KERN_INFO "PCI: Please notify linux-pci@vger.kernel.org so future kernels can this automatically\n"); + pr_info("Please notify linux-pci@vger.kernel.org so future kernels can do this automatically\n"); } #ifdef CONFIG_PCI_MMCONFIG static int check_segment(u16 seg, struct device *dev, char *estr) { if (seg) { - dev_err(dev, - "%s can't access PCI configuration " - "space under this host bridge.\n", + dev_err(dev, "%s can't access configuration space under this host bridge\n", estr); return -EIO; } @@ -264,9 +264,7 @@ static int check_segment(u16 seg, struct device *dev, char *estr) * just can't access extended configuration space of * devices under this host bridge. */ - dev_warn(dev, - "%s can't access extended PCI configuration " - "space under this bridge.\n", + dev_warn(dev, "%s can't access extended configuration space under this bridge\n", estr); return 0; @@ -421,9 +419,8 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) root->segment = domain = 0; if (domain && !pci_domains_supported) { - printk(KERN_WARNING "pci_bus %04x:%02x: " - "ignored (multiple domains not supported)\n", - domain, busnum); + pr_warn("pci_bus %04x:%02x: ignored (multiple domains not supported)\n", + domain, busnum); return NULL; } @@ -491,7 +488,7 @@ int __init pci_acpi_init(void) if (acpi_noirq) return -ENODEV; - printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n"); + pr_info("Using ACPI for IRQ routing\n"); acpi_irq_penalty_init(); pcibios_enable_irq = acpi_pci_irq_enable; pcibios_disable_irq = acpi_pci_irq_disable; @@ -503,7 +500,7 @@ int __init pci_acpi_init(void) * also do it here in case there are still broken drivers that * don't use pci_enable_device(). */ - printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n"); + pr_info("Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n"); for_each_pci_dev(dev) acpi_pci_irq_enable(dev); } diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 7e51c14a1ef06d83d854a0c165ec605bbf3919db..55d9caf66401a445fdd27e18e49010988e6a3337 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -305,6 +305,50 @@ static void __init efi_clean_memmap(void) } } +/* + * Firmware can use EfiMemoryMappedIO to request that MMIO regions be + * mapped by the OS so they can be accessed by EFI runtime services, but + * should have no other significance to the OS (UEFI r2.10, sec 7.2). + * However, most bootloaders and EFI stubs convert EfiMemoryMappedIO + * regions to E820_TYPE_RESERVED entries, which prevent Linux from + * allocating space from them (see remove_e820_regions()). + * + * Some platforms use EfiMemoryMappedIO entries for PCI MMCONFIG space and + * PCI host bridge windows, which means Linux can't allocate BAR space for + * hot-added devices. + * + * Remove large EfiMemoryMappedIO regions from the E820 map to avoid this + * problem. + * + * Retain small EfiMemoryMappedIO regions because on some platforms, these + * describe non-window space that's included in host bridge _CRS. If we + * assign that space to PCI devices, they don't work. 
+ */ +static void __init efi_remove_e820_mmio(void) +{ + efi_memory_desc_t *md; + u64 size, start, end; + int i = 0; + + for_each_efi_memory_desc(md) { + if (md->type == EFI_MEMORY_MAPPED_IO) { + size = md->num_pages << EFI_PAGE_SHIFT; + start = md->phys_addr; + end = start + size - 1; + if (size >= 256*1024) { + pr_info("Remove mem%02u: MMIO range=[0x%08llx-0x%08llx] (%lluMB) from e820 map\n", + i, start, end, size >> 20); + e820__range_remove(start, size, + E820_TYPE_RESERVED, 1); + } else { + pr_info("Not removing mem%02u: MMIO range=[0x%08llx-0x%08llx] (%lluKB) from e820 map\n", + i, start, end, size >> 10); + } + } + i++; + } +} + void __init efi_print_memmap(void) { efi_memory_desc_t *md; @@ -476,6 +520,8 @@ void __init efi_init(void) set_bit(EFI_RUNTIME_SERVICES, &efi.flags); efi_clean_memmap(); + efi_remove_e820_mmio(); + if (efi_enabled(EFI_DBG)) efi_print_memmap(); } diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c index e94e0050a583a8fe7d865093e9995a84c2fcdf32..6f955eb1e1631a04df52708ea9792bb0328d1ca8 100644 --- a/arch/x86/power/hibernate.c +++ b/arch/x86/power/hibernate.c @@ -159,7 +159,7 @@ int relocate_restore_code(void) if (!relocated_restore_code) return -ENOMEM; - memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE); + __memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE); /* Make the page containing the relocated code executable */ pgd = (pgd_t *)__va(read_cr3_pa()) + diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 468e8c8cce01eff6498a39410d6f20461dcb4a3e..5b137966287709d8640e851352e5f58ea3811c9a 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1210,7 +1210,7 @@ static void __init xen_setup_gdt(int cpu) pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot; pv_ops.cpu.load_gdt = xen_load_gdt_boot; - switch_to_new_gdt(cpu); + switch_gdt_and_percpu_base(cpu); pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry; pv_ops.cpu.load_gdt = xen_load_gdt; diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 54f577c13afa1ecb9349332ed22a7d7ae471be63..5b5484d707b2e59bb69d0ca89c11645c7a90abf7 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -386,8 +386,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) #else -#define kern_addr_valid(addr) (1) - extern void update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep); diff --git a/block/bsg.c b/block/bsg.c index 8eba57b9bb461135c557106a8b6ede12b9579e98..30fcc865ef4f0a49b8dbb424dd1152d15410463e 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -235,7 +235,7 @@ out_put_device: } EXPORT_SYMBOL_GPL(bsg_register_queue); -static char *bsg_devnode(struct device *dev, umode_t *mode) +static char *bsg_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev)); } diff --git a/block/genhd.c b/block/genhd.c index 08f76135a637357069fd4e2f8ae2cb8bc3c371ab..ab3cbe44196f77c1b69a6847813997d3396e96b0 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1189,9 +1189,9 @@ static void disk_release(struct device *dev) iput(disk->part0->bd_inode); /* frees the disk */ } -static int block_uevent(struct device *dev, struct kobj_uevent_env *env) +static int block_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct gendisk *disk = dev_to_disk(dev); + const struct gendisk *disk = dev_to_disk(dev); return add_uevent_var(env, "DISKSEQ=%llu", disk->diskseq); } diff --git 
a/crypto/Kconfig b/crypto/Kconfig index d779667671b23f03c57b5e887627f06da775ef2f..9c86f704515761c47bbafc579827651b2810cf9b 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -175,9 +175,6 @@ config CRYPTO_MANAGER_EXTRA_TESTS This is intended for developer use only, as these tests take much longer to run than the normal self tests. -config CRYPTO_GF128MUL - tristate - config CRYPTO_NULL tristate "Null algorithms" select CRYPTO_NULL2 @@ -714,9 +711,9 @@ config CRYPTO_KEYWRAP config CRYPTO_LRW tristate "LRW (Liskov Rivest Wagner)" + select CRYPTO_LIB_GF128MUL select CRYPTO_SKCIPHER select CRYPTO_MANAGER - select CRYPTO_GF128MUL select CRYPTO_ECB help LRW (Liskov Rivest Wagner) mode @@ -926,8 +923,8 @@ config CRYPTO_CMAC config CRYPTO_GHASH tristate "GHASH" - select CRYPTO_GF128MUL select CRYPTO_HASH + select CRYPTO_LIB_GF128MUL help GCM GHASH function (NIST SP800-38D) @@ -967,8 +964,8 @@ config CRYPTO_MICHAEL_MIC config CRYPTO_POLYVAL tristate - select CRYPTO_GF128MUL select CRYPTO_HASH + select CRYPTO_LIB_GF128MUL help POLYVAL hash function for HCTR2 diff --git a/crypto/Makefile b/crypto/Makefile index 303b21c43df05a5e4784b355150daadbd9ec8d66..d0126c915834b2f09b3ed42ab712a69b26b47258 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -85,7 +85,6 @@ obj-$(CONFIG_CRYPTO_WP512) += wp512.o CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o CFLAGS_blake2b_generic.o := -Wframe-larger-than=4096 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105930 -obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o obj-$(CONFIG_CRYPTO_ECB) += ecb.o obj-$(CONFIG_CRYPTO_CBC) += cbc.o obj-$(CONFIG_CRYPTO_CFB) += cfb.o diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index 27ab2793181374ff142c37cdf3106a878a0eabd8..666474b81c6aa5655fd48eee511cc57203dd1410 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -48,11 +48,11 @@ */ #include +#include #include #include #include #include -#include #include #include diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c index 205c2c257d4926b327af3f806088e45dfa4fbdd9..a3b342f92fab6b910cd675dd9de8cc217bc66f5e 100644 --- a/crypto/aes_ti.c +++ b/crypto/aes_ti.c @@ -6,7 +6,7 @@ */ #include -#include +#include #include static int aesti_set_key(struct crypto_tfm *tfm, const u8 *in_key, diff --git a/crypto/af_alg.c b/crypto/af_alg.c index e893c0f6c8799a78f09b125702ace398ae43938d..0a4fa2a429e22a60d9fc3845ba2d21b29c0fcb60 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include #include #include @@ -19,6 +21,10 @@ #include #include #include +#include +#include +#include +#include struct alg_type_list { const struct af_alg_type *type; @@ -222,6 +228,129 @@ out: return err; } +#ifdef CONFIG_KEYS + +static const u8 *key_data_ptr_user(const struct key *key, + unsigned int *datalen) +{ + const struct user_key_payload *ukp; + + ukp = user_key_payload_locked(key); + if (IS_ERR_OR_NULL(ukp)) + return ERR_PTR(-EKEYREVOKED); + + *datalen = key->datalen; + + return ukp->data; +} + +static const u8 *key_data_ptr_encrypted(const struct key *key, + unsigned int *datalen) +{ + const struct encrypted_key_payload *ekp; + + ekp = dereference_key_locked(key); + if (IS_ERR_OR_NULL(ekp)) + return ERR_PTR(-EKEYREVOKED); + + *datalen = ekp->decrypted_datalen; + + return ekp->decrypted_data; +} + +static const u8 *key_data_ptr_trusted(const struct key *key, + unsigned int *datalen) +{ + const struct trusted_key_payload *tkp; + + 
tkp = dereference_key_locked(key); + if (IS_ERR_OR_NULL(tkp)) + return ERR_PTR(-EKEYREVOKED); + + *datalen = tkp->key_len; + + return tkp->key; +} + +static struct key *lookup_key(key_serial_t serial) +{ + key_ref_t key_ref; + + key_ref = lookup_user_key(serial, 0, KEY_NEED_SEARCH); + if (IS_ERR(key_ref)) + return ERR_CAST(key_ref); + + return key_ref_to_ptr(key_ref); +} + +static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval, + unsigned int optlen) +{ + const struct af_alg_type *type = ask->type; + u8 *key_data = NULL; + unsigned int key_datalen; + key_serial_t serial; + struct key *key; + const u8 *ret; + int err; + + if (optlen != sizeof(serial)) + return -EINVAL; + + if (copy_from_sockptr(&serial, optval, optlen)) + return -EFAULT; + + key = lookup_key(serial); + if (IS_ERR(key)) + return PTR_ERR(key); + + down_read(&key->sem); + + ret = ERR_PTR(-ENOPROTOOPT); + if (!strcmp(key->type->name, "user") || + !strcmp(key->type->name, "logon")) { + ret = key_data_ptr_user(key, &key_datalen); + } else if (IS_REACHABLE(CONFIG_ENCRYPTED_KEYS) && + !strcmp(key->type->name, "encrypted")) { + ret = key_data_ptr_encrypted(key, &key_datalen); + } else if (IS_REACHABLE(CONFIG_TRUSTED_KEYS) && + !strcmp(key->type->name, "trusted")) { + ret = key_data_ptr_trusted(key, &key_datalen); + } + + if (IS_ERR(ret)) { + up_read(&key->sem); + return PTR_ERR(ret); + } + + key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL); + if (!key_data) { + up_read(&key->sem); + return -ENOMEM; + } + + memcpy(key_data, ret, key_datalen); + + up_read(&key->sem); + + err = type->setkey(ask->private, key_data, key_datalen); + + sock_kzfree_s(&ask->sk, key_data, key_datalen); + + return err; +} + +#else + +static inline int alg_setkey_by_key_serial(struct alg_sock *ask, + sockptr_t optval, + unsigned int optlen) +{ + return -ENOPROTOOPT; +} + +#endif + static int alg_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { @@ -242,12 +371,16 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, switch (optname) { case ALG_SET_KEY: + case ALG_SET_KEY_BY_KEY_SERIAL: if (sock->state == SS_CONNECTED) goto unlock; if (!type->setkey) goto unlock; - err = alg_setkey(sk, optval, optlen); + if (optname == ALG_SET_KEY_BY_KEY_SERIAL) + err = alg_setkey_by_key_serial(ask, optval, optlen); + else + err = alg_setkey(sk, optval, optlen); break; case ALG_SET_AEAD_AUTHSIZE: if (sock->state == SS_CONNECTED) diff --git a/crypto/algapi.c b/crypto/algapi.c index 5c69ff8e8fa5c1dab176cd476f19907f45893dfe..d08f864f08beef69e787473faa03ea67998a05c0 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -222,12 +222,65 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, } EXPORT_SYMBOL_GPL(crypto_remove_spawns); +static void crypto_alg_finish_registration(struct crypto_alg *alg, + bool fulfill_requests, + struct list_head *algs_to_put) +{ + struct crypto_alg *q; + + list_for_each_entry(q, &crypto_alg_list, cra_list) { + if (q == alg) + continue; + + if (crypto_is_moribund(q)) + continue; + + if (crypto_is_larval(q)) { + struct crypto_larval *larval = (void *)q; + + /* + * Check to see if either our generic name or + * specific name can satisfy the name requested + * by the larval entry q. 
+ */ + if (strcmp(alg->cra_name, q->cra_name) && + strcmp(alg->cra_driver_name, q->cra_name)) + continue; + + if (larval->adult) + continue; + if ((q->cra_flags ^ alg->cra_flags) & larval->mask) + continue; + + if (fulfill_requests && crypto_mod_get(alg)) + larval->adult = alg; + else + larval->adult = ERR_PTR(-EAGAIN); + + continue; + } + + if (strcmp(alg->cra_name, q->cra_name)) + continue; + + if (strcmp(alg->cra_driver_name, q->cra_driver_name) && + q->cra_priority > alg->cra_priority) + continue; + + crypto_remove_spawns(q, algs_to_put, alg); + } + + crypto_notify(CRYPTO_MSG_ALG_LOADED, alg); +} + static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg) { struct crypto_larval *larval; - if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER)) - return NULL; + if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER) || + IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) || + (alg->cra_flags & CRYPTO_ALG_INTERNAL)) + return NULL; /* No self-test needed */ larval = crypto_larval_alloc(alg->cra_name, alg->cra_flags | CRYPTO_ALG_TESTED, 0); @@ -248,7 +301,8 @@ static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg) return larval; } -static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) +static struct crypto_larval * +__crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put) { struct crypto_alg *q; struct crypto_larval *larval; @@ -259,9 +313,6 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) INIT_LIST_HEAD(&alg->cra_users); - /* No cheating! */ - alg->cra_flags &= ~CRYPTO_ALG_TESTED; - ret = -EEXIST; list_for_each_entry(q, &crypto_alg_list, cra_list) { @@ -288,12 +339,17 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) list_add(&alg->cra_list, &crypto_alg_list); - if (larval) + crypto_stats_init(alg); + + if (larval) { + /* No cheating! */ + alg->cra_flags &= ~CRYPTO_ALG_TESTED; + list_add(&larval->alg.cra_list, &crypto_alg_list); - else + } else { alg->cra_flags |= CRYPTO_ALG_TESTED; - - crypto_stats_init(alg); + crypto_alg_finish_registration(alg, true, algs_to_put); + } out: return larval; @@ -341,7 +397,10 @@ found: alg->cra_flags |= CRYPTO_ALG_TESTED; - /* Only satisfy larval waiters if we are the best. */ + /* + * If a higher-priority implementation of the same algorithm is + * currently being tested, then don't fulfill request larvals. + */ best = true; list_for_each_entry(q, &crypto_alg_list, cra_list) { if (crypto_is_moribund(q) || !crypto_is_larval(q)) @@ -356,47 +415,7 @@ found: } } - list_for_each_entry(q, &crypto_alg_list, cra_list) { - if (q == alg) - continue; - - if (crypto_is_moribund(q)) - continue; - - if (crypto_is_larval(q)) { - struct crypto_larval *larval = (void *)q; - - /* - * Check to see if either our generic name or - * specific name can satisfy the name requested - * by the larval entry q. 
- */ - if (strcmp(alg->cra_name, q->cra_name) && - strcmp(alg->cra_driver_name, q->cra_name)) - continue; - - if (larval->adult) - continue; - if ((q->cra_flags ^ alg->cra_flags) & larval->mask) - continue; - - if (best && crypto_mod_get(alg)) - larval->adult = alg; - else - larval->adult = ERR_PTR(-EAGAIN); - - continue; - } - - if (strcmp(alg->cra_name, q->cra_name)) - continue; - - if (strcmp(alg->cra_driver_name, q->cra_driver_name) && - q->cra_priority > alg->cra_priority) - continue; - - crypto_remove_spawns(q, &list, alg); - } + crypto_alg_finish_registration(alg, best, &list); complete: complete_all(&test->completion); @@ -423,7 +442,8 @@ EXPORT_SYMBOL_GPL(crypto_remove_final); int crypto_register_alg(struct crypto_alg *alg) { struct crypto_larval *larval; - bool test_started; + LIST_HEAD(algs_to_put); + bool test_started = false; int err; alg->cra_flags &= ~CRYPTO_ALG_DEAD; @@ -432,17 +452,18 @@ int crypto_register_alg(struct crypto_alg *alg) return err; down_write(&crypto_alg_sem); - larval = __crypto_register_alg(alg); - test_started = static_key_enabled(&crypto_boot_test_finished); - if (!IS_ERR_OR_NULL(larval)) + larval = __crypto_register_alg(alg, &algs_to_put); + if (!IS_ERR_OR_NULL(larval)) { + test_started = crypto_boot_test_finished(); larval->test_started = test_started; + } up_write(&crypto_alg_sem); - if (IS_ERR_OR_NULL(larval)) + if (IS_ERR(larval)) return PTR_ERR(larval); - if (test_started) crypto_wait_for_test(larval); + crypto_remove_final(&algs_to_put); return 0; } EXPORT_SYMBOL_GPL(crypto_register_alg); @@ -619,6 +640,7 @@ int crypto_register_instance(struct crypto_template *tmpl, struct crypto_larval *larval; struct crypto_spawn *spawn; u32 fips_internal = 0; + LIST_HEAD(algs_to_put); int err; err = crypto_check_alg(&inst->alg); @@ -650,7 +672,7 @@ int crypto_register_instance(struct crypto_template *tmpl, inst->alg.cra_flags |= (fips_internal & CRYPTO_ALG_FIPS_INTERNAL); - larval = __crypto_register_alg(&inst->alg); + larval = __crypto_register_alg(&inst->alg, &algs_to_put); if (IS_ERR(larval)) goto unlock; else if (larval) @@ -662,15 +684,12 @@ int crypto_register_instance(struct crypto_template *tmpl, unlock: up_write(&crypto_alg_sem); - err = PTR_ERR(larval); - if (IS_ERR_OR_NULL(larval)) - goto err; - - crypto_wait_for_test(larval); - err = 0; - -err: - return err; + if (IS_ERR(larval)) + return PTR_ERR(larval); + if (larval) + crypto_wait_for_test(larval); + crypto_remove_final(&algs_to_put); + return 0; } EXPORT_SYMBOL_GPL(crypto_register_instance); @@ -1234,6 +1253,9 @@ EXPORT_SYMBOL_GPL(crypto_stats_skcipher_decrypt); static void __init crypto_start_tests(void) { + if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)) + return; + for (;;) { struct crypto_larval *larval = NULL; struct crypto_alg *q; @@ -1267,7 +1289,7 @@ static void __init crypto_start_tests(void) crypto_wait_for_test(larval); } - static_branch_enable(&crypto_boot_test_finished); + set_crypto_boot_test_finished(); } static int __init crypto_algapi_init(void) diff --git a/crypto/algboss.c b/crypto/algboss.c index eb5fe84efb83e69cba4335f896b6a8f740630de2..0de1e66979498eb3b49b3862c37879e619fa0f68 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -175,18 +175,10 @@ static int cryptomgr_test(void *data) { struct crypto_test_param *param = data; u32 type = param->type; - int err = 0; - -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS - goto skiptest; -#endif - - if (type & CRYPTO_ALG_TESTED) - goto skiptest; + int err; err = alg_test(param->driver, param->alg, type, CRYPTO_ALG_TESTED); 
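crypto_register_alg() above now keys off crypto_boot_test_finished(), and crypto/api.c below renames the underlying static key to __crypto_boot_test_finished, guarded by CONFIG_CRYPTO_MANAGER_DISABLE_TESTS. A plausible sketch of the accessor pair this implies -- presumably living in crypto/internal.h, which is outside this excerpt -- where compiling the tests out collapses the predicate to constant true:

#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
DECLARE_STATIC_KEY_FALSE(__crypto_boot_test_finished);

static inline bool crypto_boot_test_finished(void)
{
	return static_branch_likely(&__crypto_boot_test_finished);
}

static inline void set_crypto_boot_test_finished(void)
{
	static_branch_enable(&__crypto_boot_test_finished);
}
#else
/* Tests compiled out: behave as if boot-time testing already finished. */
static inline bool crypto_boot_test_finished(void)
{
	return true;
}

static inline void set_crypto_boot_test_finished(void)
{
}
#endif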
-skiptest: crypto_alg_tested(param->driver, err); kfree(param); @@ -197,7 +189,9 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg) { struct task_struct *thread; struct crypto_test_param *param; - u32 type; + + if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)) + return NOTIFY_DONE; if (!try_module_get(THIS_MODULE)) goto err; @@ -208,13 +202,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg) memcpy(param->driver, alg->cra_driver_name, sizeof(param->driver)); memcpy(param->alg, alg->cra_name, sizeof(param->alg)); - type = alg->cra_flags; - - /* Do not test internal algorithms. */ - if (type & CRYPTO_ALG_INTERNAL) - type |= CRYPTO_ALG_TESTED; - - param->type = type; + param->type = alg->cra_flags; thread = kthread_run(cryptomgr_test, param, "cryptomgr_test"); if (IS_ERR(thread)) diff --git a/crypto/anubis.c b/crypto/anubis.c index 5da0241ef453c89557decd0f1cddefd137165047..9f0cf61bbc6e2638140045f3954a69e77498a4ea 100644 --- a/crypto/anubis.c +++ b/crypto/anubis.c @@ -29,11 +29,11 @@ * */ +#include #include #include #include #include -#include #include #define ANUBIS_MIN_KEY_SIZE 16 diff --git a/crypto/api.c b/crypto/api.c index 64f2d365a8e94d5bcd9a199e44b465da105030a4..b022702f643672ea6ad2873bb1b95b5bc828ade2 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -31,8 +31,10 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem); BLOCKING_NOTIFIER_HEAD(crypto_chain); EXPORT_SYMBOL_GPL(crypto_chain); -DEFINE_STATIC_KEY_FALSE(crypto_boot_test_finished); -EXPORT_SYMBOL_GPL(crypto_boot_test_finished); +#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS +DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished); +EXPORT_SYMBOL_GPL(__crypto_boot_test_finished); +#endif static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); @@ -172,9 +174,6 @@ void crypto_wait_for_test(struct crypto_larval *larval) err = wait_for_completion_killable(&larval->completion); WARN_ON(err); - if (!err) - crypto_notify(CRYPTO_MSG_ALG_LOADED, larval); - out: crypto_larval_kill(&larval->alg); } @@ -205,7 +204,7 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) struct crypto_larval *larval = (void *)alg; long timeout; - if (!static_branch_likely(&crypto_boot_test_finished)) + if (!crypto_boot_test_finished()) crypto_start_test(larval); timeout = wait_for_completion_killable_timeout( diff --git a/crypto/blowfish_common.c b/crypto/blowfish_common.c index 1c072012baff4813c52555f1b36a243cdfb9e55e..c0208ce269a337015eba68122ce38fa5132e1012 100644 --- a/crypto/blowfish_common.c +++ b/crypto/blowfish_common.c @@ -14,11 +14,12 @@ * Copyright (c) Kyle McMartin * Copyright (c) 2002 James Morris */ + +#include #include #include #include #include -#include #include #include diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c index 003b52c6880eabbc1af53bb2999452a420ae7962..0e74c7242e77e409ebfa7b71d1759a5f3f10081e 100644 --- a/crypto/blowfish_generic.c +++ b/crypto/blowfish_generic.c @@ -11,11 +11,12 @@ * Copyright (c) Kyle McMartin * Copyright (c) 2002 James Morris */ + +#include #include #include #include #include -#include #include #include diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index fd1a88af9e77fd091292e72b6c27753ca5d24331..c04670cf51acfb7fe089ccb4005c442c5f8dda6a 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -9,7 +9,7 @@ * https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html */ -#include +#include #include #include #include diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c index 
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index 0257c14cefc25abe032e0d302aa8e40a05e6432c..085a1eedae03bbfed08ab4b505fb0e5d1be10d7a 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -14,8 +14,8 @@
 
 #include <asm/unaligned.h>
+#include <crypto/algapi.h>
 #include <linux/init.h>
-#include <linux/crypto.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/string.h>
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index 75346380aa0bc7d8b5e910b913036f2954e81565..34f1ab53e3a713ba44a8442b917e95e8f56f7edd 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -11,8 +11,8 @@
 
 #include <asm/unaligned.h>
+#include <crypto/algapi.h>
 #include <linux/init.h>
-#include <linux/crypto.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/string.h>
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 6b815ece51c6a930dc472fa3624e85591beb64d5..30dbae72728f73d4eaab3778cb02aceff260df20 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -218,7 +218,7 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
 		cryptlen += ilen;
 	}
 
-	ahash_request_set_crypt(ahreq, plain, pctx->odata, cryptlen);
+	ahash_request_set_crypt(ahreq, plain, odata, cryptlen);
 	err = crypto_ahash_finup(ahreq);
 out:
 	return err;
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 668095eca0fafb52137e33ee70a97c2695b3180b..ca3a40fc7da911816b956e9aa37d6534592c493d 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -68,11 +68,12 @@ struct aead_instance_ctx {
 
 struct cryptd_skcipher_ctx {
 	refcount_t refcnt;
-	struct crypto_sync_skcipher *child;
+	struct crypto_skcipher *child;
 };
 
 struct cryptd_skcipher_request_ctx {
 	crypto_completion_t complete;
+	struct skcipher_request req;
 };
 
 struct cryptd_hash_ctx {
@@ -227,13 +228,13 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
 				  const u8 *key, unsigned int keylen)
 {
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
-	struct crypto_sync_skcipher *child = ctx->child;
+	struct crypto_skcipher *child = ctx->child;
 
-	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_sync_skcipher_set_flags(child,
-				       crypto_skcipher_get_flags(parent) &
-				       CRYPTO_TFM_REQ_MASK);
-	return crypto_sync_skcipher_setkey(child, key, keylen);
+	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(child,
+				  crypto_skcipher_get_flags(parent) &
+				  CRYPTO_TFM_REQ_MASK);
+	return crypto_skcipher_setkey(child, key, keylen);
 }
 
 static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
@@ -258,13 +259,13 @@ static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_sync_skcipher *child = ctx->child;
-	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
+	struct skcipher_request *subreq = &rctx->req;
+	struct crypto_skcipher *child = ctx->child;
 
 	if (unlikely(err == -EINPROGRESS))
 		goto out;
 
-	skcipher_request_set_sync_tfm(subreq, child);
+	skcipher_request_set_tfm(subreq, child);
 	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
 				      NULL, NULL);
 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
@@ -286,13 +287,13 @@ static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_sync_skcipher *child = ctx->child;
-	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
+	struct skcipher_request *subreq = &rctx->req;
+	struct crypto_skcipher *child = ctx->child;
 
 	if (unlikely(err == -EINPROGRESS))
 		goto out;
 
-	skcipher_request_set_sync_tfm(subreq, child);
+	skcipher_request_set_tfm(subreq, child);
 	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
 				      NULL, NULL);
 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
@@ -343,9 +344,10 @@ static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
-	ctx->child = (struct crypto_sync_skcipher *)cipher;
+	ctx->child = cipher;
 	crypto_skcipher_set_reqsize(
-		tfm, sizeof(struct cryptd_skcipher_request_ctx));
+		tfm, sizeof(struct cryptd_skcipher_request_ctx) +
+		     crypto_skcipher_reqsize(cipher));
 	return 0;
 }
 
@@ -353,7 +355,7 @@ static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
 {
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	crypto_free_sync_skcipher(ctx->child);
+	crypto_free_skcipher(ctx->child);
 }
 
 static void cryptd_skcipher_free(struct skcipher_instance *inst)
@@ -931,7 +933,7 @@ struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
 {
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
 
-	return &ctx->child->base;
+	return ctx->child;
 }
 EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
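/*
 * Editor's illustration -- not from the patch.  The pattern cryptd adopts
 * above: instead of a sync-only child driven through an on-stack
 * SYNC_SKCIPHER_REQUEST_ON_STACK(), the wrapper reserves room for the
 * child's skcipher_request behind its own request context, so a single
 * allocation serves both.  "wrap_*" names are invented for this sketch.
 */
#include <crypto/internal/skcipher.h>

struct wrap_request_ctx {
	crypto_completion_t complete;
	struct skcipher_request subreq;	/* child request; must be last */
};

static int wrap_init_tfm(struct crypto_skcipher *tfm,
			 struct crypto_skcipher *child)
{
	/* Size our request context to also hold the child's request. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct wrap_request_ctx) +
					 crypto_skcipher_reqsize(child));
	return 0;
}

static int wrap_encrypt(struct skcipher_request *req,
			struct crypto_skcipher *child)
{
	struct wrap_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	/* Forward the caller's buffers to the embedded child request. */
	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);
	return crypto_skcipher_encrypt(subreq);
}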
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index c85354a5e94c7bd5befc80f754ce8297c1affc14..1274e18d3eb901500e92d09cbf232170eb1da07a 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -8,11 +8,11 @@
  */
 
 #include <asm/byteorder.h>
+#include <crypto/algapi.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/errno.h>
-#include <linux/crypto.h>
 
 #include <crypto/internal/des.h>
diff --git a/crypto/dh.c b/crypto/dh.c
index 99c3b2ef7adca58c5043838961e7c2cce6d29d9c..e39c1bde1ac07bdd1606eec6161789f3786a8e57 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -318,6 +318,9 @@ static int dh_safe_prime_init_tfm(struct crypto_kpp *tfm)
 	if (IS_ERR(tfm_ctx->dh_tfm))
 		return PTR_ERR(tfm_ctx->dh_tfm);
 
+	kpp_set_reqsize(tfm, sizeof(struct kpp_request) +
+			     crypto_kpp_reqsize(tfm_ctx->dh_tfm));
+
 	return 0;
 }
 
@@ -593,7 +596,6 @@ static int __maybe_unused __dh_safe_prime_create(
 	inst->alg.max_size = dh_safe_prime_max_size;
 	inst->alg.init = dh_safe_prime_init_tfm;
 	inst->alg.exit = dh_safe_prime_exit_tfm;
-	inst->alg.reqsize = sizeof(struct kpp_request) + dh_alg->reqsize;
 	inst->alg.base.cra_priority = dh_alg->base.cra_priority;
 	inst->alg.base.cra_module = THIS_MODULE;
 	inst->alg.base.cra_ctxsize = sizeof(struct dh_safe_prime_tfm_ctx);
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index 76a04d000c0d3fd55c1521862208c04def6ce7bd..95a16e88899bafbe6a86417a4274561e8ed2f3eb 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -43,10 +43,10 @@
  */
 
 #include <asm/byteorder.h>
+#include <crypto/algapi.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/crypto.h>
 
 #define ROUNDS 16
diff --git a/crypto/internal.h b/crypto/internal.h
index c08385571853ee677fc415022980c889917191a4..932f0aafddc32dc3974e71bdb8d0f37ad9e3aa80 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -47,7 +47,25 @@ extern struct list_head crypto_alg_list;
 extern struct rw_semaphore crypto_alg_sem;
 extern struct blocking_notifier_head crypto_chain;
 
-DECLARE_STATIC_KEY_FALSE(crypto_boot_test_finished);
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+static inline bool crypto_boot_test_finished(void)
+{
+	return true;
+}
+static inline void set_crypto_boot_test_finished(void)
+{
+}
+#else
+DECLARE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
+static inline bool crypto_boot_test_finished(void)
+{
+	return static_branch_likely(&__crypto_boot_test_finished);
+}
+static inline void set_crypto_boot_test_finished(void)
+{
+	static_branch_enable(&__crypto_boot_test_finished);
+}
+#endif /* !CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
 
 #ifdef CONFIG_PROC_FS
 void __init crypto_init_proc(void);
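/*
 * Editor's illustration -- not from the patch.  Why dh and rsa-pkcs1pad
 * now size their requests in ->init: the child transform only exists once
 * the wrapper tfm has been instantiated, so the request size (wrapper
 * context plus the child's request and its context) is computed there
 * rather than copied from the algorithm's static reqsize at template
 * creation time.  "wrap_kpp_*" names are invented for this sketch.
 */
#include <crypto/internal/kpp.h>
#include <linux/err.h>

struct wrap_kpp_ctx {
	struct crypto_kpp *child;
};

static int wrap_kpp_init_tfm(struct crypto_kpp *tfm)
{
	struct wrap_kpp_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->child = crypto_alloc_kpp("dh", 0, 0);
	if (IS_ERR(ctx->child))
		return PTR_ERR(ctx->child);

	/* Reserve the child's request (and its context) behind ours. */
	kpp_set_reqsize(tfm, sizeof(struct kpp_request) +
			     crypto_kpp_reqsize(ctx->child));
	return 0;
}

static void wrap_kpp_exit_tfm(struct crypto_kpp *tfm)
{
	struct wrap_kpp_ctx *ctx = kpp_tfm_ctx(tfm);

	crypto_free_kpp(ctx->child);
}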
diff --git a/crypto/kdf_sp800108.c b/crypto/kdf_sp800108.c
index 58edf7797abfb017a529b3a7e7f384d0d1a2d64f..c3f9938e1ad27fd838508ca5d3e93ab136e350dd 100644
--- a/crypto/kdf_sp800108.c
+++ b/crypto/kdf_sp800108.c
@@ -125,9 +125,13 @@ static const struct kdf_testvec kdf_ctr_hmac_sha256_tv_template[] = {
 
 static int __init crypto_kdf108_init(void)
 {
-	int ret = kdf_test(&kdf_ctr_hmac_sha256_tv_template[0], "hmac(sha256)",
-			   crypto_kdf108_setkey, crypto_kdf108_ctr_generate);
+	int ret;
 
+	if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+		return 0;
+
+	ret = kdf_test(&kdf_ctr_hmac_sha256_tv_template[0], "hmac(sha256)",
+		       crypto_kdf108_setkey, crypto_kdf108_ctr_generate);
 	if (ret) {
 		if (fips_enabled)
 			panic("alg: self-tests for CTR-KDF (hmac(sha256)) failed (rc=%d)\n",
@@ -136,7 +140,7 @@ static int __init crypto_kdf108_init(void)
 
 		WARN(1, "alg: self-tests for CTR-KDF (hmac(sha256)) failed (rc=%d)\n",
 		     ret);
-	} else {
+	} else if (fips_enabled) {
 		pr_info("alg: self-tests for CTR-KDF (hmac(sha256)) passed\n");
 	}
 
diff --git a/crypto/khazad.c b/crypto/khazad.c
index f19339954c89e0aee5a08d9cde36c18fe022e1d3..70cafe73f97405ca52044f18446c2736fb4c7a8d 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -19,11 +19,11 @@
  *
  */
 
+#include <crypto/algapi.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <asm/byteorder.h>
-#include <linux/crypto.h>
 #include <linux/types.h>
 
 #define KHAZAD_KEY_SIZE		16
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index e75728f87ce5620ff2aa159f14fdc820492f414a..6ee5b8a060c0674e1e9730a53466331d4158eca2 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -579,6 +579,10 @@ static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
 		return PTR_ERR(child_tfm);
 
 	ctx->child = child_tfm;
+
+	akcipher_set_reqsize(tfm, sizeof(struct pkcs1pad_request) +
+				  crypto_akcipher_reqsize(child_tfm));
+
 	return 0;
 }
 
@@ -674,7 +678,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
 	inst->alg.set_pub_key = pkcs1pad_set_pub_key;
 	inst->alg.set_priv_key = pkcs1pad_set_priv_key;
 	inst->alg.max_size = pkcs1pad_get_max_size;
-	inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;
 
 	inst->free = pkcs1pad_free;
 
diff --git a/crypto/seed.c b/crypto/seed.c
index 27720140820ef4e9f758bed25f40899ada6adabf..d0506ade2a5f87fbaf2fbc915df0dd53e33c2f5e 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -8,11 +8,11 @@
  * Copyright (C) 2007 Korea Information Security Agency (KISA).
  */
 
+#include <crypto/algapi.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/crypto.h>
 #include <asm/byteorder.h>
 
 #define SEED_NUM_KCONSTANTS	16
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 45f98b750053e72d7bc4ca9aa069f965291a5c18..c6bca47931e21ec86a8196097f05257bb025654e 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -7,11 +7,11 @@
  * Copyright (C) 2002 Dag Arne Osvik
  */
 
+#include <crypto/algapi.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <asm/unaligned.h>
-#include <linux/crypto.h>
 #include <linux/types.h>
 #include <crypto/serpent.h>
diff --git a/crypto/shash.c b/crypto/shash.c
index 4c88e63b3350fc7f6d6e76dec62b41d4ca064cf8..868b6ba2b3b74e8ac6e644c707fb11a854767abb 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -18,26 +18,16 @@
 
 #include "internal.h"
 
+#define MAX_SHASH_ALIGNMASK 63
+
 static const struct crypto_type crypto_shash_type;
 
-static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
-			   unsigned int keylen)
+int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+		    unsigned int keylen)
 {
 	return -ENOSYS;
 }
-
-/*
- * Check whether an shash algorithm has a setkey function.
- *
- * For CFI compatibility, this must not be an inline function.
This is because - * when CFI is enabled, modules won't get the same address for shash_no_setkey - * (if it were exported, which inlining would require) as the core kernel will. - */ -bool crypto_shash_alg_has_setkey(struct shash_alg *alg) -{ - return alg->setkey != shash_no_setkey; -} -EXPORT_SYMBOL_GPL(crypto_shash_alg_has_setkey); +EXPORT_SYMBOL_GPL(shash_no_setkey); static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) @@ -100,7 +90,7 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, * We cannot count on __aligned() working for large values: * https://patchwork.kernel.org/patch/9507697/ */ - u8 ubuf[MAX_ALGAPI_ALIGNMASK * 2]; + u8 ubuf[MAX_SHASH_ALIGNMASK * 2]; u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); int err; @@ -142,7 +132,7 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out) * We cannot count on __aligned() working for large values: * https://patchwork.kernel.org/patch/9507697/ */ - u8 ubuf[MAX_ALGAPI_ALIGNMASK + HASH_MAX_DIGESTSIZE]; + u8 ubuf[MAX_SHASH_ALIGNMASK + HASH_MAX_DIGESTSIZE]; u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); int err; @@ -536,6 +526,9 @@ static int shash_prepare_alg(struct shash_alg *alg) alg->statesize > HASH_MAX_STATESIZE) return -EINVAL; + if (base->cra_alignmask > MAX_SHASH_ALIGNMASK) + return -EINVAL; + if ((alg->export && !alg->import) || (alg->import && !alg->export)) return -EINVAL; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 418211180ceec379570c7de99ef43d86861b7c5a..0ecab31cfe79bfd8acc54dae56986c768a308373 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -763,7 +763,7 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher( struct crypto_skcipher *tfm; /* Only sync algorithms allowed. */ - mask |= CRYPTO_ALG_ASYNC; + mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE; tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask); diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c index 4a6480a27fee51869331f74e64a329c00ec912ef..560eba37dc55eae6e2f9cacb5ec339a71618c0fb 100644 --- a/crypto/sm4_generic.c +++ b/crypto/sm4_generic.c @@ -7,12 +7,12 @@ * All rights reserved. 
 */
+#include <crypto/algapi.h>
 #include <crypto/sm4.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/crypto.h>
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index a82679b576bb4381771a0f87ec0e401ef219a68e..a0833654ce946d25320b29665ac9e23841ab06dd 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -324,7 +324,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
 					  crypto_req_done, &data[i].wait);
 	}
 
-	pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
+	pr_info("testing speed of multibuffer %s (%s) %s\n", algo,
 		get_driver_name(crypto_aead, tfm), e);
 
 	i = 0;
@@ -506,8 +506,8 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen)
 
 out:
 	if (ret == 0)
-		printk("1 operation in %lu cycles (%d bytes)\n",
-		       (cycles + 4) / 8, blen);
+		pr_cont("1 operation in %lu cycles (%d bytes)\n",
+			(cycles + 4) / 8, blen);
 
 	return ret;
 }
@@ -575,8 +575,8 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 	}
 
 	crypto_init_wait(&wait);
-	printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
-	       get_driver_name(crypto_aead, tfm), e);
+	pr_info("testing speed of %s (%s) %s\n", algo,
+		get_driver_name(crypto_aead, tfm), e);
 
 	req = aead_request_alloc(tfm, GFP_KERNEL);
 	if (!req) {
@@ -624,8 +624,8 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 			memset(iv, 0xff, iv_len);
 
 			crypto_aead_clear_flags(tfm, ~0);
-			printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
-			       i, *keysize * 8, bs);
+			pr_info("test %u (%d bit key, %d byte blocks): ",
+				i, *keysize * 8, bs);
 
 			memset(tvmem[0], 0xff, PAGE_SIZE);
@@ -727,8 +727,8 @@ static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
 			return ret;
 	}
 
-	printk("%6u opers/sec, %9lu bytes/sec\n",
-	       bcount / secs, ((long)bcount * blen) / secs);
+	pr_cont("%6u opers/sec, %9lu bytes/sec\n",
+		bcount / secs, ((long)bcount * blen) / secs);
 
 	return 0;
 }
@@ -877,8 +877,8 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
 		return;
 	}
 
-	printk(KERN_INFO "\ntesting speed of async %s (%s)\n", algo,
-	       get_driver_name(crypto_ahash, tfm));
+	pr_info("testing speed of async %s (%s)\n", algo,
+		get_driver_name(crypto_ahash, tfm));
 
 	if (crypto_ahash_digestsize(tfm) > MAX_DIGEST_SIZE) {
 		pr_err("digestsize(%u) > %d\n", crypto_ahash_digestsize(tfm),
@@ -1090,15 +1090,6 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
 			goto out_free_tfm;
 		}
 
-
-	for (i = 0; i < num_mb; ++i)
-		if (testmgr_alloc_buf(data[i].xbuf)) {
-			while (i--)
-				testmgr_free_buf(data[i].xbuf);
-			goto out_free_tfm;
-		}
-
-
 	for (i = 0; i < num_mb; ++i) {
 		data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL);
 		if (!data[i].req) {
@@ -1117,7 +1108,7 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
 		crypto_init_wait(&data[i].wait);
 	}
 
-	pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
+	pr_info("testing speed of multibuffer %s (%s) %s\n", algo,
 		get_driver_name(crypto_skcipher, tfm), e);
 
 	i = 0;
@@ -1324,13 +1315,12 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
 		return;
 	}
 
-	pr_info("\ntesting speed of %s %s (%s) %s\n", async ?
"async" : "sync", algo, get_driver_name(crypto_skcipher, tfm), e); req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { - pr_err("tcrypt: skcipher: Failed to allocate request for %s\n", - algo); + pr_err("skcipher: Failed to allocate request for %s\n", algo); goto out; } @@ -1471,387 +1461,396 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) } for (i = 1; i < 200; i++) - ret += do_test(NULL, 0, 0, i, num_mb); + ret = min(ret, do_test(NULL, 0, 0, i, num_mb)); break; case 1: - ret += tcrypt_test("md5"); + ret = min(ret, tcrypt_test("md5")); break; case 2: - ret += tcrypt_test("sha1"); + ret = min(ret, tcrypt_test("sha1")); break; case 3: - ret += tcrypt_test("ecb(des)"); - ret += tcrypt_test("cbc(des)"); - ret += tcrypt_test("ctr(des)"); + ret = min(ret, tcrypt_test("ecb(des)")); + ret = min(ret, tcrypt_test("cbc(des)")); + ret = min(ret, tcrypt_test("ctr(des)")); break; case 4: - ret += tcrypt_test("ecb(des3_ede)"); - ret += tcrypt_test("cbc(des3_ede)"); - ret += tcrypt_test("ctr(des3_ede)"); + ret = min(ret, tcrypt_test("ecb(des3_ede)")); + ret = min(ret, tcrypt_test("cbc(des3_ede)")); + ret = min(ret, tcrypt_test("ctr(des3_ede)")); break; case 5: - ret += tcrypt_test("md4"); + ret = min(ret, tcrypt_test("md4")); break; case 6: - ret += tcrypt_test("sha256"); + ret = min(ret, tcrypt_test("sha256")); break; case 7: - ret += tcrypt_test("ecb(blowfish)"); - ret += tcrypt_test("cbc(blowfish)"); - ret += tcrypt_test("ctr(blowfish)"); + ret = min(ret, tcrypt_test("ecb(blowfish)")); + ret = min(ret, tcrypt_test("cbc(blowfish)")); + ret = min(ret, tcrypt_test("ctr(blowfish)")); break; case 8: - ret += tcrypt_test("ecb(twofish)"); - ret += tcrypt_test("cbc(twofish)"); - ret += tcrypt_test("ctr(twofish)"); - ret += tcrypt_test("lrw(twofish)"); - ret += tcrypt_test("xts(twofish)"); + ret = min(ret, tcrypt_test("ecb(twofish)")); + ret = min(ret, tcrypt_test("cbc(twofish)")); + ret = min(ret, tcrypt_test("ctr(twofish)")); + ret = min(ret, tcrypt_test("lrw(twofish)")); + ret = min(ret, tcrypt_test("xts(twofish)")); break; case 9: - ret += tcrypt_test("ecb(serpent)"); - ret += tcrypt_test("cbc(serpent)"); - ret += tcrypt_test("ctr(serpent)"); - ret += tcrypt_test("lrw(serpent)"); - ret += tcrypt_test("xts(serpent)"); + ret = min(ret, tcrypt_test("ecb(serpent)")); + ret = min(ret, tcrypt_test("cbc(serpent)")); + ret = min(ret, tcrypt_test("ctr(serpent)")); + ret = min(ret, tcrypt_test("lrw(serpent)")); + ret = min(ret, tcrypt_test("xts(serpent)")); break; case 10: - ret += tcrypt_test("ecb(aes)"); - ret += tcrypt_test("cbc(aes)"); - ret += tcrypt_test("lrw(aes)"); - ret += tcrypt_test("xts(aes)"); - ret += tcrypt_test("ctr(aes)"); - ret += tcrypt_test("rfc3686(ctr(aes))"); - ret += tcrypt_test("ofb(aes)"); - ret += tcrypt_test("cfb(aes)"); - ret += tcrypt_test("xctr(aes)"); + ret = min(ret, tcrypt_test("ecb(aes)")); + ret = min(ret, tcrypt_test("cbc(aes)")); + ret = min(ret, tcrypt_test("lrw(aes)")); + ret = min(ret, tcrypt_test("xts(aes)")); + ret = min(ret, tcrypt_test("ctr(aes)")); + ret = min(ret, tcrypt_test("rfc3686(ctr(aes))")); + ret = min(ret, tcrypt_test("ofb(aes)")); + ret = min(ret, tcrypt_test("cfb(aes)")); + ret = min(ret, tcrypt_test("xctr(aes)")); break; case 11: - ret += tcrypt_test("sha384"); + ret = min(ret, tcrypt_test("sha384")); break; case 12: - ret += tcrypt_test("sha512"); + ret = min(ret, tcrypt_test("sha512")); break; case 13: - ret += tcrypt_test("deflate"); + ret = min(ret, tcrypt_test("deflate")); break; case 14: - ret += 
tcrypt_test("ecb(cast5)"); - ret += tcrypt_test("cbc(cast5)"); - ret += tcrypt_test("ctr(cast5)"); + ret = min(ret, tcrypt_test("ecb(cast5)")); + ret = min(ret, tcrypt_test("cbc(cast5)")); + ret = min(ret, tcrypt_test("ctr(cast5)")); break; case 15: - ret += tcrypt_test("ecb(cast6)"); - ret += tcrypt_test("cbc(cast6)"); - ret += tcrypt_test("ctr(cast6)"); - ret += tcrypt_test("lrw(cast6)"); - ret += tcrypt_test("xts(cast6)"); + ret = min(ret, tcrypt_test("ecb(cast6)")); + ret = min(ret, tcrypt_test("cbc(cast6)")); + ret = min(ret, tcrypt_test("ctr(cast6)")); + ret = min(ret, tcrypt_test("lrw(cast6)")); + ret = min(ret, tcrypt_test("xts(cast6)")); break; case 16: - ret += tcrypt_test("ecb(arc4)"); + ret = min(ret, tcrypt_test("ecb(arc4)")); break; case 17: - ret += tcrypt_test("michael_mic"); + ret = min(ret, tcrypt_test("michael_mic")); break; case 18: - ret += tcrypt_test("crc32c"); + ret = min(ret, tcrypt_test("crc32c")); break; case 19: - ret += tcrypt_test("ecb(tea)"); + ret = min(ret, tcrypt_test("ecb(tea)")); break; case 20: - ret += tcrypt_test("ecb(xtea)"); + ret = min(ret, tcrypt_test("ecb(xtea)")); break; case 21: - ret += tcrypt_test("ecb(khazad)"); + ret = min(ret, tcrypt_test("ecb(khazad)")); break; case 22: - ret += tcrypt_test("wp512"); + ret = min(ret, tcrypt_test("wp512")); break; case 23: - ret += tcrypt_test("wp384"); + ret = min(ret, tcrypt_test("wp384")); break; case 24: - ret += tcrypt_test("wp256"); + ret = min(ret, tcrypt_test("wp256")); break; case 26: - ret += tcrypt_test("ecb(anubis)"); - ret += tcrypt_test("cbc(anubis)"); + ret = min(ret, tcrypt_test("ecb(anubis)")); + ret = min(ret, tcrypt_test("cbc(anubis)")); break; case 30: - ret += tcrypt_test("ecb(xeta)"); + ret = min(ret, tcrypt_test("ecb(xeta)")); break; case 31: - ret += tcrypt_test("pcbc(fcrypt)"); + ret = min(ret, tcrypt_test("pcbc(fcrypt)")); break; case 32: - ret += tcrypt_test("ecb(camellia)"); - ret += tcrypt_test("cbc(camellia)"); - ret += tcrypt_test("ctr(camellia)"); - ret += tcrypt_test("lrw(camellia)"); - ret += tcrypt_test("xts(camellia)"); + ret = min(ret, tcrypt_test("ecb(camellia)")); + ret = min(ret, tcrypt_test("cbc(camellia)")); + ret = min(ret, tcrypt_test("ctr(camellia)")); + ret = min(ret, tcrypt_test("lrw(camellia)")); + ret = min(ret, tcrypt_test("xts(camellia)")); break; case 33: - ret += tcrypt_test("sha224"); + ret = min(ret, tcrypt_test("sha224")); break; case 35: - ret += tcrypt_test("gcm(aes)"); + ret = min(ret, tcrypt_test("gcm(aes)")); break; case 36: - ret += tcrypt_test("lzo"); + ret = min(ret, tcrypt_test("lzo")); break; case 37: - ret += tcrypt_test("ccm(aes)"); + ret = min(ret, tcrypt_test("ccm(aes)")); break; case 38: - ret += tcrypt_test("cts(cbc(aes))"); + ret = min(ret, tcrypt_test("cts(cbc(aes))")); break; case 39: - ret += tcrypt_test("xxhash64"); + ret = min(ret, tcrypt_test("xxhash64")); break; case 40: - ret += tcrypt_test("rmd160"); + ret = min(ret, tcrypt_test("rmd160")); break; case 42: - ret += tcrypt_test("blake2b-512"); + ret = min(ret, tcrypt_test("blake2b-512")); break; case 43: - ret += tcrypt_test("ecb(seed)"); + ret = min(ret, tcrypt_test("ecb(seed)")); break; case 45: - ret += tcrypt_test("rfc4309(ccm(aes))"); + ret = min(ret, tcrypt_test("rfc4309(ccm(aes))")); break; case 46: - ret += tcrypt_test("ghash"); + ret = min(ret, tcrypt_test("ghash")); break; case 47: - ret += tcrypt_test("crct10dif"); + ret = min(ret, tcrypt_test("crct10dif")); break; case 48: - ret += tcrypt_test("sha3-224"); + ret = min(ret, tcrypt_test("sha3-224")); break; case 49: 
- ret += tcrypt_test("sha3-256"); + ret = min(ret, tcrypt_test("sha3-256")); break; case 50: - ret += tcrypt_test("sha3-384"); + ret = min(ret, tcrypt_test("sha3-384")); break; case 51: - ret += tcrypt_test("sha3-512"); + ret = min(ret, tcrypt_test("sha3-512")); break; case 52: - ret += tcrypt_test("sm3"); + ret = min(ret, tcrypt_test("sm3")); break; case 53: - ret += tcrypt_test("streebog256"); + ret = min(ret, tcrypt_test("streebog256")); break; case 54: - ret += tcrypt_test("streebog512"); + ret = min(ret, tcrypt_test("streebog512")); break; case 55: - ret += tcrypt_test("gcm(sm4)"); + ret = min(ret, tcrypt_test("gcm(sm4)")); break; case 56: - ret += tcrypt_test("ccm(sm4)"); + ret = min(ret, tcrypt_test("ccm(sm4)")); break; case 57: - ret += tcrypt_test("polyval"); + ret = min(ret, tcrypt_test("polyval")); break; case 58: - ret += tcrypt_test("gcm(aria)"); + ret = min(ret, tcrypt_test("gcm(aria)")); + break; + + case 59: + ret = min(ret, tcrypt_test("cts(cbc(sm4))")); break; case 100: - ret += tcrypt_test("hmac(md5)"); + ret = min(ret, tcrypt_test("hmac(md5)")); break; case 101: - ret += tcrypt_test("hmac(sha1)"); + ret = min(ret, tcrypt_test("hmac(sha1)")); break; case 102: - ret += tcrypt_test("hmac(sha256)"); + ret = min(ret, tcrypt_test("hmac(sha256)")); break; case 103: - ret += tcrypt_test("hmac(sha384)"); + ret = min(ret, tcrypt_test("hmac(sha384)")); break; case 104: - ret += tcrypt_test("hmac(sha512)"); + ret = min(ret, tcrypt_test("hmac(sha512)")); break; case 105: - ret += tcrypt_test("hmac(sha224)"); + ret = min(ret, tcrypt_test("hmac(sha224)")); break; case 106: - ret += tcrypt_test("xcbc(aes)"); + ret = min(ret, tcrypt_test("xcbc(aes)")); break; case 108: - ret += tcrypt_test("hmac(rmd160)"); + ret = min(ret, tcrypt_test("hmac(rmd160)")); break; case 109: - ret += tcrypt_test("vmac64(aes)"); + ret = min(ret, tcrypt_test("vmac64(aes)")); break; case 111: - ret += tcrypt_test("hmac(sha3-224)"); + ret = min(ret, tcrypt_test("hmac(sha3-224)")); break; case 112: - ret += tcrypt_test("hmac(sha3-256)"); + ret = min(ret, tcrypt_test("hmac(sha3-256)")); break; case 113: - ret += tcrypt_test("hmac(sha3-384)"); + ret = min(ret, tcrypt_test("hmac(sha3-384)")); break; case 114: - ret += tcrypt_test("hmac(sha3-512)"); + ret = min(ret, tcrypt_test("hmac(sha3-512)")); break; case 115: - ret += tcrypt_test("hmac(streebog256)"); + ret = min(ret, tcrypt_test("hmac(streebog256)")); break; case 116: - ret += tcrypt_test("hmac(streebog512)"); + ret = min(ret, tcrypt_test("hmac(streebog512)")); break; case 150: - ret += tcrypt_test("ansi_cprng"); + ret = min(ret, tcrypt_test("ansi_cprng")); break; case 151: - ret += tcrypt_test("rfc4106(gcm(aes))"); + ret = min(ret, tcrypt_test("rfc4106(gcm(aes))")); break; case 152: - ret += tcrypt_test("rfc4543(gcm(aes))"); + ret = min(ret, tcrypt_test("rfc4543(gcm(aes))")); break; case 153: - ret += tcrypt_test("cmac(aes)"); + ret = min(ret, tcrypt_test("cmac(aes)")); break; case 154: - ret += tcrypt_test("cmac(des3_ede)"); + ret = min(ret, tcrypt_test("cmac(des3_ede)")); break; case 155: - ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(aes))")); break; case 156: - ret += tcrypt_test("authenc(hmac(md5),ecb(cipher_null))"); + ret = min(ret, tcrypt_test("authenc(hmac(md5),ecb(cipher_null))")); break; case 157: - ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))")); break; case 158: - ret += tcrypt_test("cbcmac(sm4)"); + ret = 
min(ret, tcrypt_test("cbcmac(sm4)")); break; case 159: - ret += tcrypt_test("cmac(sm4)"); + ret = min(ret, tcrypt_test("cmac(sm4)")); + break; + + case 160: + ret = min(ret, tcrypt_test("xcbc(sm4)")); break; case 181: - ret += tcrypt_test("authenc(hmac(sha1),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(des))")); break; case 182: - ret += tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))")); break; case 183: - ret += tcrypt_test("authenc(hmac(sha224),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha224),cbc(des))")); break; case 184: - ret += tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))")); break; case 185: - ret += tcrypt_test("authenc(hmac(sha256),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha256),cbc(des))")); break; case 186: - ret += tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))")); break; case 187: - ret += tcrypt_test("authenc(hmac(sha384),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha384),cbc(des))")); break; case 188: - ret += tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))")); break; case 189: - ret += tcrypt_test("authenc(hmac(sha512),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha512),cbc(des))")); break; case 190: - ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))")); break; case 191: - ret += tcrypt_test("ecb(sm4)"); - ret += tcrypt_test("cbc(sm4)"); - ret += tcrypt_test("cfb(sm4)"); - ret += tcrypt_test("ctr(sm4)"); + ret = min(ret, tcrypt_test("ecb(sm4)")); + ret = min(ret, tcrypt_test("cbc(sm4)")); + ret = min(ret, tcrypt_test("cfb(sm4)")); + ret = min(ret, tcrypt_test("ctr(sm4)")); + ret = min(ret, tcrypt_test("xts(sm4)")); break; case 192: - ret += tcrypt_test("ecb(aria)"); - ret += tcrypt_test("cbc(aria)"); - ret += tcrypt_test("cfb(aria)"); - ret += tcrypt_test("ctr(aria)"); + ret = min(ret, tcrypt_test("ecb(aria)")); + ret = min(ret, tcrypt_test("cbc(aria)")); + ret = min(ret, tcrypt_test("cfb(aria)")); + ret = min(ret, tcrypt_test("ctr(aria)")); break; case 200: test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, @@ -2109,6 +2108,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_16); test_cipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0, speed_template_16); + test_cipher_speed("cts(cbc(sm4))", ENCRYPT, sec, NULL, 0, + speed_template_16); + test_cipher_speed("cts(cbc(sm4))", DECRYPT, sec, NULL, 0, + speed_template_16); test_cipher_speed("cfb(sm4)", ENCRYPT, sec, NULL, 0, speed_template_16); test_cipher_speed("cfb(sm4)", DECRYPT, sec, NULL, 0, @@ -2117,6 +2120,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_16); test_cipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0, speed_template_16); + test_cipher_speed("xts(sm4)", ENCRYPT, sec, NULL, 0, + speed_template_32); + test_cipher_speed("xts(sm4)", DECRYPT, sec, NULL, 0, + speed_template_32); break; case 219: @@ -2630,6 +2637,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_16); test_acipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0, speed_template_16); + test_acipher_speed("xts(sm4)", ENCRYPT, sec, NULL, 0, + speed_template_32); + test_acipher_speed("xts(sm4)", DECRYPT, sec, 
NULL, 0,
+				   speed_template_32);
 		break;
 
 	case 519:
@@ -2885,7 +2896,7 @@ static int __init tcrypt_mod_init(void)
 	err = do_test(alg, type, mask, mode, num_mb);
 
 	if (err) {
-		printk(KERN_ERR "tcrypt: one or more tests failed!\n");
+		pr_err("one or more tests failed!\n");
 		goto err_free_tv;
 	} else {
 		pr_debug("all tests passed\n");
diff --git a/crypto/tea.c b/crypto/tea.c
index 02efc5d816903097d5f9ba0fcd0dce79ff4e9f50..896f863f3067ce85f129d176a65af8d61d54155e 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -14,11 +14,11 @@
  * Copyright (c) 2004 Aaron Grothe ajgrothe@yahoo.com
  */
 
+#include <crypto/algapi.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <asm/byteorder.h>
-#include <linux/crypto.h>
 #include <linux/types.h>
 
 #define TEA_KEY_SIZE		16
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 589189c9fced55e745822cee378e620437788634..4476ac97baa5e45aa1e57665070a260b49196691 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -4712,6 +4712,12 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.alg = "cts(cbc(paes))",
 		.test = alg_test_null,
 		.fips_allowed = 1,
+	}, {
+		.alg = "cts(cbc(sm4))",
+		.test = alg_test_skcipher,
+		.suite = {
+			.cipher = __VECS(sm4_cts_tv_template)
+		}
 	}, {
 		.alg = "curve25519",
 		.test = alg_test_kpp,
@@ -5586,6 +5592,12 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.suite = {
 			.hash = __VECS(aes_xcbc128_tv_template)
 		}
+	}, {
+		.alg = "xcbc(sm4)",
+		.test = alg_test_hash,
+		.suite = {
+			.hash = __VECS(sm4_xcbc128_tv_template)
+		}
 	}, {
 		.alg = "xchacha12",
 		.test = alg_test_skcipher,
@@ -5640,6 +5652,13 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.suite = {
 			.cipher = __VECS(serpent_xts_tv_template)
 		}
+	}, {
+		.alg = "xts(sm4)",
+		.generic_driver = "xts(ecb(sm4-generic))",
+		.test = alg_test_skcipher,
+		.suite = {
+			.cipher = __VECS(sm4_xts_tv_template)
+		}
 	}, {
 		.alg = "xts(twofish)",
 		.generic_driver = "xts(ecb(twofish-generic))",
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index d6088e26f3261ce71ec6138d8efede1aa8ad713e..f10bfb9d997353817ae25d992881d84193dd69ef 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -14882,6 +14882,353 @@ static const struct cipher_testvec sm4_cfb_tv_template[] = {
 	}
 };
 
+static const struct cipher_testvec sm4_cts_tv_template[] = {
+	/* Generated from AES-CTS test vectors */
+	{
+		.klen	= 16,
+		.key	= "\x63\x68\x69\x63\x6b\x65\x6e\x20"
+			  "\x74\x65\x72\x69\x79\x61\x6b\x69",
+		.ptext	= "\x49\x20\x77\x6f\x75\x6c\x64\x20"
+			  "\x6c\x69\x6b\x65\x20\x74\x68\x65"
+			  "\x20",
+		.len	= 17,
+		.ctext	= "\x05\xfe\x23\xee\x17\xa2\x89\x98"
+			  "\xbc\x97\x0a\x0b\x54\x67\xca\xd7"
+			  "\xd6",
+	}, {
+		.klen	= 16,
+		.key	= "\x63\x68\x69\x63\x6b\x65\x6e\x20"
+			  "\x74\x65\x72\x69\x79\x61\x6b\x69",
+		.ptext	= "\x49\x20\x77\x6f\x75\x6c\x64\x20"
+			  "\x6c\x69\x6b\x65\x20\x74\x68\x65"
+			  "\x20\x47\x65\x6e\x65\x72\x61\x6c"
+			  "\x20\x47\x61\x75\x27\x73\x20",
+		.len	= 31,
+		.ctext	= "\x15\x46\xe4\x95\xa4\xec\xf0\xb8"
+			  "\x49\xd6\x6a\x9d\x89\xc7\xfd\x70"
+			  "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66"
+			  "\x93\xf7\x70\xbb\xa8\x3f\xa3",
+	}, {
+		.klen	= 16,
+		.key	= "\x63\x68\x69\x63\x6b\x65\x6e\x20"
+			  "\x74\x65\x72\x69\x79\x61\x6b\x69",
+		.ptext	= "\x49\x20\x77\x6f\x75\x6c\x64\x20"
+			  "\x6c\x69\x6b\x65\x20\x74\x68\x65"
+			  "\x20\x47\x65\x6e\x65\x72\x61\x6c"
+			  "\x20\x47\x61\x75\x27\x73\x20\x43",
+		.len	= 32,
+		.ctext	= "\x89\xc7\x99\x3f\x87\x69\x5c\xd3"
+			  "\x01\x6a\xbf\xd4\x3f\x79\x02\xa3"
+			  "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66"
+			  "\x93\xf7\x70\xbb\xa8\x3f\xa3\xcf",
+	}, {
+		.klen	= 16,
+		.key	= "\x63\x68\x69\x63\x6b\x65\x6e\x20"
+			  "\x74\x65\x72\x69\x79\x61\x6b\x69",
+		.ptext	= "\x49\x20\x77\x6f\x75\x6c\x64\x20"
"\x6c\x69\x6b\x65\x20\x74\x68\x65" + "\x20\x47\x65\x6e\x65\x72\x61\x6c" + "\x20\x47\x61\x75\x27\x73\x20\x43" + "\x68\x69\x63\x6b\x65\x6e\x2c\x20" + "\x70\x6c\x65\x61\x73\x65\x2c", + .len = 47, + .ctext = "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66" + "\x93\xf7\x70\xbb\xa8\x3f\xa3\xcf" + "\xd3\xe1\xdc\xeb\xfa\x04\x11\x99" + "\xde\xcf\x6f\x4d\x7b\x09\x92\x7f" + "\x89\xc7\x99\x3f\x87\x69\x5c\xd3" + "\x01\x6a\xbf\xd4\x3f\x79\x02", + }, { + .klen = 16, + .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20" + "\x74\x65\x72\x69\x79\x61\x6b\x69", + .ptext = "\x49\x20\x77\x6f\x75\x6c\x64\x20" + "\x6c\x69\x6b\x65\x20\x74\x68\x65" + "\x20\x47\x65\x6e\x65\x72\x61\x6c" + "\x20\x47\x61\x75\x27\x73\x20\x43" + "\x68\x69\x63\x6b\x65\x6e\x2c\x20" + "\x70\x6c\x65\x61\x73\x65\x2c\x20", + .len = 48, + .ctext = "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66" + "\x93\xf7\x70\xbb\xa8\x3f\xa3\xcf" + "\x9a\xbd\x7b\xfe\x82\xab\xcc\x7f" + "\xbd\x99\x21\x0c\x5e\x4d\xed\x20" + "\x89\xc7\x99\x3f\x87\x69\x5c\xd3" + "\x01\x6a\xbf\xd4\x3f\x79\x02\xa3", + }, { + .klen = 16, + .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20" + "\x74\x65\x72\x69\x79\x61\x6b\x69", + .ptext = "\x49\x20\x77\x6f\x75\x6c\x64\x20" + "\x6c\x69\x6b\x65\x20\x74\x68\x65" + "\x20\x47\x65\x6e\x65\x72\x61\x6c" + "\x20\x47\x61\x75\x27\x73\x20\x43" + "\x68\x69\x63\x6b\x65\x6e\x2c\x20" + "\x70\x6c\x65\x61\x73\x65\x2c\x20" + "\x61\x6e\x64\x20\x77\x6f\x6e\x74" + "\x6f\x6e\x20\x73\x6f\x75\x70\x2e", + .len = 64, + .ctext = "\xd6\x71\xc8\xc0\x4d\x52\x7c\x66" + "\x93\xf7\x70\xbb\xa8\x3f\xa3\xcf" + "\x89\xc7\x99\x3f\x87\x69\x5c\xd3" + "\x01\x6a\xbf\xd4\x3f\x79\x02\xa3" + "\x58\x19\xa4\x8f\xa9\x68\x5e\x6b" + "\x2c\x0f\x81\x60\x15\x98\x27\x4f" + "\x9a\xbd\x7b\xfe\x82\xab\xcc\x7f" + "\xbd\x99\x21\x0c\x5e\x4d\xed\x20", + } +}; + +static const struct cipher_testvec sm4_xts_tv_template[] = { + /* Generated from AES-XTS test vectors */ + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ctext = "\xd9\xb4\x21\xf7\x31\xc8\x94\xfd" + "\xc3\x5b\x77\x29\x1f\xe4\xe3\xb0" + "\x2a\x1f\xb7\x66\x98\xd5\x9f\x0e" + "\x51\x37\x6c\x4a\xda\x5b\xc7\x5d", + .len = 32, + }, { + .key = "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ctext = "\xa7\x4d\x72\x6c\x11\x19\x6a\x32" + "\xbe\x04\xe0\x01\xff\x29\xd0\xc7" + "\x93\x2f\x9f\x3e\xc2\x9b\xfc\xb6" + "\x4d\xd1\x7f\x63\xcb\xd3\xea\x31", + .len = 32, + }, { + .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" + "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ctext = "\x7f\x76\x08\x8e\xff\xad\xf7\x0c" + "\x02\xea\x9f\x95\xda\x06\x28\xd3" + "\x51\xbf\xcb\x9e\xac\x05\x63\xbc" + 
"\xf1\x7b\x71\x0d\xab\x0a\x98\x26", + .len = 32, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x31\x41\x59\x26\x53\x58\x97\x93" + "\x23\x84\x62\x64\x33\x83\x27\x95", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .ctext = "\x54\xdd\x65\xb6\x32\x6f\xae\xa8" + "\xfa\xd1\xa8\x3c\x63\x61\x4a\xf3" + "\x9f\x72\x1d\x8d\xfe\x17\x7a\x30" + "\xb6\x6a\xbf\x6a\x44\x99\x80\xe1" + "\xcd\xbe\x06\xaf\xb7\x33\x36\xf3" + "\x7a\x4d\x39\xde\x96\x4a\x30\xd7" + "\xd0\x4a\x37\x99\x16\x9c\x60\x25" + "\x8f\x6b\x74\x8a\x61\x86\x1a\xa5" + "\xec\x92\xa2\xc1\x5b\x2b\x7c\x61" + "\x5a\x42\xab\xa4\x99\xbb\xd6\xb7" + "\x1d\xb9\xc7\x89\xb2\x18\x20\x89" + "\xa2\x5d\xd3\xdf\x80\x0e\xd1\x86" + "\x4d\x19\xf7\xed\x45\xfd\x17\xa9" + "\x48\x0b\x0f\xb8\x2d\x9b\x7f\xc3" + "\xed\x57\xe9\xa1\x14\x0e\xaa\x77" + "\x8d\xd2\xdd\x67\x9e\x3e\xdc\x3d" + "\xc4\xd5\x5c\x95\x0e\xbc\x53\x1d" + "\x95\x92\xf7\xc4\x63\x82\x56\xd5" + "\x65\x18\x29\x2a\x20\xaf\x98\xfd" + "\xd3\xa6\x36\x00\x35\x0a\x70\xab" + "\x5a\x40\xf4\xc2\x85\x03\x7c\xa0" + "\x1f\x25\x1f\x19\xec\xae\x03\x29" + "\xff\x77\xad\x88\xcd\x5a\x4c\xde" + 
"\xa2\xae\xab\xc2\x21\x48\xff\xbd" + "\x23\x9b\xd1\x05\x15\xbd\xe1\x13" + "\x1d\xec\x84\x04\xe4\x43\xdc\x76" + "\x31\x40\xd5\xf2\x2b\xf3\x3e\x0c" + "\x68\x72\xd6\xb8\x1d\x63\x0f\x6f" + "\x00\xcd\xd0\x58\xfe\x80\xf9\xcb" + "\xfb\x77\x70\x7f\x93\xce\xe2\xca" + "\x92\xb9\x15\xb8\x30\x40\x27\xc1" + "\x90\xa8\x4e\x2d\x65\xe0\x18\xcc" + "\x6a\x38\x7d\x37\x66\xac\xdb\x28" + "\x25\x32\x84\xe8\xdb\x9a\xcf\x8f" + "\x52\x28\x0d\xdc\x6d\x00\x33\xd2" + "\xcc\xaa\xa4\xf9\xae\xff\x12\x36" + "\x69\xbc\x02\x4f\xd6\x76\x8e\xdf" + "\x8b\xc1\xf8\xd6\x22\xc1\x9c\x60" + "\x9e\xf9\x7f\x60\x91\x90\xcd\x11" + "\x02\x41\xe7\xfb\x08\x4e\xd8\x94" + "\x2d\xa1\xf9\xb9\xcf\x1b\x51\x4b" + "\x61\xa3\x88\xb3\x0e\xa6\x1a\x4a" + "\x74\x5b\x38\x1e\xe7\xad\x6c\x4d" + "\xb1\x27\x54\x53\xb8\x41\x3f\x98" + "\xdf\x6e\x4a\x40\x98\x6e\xe4\xb5" + "\x9a\xf5\xdf\xae\xcd\x30\x12\x65" + "\x17\x90\x67\xa0\x0d\x7c\xa3\x5a" + "\xb9\x5a\xbd\x61\x7a\xde\xa2\x8e" + "\xc1\xc2\x6a\x97\xde\x28\xb8\xbf" + "\xe3\x01\x20\xd6\xae\xfb\xd2\x58" + "\xc5\x9e\x42\xd1\x61\xe8\x06\x5a" + "\x78\x10\x6b\xdc\xa5\xcd\x90\xfb" + "\x3a\xac\x4e\x93\x86\x6c\x8a\x7f" + "\x96\x76\x86\x0a\x79\x14\x5b\xd9" + "\x2e\x02\xe8\x19\xa9\x0b\xe0\xb9" + "\x7c\xc5\x22\xb3\x21\x06\x85\x6f" + "\xdf\x0e\x54\xd8\x8e\x46\x24\x15" + "\x5a\x2f\x1c\x14\xea\xea\xa1\x63" + "\xf8\x58\xe9\x9a\x80\x6e\x79\x1a" + "\xcd\x82\xf1\xb0\xe2\x9f\x00\x28" + "\xa4\xc3\x8e\x97\x6f\x57\x1a\x93" + "\xf4\xfd\x57\xd7\x87\xc2\x4d\xb0" + "\xe0\x1c\xa3\x04\xe5\xa5\xc4\xdd" + "\x50\xcf\x8b\xdb\xf4\x91\xe5\x7c", + .len = 512, + }, { + .key = "\x62\x49\x77\x57\x24\x70\x93\x69" + "\x99\x59\x57\x49\x66\x96\x76\x27" + "\x02\x88\x41\x97\x16\x93\x99\x37" + "\x51\x05\x82\x09\x74\x94\x45\x92", + .klen = 32, + .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xf8\xf9\xfa\xfb\xfc", + .ctext = "\xa2\x9f\x9e\x4e\x71\xdb\x28\x3c" + "\x80\x0e\xf6\xb7\x8e\x57\x1c\xba" + "\x90\xda\x3b\x6c\x22\x00\x68\x30" + "\x1d\x63\x0d\x9e\x6a\xad\x37\x55" + "\xbc\x77\x1e\xc9\xad\x83\x30\xd5" + "\x27\xb2\x66\x77\x18\x3c\xa6\x39" + "\x9c\x0a\xaa\x1f\x02\xe1\xd5\x65" + "\x9b\x8d\xc5\x97\x3d\xc5\x04\x53" + "\x78\x00\xe3\xb0\x1a\x43\x4e\xb7" + "\xc4\x9f\x38\xc5\x7b\xa4\x70\x64" + "\x78\xe6\x32\xd9\x65\x44\xc5\x64" + "\xb8\x42\x35\x99\xff\x66\x75\xb0" + "\x22\xd3\x9b\x6e\x8d\xcf\x6a\x24" + "\xfd\x92\xb7\x1b\x04\x28\x2a\x61" + "\xdc\x96\x2a\x20\x7a\x2c\xf1\xf9" + "\x12\x15\xf0\x4d\xcf\x2b\xde\x33" + "\x41\xbc\xe7\x85\x87\x22\xb7\x16" + "\x02\x1c\xd8\xa2\x0f\x1f\xa3\xe9" + "\xd8\x45\x48\xe7\xbe\x08\x4e\x4e" + "\x23\x79\x84\xdb\x40\x76\xf5\x13" + "\x78\x92\x4a\x2f\xf9\x1b\xf2\x80" + "\x25\x74\x51\x45\x9a\x77\x78\x97" + "\xd3\xe0\xc7\xc4\x35\x67\x2a\xe6" + 
"\xb3\x0d\x62\x9f\x8b", + .len = 189, + }, +}; + static const struct aead_testvec sm4_gcm_tv_template[] = { { /* From https://datatracker.ietf.org/doc/html/rfc8998#appendix-A.1 */ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF" @@ -14913,6 +15260,298 @@ static const struct aead_testvec sm4_gcm_tv_template[] = { "\x83\xDE\x35\x41\xE4\xC2\xB5\x81" "\x77\xE0\x65\xA9\xBF\x7B\x62\xEC", .clen = 80, + }, { /* Generated from AES-GCM test vectors */ + .key = zeroed_string, + .klen = 16, + .ctext = "\x23\x2f\x0c\xfe\x30\x8b\x49\xea" + "\x6f\xc8\x82\x29\xb5\xdc\x85\x8d", + .clen = 16, + }, { + .key = zeroed_string, + .klen = 16, + .ptext = zeroed_string, + .plen = 16, + .ctext = "\x7d\xe2\xaa\x7f\x11\x10\x18\x82" + "\x18\x06\x3b\xe1\xbf\xeb\x6d\x89" + "\xb8\x51\xb5\xf3\x94\x93\x75\x2b" + "\xe5\x08\xf1\xbb\x44\x82\xc5\x57", + .clen = 32, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08", + .klen = 16, + .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" + "\xde\xca\xf8\x88", + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" + "\x86\xa7\xa9\x53\x15\x34\xf7\xda" + "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" + "\x1c\x3c\x0c\x95\x95\x68\x09\x53" + "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" + "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" + "\xba\x63\x7b\x39\x1a\xaf\xd2\x55", + .plen = 64, + .ctext = "\xe4\x11\x0f\xf1\xc1\x41\x97\xe6" + "\x76\x21\x6a\x33\x83\x10\x41\xeb" + "\x09\x58\x00\x11\x7b\xdc\x3f\x75" + "\x1a\x49\x6e\xfc\xf2\xbb\xdf\xdb" + "\x3a\x2e\x13\xfd\xc5\xc1\x9d\x07" + "\x1a\xe5\x48\x3f\xed\xde\x98\x5d" + "\x3f\x2d\x5b\x4e\xee\x0b\xb6\xdf" + "\xe3\x63\x36\x83\x23\xf7\x5b\x80" + "\x7d\xfe\x77\xef\x71\xb1\x5e\xc9" + "\x52\x6b\x09\xab\x84\x28\x4b\x8a", + .clen = 80, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08", + .klen = 16, + .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" + "\xde\xca\xf8\x88", + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" + "\x86\xa7\xa9\x53\x15\x34\xf7\xda" + "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" + "\x1c\x3c\x0c\x95\x95\x68\x09\x53" + "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" + "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" + "\xba\x63\x7b\x39", + .plen = 60, + .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" + "\xfe\xed\xfa\xce\xde\xad\xbe\xef" + "\xab\xad\xda\xd2", + .alen = 20, + .ctext = "\xe4\x11\x0f\xf1\xc1\x41\x97\xe6" + "\x76\x21\x6a\x33\x83\x10\x41\xeb" + "\x09\x58\x00\x11\x7b\xdc\x3f\x75" + "\x1a\x49\x6e\xfc\xf2\xbb\xdf\xdb" + "\x3a\x2e\x13\xfd\xc5\xc1\x9d\x07" + "\x1a\xe5\x48\x3f\xed\xde\x98\x5d" + "\x3f\x2d\x5b\x4e\xee\x0b\xb6\xdf" + "\xe3\x63\x36\x83" + "\x89\xf6\xba\x35\xb8\x18\xd3\xcc" + "\x38\x6c\x05\xb3\x8a\xcb\xc9\xde", + .clen = 76, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\xfe\xff\xe9\x92\x86\x65\x73\x1c", + .klen = 16, + .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" + "\xde\xca\xf8\x88", + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" + "\x86\xa7\xa9\x53\x15\x34\xf7\xda" + "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" + "\x1c\x3c\x0c\x95\x95\x68\x09\x53" + "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" + "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" + "\xba\x63\x7b\x39", + .plen = 60, + .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" + "\xfe\xed\xfa\xce\xde\xad\xbe\xef" + "\xab\xad\xda\xd2", + .alen = 20, + .ctext = "\xc1\x11\x44\x51\xd9\x25\x87\x5b" + "\x0f\xd9\x06\xf3\x33\x44\xbb\x87" + "\x8b\xa3\x77\xd2\x0c\x60\xfa\xcc" + "\x85\x50\x6f\x96\x0c\x54\x54\xc1" + "\x58\x04\x88\x6e\xf4\x26\x35\x7e" + "\x94\x80\x48\x6c\xf2\xf4\x88\x1f" + "\x19\x63\xea\xae\xba\x81\x1a\x5d" + 
"\x0e\x6f\x59\x08" + "\x33\xac\x5b\xa8\x19\x60\xdb\x1d" + "\xdd\x2e\x22\x2e\xe0\x87\x51\x5d", + .clen = 76, + }, { + .key = "\x8b\x32\xcf\xe7\x44\xed\x13\x59" + "\x04\x38\x77\xb0\xb9\xad\xb4\x38", + .klen = 16, + .iv = "\x00\xff\xff\xff\xff\x00\x00\xff" + "\xff\xff\x00\xff", + .ptext = "\x42\xc1\xcc\x08\x48\x6f\x41\x3f" + "\x2f\x11\x66\x8b\x2a\x16\xf0\xe0" + "\x58\x83\xf0\xc3\x70\x14\xc0\x5b" + "\x3f\xec\x1d\x25\x3c\x51\xd2\x03" + "\xcf\x59\x74\x1f\xb2\x85\xb4\x07" + "\xc6\x6a\x63\x39\x8a\x5b\xde\xcb" + "\xaf\x08\x44\xbd\x6f\x91\x15\xe1" + "\xf5\x7a\x6e\x18\xbd\xdd\x61\x50" + "\x59\xa9\x97\xab\xbb\x0e\x74\x5c" + "\x00\xa4\x43\x54\x04\x54\x9b\x3b" + "\x77\xec\xfd\x5c\xa6\xe8\x7b\x08" + "\xae\xe6\x10\x3f\x32\x65\xd1\xfc" + "\xa4\x1d\x2c\x31\xfb\x33\x7a\xb3" + "\x35\x23\xf4\x20\x41\xd4\xad\x82" + "\x8b\xa4\xad\x96\x1c\x20\x53\xbe" + "\x0e\xa6\xf4\xdc\x78\x49\x3e\x72" + "\xb1\xa9\xb5\x83\xcb\x08\x54\xb7" + "\xad\x49\x3a\xae\x98\xce\xa6\x66" + "\x10\x30\x90\x8c\x55\x83\xd7\x7c" + "\x8b\xe6\x53\xde\xd2\x6e\x18\x21" + "\x01\x52\xd1\x9f\x9d\xbb\x9c\x73" + "\x57\xcc\x89\x09\x75\x9b\x78\x70" + "\xed\x26\x97\x4d\xb4\xe4\x0c\xa5" + "\xfa\x70\x04\x70\xc6\x96\x1c\x7d" + "\x54\x41\x77\xa8\xe3\xb0\x7e\x96" + "\x82\xd9\xec\xa2\x87\x68\x55\xf9" + "\x8f\x9e\x73\x43\x47\x6a\x08\x36" + "\x93\x67\xa8\x2d\xde\xac\x41\xa9" + "\x5c\x4d\x73\x97\x0f\x70\x68\xfa" + "\x56\x4d\x00\xc2\x3b\x1f\xc8\xb9" + "\x78\x1f\x51\x07\xe3\x9a\x13\x4e" + "\xed\x2b\x2e\xa3\xf7\x44\xb2\xe7" + "\xab\x19\x37\xd9\xba\x76\x5e\xd2" + "\xf2\x53\x15\x17\x4c\x6b\x16\x9f" + "\x02\x66\x49\xca\x7c\x91\x05\xf2" + "\x45\x36\x1e\xf5\x77\xad\x1f\x46" + "\xa8\x13\xfb\x63\xb6\x08\x99\x63" + "\x82\xa2\xed\xb3\xac\xdf\x43\x19" + "\x45\xea\x78\x73\xd9\xb7\x39\x11" + "\xa3\x13\x7c\xf8\x3f\xf7\xad\x81" + "\x48\x2f\xa9\x5c\x5f\xa0\xf0\x79" + "\xa4\x47\x7d\x80\x20\x26\xfd\x63" + "\x0a\xc7\x7e\x6d\x75\x47\xff\x76" + "\x66\x2e\x8a\x6c\x81\x35\xaf\x0b" + "\x2e\x6a\x49\x60\xc1\x10\xe1\xe1" + "\x54\x03\xa4\x09\x0c\x37\x7a\x15" + "\x23\x27\x5b\x8b\x4b\xa5\x64\x97" + "\xae\x4a\x50\x73\x1f\x66\x1c\x5c" + "\x03\x25\x3c\x8d\x48\x58\x71\x34" + "\x0e\xec\x4e\x55\x1a\x03\x6a\xe5" + "\xb6\x19\x2b\x84\x2a\x20\xd1\xea" + "\x80\x6f\x96\x0e\x05\x62\xc7\x78" + "\x87\x79\x60\x38\x46\xb4\x25\x57" + "\x6e\x16\x63\xf8\xad\x6e\xd7\x42" + "\x69\xe1\x88\xef\x6e\xd5\xb4\x9a" + "\x3c\x78\x6c\x3b\xe5\xa0\x1d\x22" + "\x86\x5c\x74\x3a\xeb\x24\x26\xc7" + "\x09\xfc\x91\x96\x47\x87\x4f\x1a" + "\xd6\x6b\x2c\x18\x47\xc0\xb8\x24" + "\xa8\x5a\x4a\x9e\xcb\x03\xe7\x2a" + "\x09\xe6\x4d\x9c\x6d\x86\x60\xf5" + "\x2f\x48\x69\x37\x9f\xf2\xd2\xcb" + "\x0e\x5a\xdd\x6e\x8a\xfb\x6a\xfe" + "\x0b\x63\xde\x87\x42\x79\x8a\x68" + "\x51\x28\x9b\x7a\xeb\xaf\xb8\x2f" + "\x9d\xd1\xc7\x45\x90\x08\xc9\x83" + "\xe9\x83\x84\xcb\x28\x69\x09\x69" + "\xce\x99\x46\x00\x54\xcb\xd8\x38" + "\xf9\x53\x4a\xbf\x31\xce\x57\x15" + "\x33\xfa\x96\x04\x33\x42\xe3\xc0" + "\xb7\x54\x4a\x65\x7a\x7c\x02\xe6" + "\x19\x95\xd0\x0e\x82\x07\x63\xf9" + "\xe1\x2b\x2a\xfc\x55\x92\x52\xc9" + "\xb5\x9f\x23\x28\x60\xe7\x20\x51" + "\x10\xd3\xed\x6d\x9b\xab\xb8\xe2" + "\x5d\x9a\x34\xb3\xbe\x9c\x64\xcb" + "\x78\xc6\x91\x22\x40\x91\x80\xbe" + "\xd7\x78\x5c\x0e\x0a\xdc\x08\xe9" + "\x67\x10\xa4\x83\x98\x79\x23\xe7" + "\x92\xda\xa9\x22\x16\xb1\xe7\x78" + "\xa3\x1c\x6c\x8f\x35\x7c\x4d\x37" + "\x2f\x6e\x0b\x50\x5c\x34\xb9\xf9" + "\xe6\x3d\x91\x0d\x32\x95\xaa\x3d" + "\x48\x11\x06\xbb\x2d\xf2\x63\x88" + "\x3f\x73\x09\xe2\x45\x56\x31\x51" + "\xfa\x5e\x4e\x62\xf7\x90\xf9\xa9" + "\x7d\x7b\x1b\xb1\xc8\x26\x6e\x66" + "\xf6\x90\x9a\x7f\xf2\x57\xcc\x23" + 
"\x59\xfa\xfa\xaa\x44\x04\x01\xa7" + "\xa4\x78\xdb\x74\x3d\x8b\xb5", + .plen = 719, + .ctext = "\xdc\xb1\x0f\x2a\xe8\x2d\x1c\x57" + "\xc4\x82\xfa\xd6\x87\xe6\x2f\x50" + "\xbd\x9e\x0a\x42\x31\xf2\xc7\xbb" + "\x21\x63\xa7\x05\x43\x33\xef\x33" + "\x5c\xd3\x47\x55\xce\x5c\xe4\xd4" + "\xe5\x07\x62\x22\xac\x01\xa8\x35" + "\x9c\x59\x34\x30\x8e\xff\x9f\xb4" + "\xd2\x4e\x74\x90\x64\xf2\x78\x5e" + "\x63\xb7\xc5\x08\x1b\x37\xa5\x9e" + "\xc0\xde\xff\xa9\x7f\x0b\xd3\x02" + "\x83\x6e\x33\xfa\x43\x11\xd3\xda" + "\x02\xcf\xcd\x4a\xc0\x78\x1f\x39" + "\x62\xcb\xa3\x95\x7e\x13\x92\x28" + "\xb2\xc4\x7a\xba\xd1\xc6\xf6\x1f" + "\xda\x0b\xf1\xd1\x99\x54\xd8\x3b" + "\x16\xf8\xe6\x97\x1e\xa7\xcf\x49" + "\x69\x84\x01\x4c\xdc\x7a\x34\xff" + "\x01\x08\xa3\x0b\x39\xac\x21\x37" + "\xd8\xb4\x04\x19\x8b\x7a\x7d\x17" + "\x44\xd1\x18\xaf\x1f\xa9\x29\xfe" + "\xfa\x77\xe0\x40\x42\x0c\x79\xb7" + "\xc3\x15\x1b\xd9\x0c\x82\xfc\x16" + "\x70\xd6\x2a\xe9\x94\x72\xc5\xa5" + "\x8a\x58\xbc\xfa\xe0\x88\x39\x4a" + "\x80\xe8\xec\xaf\x60\xac\xe7\xf8" + "\x9c\xf0\xfc\x61\x39\x07\x98\x6b" + "\x88\xe3\x98\x22\x28\x18\x4a\x2d" + "\x25\xef\x10\xe3\x83\x66\x3f\xfd" + "\xc7\x0b\xa3\xfd\x97\xa9\xf4\xbd" + "\xd8\x2a\xee\x4a\x50\xad\xcc\xb5" + "\xc7\xab\xb8\x79\x9c\xd1\xf1\x27" + "\x08\xf5\xf5\xe8\x1b\x66\xce\x41" + "\x56\x60\x94\x86\xf0\x78\xc2\xfa" + "\x5b\x63\x40\xb1\xd1\x1a\x38\x69" + "\x0b\x8c\xb2\xf5\xa2\xbe\x90\x9d" + "\x46\x23\x79\x8b\x3b\x4a\xf4\xbb" + "\x55\xf7\x58\x9d\xaf\x59\xff\x74" + "\xf3\xb9\xc4\x26\xb1\xf8\xe1\x28" + "\x8b\x5e\x8f\x6d\x64\xe7\xe8\x63" + "\xd2\x9e\xcb\xee\xae\x19\x04\x1d" + "\x05\xf0\x9d\x99\x7b\x33\x33\xae" + "\x6e\xe5\x09\xdd\x67\x51\xc4\xc8" + "\x6a\xc7\x36\x35\xc9\x93\x76\xa1" + "\xa8\x1c\xfa\x75\x92\x34\x0e\x7d" + "\x3d\x1d\xef\x00\xfd\xa5\x25\x12" + "\x7c\x91\x21\x41\xcc\x50\x47\xa9" + "\x22\x50\x24\x96\x34\x79\x3d\xe8" + "\x3f\xa0\x56\xaf\x98\x53\x55\xc3" + "\x46\x1b\x17\x54\xb8\xb0\xb7\xe0" + "\xe0\xab\x47\x6f\x06\xda\xcc\x75" + "\xa7\x96\xb7\x92\xf3\xa0\x5f\xe6" + "\xba\x97\xe3\x2f\x97\x05\xb2\x99" + "\xa0\x09\x10\x98\x9c\xd3\x2e\xd1" + "\x7e\x2a\x30\x54\x3c\xb9\x33\xe3" + "\xf2\xaf\xd3\xa5\xee\xd0\x0b\x8a" + "\x19\x54\x0f\x02\x51\x1f\x91\xdf" + "\x71\x9c\xad\x77\x35\x28\x55\x6d" + "\xcd\x7a\xd9\xa3\x41\x98\x6b\x37" + "\x19\x0f\xbe\xae\x69\xb2\x25\x01" + "\xee\x0e\x51\x4b\x53\xea\x0f\x5f" + "\x85\x74\x79\x36\x32\x0a\x2a\x40" + "\xad\x6b\x78\x41\x54\x99\xe9\xc1" + "\x2b\x6c\x9b\x42\x21\xef\xe2\x50" + "\x56\x8d\x78\xdf\x58\xbe\x0a\x0f" + "\xfc\xfc\x0d\x2e\xd0\xcb\xa6\x0a" + "\xa8\xd9\x1e\xa9\xd4\x7c\x99\x88" + "\xcf\x11\xad\x1c\xd3\x04\x63\x55" + "\xef\x85\x0b\x69\xa1\x40\xf1\x75" + "\x24\xf4\xe5\x2c\xd4\x7a\x24\x50" + "\x8f\xa2\x71\xc9\x92\x20\xcd\xcf" + "\xda\x40\xbe\xf6\xfe\x1a\xca\xc7" + "\x4a\x80\x45\x55\xcb\xdd\xb7\x01" + "\xb0\x8d\xcb\xd2\xae\xbd\xa4\xd0" + "\x5c\x10\x05\x66\x7b\xd4\xff\xd9" + "\xc4\x23\x9d\x8d\x6b\x24\xf8\x3f" + "\x73\x4d\x5c\x2b\x33\x4c\x5e\x63" + "\x74\x6d\x03\xa1\x7a\x35\x65\x17" + "\x38\x7f\x3b\xc1\x69\xcf\x61\x34" + "\x30\x21\xaf\x97\x47\x12\x3f\xa1" + "\xa7\x50\xc5\x87\xfb\x3f\x70\x32" + "\x86\x17\x5f\x25\xe4\x74\xc6\xd0" + "\x9b\x39\xe6\xe1\x5a\xec\x8f\x40" + "\xce\xcc\x37\x3b\xd8\x72\x1c\x31" + "\x75\xa4\xa6\x89\x8c\xdd\xd6\xd2" + "\x32\x3d\xe8\xc3\x54\xab\x1f\x35" + "\x52\xb4\x94\x81\xb0\x37\x3a\x03" + "\xbb\xb1\x99\x30\xa5\xf8\x21\xcd" + "\x93\x5d\xa7\x13\xed\xc7\x49\x09" + "\x70\xda\x08\x39\xaa\x15\x9e\x45" + "\x35\x2b\x0f\x5c\x8c\x8b\xc9" + "\xa8\xb8\x9f\xfd\x37\x36\x31\x7e" + "\x34\x4f\xc1\xc0\xca\x8a\x22\xfd", + .clen = 735, } }; @@ -14947,6 +15586,282 @@ static const 
struct aead_testvec sm4_ccm_tv_template[] = { "\x16\x84\x2D\x4F\xA1\x86\xF5\x6A" "\xB3\x32\x56\x97\x1F\xA1\x10\xF4", .clen = 80, + }, { /* Generated from AES-CCM test vectors */ + .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", + .klen = 16, + .iv = "\x01\x00\x00\x00\x03\x02\x01\x00" + "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07", + .alen = 8, + .ptext = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e", + .plen = 23, + .ctext = "\x7b\xff\x4a\x15\xf5\x73\xce\x82" + "\x6e\xc2\x31\x1d\xe2\x53\x02\xac" + "\xa4\x48\xf9\xe4\xf5\x1f\x81\x70" + "\x18\xbc\xb6\x84\x01\xb8\xae", + .clen = 31, + }, { + .key = "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1" + "\x53\x14\x73\x66\x8d\x88\xf6\x80", + .klen = 16, + .iv = "\x03\xa0\x20\x35\x26\xf2\x21\x8d" + "\x50\x20\xda\xe2\x00\x00\x00\x00", + .assoc = "\x5b\x9e\x13\x67\x02\x5e\xef\xc1" + "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47" + "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d" + "\xd8\x9e\x2b\x56\x10\x23\x56\xe7", + .alen = 32, + .ctext = "\x23\x58\xce\xdc\x40\xb1\xcd\x92" + "\x47\x96\x59\xfc\x8a\x26\x4f\xcf", + .clen = 16, + }, { + .key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1" + "\xff\x80\x2e\x48\x7d\x82\xf8\xb9", + .klen = 16, + .iv = "\x03\xaf\x94\x87\x78\x35\x82\x81" + "\x7f\x88\x94\x68\x00\x00\x00\x00", + .alen = 0, + .ptext = "\x00", + .plen = 0, + .ctext = "\x72\x7e\xf5\xd6\x39\x7a\x2b\x43", + .clen = 8, + }, { + .key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73" + "\xa4\x48\x93\x39\x26\x71\x4a\xc6", + .klen = 16, + .iv = "\x03\xee\x49\x83\xe9\xa9\xff\xe9" + "\x57\xba\xfd\x9e\x00\x00\x00\x00", + .assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1" + "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64" + "\xa4\xf0\x13\x05\xd1\x77\x99\x67" + "\x11\xc4\xc6\xdb\x00\x56\x36\x61", + .alen = 32, + .ptext = "\x00", + .plen = 0, + .ctext = "\xb0\x9d\xc6\xfb\x7d\xb5\xa1\x0e", + .clen = 8, + }, { + .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" + "\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b", + .klen = 16, + .iv = "\x03\xcf\x76\x3f\xd9\x95\x75\x8f" + "\x44\x89\x40\x7b\x00\x00\x00\x00", + .assoc = "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88" + "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b" + "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b" + "\xfb\x36\x03\x11\xb4\xd9\xf2\xfe", + .alen = 32, + .ptext = "\xc2\x54\xc8\xde\x78\x87\x77\x40" + "\x49\x71\xe4\xb7\xe7\xcb\x76\x61" + "\x0a\x41\xb9\xe9\xc0\x76\x54\xab" + "\x04\x49\x3b\x19\x93\x57\x25\x5d", + .plen = 32, + .ctext = "\xc9\xae\xef\x1d\xf3\x2c\xd3\x38" + "\xc9\x7f\x7e\x28\xe8\xaa\xb3\x60" + "\x49\xdc\x66\xca\x7b\x3d\xe0\x3c" + "\xcb\x45\x9c\x1b\xb2\xbe\x07\x90" + "\x87\xa6\x6b\x89\x0d\x0f\x90\xaa" + "\x7d\xf6\x5a\x9a\x68\x2b\x81\x92", + .clen = 48, + }, { + .key = "\x8b\x32\xcf\xe7\x44\xed\x13\x59" + "\x04\x38\x77\xb0\xb9\xad\xb4\x38", + .klen = 16, + .iv = "\x02\xff\xff\xff\xff\x00\x00\xff" + "\xff\xff\x00\xff\xff\x00\x00\x00", + .assoc = "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88" + "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b" + "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b" + "\xfb\x36\x03\x11\xb4\xd9\xf2\xfe" + "\xc8\xf3\x5c\x52\x10\x63", + .alen = 38, + .ptext = "\x42\xc1\xcc\x08\x48\x6f\x41\x3f" + "\x2f\x11\x66\x8b\x2a\x16\xf0\xe0" + "\x58\x83\xf0\xc3\x70\x14\xc0\x5b" + "\x3f\xec\x1d\x25\x3c\x51\xd2\x03" + "\xcf\x59\x74\x1f\xb2\x85\xb4\x07" + "\xc6\x6a\x63\x39\x8a\x5b\xde\xcb" + "\xaf\x08\x44\xbd\x6f\x91\x15\xe1" + "\xf5\x7a\x6e\x18\xbd\xdd\x61\x50" + "\x59\xa9\x97\xab\xbb\x0e\x74\x5c" + "\x00\xa4\x43\x54\x04\x54\x9b\x3b" + "\x77\xec\xfd\x5c\xa6\xe8\x7b\x08" + "\xae\xe6\x10\x3f\x32\x65\xd1\xfc" + "\xa4\x1d\x2c\x31\xfb\x33\x7a\xb3" 
+ "\x35\x23\xf4\x20\x41\xd4\xad\x82" + "\x8b\xa4\xad\x96\x1c\x20\x53\xbe" + "\x0e\xa6\xf4\xdc\x78\x49\x3e\x72" + "\xb1\xa9\xb5\x83\xcb\x08\x54\xb7" + "\xad\x49\x3a\xae\x98\xce\xa6\x66" + "\x10\x30\x90\x8c\x55\x83\xd7\x7c" + "\x8b\xe6\x53\xde\xd2\x6e\x18\x21" + "\x01\x52\xd1\x9f\x9d\xbb\x9c\x73" + "\x57\xcc\x89\x09\x75\x9b\x78\x70" + "\xed\x26\x97\x4d\xb4\xe4\x0c\xa5" + "\xfa\x70\x04\x70\xc6\x96\x1c\x7d" + "\x54\x41\x77\xa8\xe3\xb0\x7e\x96" + "\x82\xd9\xec\xa2\x87\x68\x55\xf9" + "\x8f\x9e\x73\x43\x47\x6a\x08\x36" + "\x93\x67\xa8\x2d\xde\xac\x41\xa9" + "\x5c\x4d\x73\x97\x0f\x70\x68\xfa" + "\x56\x4d\x00\xc2\x3b\x1f\xc8\xb9" + "\x78\x1f\x51\x07\xe3\x9a\x13\x4e" + "\xed\x2b\x2e\xa3\xf7\x44\xb2\xe7" + "\xab\x19\x37\xd9\xba\x76\x5e\xd2" + "\xf2\x53\x15\x17\x4c\x6b\x16\x9f" + "\x02\x66\x49\xca\x7c\x91\x05\xf2" + "\x45\x36\x1e\xf5\x77\xad\x1f\x46" + "\xa8\x13\xfb\x63\xb6\x08\x99\x63" + "\x82\xa2\xed\xb3\xac\xdf\x43\x19" + "\x45\xea\x78\x73\xd9\xb7\x39\x11" + "\xa3\x13\x7c\xf8\x3f\xf7\xad\x81" + "\x48\x2f\xa9\x5c\x5f\xa0\xf0\x79" + "\xa4\x47\x7d\x80\x20\x26\xfd\x63" + "\x0a\xc7\x7e\x6d\x75\x47\xff\x76" + "\x66\x2e\x8a\x6c\x81\x35\xaf\x0b" + "\x2e\x6a\x49\x60\xc1\x10\xe1\xe1" + "\x54\x03\xa4\x09\x0c\x37\x7a\x15" + "\x23\x27\x5b\x8b\x4b\xa5\x64\x97" + "\xae\x4a\x50\x73\x1f\x66\x1c\x5c" + "\x03\x25\x3c\x8d\x48\x58\x71\x34" + "\x0e\xec\x4e\x55\x1a\x03\x6a\xe5" + "\xb6\x19\x2b\x84\x2a\x20\xd1\xea" + "\x80\x6f\x96\x0e\x05\x62\xc7\x78" + "\x87\x79\x60\x38\x46\xb4\x25\x57" + "\x6e\x16\x63\xf8\xad\x6e\xd7\x42" + "\x69\xe1\x88\xef\x6e\xd5\xb4\x9a" + "\x3c\x78\x6c\x3b\xe5\xa0\x1d\x22" + "\x86\x5c\x74\x3a\xeb\x24\x26\xc7" + "\x09\xfc\x91\x96\x47\x87\x4f\x1a" + "\xd6\x6b\x2c\x18\x47\xc0\xb8\x24" + "\xa8\x5a\x4a\x9e\xcb\x03\xe7\x2a" + "\x09\xe6\x4d\x9c\x6d\x86\x60\xf5" + "\x2f\x48\x69\x37\x9f\xf2\xd2\xcb" + "\x0e\x5a\xdd\x6e\x8a\xfb\x6a\xfe" + "\x0b\x63\xde\x87\x42\x79\x8a\x68" + "\x51\x28\x9b\x7a\xeb\xaf\xb8\x2f" + "\x9d\xd1\xc7\x45\x90\x08\xc9\x83" + "\xe9\x83\x84\xcb\x28\x69\x09\x69" + "\xce\x99\x46\x00\x54\xcb\xd8\x38" + "\xf9\x53\x4a\xbf\x31\xce\x57\x15" + "\x33\xfa\x96\x04\x33\x42\xe3\xc0" + "\xb7\x54\x4a\x65\x7a\x7c\x02\xe6" + "\x19\x95\xd0\x0e\x82\x07\x63\xf9" + "\xe1\x2b\x2a\xfc\x55\x92\x52\xc9" + "\xb5\x9f\x23\x28\x60\xe7\x20\x51" + "\x10\xd3\xed\x6d\x9b\xab\xb8\xe2" + "\x5d\x9a\x34\xb3\xbe\x9c\x64\xcb" + "\x78\xc6\x91\x22\x40\x91\x80\xbe" + "\xd7\x78\x5c\x0e\x0a\xdc\x08\xe9" + "\x67\x10\xa4\x83\x98\x79\x23\xe7" + "\x92\xda\xa9\x22\x16\xb1\xe7\x78" + "\xa3\x1c\x6c\x8f\x35\x7c\x4d\x37" + "\x2f\x6e\x0b\x50\x5c\x34\xb9\xf9" + "\xe6\x3d\x91\x0d\x32\x95\xaa\x3d" + "\x48\x11\x06\xbb\x2d\xf2\x63\x88" + "\x3f\x73\x09\xe2\x45\x56\x31\x51" + "\xfa\x5e\x4e\x62\xf7\x90\xf9\xa9" + "\x7d\x7b\x1b\xb1\xc8\x26\x6e\x66" + "\xf6\x90\x9a\x7f\xf2\x57\xcc\x23" + "\x59\xfa\xfa\xaa\x44\x04\x01\xa7" + "\xa4\x78\xdb\x74\x3d\x8b\xb5", + .plen = 719, + .ctext = "\xc5\x50\x85\x02\x72\xa8\xb3\x62" + "\xf9\xcd\x77\x7b\x43\xa5\x04\x70" + "\x68\x40\x57\x21\x1c\xfe\xef\x05" + "\x4d\xb8\x44\xba\x59\xea\x62\x32" + "\xcb\x6b\x6a\x39\x9b\xf3\xe5\xa4" + "\x36\x38\xde\x7d\xcf\xb6\xcd\xe3" + "\x89\xbf\x37\xc9\x96\x3c\x70\x10" + "\x92\x47\xcc\xac\x6f\xf8\x55\x9a" + "\x26\x43\x34\xb4\x92\x7d\x68\xfc" + "\x60\x37\x74\x2a\x55\xba\xc7\xd7" + "\x98\x69\xb7\xcf\x42\xfd\xb2\x10" + "\xa0\x59\xe1\x2c\x73\x66\x12\x97" + "\x85\x8b\x28\xcc\x29\x02\x15\x89" + "\x23\xd3\x32\x92\x87\x57\x09\x13" + "\x04\x7e\x8b\x6c\x3a\xc1\x4e\x6c" + "\xe1\x9f\xc8\xcc\x47\x9c\xd8\x10" + "\xf4\xb7\x5c\x30\x7a\x8b\x0f\x01" + "\x52\x38\x02\x92\x99\xac\x03\x90" + 
"\x18\x32\x2d\x21\x6a\x0a\x2a\xe7" + "\xc2\xcc\x15\x84\x4e\x2b\x0b\x3a" + "\x4c\xdc\xb0\x6b\x10\xd1\x27\x10" + "\xf0\x4a\x5c\x43\xa0\x34\x34\x59" + "\x47\x43\x48\xcb\x69\xa7\xff\x52" + "\xb8\xca\x23\x09\x07\xd7\xc5\xe4" + "\x2a\x4f\x99\xd5\x83\x36\x2a\x2d" + "\x59\xd0\xca\xb0\xfa\x40\x8c\xab" + "\xdf\x69\x08\xd9\x79\x1d\xde\xa8" + "\x0b\x34\x74\x4d\xf5\xa0\x4c\x81" + "\x7f\x93\x06\x40\x24\xfe\x7d\xcd" + "\xe4\xfe\xf8\xf8\x30\xce\xd0\x5d" + "\x70\xfd\x0d\x5a\x78\x85\x74\x2d" + "\xe4\xb5\x40\x18\x99\x11\xe4\x6a" + "\xdf\xfa\x4f\x25\x2c\xde\x15\xb7" + "\x12\xd8\xc6\x90\x0d\x0f\xc9\xfb" + "\x21\xf1\xed\xfe\x98\xe1\x03\xe2" + "\x5c\xef\xb6\xc7\x87\x77\x0e\xcd" + "\xff\x78\x94\xc9\xbe\xd3\x47\xf7" + "\x8d\x37\x48\x01\x42\xe2\x17\x96" + "\xfc\xc0\xcb\x7b\x7b\x57\xaf\x3b" + "\xc9\xd0\x94\xce\x5e\x1b\xa9\x47" + "\x02\x4d\x74\xcc\x45\x1d\xd3\x2d" + "\x5f\x4f\x7f\xf2\x4b\xf9\x59\xee" + "\x9e\x9e\xb9\x95\x29\x19\xd1\x5f" + "\x72\xab\x8d\xf1\x28\xd1\x1c\xae" + "\xc2\xba\xf7\x22\x84\x2c\x83\x51" + "\x03\xad\xa3\xef\x81\xa7\xdc\xf1" + "\x44\x51\x50\x96\x70\xd1\xe5\x47" + "\x57\xf9\x30\x90\xe4\xbf\xfc\x75" + "\x14\xaa\x4d\xb7\xb1\xe7\x79\x33" + "\x43\xc2\x5c\xc1\xbc\x09\x92\x0f" + "\xa7\xaf\x68\x51\x51\xec\x0b\xc3" + "\x3d\x2b\x94\x30\x45\x29\x1b\x9e" + "\x70\x56\xf8\xd6\x67\x2d\x39\x3b" + "\x3c\xd2\xd0\xd3\xdc\x7d\x84\xe9" + "\x06\x31\x98\xa6\x5c\xbf\x10\x58" + "\xce\xbb\xa7\xe1\x65\x7e\x51\x87" + "\x70\x46\xb4\x7f\xf9\xec\x92\x1c" + "\x9b\x24\x49\xc1\x04\xbe\x1c\x5f" + "\xcc\xb3\x33\x8c\xad\xe7\xdc\x32" + "\x54\xa2\x0d\x83\x0f\x3c\x12\x5d" + "\x71\xe3\x9c\xae\x71\xa3\x2a\x10" + "\xc5\x91\xb4\x73\x96\x60\xdb\x5d" + "\x1f\xd5\x9a\xd2\x69\xc3\xd7\x4b" + "\xa2\x66\x81\x96\x4a\xaa\x02\xd6" + "\xd5\x44\x9b\x42\x3a\x15\x5f\xe7" + "\x4d\x7c\xf6\x71\x4a\xea\xe8\x43" + "\xd7\x68\xe4\xbc\x05\x87\x49\x05" + "\x3b\x47\xb2\x6d\x5f\xd1\x11\xa6" + "\x58\xd4\xa2\x45\xec\xb5\x54\x55" + "\xd3\xd6\xd2\x6a\x8b\x21\x9e\x2c" + "\xf1\x27\x4b\x5b\xe3\xff\xe0\xfd" + "\x4b\xf1\xe7\xe2\x84\xf2\x17\x37" + "\x11\x68\xc4\x92\x4b\x6b\xef\x8e" + "\x75\xf5\xc2\x7d\x5c\xe9\x7c\xfc" + "\x2b\x00\x33\x0e\x7d\x69\xd8\xd4" + "\x9b\xa8\x38\x54\x7e\x6d\x23\x51" + "\x2c\xd6\xc4\x58\x23\x1c\x22\x2a" + "\x59\xc5\x9b\xec\x9d\xbf\x03\x0f" + "\xb3\xdd\xba\x02\x22\xa0\x34\x37" + "\x19\x56\xc2\x5b\x32\x1d\x1e\x66" + "\x68\xf4\x47\x05\x04\x18\xa7\x28" + "\x80\xf2\xc7\x99\xed\x1e\x72\x48" + "\x8f\x97\x5d\xb3\x74\x42\xfd\x0c" + "\x0f\x5f\x29\x0c\xf1\x35\x22\x90" + "\xd6\x7c\xb8\xa3\x2a\x89\x38\x71" + "\xe9\x7a\x55\x3c\x3b\xf2\x6e\x1a" + "\x22\x8f\x07\x81\xc1\xe1\xf1\x76" + "\x2a\x75\xab\x86\xc4\xcc\x52\x59" + "\x83\x19\x5e\xb3\x53\xe2\x81\xdf" + "\xe6\x15\xb3\xba\x0c\x0e\xba" + "\xa9\x2c\xed\x51\xd5\x06\xc8\xc6" + "\x4b\x9f\x5d\x1b\x61\x31\xad\xf4", + .clen = 735, } }; @@ -15030,6 +15945,68 @@ static const struct hash_testvec sm4_cmac128_tv_template[] = { } }; +static const struct hash_testvec sm4_xcbc128_tv_template[] = { + { /* Generated from AES-XCBC128 test vectors */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = zeroed_string, + .digest = "\xa9\x9a\x5c\x44\xe2\x34\xee\x2c" + "\x9b\xe4\x9d\xca\x64\xb0\xa5\xc4", + .psize = 0, + .ksize = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = "\x00\x01\x02", + .digest = "\x17\x27\x62\xf3\x8b\x88\x1d\xc0" + "\x97\x35\x9c\x3e\x9f\x27\xb7\x83", + .psize = 3, + .ksize = 16, + } , { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = 
"\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .digest = "\xda\x45\xd1\xac\xec\x4d\xab\x46" + "\xdd\x59\xe0\x44\xff\x59\xd5\xfc", + .psize = 16, + .ksize = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13", + .digest = "\xbe\x24\x5d\x81\x8c\x8a\x10\xa4" + "\x8e\xc2\x16\xfa\xa4\x83\xc9\x2a", + .psize = 20, + .ksize = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .digest = "\x91\x82\x31\x56\xd5\x77\xa4\xc5" + "\x88\x2d\xce\x3a\x87\x5e\xbd\xba", + .psize = 32, + .ksize = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21", + .digest = "\x2a\xae\xa5\x24\x0c\x12\x9f\x5f" + "\x55\xfb\xae\x35\x13\x0d\x22\x2d", + .psize = 34, + .ksize = 16, + } +}; + /* Cast6 test vectors from RFC 2612 */ static const struct cipher_testvec cast6_tv_template[] = { { diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c index f921f30334f4873b270b3725b320746c8bf9896c..bf4f28742f770a429bd1f992da81785e31e1d4d8 100644 --- a/crypto/twofish_common.c +++ b/crypto/twofish_common.c @@ -25,9 +25,9 @@ * Third Edition. */ +#include #include #include -#include #include #include #include diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c index 86b2f067a4162acad710c0c9fe9125e8c7aa8ef0..557915e4062d7ca55eb62859cca742a5d46d0bfb 100644 --- a/crypto/twofish_generic.c +++ b/crypto/twofish_generic.c @@ -25,12 +25,12 @@ */ #include +#include #include #include #include #include #include -#include #include /* Macros to compute the g() function in the encryption and decryption diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c index a5ee84a4017a3acb7489688c210e68d4968f0f67..1b69824286fd3f20eb1a102f0af16da72b5eb41e 100644 --- a/drivers/accel/drm_accel.c +++ b/drivers/accel/drm_accel.c @@ -27,7 +27,7 @@ static struct device_type accel_sysfs_device_minor = { .name = "accel_minor" }; -static char *accel_devnode(struct device *dev, umode_t *mode) +static char *accel_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "accel/%s", dev_name(dev)); } diff --git a/drivers/accessibility/speakup/Makefile b/drivers/accessibility/speakup/Makefile index ba69b0803d4285b00039290ee5b22614807cfb1c..6f6a83565c0ded227260a93a16243e5ae240fe7a 100644 --- a/drivers/accessibility/speakup/Makefile +++ b/drivers/accessibility/speakup/Makefile @@ -40,7 +40,9 @@ hostprogs += makemapdata makemapdata-objs := makemapdata.o quiet_cmd_mkmap = MKMAP $@ - cmd_mkmap = TOPDIR=$(srctree) $(obj)/makemapdata > $@ + cmd_mkmap = TOPDIR=$(srctree) \ + SPKDIR=$(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD),$(srctree)/drivers/accessibility/speakup) \ + $(obj)/makemapdata > $@ $(obj)/mapdata.h: $(obj)/makemapdata $(call cmd,mkmap) diff --git a/drivers/accessibility/speakup/kobjects.c b/drivers/accessibility/speakup/kobjects.c index 41ae24ab5d0860566763717cef878cdae9b534b5..a7522d4098025a0abfbd41ecfb54246f3db4a80d 100644 --- a/drivers/accessibility/speakup/kobjects.c +++ b/drivers/accessibility/speakup/kobjects.c @@ -914,6 +914,8 @@ 
static struct kobj_attribute say_word_ctl_attribute =
	__ATTR(say_word_ctl, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute spell_delay_attribute =
	__ATTR(spell_delay, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute cur_phonetic_attribute =
+	__ATTR(cur_phonetic, 0644, spk_var_show, spk_var_store);

/*
 * These attributes are i18n related.
@@ -967,6 +969,7 @@ static struct attribute *main_attrs[] = {
	&say_control_attribute.attr,
	&say_word_ctl_attribute.attr,
	&spell_delay_attribute.attr,
+	&cur_phonetic_attribute.attr,
	NULL,
};

diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 73db0cb44fc7b075ce84ddc70300b81ae5344ef9..4733fd6334ab8d3948eef2f19a432ba83a7934ce 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -65,6 +65,7 @@ int spk_key_echo, spk_say_word_ctl;
int spk_say_ctrl, spk_bell_pos;
short spk_punc_mask;
int spk_punc_level, spk_reading_punc;
+int spk_cur_phonetic;
char spk_str_caps_start[MAXVARLEN + 1] = "\0";
char spk_str_caps_stop[MAXVARLEN + 1] = "\0";
char spk_str_pause[MAXVARLEN + 1] = "\0";
@@ -1268,20 +1269,29 @@ int spk_set_key_info(const u_char *key_info, u_char *k_buffer)
	return 0;
}

-static struct var_t spk_vars[] = {
+enum spk_vars_id {
+	BELL_POS_ID = 0, SPELL_DELAY_ID, ATTRIB_BLEEP_ID,
+	BLEEPS_ID, BLEEP_TIME_ID, PUNC_LEVEL_ID,
+	READING_PUNC_ID, CURSOR_TIME_ID, SAY_CONTROL_ID,
+	SAY_WORD_CTL_ID, NO_INTERRUPT_ID, KEY_ECHO_ID,
+	CUR_PHONETIC_ID, V_LAST_VAR_ID, NB_ID
+};
+
+static struct var_t spk_vars[NB_ID] = {
	/* bell must be first to set high limit */
-	{BELL_POS, .u.n = {NULL, 0, 0, 0, 0, 0, NULL} },
-	{SPELL_DELAY, .u.n = {NULL, 0, 0, 4, 0, 0, NULL} },
-	{ATTRIB_BLEEP, .u.n = {NULL, 1, 0, 3, 0, 0, NULL} },
-	{BLEEPS, .u.n = {NULL, 3, 0, 3, 0, 0, NULL} },
-	{BLEEP_TIME, .u.n = {NULL, 30, 1, 200, 0, 0, NULL} },
-	{PUNC_LEVEL, .u.n = {NULL, 1, 0, 4, 0, 0, NULL} },
-	{READING_PUNC, .u.n = {NULL, 1, 0, 4, 0, 0, NULL} },
-	{CURSOR_TIME, .u.n = {NULL, 120, 50, 600, 0, 0, NULL} },
-	{SAY_CONTROL, TOGGLE_0},
-	{SAY_WORD_CTL, TOGGLE_0},
-	{NO_INTERRUPT, TOGGLE_0},
-	{KEY_ECHO, .u.n = {NULL, 1, 0, 2, 0, 0, NULL} },
+	[BELL_POS_ID] = { BELL_POS, .u.n = {NULL, 0, 0, 0, 0, 0, NULL} },
+	[SPELL_DELAY_ID] = { SPELL_DELAY, .u.n = {NULL, 0, 0, 4, 0, 0, NULL} },
+	[ATTRIB_BLEEP_ID] = { ATTRIB_BLEEP, .u.n = {NULL, 1, 0, 3, 0, 0, NULL} },
+	[BLEEPS_ID] = { BLEEPS, .u.n = {NULL, 3, 0, 3, 0, 0, NULL} },
+	[BLEEP_TIME_ID] = { BLEEP_TIME, .u.n = {NULL, 30, 1, 200, 0, 0, NULL} },
+	[PUNC_LEVEL_ID] = { PUNC_LEVEL, .u.n = {NULL, 1, 0, 4, 0, 0, NULL} },
+	[READING_PUNC_ID] = { READING_PUNC, .u.n = {NULL, 1, 0, 4, 0, 0, NULL} },
+	[CURSOR_TIME_ID] = { CURSOR_TIME, .u.n = {NULL, 120, 50, 600, 0, 0, NULL} },
+	[SAY_CONTROL_ID] = { SAY_CONTROL, TOGGLE_0},
+	[SAY_WORD_CTL_ID] = { SAY_WORD_CTL, TOGGLE_0},
+	[NO_INTERRUPT_ID] = { NO_INTERRUPT, TOGGLE_0},
+	[KEY_ECHO_ID] = { KEY_ECHO, .u.n = {NULL, 1, 0, 2, 0, 0, NULL} },
+	[CUR_PHONETIC_ID] = { CUR_PHONETIC, .u.n = {NULL, 0, 0, 1, 0, 0, NULL} },
	V_LAST_VAR
};

@@ -1712,8 +1722,12 @@ static void cursor_done(struct timer_list *unused)
		speakup_win_say(vc);
	else if (is_cursor == 1 || is_cursor == 4)
		say_line_from_to(vc, 0, vc->vc_cols, 0);
-	else
-		say_char(vc);
+	else {
+		if (spk_cur_phonetic == 1)
+			say_phonetic_char(vc);
+		else
+			say_char(vc);
+	}
	spk_keydown = 0;
	is_cursor = 0;
out:
@@ -2453,5 +2467,33 @@ out:
	return err;
}

+module_param_named(bell_pos, spk_vars[BELL_POS_ID].u.n.default_val, int, 0444);
+module_param_named(spell_delay, spk_vars[SPELL_DELAY_ID].u.n.default_val, int, 0444);
+module_param_named(attrib_bleep, spk_vars[ATTRIB_BLEEP_ID].u.n.default_val, int, 0444);
+module_param_named(bleeps, spk_vars[BLEEPS_ID].u.n.default_val, int, 0444);
+module_param_named(bleep_time, spk_vars[BLEEP_TIME_ID].u.n.default_val, int, 0444);
+module_param_named(punc_level, spk_vars[PUNC_LEVEL_ID].u.n.default_val, int, 0444);
+module_param_named(reading_punc, spk_vars[READING_PUNC_ID].u.n.default_val, int, 0444);
+module_param_named(cursor_time, spk_vars[CURSOR_TIME_ID].u.n.default_val, int, 0444);
+module_param_named(say_control, spk_vars[SAY_CONTROL_ID].u.n.default_val, int, 0444);
+module_param_named(say_word_ctl, spk_vars[SAY_WORD_CTL_ID].u.n.default_val, int, 0444);
+module_param_named(no_interrupt, spk_vars[NO_INTERRUPT_ID].u.n.default_val, int, 0444);
+module_param_named(key_echo, spk_vars[KEY_ECHO_ID].u.n.default_val, int, 0444);
+module_param_named(cur_phonetic, spk_vars[CUR_PHONETIC_ID].u.n.default_val, int, 0444);
+
+MODULE_PARM_DESC(bell_pos, "This works much like a typewriter bell. If for example 72 is echoed to bell_pos, it will beep the PC speaker when typing on a line past character 72.");
+MODULE_PARM_DESC(spell_delay, "This controls how fast a word is spelled when speakup's spell word review command is pressed.");
+MODULE_PARM_DESC(attrib_bleep, "Beeps the PC speaker when there is an attribute change such as background color when using speakup review commands. One = on, zero = off.");
+MODULE_PARM_DESC(bleeps, "This controls whether one hears beeps through the PC speaker when using speakup review commands.");
+MODULE_PARM_DESC(bleep_time, "This controls the duration of the PC speaker beeps speakup produces.");
+MODULE_PARM_DESC(punc_level, "Controls the level of punctuation spoken as the screen is displayed, not reviewed.");
+MODULE_PARM_DESC(reading_punc, "Controls the level of punctuation when reviewing the screen with speakup's screen review commands.");
+MODULE_PARM_DESC(cursor_time, "This controls cursor delay when using arrow keys.");
+MODULE_PARM_DESC(say_control, "This controls if speakup speaks shift, alt and control when those keys are pressed or not.");
+MODULE_PARM_DESC(say_word_ctl, "Sets the say_word_ctl variable on load.");
+MODULE_PARM_DESC(no_interrupt, "Controls if typing interrupts output from speakup.");
+MODULE_PARM_DESC(key_echo, "Controls if speakup speaks keys when they are typed. One = on, zero = off or don't echo keys.");
+MODULE_PARM_DESC(cur_phonetic, "Controls if speakup speaks letters phonetically during navigation. One = on, zero = off or don't speak phonetically.");
+
module_init(speakup_init);
module_exit(speakup_exit);
diff --git a/drivers/accessibility/speakup/makemapdata.c b/drivers/accessibility/speakup/makemapdata.c
index 81db9ebf1fff8a28448c4529e0129117ff45a787..d7d41bb9b05fbb590c436aba29e6a94b2b9af3d0 100644
--- a/drivers/accessibility/speakup/makemapdata.c
+++ b/drivers/accessibility/speakup/makemapdata.c
@@ -51,12 +51,15 @@ main(int argc, char *argv[])
{
	int value, i;
	struct st_key *this;
-	const char *dir_name;
+	const char *dir_name, *spk_dir_name;
	char *cp;

	dir_name = getenv("TOPDIR");
	if (!dir_name)
		dir_name = ".";
+	spk_dir_name = getenv("SPKDIR");
+	if (!spk_dir_name)
+		spk_dir_name = "drivers/accessibility/speakup";
	bzero(key_table, sizeof(key_table));
	add_key("shift", 1, is_shift);
	add_key("altgr", 2, is_shift);
@@ -83,7 +86,7 @@ main(int argc, char *argv[])
			add_key(def_name, value, is_input);
	}

-	open_input(dir_name, "drivers/accessibility/speakup/spk_priv_keyinfo.h");
+	open_input(spk_dir_name, "spk_priv_keyinfo.h");
	while (get_define()) {
		if (strlen(def_val) > 5) {
			//if (def_val[0] == '(')
diff --git a/drivers/accessibility/speakup/speakup.h b/drivers/accessibility/speakup/speakup.h
index 33594f5a79837e871c091639668e80eccb1b023a..364fde99749ed187bbba8d6e4a25ba7039efdb8f 100644
--- a/drivers/accessibility/speakup/speakup.h
+++ b/drivers/accessibility/speakup/speakup.h
@@ -105,6 +105,7 @@ extern int spk_no_intr, spk_say_ctrl, spk_say_word_ctl, spk_punc_level;
extern int spk_reading_punc, spk_attrib_bleep, spk_bleeps;
extern int spk_bleep_time, spk_bell_pos;
extern int spk_spell_delay, spk_key_echo;
+extern int spk_cur_phonetic;
extern short spk_punc_mask;
extern short spk_pitch_shift, synth_flags;
extern bool spk_quiet_boot;
diff --git a/drivers/accessibility/speakup/speakup_acntpc.c b/drivers/accessibility/speakup/speakup_acntpc.c
index a55b60754eb1059abc7760c6518fa4e7840ef02b..a27e6bbf05da308b80b125ded1d2b1f1bf8e9a49 100644
--- a/drivers/accessibility/speakup/speakup_acntpc.c
+++ b/drivers/accessibility/speakup/speakup_acntpc.c
@@ -34,14 +34,23 @@ static int synth_port_control;
static int port_forced;
static unsigned int synth_portlist[] = { 0x2a8, 0 };

-static struct var_t vars[] = {
-	{ CAPS_START, .u.s = {"\033P8" } },
-	{ CAPS_STOP, .u.s = {"\033P5" } },
-	{ RATE, .u.n = {"\033R%c", 9, 0, 17, 0, 0, "0123456789abcdefgh" } },
-	{ PITCH, .u.n = {"\033P%d", 5, 0, 9, 0, 0, NULL } },
-	{ VOL, .u.n = {"\033A%d", 5, 0, 9, 0, 0, NULL } },
-	{ TONE, .u.n = {"\033V%d", 5, 0, 9, 0, 0, NULL } },
-	{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
+enum default_vars_id {
+	CAPS_START_ID = 0, CAPS_STOP_ID,
+	RATE_ID, PITCH_ID,
+	VOL_ID, TONE_ID,
+	DIRECT_ID, V_LAST_VAR_ID,
+	NB_ID
+};
+
+
+static struct var_t vars[NB_ID] = {
+	[CAPS_START_ID] = { CAPS_START, .u.s = {"\033P8" } },
+	[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\033P5" } },
+	[RATE_ID] = { RATE, .u.n = {"\033R%c", 9, 0, 17, 0, 0, "0123456789abcdefgh" } },
+	[PITCH_ID] = { PITCH, .u.n = {"\033P%d", 5, 0, 9, 0, 0, NULL } },
+	[VOL_ID] = { VOL, .u.n = {"\033A%d", 5, 0, 9, 0, 0, NULL } },
+	[TONE_ID] = { TONE, .u.n = {"\033V%d", 5, 0, 9, 0, 0, NULL } },
+	[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
	V_LAST_VAR
};
@@ -305,9 +314,22 @@ static void accent_release(struct spk_synth *synth)

module_param_hw_named(port, port_forced, int, ioport, 0444);
module_param_named(start, synth_acntpc.startup, short, 0444);
+module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
+module_param_named(pitch,
vars[PITCH_ID].u.n.default_val, int, 0444); +module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444); +module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); + + MODULE_PARM_DESC(port, "Set the port for the synthesizer (override probing)."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); +MODULE_PARM_DESC(rate, "Set the rate variable on load."); +MODULE_PARM_DESC(pitch, "Set the pitch variable on load."); +MODULE_PARM_DESC(vol, "Set the vol variable on load."); +MODULE_PARM_DESC(tone, "Set the tone variable on load."); +MODULE_PARM_DESC(direct, "Set the direct variable on load."); + module_spk_synth(synth_acntpc); diff --git a/drivers/accessibility/speakup/speakup_acntsa.c b/drivers/accessibility/speakup/speakup_acntsa.c index 2697c51ed6b565d1a25e3edd63cfbbc67279da99..26bb9f9399d33940afecb04672e53635a87ba3c9 100644 --- a/drivers/accessibility/speakup/speakup_acntsa.c +++ b/drivers/accessibility/speakup/speakup_acntsa.c @@ -19,14 +19,24 @@ static int synth_probe(struct spk_synth *synth); -static struct var_t vars[] = { - { CAPS_START, .u.s = {"\033P8" } }, - { CAPS_STOP, .u.s = {"\033P5" } }, - { RATE, .u.n = {"\033R%c", 9, 0, 17, 0, 0, "0123456789abcdefgh" } }, - { PITCH, .u.n = {"\033P%d", 5, 0, 9, 0, 0, NULL } }, - { VOL, .u.n = {"\033A%d", 9, 0, 9, 0, 0, NULL } }, - { TONE, .u.n = {"\033V%d", 5, 0, 9, 0, 0, NULL } }, - { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, + +enum default_vars_id { + CAPS_START_ID = 0, CAPS_STOP_ID, + RATE_ID, PITCH_ID, + VOL_ID, TONE_ID, + DIRECT_ID, V_LAST_VAR_ID, + NB_ID +}; + + +static struct var_t vars[NB_ID] = { + [CAPS_START_ID] = { CAPS_START, .u.s = {"\033P8" } }, + [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\033P5" } }, + [RATE_ID] = { RATE, .u.n = {"\033R%c", 9, 0, 17, 0, 0, "0123456789abcdefgh" } }, + [PITCH_ID] = { PITCH, .u.n = {"\033P%d", 5, 0, 9, 0, 0, NULL } }, + [VOL_ID] = { VOL, .u.n = {"\033A%d", 9, 0, 9, 0, 0, NULL } }, + [TONE_ID] = { TONE, .u.n = {"\033V%d", 5, 0, 9, 0, 0, NULL } }, + [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; @@ -129,10 +139,21 @@ static int synth_probe(struct spk_synth *synth) module_param_named(ser, synth_acntsa.ser, int, 0444); module_param_named(dev, synth_acntsa.dev_name, charp, 0444); module_param_named(start, synth_acntsa.startup, short, 0444); +module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444); +module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444); +module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444); +module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(dev, "Set the device e.g. 
ttyUSB0, for the synthesizer."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); +MODULE_PARM_DESC(rate, "Set the rate variable on load."); +MODULE_PARM_DESC(pitch, "Set the pitch variable on load."); +MODULE_PARM_DESC(vol, "Set the vol variable on load."); +MODULE_PARM_DESC(tone, "Set the tone variable on load."); +MODULE_PARM_DESC(direct, "Set the direct variable on load."); + module_spk_synth(synth_acntsa); diff --git a/drivers/accessibility/speakup/speakup_apollo.c b/drivers/accessibility/speakup/speakup_apollo.c index c84a7e0864b7c15cf2f263c5c985ac8bba6f9ee1..d2fbb3f572219a7ea3eadae19608781a61662b49 100644 --- a/drivers/accessibility/speakup/speakup_apollo.c +++ b/drivers/accessibility/speakup/speakup_apollo.c @@ -24,15 +24,28 @@ static void do_catch_up(struct spk_synth *synth); -static struct var_t vars[] = { - { CAPS_START, .u.s = {"cap, " } }, - { CAPS_STOP, .u.s = {"" } }, - { RATE, .u.n = {"@W%d", 6, 1, 9, 0, 0, NULL } }, - { PITCH, .u.n = {"@F%x", 10, 0, 15, 0, 0, NULL } }, - { VOL, .u.n = {"@A%x", 10, 0, 15, 0, 0, NULL } }, - { VOICE, .u.n = {"@V%d", 1, 1, 6, 0, 0, NULL } }, - { LANG, .u.n = {"@=%d,", 1, 1, 4, 0, 0, NULL } }, - { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, + + +enum default_vars_id { + CAPS_START_ID = 0, CAPS_STOP_ID, + RATE_ID, PITCH_ID, + VOL_ID, VOICE_ID, LANG_ID, + DIRECT_ID, V_LAST_VAR_ID, + NB_ID +}; + + + + +static struct var_t vars[NB_ID] = { + [CAPS_START_ID] = { CAPS_START, .u.s = {"cap, " } }, + [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"" } }, + [RATE_ID] = { RATE, .u.n = {"@W%d", 6, 1, 9, 0, 0, NULL } }, + [PITCH_ID] = { PITCH, .u.n = {"@F%x", 10, 0, 15, 0, 0, NULL } }, + [VOL_ID] = { VOL, .u.n = {"@A%x", 10, 0, 15, 0, 0, NULL } }, + [VOICE_ID] = { VOICE, .u.n = {"@V%d", 1, 1, 6, 0, 0, NULL } }, + [LANG_ID] = { LANG, .u.n = {"@=%d,", 1, 1, 4, 0, 0, NULL } }, + [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; @@ -193,10 +206,25 @@ static void do_catch_up(struct spk_synth *synth) module_param_named(ser, synth_apollo.ser, int, 0444); module_param_named(dev, synth_apollo.dev_name, charp, 0444); module_param_named(start, synth_apollo.startup, short, 0444); +module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444); +module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444); +module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444); +module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444); +module_param_named(lang, vars[LANG_ID].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); + MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(dev, "Set the device e.g. 
ttyUSB0, for the synthesizer."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); +MODULE_PARM_DESC(rate, "Set the rate variable on load."); +MODULE_PARM_DESC(pitch, "Set the pitch variable on load."); +MODULE_PARM_DESC(vol, "Set the vol variable on load."); +MODULE_PARM_DESC(voice, "Set the voice variable on load."); +MODULE_PARM_DESC(lang, "Set the lang variable on load."); +MODULE_PARM_DESC(direct, "Set the direct variable on load."); + + module_spk_synth(synth_apollo); diff --git a/drivers/accessibility/speakup/speakup_audptr.c b/drivers/accessibility/speakup/speakup_audptr.c index 4d16d60db9b2300fdfa3b5960d6a727870565cb9..55813f3e40ff5f2b397c5b755277ec9e0edca546 100644 --- a/drivers/accessibility/speakup/speakup_audptr.c +++ b/drivers/accessibility/speakup/speakup_audptr.c @@ -19,15 +19,24 @@ static int synth_probe(struct spk_synth *synth); static void synth_flush(struct spk_synth *synth); -static struct var_t vars[] = { - { CAPS_START, .u.s = {"\x05[f99]" } }, - { CAPS_STOP, .u.s = {"\x05[f80]" } }, - { RATE, .u.n = {"\x05[r%d]", 10, 0, 20, 100, -10, NULL } }, - { PITCH, .u.n = {"\x05[f%d]", 80, 39, 4500, 0, 0, NULL } }, - { VOL, .u.n = {"\x05[g%d]", 21, 0, 40, 0, 0, NULL } }, - { TONE, .u.n = {"\x05[s%d]", 9, 0, 63, 0, 0, NULL } }, - { PUNCT, .u.n = {"\x05[A%c]", 0, 0, 3, 0, 0, "nmsa" } }, - { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, + +enum default_vars_id { + CAPS_START_ID = 0, CAPS_STOP_ID, + RATE_ID, PITCH_ID, + VOL_ID, TONE_ID, PUNCT_ID, + DIRECT_ID, V_LAST_VAR_ID, + NB_ID +}; + +static struct var_t vars[NB_ID] = { + [CAPS_START_ID] = { CAPS_START, .u.s = {"\x05[f99]" } }, + [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x05[f80]" } }, + [RATE_ID] = { RATE, .u.n = {"\x05[r%d]", 10, 0, 20, 100, -10, NULL } }, + [PITCH_ID] = { PITCH, .u.n = {"\x05[f%d]", 80, 39, 4500, 0, 0, NULL } }, + [VOL_ID] = { VOL, .u.n = {"\x05[g%d]", 21, 0, 40, 0, 0, NULL } }, + [TONE_ID] = { TONE, .u.n = {"\x05[s%d]", 9, 0, 63, 0, 0, NULL } }, + [PUNCT_ID] = { PUNCT, .u.n = {"\x05[A%c]", 0, 0, 3, 0, 0, "nmsa" } }, + [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; @@ -158,10 +167,25 @@ static int synth_probe(struct spk_synth *synth) module_param_named(ser, synth_audptr.ser, int, 0444); module_param_named(dev, synth_audptr.dev_name, charp, 0444); module_param_named(start, synth_audptr.startup, short, 0444); +module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444); +module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444); +module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444); +module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444); +module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); + + MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(dev, "Set the device e.g. 
ttyUSB0, for the synthesizer."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); +MODULE_PARM_DESC(rate, "Set the rate variable on load."); +MODULE_PARM_DESC(pitch, "Set the pitch variable on load."); +MODULE_PARM_DESC(vol, "Set the vol variable on load."); +MODULE_PARM_DESC(tone, "Set the tone variable on load."); +MODULE_PARM_DESC(punct, "Set the punct variable on load."); +MODULE_PARM_DESC(direct, "Set the direct variable on load."); + module_spk_synth(synth_audptr); diff --git a/drivers/accessibility/speakup/speakup_bns.c b/drivers/accessibility/speakup/speakup_bns.c index b8103eb117b861662c50292325ea1965ad5ec328..60507756499c2339a84a9b920c032f0358d68836 100644 --- a/drivers/accessibility/speakup/speakup_bns.c +++ b/drivers/accessibility/speakup/speakup_bns.c @@ -16,14 +16,23 @@ #define SYNTH_CLEAR 0x18 #define PROCSPEECH '\r' -static struct var_t vars[] = { - { CAPS_START, .u.s = {"\x05\x31\x32P" } }, - { CAPS_STOP, .u.s = {"\x05\x38P" } }, - { RATE, .u.n = {"\x05%dE", 8, 1, 16, 0, 0, NULL } }, - { PITCH, .u.n = {"\x05%dP", 8, 0, 16, 0, 0, NULL } }, - { VOL, .u.n = {"\x05%dV", 8, 0, 16, 0, 0, NULL } }, - { TONE, .u.n = {"\x05%dT", 8, 0, 16, 0, 0, NULL } }, - { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, + +enum default_vars_id { + CAPS_START_ID = 0, CAPS_STOP_ID, + RATE_ID, PITCH_ID, + VOL_ID, TONE_ID, + DIRECT_ID, V_LAST_VAR_ID, + NB_ID +}; + +static struct var_t vars[NB_ID] = { + [CAPS_START_ID] = { CAPS_START, .u.s = {"\x05\x31\x32P" } }, + [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x05\x38P" } }, + [RATE_ID] = { RATE, .u.n = {"\x05%dE", 8, 1, 16, 0, 0, NULL } }, + [PITCH_ID] = { PITCH, .u.n = {"\x05%dP", 8, 0, 16, 0, 0, NULL } }, + [VOL_ID] = { VOL, .u.n = {"\x05%dV", 8, 0, 16, 0, 0, NULL } }, + [TONE_ID] = { TONE, .u.n = {"\x05%dT", 8, 0, 16, 0, 0, NULL } }, + [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; @@ -113,10 +122,21 @@ static struct spk_synth synth_bns = { module_param_named(ser, synth_bns.ser, int, 0444); module_param_named(dev, synth_bns.dev_name, charp, 0444); module_param_named(start, synth_bns.startup, short, 0444); +module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444); +module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444); +module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444); +module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); + MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(dev, "Set the device e.g. 
ttyUSB0, for the synthesizer."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); +MODULE_PARM_DESC(rate, "Set the rate variable on load."); +MODULE_PARM_DESC(pitch, "Set the pitch variable on load."); +MODULE_PARM_DESC(vol, "Set the vol variable on load."); +MODULE_PARM_DESC(tone, "Set the tone variable on load."); +MODULE_PARM_DESC(direct, "Set the direct variable on load."); module_spk_synth(synth_bns); diff --git a/drivers/accessibility/speakup/speakup_decext.c b/drivers/accessibility/speakup/speakup_decext.c index eaebf62300a41a79b4b7715a5d36a9f04404e03c..271bcf279bf9b0f398fd006807b564001a2211de 100644 --- a/drivers/accessibility/speakup/speakup_decext.c +++ b/drivers/accessibility/speakup/speakup_decext.c @@ -38,16 +38,25 @@ static void synth_flush(struct spk_synth *synth); static int in_escape; -static struct var_t vars[] = { - { CAPS_START, .u.s = {"[:dv ap 222]" } }, - { CAPS_STOP, .u.s = {"[:dv ap 100]" } }, - { RATE, .u.n = {"[:ra %d]", 7, 0, 9, 150, 25, NULL } }, - { PITCH, .u.n = {"[:dv ap %d]", 100, 0, 100, 0, 0, NULL } }, - { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } }, - { VOL, .u.n = {"[:dv gv %d]", 13, 0, 16, 0, 5, NULL } }, - { PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } }, - { VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } }, - { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, + +enum default_vars_id { + CAPS_START_ID = 0, CAPS_STOP_ID, + RATE_ID, PITCH_ID, INFLECTION_ID, + VOL_ID, PUNCT_ID, VOICE_ID, + DIRECT_ID, V_LAST_ID, + NB_ID, +}; + +static struct var_t vars[NB_ID] = { + [CAPS_START_ID] = { CAPS_START, .u.s = {"[:dv ap 222]" } }, + [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"[:dv ap 100]" } }, + [RATE_ID] = { RATE, .u.n = {"[:ra %d]", 7, 0, 9, 150, 25, NULL } }, + [PITCH_ID] = { PITCH, .u.n = {"[:dv ap %d]", 100, 0, 100, 0, 0, NULL } }, + [INFLECTION_ID] = { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } }, + [VOL_ID] = { VOL, .u.n = {"[:dv gv %d]", 13, 0, 16, 0, 5, NULL } }, + [PUNCT_ID] = { PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } }, + [VOICE_ID] = { VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } }, + [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; @@ -225,10 +234,25 @@ static void synth_flush(struct spk_synth *synth) module_param_named(ser, synth_decext.ser, int, 0444); module_param_named(dev, synth_decext.dev_name, charp, 0444); module_param_named(start, synth_decext.startup, short, 0444); +module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444); +module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444); +module_param_named(inflection, vars[INFLECTION_ID].u.n.default_val, int, 0444); +module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444); +module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444); +module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); + MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(dev, "Set the device e.g. 
ttyUSB0, for the synthesizer."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); +MODULE_PARM_DESC(rate, "Set the rate variable on load."); +MODULE_PARM_DESC(pitch, "Set the pitch variable on load."); +MODULE_PARM_DESC(inflection, "Set the inflection variable on load."); +MODULE_PARM_DESC(vol, "Set the vol variable on load."); +MODULE_PARM_DESC(punct, "Set the punct variable on load."); +MODULE_PARM_DESC(voice, "Set the voice variable on load."); +MODULE_PARM_DESC(direct, "Set the direct variable on load."); module_spk_synth(synth_decext); diff --git a/drivers/accessibility/speakup/speakup_decpc.c b/drivers/accessibility/speakup/speakup_decpc.c index dec314dee214f0ccdd63f0a6f94f615f892fcbdc..083ca92658057a70548e0d55304e3ccde434a144 100644 --- a/drivers/accessibility/speakup/speakup_decpc.c +++ b/drivers/accessibility/speakup/speakup_decpc.c @@ -134,16 +134,27 @@ static int synth_portlist[] = { 0x340, 0x350, 0x240, 0x250, 0 }; static int in_escape, is_flushing; static int dt_stat, dma_state; -static struct var_t vars[] = { - { CAPS_START, .u.s = {"[:dv ap 200]" } }, - { CAPS_STOP, .u.s = {"[:dv ap 100]" } }, - { RATE, .u.n = {"[:ra %d]", 9, 0, 18, 150, 25, NULL } }, - { PITCH, .u.n = {"[:dv ap %d]", 80, 0, 100, 20, 0, NULL } }, - { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } }, - { VOL, .u.n = {"[:vo se %d]", 5, 0, 9, 5, 10, NULL } }, - { PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } }, - { VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } }, - { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, + +enum default_vars_id { + CAPS_START_ID = 0, CAPS_STOP_ID, + RATE_ID, PITCH_ID, INFLECTION_ID, + VOL_ID, PUNCT_ID, VOICE_ID, + DIRECT_ID, V_LAST_VAR_ID, + NB_ID, +}; + + + +static struct var_t vars[NB_ID] = { + [CAPS_START_ID] = { CAPS_START, .u.s = {"[:dv ap 200]" } }, + [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"[:dv ap 100]" } }, + [RATE_ID] = { RATE, .u.n = {"[:ra %d]", 9, 0, 18, 150, 25, NULL } }, + [PITCH_ID] = { PITCH, .u.n = {"[:dv ap %d]", 80, 0, 100, 20, 0, NULL } }, + [INFLECTION_ID] = { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } }, + [VOL_ID] = { VOL, .u.n = {"[:vo se %d]", 5, 0, 9, 5, 10, NULL } }, + [PUNCT_ID] = { PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } }, + [VOICE_ID] = { VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } }, + [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; @@ -483,8 +494,25 @@ static void dtpc_release(struct spk_synth *synth) } module_param_named(start, synth_dec_pc.startup, short, 0444); +module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444); +module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444); +module_param_named(inflection, vars[INFLECTION_ID].u.n.default_val, int, 0444); +module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444); +module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444); +module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); + + + MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); +MODULE_PARM_DESC(rate, "Set the rate variable on load."); +MODULE_PARM_DESC(pitch, "Set the pitch variable on load."); +MODULE_PARM_DESC(inflection, "Set the inflection variable on load."); +MODULE_PARM_DESC(vol, "Set the vol variable on load."); +MODULE_PARM_DESC(punct, "Set the punct variable on load."); +MODULE_PARM_DESC(voice, "Set the voice variable on load."); +MODULE_PARM_DESC(direct, "Set the direct 
variable on load."); module_spk_synth(synth_dec_pc); diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c index 2a7e8d727904aec475958f0b050fdda41069c05b..56334405d865c5d35bc22185c2afe349bd8c3d5b 100644 --- a/drivers/accessibility/speakup/speakup_dectlk.c +++ b/drivers/accessibility/speakup/speakup_dectlk.c @@ -40,16 +40,24 @@ static int is_flushing; static DEFINE_SPINLOCK(flush_lock); static DECLARE_WAIT_QUEUE_HEAD(flush); -static struct var_t vars[] = { - { CAPS_START, .u.s = {"[:dv ap 160] " } }, - { CAPS_STOP, .u.s = {"[:dv ap 100 ] " } }, - { RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } }, - { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } }, - { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } }, - { VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } }, - { PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } }, - { VOICE, .u.n = {"[:n%c] ", 0, 0, 9, 0, 0, "phfdburwkv" } }, - { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, +enum default_vars_id { + CAPS_START_ID = 0, CAPS_STOP_ID, + RATE_ID, PITCH_ID, INFLECTION_ID, + VOL_ID, PUNCT_ID, VOICE_ID, + DIRECT_ID, V_LAST_VAR_ID, + NB_ID, +}; + +static struct var_t vars[NB_ID] = { + [CAPS_START_ID] = { CAPS_START, .u.s = {"[:dv ap 160] " } }, + [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"[:dv ap 100 ] " } }, + [RATE_ID] = { RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } }, + [PITCH_ID] = { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } }, + [INFLECTION_ID] = { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } }, + [VOL_ID] = { VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } }, + [PUNCT_ID] = { PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } }, + [VOICE_ID] = { VOICE, .u.n = {"[:n%c] ", 0, 0, 9, 0, 0, "phfdburwkv" } }, + [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; @@ -306,10 +314,27 @@ static void synth_flush(struct spk_synth *synth) module_param_named(ser, synth_dectlk.ser, int, 0444); module_param_named(dev, synth_dectlk.dev_name, charp, 0444); module_param_named(start, synth_dectlk.startup, short, 0444); +module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444); +module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444); +module_param_named(inflection, vars[INFLECTION_ID].u.n.default_val, int, 0444); +module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444); +module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444); +module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); + + MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(dev, "Set the device e.g. 
ttyUSB0, for the synthesizer."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); +MODULE_PARM_DESC(rate, "Set the rate variable on load."); +MODULE_PARM_DESC(pitch, "Set the pitch variable on load."); +MODULE_PARM_DESC(inflection, "Set the inflection variable on load."); +MODULE_PARM_DESC(vol, "Set the vol variable on load."); +MODULE_PARM_DESC(punct, "Set the punct variable on load."); +MODULE_PARM_DESC(voice, "Set the voice variable on load."); +MODULE_PARM_DESC(direct, "Set the direct variable on load."); + module_spk_synth(synth_dectlk); diff --git a/drivers/accessibility/speakup/speakup_dtlk.c b/drivers/accessibility/speakup/speakup_dtlk.c index 6f01e010aaf43c6cc9fff1953ad19a053b771e56..fa826568937bf0ade438d2c65e6ba61b63859c18 100644 --- a/drivers/accessibility/speakup/speakup_dtlk.c +++ b/drivers/accessibility/speakup/speakup_dtlk.c @@ -37,17 +37,27 @@ static unsigned int synth_portlist[] = { static u_char synth_status; -static struct var_t vars[] = { - { CAPS_START, .u.s = {"\x01+35p" } }, - { CAPS_STOP, .u.s = {"\x01-35p" } }, - { RATE, .u.n = {"\x01%ds", 8, 0, 9, 0, 0, NULL } }, - { PITCH, .u.n = {"\x01%dp", 50, 0, 99, 0, 0, NULL } }, - { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, - { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, - { PUNCT, .u.n = {"\x01%db", 7, 0, 15, 0, 0, NULL } }, - { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, - { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, - { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, +enum default_vars_id { + CAPS_START_ID = 0, CAPS_STOP_ID, + RATE_ID, PITCH_ID, + VOL_ID, TONE_ID, PUNCT_ID, + VOICE_ID, FREQUENCY_ID, + DIRECT_ID, V_LAST_VAR_ID, + NB_ID, +}; + + +static struct var_t vars[NB_ID] = { + [CAPS_START_ID] = { CAPS_START, .u.s = {"\x01+35p" } }, + [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x01-35p" } }, + [RATE_ID] = { RATE, .u.n = {"\x01%ds", 8, 0, 9, 0, 0, NULL } }, + [PITCH_ID] = { PITCH, .u.n = {"\x01%dp", 50, 0, 99, 0, 0, NULL } }, + [VOL_ID] = { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, + [TONE_ID] = { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, + [PUNCT_ID] = { PUNCT, .u.n = {"\x01%db", 7, 0, 15, 0, 0, NULL } }, + [VOICE_ID] = { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, + [FREQUENCY_ID] = { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, + [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; @@ -376,9 +386,27 @@ static void dtlk_release(struct spk_synth *synth) module_param_hw_named(port, port_forced, int, ioport, 0444); module_param_named(start, synth_dtlk.startup, short, 0444); +module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444); +module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444); +module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444); +module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444); +module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444); +module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444); +module_param_named(frequency, vars[FREQUENCY_ID].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); + MODULE_PARM_DESC(port, "Set the port for the synthesizer (override probing)."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); +MODULE_PARM_DESC(rate, "Set the rate variable on load."); +MODULE_PARM_DESC(pitch, "Set the pitch variable on load."); +MODULE_PARM_DESC(vol, "Set the vol variable on load."); +MODULE_PARM_DESC(tone, "Set the tone variable on load."); 
+MODULE_PARM_DESC(punct, "Set the punct variable on load."); +MODULE_PARM_DESC(voice, "Set the voice variable on load."); +MODULE_PARM_DESC(frequency, "Set the frequency variable on load."); +MODULE_PARM_DESC(direct, "Set the direct variable on load."); + module_spk_synth(synth_dtlk); diff --git a/drivers/accessibility/speakup/speakup_dummy.c b/drivers/accessibility/speakup/speakup_dummy.c index 56419dbb28d3c678843d84f66a3dcd06134f6dce..52b2c5d44576d785085a96b878382567ff93d644 100644 --- a/drivers/accessibility/speakup/speakup_dummy.c +++ b/drivers/accessibility/speakup/speakup_dummy.c @@ -18,17 +18,30 @@ #define DRV_VERSION "2.11" #define SYNTH_CLEAR '!' -static struct var_t vars[] = { - { CAPS_START, .u.s = {"CAPS_START\n" } }, - { CAPS_STOP, .u.s = {"CAPS_STOP\n" } }, - { PAUSE, .u.s = {"PAUSE\n"} }, - { RATE, .u.n = {"RATE %d\n", 8, 1, 16, 0, 0, NULL } }, - { PITCH, .u.n = {"PITCH %d\n", 8, 0, 16, 0, 0, NULL } }, - { INFLECTION, .u.n = {"INFLECTION %d\n", 8, 0, 16, 0, 0, NULL } }, - { VOL, .u.n = {"VOL %d\n", 8, 0, 16, 0, 0, NULL } }, - { TONE, .u.n = {"TONE %d\n", 8, 0, 16, 0, 0, NULL } }, - { PUNCT, .u.n = {"PUNCT %d\n", 0, 0, 3, 0, 0, NULL } }, - { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, + +enum default_vars_id { + CAPS_START_ID = 0, CAPS_STOP_ID, + PAUSE_ID, + RATE_ID, PITCH_ID, INFLECTION_ID, + VOL_ID, TONE_ID, PUNCT_ID, + DIRECT_ID, V_LAST_VAR_ID, + NB_ID +}; + + + + +static struct var_t vars[NB_ID] = { + [CAPS_START_ID] = { CAPS_START, .u.s = {"CAPS_START\n" } }, + [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"CAPS_STOP\n" } }, + [PAUSE_ID] = { PAUSE, .u.s = {"PAUSE\n"} }, + [RATE_ID] = { RATE, .u.n = {"RATE %d\n", 8, 1, 16, 0, 0, NULL } }, + [PITCH_ID] = { PITCH, .u.n = {"PITCH %d\n", 8, 0, 16, 0, 0, NULL } }, + [INFLECTION_ID] = { INFLECTION, .u.n = {"INFLECTION %d\n", 8, 0, 16, 0, 0, NULL } }, + [VOL_ID] = { VOL, .u.n = {"VOL %d\n", 8, 0, 16, 0, 0, NULL } }, + [TONE_ID] = { TONE, .u.n = {"TONE %d\n", 8, 0, 16, 0, 0, NULL } }, + [PUNCT_ID] = { PUNCT, .u.n = {"PUNCT %d\n", 0, 0, 3, 0, 0, NULL } }, + [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; @@ -129,10 +142,28 @@ static struct spk_synth synth_dummy = { module_param_named(ser, synth_dummy.ser, int, 0444); module_param_named(dev, synth_dummy.dev_name, charp, 0444); module_param_named(start, synth_dummy.startup, short, 0444); +module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444); +module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444); +module_param_named(inflection, vars[INFLECTION_ID].u.n.default_val, int, 0444); +module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444); +module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444); +module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); + + + MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(dev, "Set the device e.g. 
ttyUSB0, for the synthesizer."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); +MODULE_PARM_DESC(rate, "Set the rate variable on load."); +MODULE_PARM_DESC(pitch, "Set the pitch variable on load."); +MODULE_PARM_DESC(inflection, "Set the inflection variable on load."); +MODULE_PARM_DESC(vol, "Set the vol variable on load."); +MODULE_PARM_DESC(tone, "Set the tone variable on load."); +MODULE_PARM_DESC(punct, "Set the punct variable on load."); +MODULE_PARM_DESC(direct, "Set the direct variable on load."); + module_spk_synth(synth_dummy); diff --git a/drivers/accessibility/speakup/speakup_keypc.c b/drivers/accessibility/speakup/speakup_keypc.c index f61b62f1ea4dbeedf9c6776413d46bda1627a0d6..9356f6379560302c079ce165f54da575664fc763 100644 --- a/drivers/accessibility/speakup/speakup_keypc.c +++ b/drivers/accessibility/speakup/speakup_keypc.c @@ -33,12 +33,21 @@ static int synth_port; static int port_forced; static unsigned int synth_portlist[] = { 0x2a8, 0 }; -static struct var_t vars[] = { - { CAPS_START, .u.s = {"[f130]" } }, - { CAPS_STOP, .u.s = {"[f90]" } }, - { RATE, .u.n = {"\04%c ", 8, 0, 10, 81, -8, NULL } }, - { PITCH, .u.n = {"[f%d]", 5, 0, 9, 40, 10, NULL } }, - { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, + +enum default_vars_id { + CAPS_START_ID = 0, CAPS_STOP_ID, + RATE_ID, PITCH_ID, + DIRECT_ID, V_LAST_VAR_ID, + NB_ID +}; + + +static struct var_t vars[NB_ID] = { + [CAPS_START_ID] = { CAPS_START, .u.s = {"[f130]" } }, + [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"[f90]" } }, + [RATE_ID] = { RATE, .u.n = {"\04%c ", 8, 0, 10, 81, -8, NULL } }, + [PITCH_ID] = { PITCH, .u.n = {"[f%d]", 5, 0, 9, 40, 10, NULL } }, + [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; @@ -305,9 +314,17 @@ static void keynote_release(struct spk_synth *synth) module_param_hw_named(port, port_forced, int, ioport, 0444); module_param_named(start, synth_keypc.startup, short, 0444); +module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444); +module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); MODULE_PARM_DESC(port, "Set the port for the synthesizer (override probing)."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); +MODULE_PARM_DESC(rate, "Set the rate variable on load."); +MODULE_PARM_DESC(pitch, "Set the pitch variable on load."); +MODULE_PARM_DESC(direct, "Set the direct variable on load."); + + module_spk_synth(synth_keypc); diff --git a/drivers/accessibility/speakup/speakup_ltlk.c b/drivers/accessibility/speakup/speakup_ltlk.c index f885cfaa27c80070e598e473765f6913af759d4f..1e279ae143bfd71cbcaed625b0565c3c26f5f7bc 100644 --- a/drivers/accessibility/speakup/speakup_ltlk.c +++ b/drivers/accessibility/speakup/speakup_ltlk.c @@ -18,17 +18,28 @@ static int synth_probe(struct spk_synth *synth); -static struct var_t vars[] = { - { CAPS_START, .u.s = {"\x01+35p" } }, - { CAPS_STOP, .u.s = {"\x01-35p" } }, - { RATE, .u.n = {"\x01%ds", 8, 0, 9, 0, 0, NULL } }, - { PITCH, .u.n = {"\x01%dp", 50, 0, 99, 0, 0, NULL } }, - { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, - { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, - { PUNCT, .u.n = {"\x01%db", 7, 0, 15, 0, 0, NULL } }, - { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, - { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, - { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, + +enum default_vars_id { + CAPS_START_ID = 0, CAPS_STOP_ID, + RATE_ID, PITCH_ID, + VOL_ID, TONE_ID, 
PUNCT_ID, + VOICE_ID, FREQUENCY_ID, + DIRECT_ID, V_LAST_VAR_ID, + NB_ID +}; + + +static struct var_t vars[NB_ID] = { + [CAPS_START_ID] = { CAPS_START, .u.s = {"\x01+35p" } }, + [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x01-35p" } }, + [RATE_ID] = { RATE, .u.n = {"\x01%ds", 8, 0, 9, 0, 0, NULL } }, + [PITCH_ID] = { PITCH, .u.n = {"\x01%dp", 50, 0, 99, 0, 0, NULL } }, + [VOL_ID] = { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, + [TONE_ID] = { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, + [PUNCT_ID] = { PUNCT, .u.n = {"\x01%db", 7, 0, 15, 0, 0, NULL } }, + [VOICE_ID] = { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, + [FREQUENCY_ID] = { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, + [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; @@ -160,10 +171,30 @@ static int synth_probe(struct spk_synth *synth) module_param_named(ser, synth_ltlk.ser, int, 0444); module_param_named(dev, synth_ltlk.dev_name, charp, 0444); module_param_named(start, synth_ltlk.startup, short, 0444); +module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444); +module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444); +module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444); +module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444); +module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444); +module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444); +module_param_named(frequency, vars[FREQUENCY_ID].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); + + + MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); +MODULE_PARM_DESC(rate, "Set the rate variable on load."); +MODULE_PARM_DESC(pitch, "Set the pitch variable on load."); +MODULE_PARM_DESC(vol, "Set the vol variable on load."); +MODULE_PARM_DESC(tone, "Set the tone variable on load."); +MODULE_PARM_DESC(punct, "Set the punct variable on load."); +MODULE_PARM_DESC(voice, "Set the voice variable on load."); +MODULE_PARM_DESC(frequency, "Set the frequency variable on load."); +MODULE_PARM_DESC(direct, "Set the direct variable on load."); + module_spk_synth(synth_ltlk); diff --git a/drivers/accessibility/speakup/speakup_soft.c b/drivers/accessibility/speakup/speakup_soft.c index 28c8f60370cf8f9d56bd0873f1ac5c48b873d1d1..6d446824677b452542d225d1305f4527b16bfd4e 100644 --- a/drivers/accessibility/speakup/speakup_soft.c +++ b/drivers/accessibility/speakup/speakup_soft.c @@ -33,21 +33,30 @@ static struct miscdevice synth_device, synthu_device; static int init_pos; static int misc_registered; -static struct var_t vars[] = { - /* DIRECT is put first so that module_param_named can access it easily */ - { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, - - { CAPS_START, .u.s = {"\x01+3p" } }, - { CAPS_STOP, .u.s = {"\x01-3p" } }, - { PAUSE, .u.n = {"\x01P" } }, - { RATE, .u.n = {"\x01%ds", 2, 0, 9, 0, 0, NULL } }, - { PITCH, .u.n = {"\x01%dp", 5, 0, 9, 0, 0, NULL } }, - { INFLECTION, .u.n = {"\x01%dr", 5, 0, 9, 0, 0, NULL } }, - { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, - { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, - { PUNCT, .u.n = {"\x01%db", 0, 0, 3, 0, 0, NULL } }, - { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, - { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, + +enum default_vars_id { + DIRECT_ID = 0, CAPS_START_ID, 
CAPS_STOP_ID, + PAUSE_ID, RATE_ID, PITCH_ID, INFLECTION_ID, + VOL_ID, TONE_ID, PUNCT_ID, VOICE_ID, + FREQUENCY_ID, V_LAST_VAR_ID, + NB_ID +}; + + +static struct var_t vars[NB_ID] = { + + [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, + [CAPS_START_ID] = { CAPS_START, .u.s = {"\x01+3p" } }, + [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x01-3p" } }, + [PAUSE_ID] = { PAUSE, .u.n = {"\x01P" } }, + [RATE_ID] = { RATE, .u.n = {"\x01%ds", 2, 0, 9, 0, 0, NULL } }, + [PITCH_ID] = { PITCH, .u.n = {"\x01%dp", 5, 0, 9, 0, 0, NULL } }, + [INFLECTION_ID] = { INFLECTION, .u.n = {"\x01%dr", 5, 0, 9, 0, 0, NULL } }, + [VOL_ID] = { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, + [TONE_ID] = { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, + [PUNCT_ID] = { PUNCT, .u.n = {"\x01%db", 0, 0, 3, 0, 0, NULL } }, + [VOICE_ID] = { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, + [FREQUENCY_ID] = { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, V_LAST_VAR }; @@ -451,10 +460,28 @@ static int softsynth_adjust(struct spk_synth *synth, struct st_var_header *var) } module_param_named(start, synth_soft.startup, short, 0444); -module_param_named(direct, vars[0].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); +module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444); +module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444); +module_param_named(inflection, vars[INFLECTION_ID].u.n.default_val, int, 0444); +module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444); +module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444); +module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444); +module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444); +module_param_named(frequency, vars[FREQUENCY_ID].u.n.default_val, int, 0444); + + MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); MODULE_PARM_DESC(direct, "Set the direct variable on load."); +MODULE_PARM_DESC(rate, "Sets the rate of the synthesizer."); +MODULE_PARM_DESC(pitch, "Sets the pitch of the synthesizer."); +MODULE_PARM_DESC(inflection, "Sets the inflection of the synthesizer."); +MODULE_PARM_DESC(vol, "Sets the volume of the speech synthesizer."); +MODULE_PARM_DESC(tone, "Sets the tone of the speech synthesizer."); +MODULE_PARM_DESC(punct, "Sets the amount of punctuation spoken by the synthesizer."); +MODULE_PARM_DESC(voice, "Sets the voice used by the synthesizer."); +MODULE_PARM_DESC(frequency, "Sets the frequency of speech synthesizer."); module_spk_synth(synth_soft); diff --git a/drivers/accessibility/speakup/speakup_spkout.c b/drivers/accessibility/speakup/speakup_spkout.c index 5e3bb3aa98b62e82d379a9fdeffbc06a5c05067b..d3f26095b0eef95f25a51f683846e02bd34f5144 100644 --- a/drivers/accessibility/speakup/speakup_spkout.c +++ b/drivers/accessibility/speakup/speakup_spkout.c @@ -18,15 +18,26 @@ static void synth_flush(struct spk_synth *synth); -static struct var_t vars[] = { - { CAPS_START, .u.s = {"\x05P+" } }, - { CAPS_STOP, .u.s = {"\x05P-" } }, - { RATE, .u.n = {"\x05R%d", 7, 0, 9, 0, 0, NULL } }, - { PITCH, .u.n = {"\x05P%d", 3, 0, 9, 0, 0, NULL } }, - { VOL, .u.n = {"\x05V%d", 9, 0, 9, 0, 0, NULL } }, - { TONE, .u.n = {"\x05T%c", 8, 0, 25, 65, 0, NULL } }, - { PUNCT, .u.n = {"\x05M%c", 0, 0, 3, 0, 0, "nsma" } }, - { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, + + +enum default_vars_id { + CAPS_START_ID = 0, CAPS_STOP_ID, + RATE_ID, PITCH_ID, + VOL_ID, TONE_ID, PUNCT_ID, + DIRECT_ID, V_LAST_VAR_ID, + NB_ID 
+}; + + +static struct var_t vars[NB_ID] = { + [CAPS_START_ID] = { CAPS_START, .u.s = {"\x05P+" } }, + [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x05P-" } }, + [RATE_ID] = { RATE, .u.n = {"\x05R%d", 7, 0, 9, 0, 0, NULL } }, + [PITCH_ID] = { PITCH, .u.n = {"\x05P%d", 3, 0, 9, 0, 0, NULL } }, + [VOL_ID] = { VOL, .u.n = {"\x05V%d", 9, 0, 9, 0, 0, NULL } }, + [TONE_ID] = { TONE, .u.n = {"\x05T%c", 8, 0, 25, 65, 0, NULL } }, + [PUNCT_ID] = { PUNCT, .u.n = {"\x05M%c", 0, 0, 3, 0, 0, "nsma" } }, + [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; @@ -124,10 +135,24 @@ static void synth_flush(struct spk_synth *synth) module_param_named(ser, synth_spkout.ser, int, 0444); module_param_named(dev, synth_spkout.dev_name, charp, 0444); module_param_named(start, synth_spkout.startup, short, 0444); +module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444); +module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444); +module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444); +module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); + + MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); +MODULE_PARM_DESC(rate, "Set the rate variable on load."); +MODULE_PARM_DESC(vol, "Set the vol variable on load."); +MODULE_PARM_DESC(tone, "Set the tone variable on load."); +MODULE_PARM_DESC(punct, "Set the punct variable on load."); +MODULE_PARM_DESC(direct, "Set the direct variable on load."); + + module_spk_synth(synth_spkout); diff --git a/drivers/accessibility/speakup/speakup_txprt.c b/drivers/accessibility/speakup/speakup_txprt.c index 9e781347f7ebb2532b8252183b476c20dc3b28e9..4d0a0d4c41f0b9ef7265ec47b1588a927e462586 100644 --- a/drivers/accessibility/speakup/speakup_txprt.c +++ b/drivers/accessibility/speakup/speakup_txprt.c @@ -16,14 +16,29 @@ #define SYNTH_CLEAR 0x18 #define PROCSPEECH '\r' /* process speech char */ -static struct var_t vars[] = { - { CAPS_START, .u.s = {"\x05P8" } }, - { CAPS_STOP, .u.s = {"\x05P5" } }, - { RATE, .u.n = {"\x05R%d", 5, 0, 9, 0, 0, NULL } }, - { PITCH, .u.n = {"\x05P%d", 5, 0, 9, 0, 0, NULL } }, - { VOL, .u.n = {"\x05V%d", 5, 0, 9, 0, 0, NULL } }, - { TONE, .u.n = {"\x05T%c", 12, 0, 25, 61, 0, NULL } }, - { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, + + + +enum default_vars_id { + CAPS_START_ID = 0, CAPS_STOP_ID, + RATE_ID, PITCH_ID, + VOL_ID, TONE_ID, + DIRECT_ID, V_LAST_VAR_ID, + NB_ID +}; + + + + + +static struct var_t vars[NB_ID] = { + [CAPS_START_ID] = { CAPS_START, .u.s = {"\x05P8" } }, + [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x05P5" } }, + [RATE_ID] = { RATE, .u.n = {"\x05R%d", 5, 0, 9, 0, 0, NULL } }, + [PITCH_ID] = { PITCH, .u.n = {"\x05P%d", 5, 0, 9, 0, 0, NULL } }, + [VOL_ID] = { VOL, .u.n = {"\x05V%d", 5, 0, 9, 0, 0, NULL } }, + [TONE_ID] = { TONE, .u.n = {"\x05T%c", 12, 0, 25, 61, 0, NULL } }, + [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; @@ -112,10 +127,24 @@ static struct spk_synth synth_txprt = { module_param_named(ser, synth_txprt.ser, int, 0444); module_param_named(dev, synth_txprt.dev_name, charp, 0444); module_param_named(start, synth_txprt.startup, short, 0444); +module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444); +module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
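Editor's aside: the speakup conversions above replace positional vars[] initializers with enum-indexed designated initializers so that module_param_named() can bind each parameter to its slot by name instead of by fragile array position (note the vol parameter, which must index VOL_ID, not PITCH_ID). A minimal stand-alone sketch of the pattern; all names here are hypothetical and not part of the patch:

    #include <stdio.h>

    enum var_id { RATE_ID, PITCH_ID, VOL_ID, NB_ID };

    struct var { const char *fmt; int default_val; };

    /* Designated initializers pin each entry to its enum slot, so
     * reordering the enum cannot silently shift default values. */
    static struct var vars[NB_ID] = {
        [RATE_ID]  = { "R%d", 5 },
        [PITCH_ID] = { "P%d", 3 },
        [VOL_ID]   = { "V%d", 9 },
    };

    int main(void)
    {
        /* module_param_named(vol, ...) would bind here; a print stands in. */
        printf("vol default: %d\n", vars[VOL_ID].default_val);
        return 0;
    }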
+module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444); +module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444); +module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444); + + MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); +MODULE_PARM_DESC(rate, "Set the rate variable on load."); +MODULE_PARM_DESC(pitch, "Set the pitch variable on load."); +MODULE_PARM_DESC(vol, "Set the vol variable on load."); +MODULE_PARM_DESC(tone, "Set the tone variable on load."); +MODULE_PARM_DESC(direct, "Set the direct variable on load."); + + module_spk_synth(synth_txprt); diff --git a/drivers/accessibility/speakup/spk_types.h b/drivers/accessibility/speakup/spk_types.h index 3a14d39bf896ebd9176fcf57de139687bbabc9af..08011518a28aea840302b18c250dab7bf3e911dc 100644 --- a/drivers/accessibility/speakup/spk_types.h +++ b/drivers/accessibility/speakup/spk_types.h @@ -49,7 +49,7 @@ enum var_id_t { RATE, PITCH, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG, DIRECT, PAUSE, CAPS_START, CAPS_STOP, CHARTAB, INFLECTION, FLUSH, - MAXVARS + CUR_PHONETIC, MAXVARS }; typedef int (*special_func)(struct vc_data *vc, u_char type, u_char ch, diff --git a/drivers/accessibility/speakup/varhandlers.c b/drivers/accessibility/speakup/varhandlers.c index e1c9f42e39f0fae670ee00ec9ffdb8a906c85667..462f8d879053c263a3d17c13de0072bf5c193662 100644 --- a/drivers/accessibility/speakup/varhandlers.c +++ b/drivers/accessibility/speakup/varhandlers.c @@ -48,6 +48,7 @@ static struct st_var_header var_headers[] = { { "chartab", CHARTAB, VAR_PROC, NULL, NULL }, { "direct", DIRECT, VAR_NUM, NULL, NULL }, { "pause", PAUSE, VAR_STRING, spk_str_pause, NULL }, + { "cur_phonetic", CUR_PHONETIC, VAR_NUM, &spk_cur_phonetic, NULL }, }; static struct st_var_header *var_ptrs[MAXVARS] = { NULL, NULL, NULL }; diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index e8ad41387f84c19452a306b9bba2c1e147d273dd..b082eb942a0f86f3ac37c667016f0e3e65ca83d2 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -389,9 +389,11 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) /* * All opcodes require operand resolution, with the only exceptions - * being the object_type and size_of operators. + * being the object_type and size_of operators as well as opcodes that + * take no arguments. */ - if (!(walk_state->op_info->flags & AML_NO_OPERAND_RESOLVE)) { + if (!(walk_state->op_info->flags & AML_NO_OPERAND_RESOLVE) && + (walk_state->op_info->flags & AML_HAS_ARGS)) { /* Resolve all operands */ diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 0a8372bf6a77d199ae1b5955b5c2e52f19065527..a5c19f46ec17dfe87775bc6562072778a1380c9f 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -20,13 +20,14 @@ ACPI_MODULE_NAME("evxfregn") /******************************************************************************* * - * FUNCTION: acpi_install_address_space_handler + * FUNCTION: acpi_install_address_space_handler_internal * * PARAMETERS: device - Handle for the device * space_id - The address space ID * handler - Address of the handler * setup - Address of the setup function * context - Value passed to the handler on each access + * Run_reg - Run _REG methods for this address space? 
* * RETURN: Status * @@ -37,13 +38,16 @@ ACPI_MODULE_NAME("evxfregn") * are executed here, and these methods can only be safely executed after * the default handlers have been installed and the hardware has been * initialized (via acpi_enable_subsystem.) + * To avoid this problem pass FALSE for Run_Reg and later on call + * acpi_execute_reg_methods() to execute _REG. * ******************************************************************************/ -acpi_status -acpi_install_address_space_handler(acpi_handle device, - acpi_adr_space_type space_id, - acpi_adr_space_handler handler, - acpi_adr_space_setup setup, void *context) +static acpi_status +acpi_install_address_space_handler_internal(acpi_handle device, + acpi_adr_space_type space_id, + acpi_adr_space_handler handler, + acpi_adr_space_setup setup, + void *context, u8 run_reg) { struct acpi_namespace_node *node; acpi_status status; @@ -80,14 +84,40 @@ acpi_install_address_space_handler(acpi_handle device, /* Run all _REG methods for this address space */ - acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT); + if (run_reg) { + acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT); + } unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(status); } +acpi_status +acpi_install_address_space_handler(acpi_handle device, + acpi_adr_space_type space_id, + acpi_adr_space_handler handler, + acpi_adr_space_setup setup, void *context) +{ + return acpi_install_address_space_handler_internal(device, space_id, + handler, setup, + context, TRUE); +} + ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler) +acpi_status +acpi_install_address_space_handler_no_reg(acpi_handle device, + acpi_adr_space_type space_id, + acpi_adr_space_handler handler, + acpi_adr_space_setup setup, + void *context) +{ + return acpi_install_address_space_handler_internal(device, space_id, + handler, setup, + context, FALSE); +} + +ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler_no_reg) /******************************************************************************* * @@ -228,3 +258,51 @@ unlock_and_exit: } ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler) +/******************************************************************************* + * + * FUNCTION: acpi_execute_reg_methods + * + * PARAMETERS: device - Handle for the device + * space_id - The address space ID + * + * RETURN: Status + * + * DESCRIPTION: Execute _REG for all op_regions of a given space_id. 
+ * + ******************************************************************************/ +acpi_status +acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id) +{ + struct acpi_namespace_node *node; + acpi_status status; + + ACPI_FUNCTION_TRACE(acpi_execute_reg_methods); + + /* Parameter validation */ + + if (!device) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Convert and validate the device handle */ + + node = acpi_ns_validate_handle(device); + if (node) { + + /* Run all _REG methods for this address space */ + + acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT); + } else { + status = AE_BAD_PARAMETER; + } + + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods) diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 2520fb998ce60ae30e7092dbde9121a019212a1e..105d2e795afadd16afb1cdc46bd3f9150faf93bc 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -94,6 +94,7 @@ enum { EC_FLAGS_QUERY_ENABLED, /* Query is enabled */ EC_FLAGS_EVENT_HANDLER_INSTALLED, /* Event handler installed */ EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */ + EC_FLAGS_EC_REG_CALLED, /* OpReg ACPI _REG method called */ EC_FLAGS_QUERY_METHODS_INSTALLED, /* _Qxx handlers installed */ EC_FLAGS_STARTED, /* Driver is started */ EC_FLAGS_STOPPED, /* Driver is stopped */ @@ -1446,6 +1447,7 @@ static bool install_gpio_irq_event_handler(struct acpi_ec *ec) * ec_install_handlers - Install service callbacks and register query methods. * @ec: Target EC. * @device: ACPI device object corresponding to @ec. + * @call_reg: If _REG should be called to notify OpRegion availability * * Install a handler for the EC address space type unless it has been installed * already. If @device is not NULL, also look for EC query methods in the @@ -1458,7 +1460,8 @@ static bool install_gpio_irq_event_handler(struct acpi_ec *ec) * -EPROBE_DEFER if GPIO IRQ acquisition needs to be deferred, * or 0 (success) otherwise. 
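Editor's aside: the evxfregn.c change splits handler installation from _REG execution, which is exactly what the ec.c hunks that follow rely on. A hedged sketch of the resulting call sequence for a driver; the driver-side names are hypothetical, while the two exported ACPICA entry points are the ones added by this patch:

    #include <linux/acpi.h>

    static acpi_status my_space_handler(u32 function,
                                        acpi_physical_address address,
                                        u32 bit_width, u64 *value,
                                        void *handler_context,
                                        void *region_context)
    {
        /* Serve the operation-region access here. */
        return AE_OK;
    }

    static int my_attach(acpi_handle handle)
    {
        acpi_status status;

        /* Install the handler without triggering _REG yet. */
        status = acpi_install_address_space_handler_no_reg(handle,
                        ACPI_ADR_SPACE_EC, &my_space_handler, NULL, NULL);
        if (ACPI_FAILURE(status))
            return -ENODEV;

        /* ... finish bringing up the hardware ... */

        /* Only now tell AML that the region can be used. */
        acpi_execute_reg_methods(handle, ACPI_ADR_SPACE_EC);
        return 0;
    }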
*/ -static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device) +static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device, + bool call_reg) { acpi_status status; @@ -1466,15 +1469,21 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device) if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { acpi_ec_enter_noirq(ec); - status = acpi_install_address_space_handler(ec->handle, - ACPI_ADR_SPACE_EC, - &acpi_ec_space_handler, - NULL, ec); + status = acpi_install_address_space_handler_no_reg(ec->handle, + ACPI_ADR_SPACE_EC, + &acpi_ec_space_handler, + NULL, ec); if (ACPI_FAILURE(status)) { acpi_ec_stop(ec, false); return -ENODEV; } set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); + ec->address_space_handler_holder = ec->handle; + } + + if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) { + acpi_execute_reg_methods(ec->handle, ACPI_ADR_SPACE_EC); + set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags); } if (!device) @@ -1526,7 +1535,8 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device) static void ec_remove_handlers(struct acpi_ec *ec) { if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { - if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, + if (ACPI_FAILURE(acpi_remove_address_space_handler( + ec->address_space_handler_holder, ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) pr_err("failed to remove space handler\n"); clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); @@ -1562,11 +1572,11 @@ static void ec_remove_handlers(struct acpi_ec *ec) } } -static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device) +static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool call_reg) { int ret; - ret = ec_install_handlers(ec, device); + ret = ec_install_handlers(ec, device, call_reg); if (ret) return ret; @@ -1631,7 +1641,7 @@ static int acpi_ec_add(struct acpi_device *device) } } - ret = acpi_ec_setup(ec, device); + ret = acpi_ec_setup(ec, device, true); if (ret) goto err; @@ -1750,7 +1760,7 @@ void __init acpi_ec_dsdt_probe(void) * At this point, the GPE is not fully initialized, so do not to * handle the events. */ - ret = acpi_ec_setup(ec, NULL); + ret = acpi_ec_setup(ec, NULL, true); if (ret) { acpi_ec_free(ec); return; @@ -1944,7 +1954,7 @@ void __init acpi_ec_ecdt_probe(void) * At this point, the namespace is not initialized, so do not find * the namespace objects, or handle the events. 
*/ - ret = acpi_ec_setup(ec, NULL); + ret = acpi_ec_setup(ec, NULL, false); if (ret) { acpi_ec_free(ec); goto out; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 219c02df9a08c720dcca32559f47d3770b0507c2..ec584442fb298861d75b51adafe9732f5ec09290 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -173,6 +173,7 @@ enum acpi_ec_event_state { struct acpi_ec { acpi_handle handle; + acpi_handle address_space_handler_holder; int gpe; int irq; unsigned long command_addr; diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c index 6cceca64a6bcfdd723d11f741983f82c0aa2f374..605a0c7053bea27eb01bc7d91cdf324752bc4deb 100644 --- a/drivers/acpi/numa/hmat.c +++ b/drivers/acpi/numa/hmat.c @@ -780,11 +780,6 @@ static int hmat_callback(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block hmat_callback_nb = { - .notifier_call = hmat_callback, - .priority = 2, -}; - static __init void hmat_free_structures(void) { struct memory_target *target, *tnext; @@ -867,7 +862,7 @@ static __init int hmat_init(void) hmat_register_targets(); /* Keep the table and structures if the notifier may use them */ - if (!register_hotmemory_notifier(&hmat_callback_nb)) + if (!hotplug_memory_notifier(hmat_callback, HMAT_CALLBACK_PRI)) return 0; out_put: hmat_free_structures(); diff --git a/drivers/base/base.h b/drivers/base/base.h index b902d1ecc247f142d33150ad129e27c0565fa316..7d4803c03d3e61ea950e3b896ab18308c4f32fa4 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -146,7 +146,6 @@ static inline int driver_match_device(struct device_driver *drv, { return drv->bus->match ? drv->bus->match(dev, drv) : 1; } -extern bool driver_allows_async_probing(struct device_driver *drv); extern int driver_add_groups(struct device_driver *drv, const struct attribute_group **groups); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 7ca47e5b3c1f4c4200bac096f72fd170d113400c..4ec6dbab73be5ceaa603ea138ca2a13184bb170f 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -163,7 +163,7 @@ static struct kobj_type bus_ktype = { .release = bus_release, }; -static int bus_uevent_filter(struct kobject *kobj) +static int bus_uevent_filter(const struct kobject *kobj) { const struct kobj_type *ktype = get_ktype(kobj); diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 4b5cd08c5a657f7341d13dd652a2689230942632..950b22cdb5f7cd81eaff4a3a4deec716c0dc9fee 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -196,7 +196,7 @@ static void cache_of_set_props(struct cacheinfo *this_leaf, static int cache_setup_of_node(unsigned int cpu) { - struct device_node *np; + struct device_node *np, *prev; struct cacheinfo *this_leaf; unsigned int index = 0; @@ -206,19 +206,24 @@ static int cache_setup_of_node(unsigned int cpu) return -ENOENT; } + prev = np; + while (index < cache_leaves(cpu)) { this_leaf = per_cpu_cacheinfo_idx(cpu, index); - if (this_leaf->level != 1) + if (this_leaf->level != 1) { np = of_find_next_cache_node(np); - else - np = of_node_get(np);/* cpu node itself */ - if (!np) - break; + of_node_put(prev); + prev = np; + if (!np) + break; + } cache_of_set_props(this_leaf, np); this_leaf->fw_token = np; index++; } + of_node_put(np); + if (index != cache_leaves(cpu)) /* not all OF nodes populated */ return -ENOENT; @@ -312,8 +317,6 @@ static void cache_shared_cpu_map_remove(unsigned int cpu) cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); } - if (of_have_populated_dt()) 
- of_node_put(this_leaf->fw_token); } } diff --git a/drivers/base/class.c b/drivers/base/class.c index 64f7b9a0970f7d5d89559cbb4872079021a14fea..86ec554cfe6041982a22d0ca298892c77278886b 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -62,7 +62,7 @@ static void class_release(struct kobject *kobj) kfree(cp); } -static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj) +static const struct kobj_ns_type_operations *class_child_ns_type(const struct kobject *kobj) { struct subsys_private *cp = to_subsys_private(kobj); struct class *class = cp->class; @@ -192,6 +192,11 @@ int __class_register(struct class *cls, struct lock_class_key *key) } error = class_add_groups(class_get(cls), cls->class_groups); class_put(cls); + if (error) { + kobject_del(&cp->subsys.kobj); + kfree_const(cp->subsys.kobj.name); + kfree(cp); + } return error; } EXPORT_SYMBOL_GPL(__class_register); diff --git a/drivers/base/core.c b/drivers/base/core.c index d02501933467d443296e2dac60225ff8d3d2fbb6..a3e14143ec0cfd4beed114627361affb7da18650 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -1628,7 +1629,7 @@ early_param("fw_devlink", fw_devlink_setup); static bool fw_devlink_strict; static int __init fw_devlink_strict_setup(char *arg) { - return strtobool(arg, &fw_devlink_strict); + return kstrtobool(arg, &fw_devlink_strict); } early_param("fw_devlink.strict", fw_devlink_strict_setup); @@ -2280,7 +2281,7 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, { struct dev_ext_attribute *ea = to_ext_attr(attr); - if (strtobool(buf, ea->var) < 0) + if (kstrtobool(buf, ea->var) < 0) return -EINVAL; return size; @@ -2334,9 +2335,9 @@ static void device_release(struct kobject *kobj) kfree(p); } -static const void *device_namespace(struct kobject *kobj) +static const void *device_namespace(const struct kobject *kobj) { - struct device *dev = kobj_to_dev(kobj); + const struct device *dev = kobj_to_dev(kobj); const void *ns = NULL; if (dev->class && dev->class->ns_type) @@ -2345,9 +2346,9 @@ static const void *device_namespace(struct kobject *kobj) return ns; } -static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) +static void device_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid) { - struct device *dev = kobj_to_dev(kobj); + const struct device *dev = kobj_to_dev(kobj); if (dev->class && dev->class->get_ownership) dev->class->get_ownership(dev, uid, gid); @@ -2361,12 +2362,12 @@ static struct kobj_type device_ktype = { }; -static int dev_uevent_filter(struct kobject *kobj) +static int dev_uevent_filter(const struct kobject *kobj) { const struct kobj_type *ktype = get_ktype(kobj); if (ktype == &device_ktype) { - struct device *dev = kobj_to_dev(kobj); + const struct device *dev = kobj_to_dev(kobj); if (dev->bus) return 1; if (dev->class) @@ -2375,9 +2376,9 @@ static int dev_uevent_filter(struct kobject *kobj) return 0; } -static const char *dev_uevent_name(struct kobject *kobj) +static const char *dev_uevent_name(const struct kobject *kobj) { - struct device *dev = kobj_to_dev(kobj); + const struct device *dev = kobj_to_dev(kobj); if (dev->bus) return dev->bus->name; @@ -2534,7 +2535,7 @@ static ssize_t online_store(struct device *dev, struct device_attribute *attr, bool val; int ret; - ret = strtobool(buf, &val); + ret = kstrtobool(buf, &val); if (ret < 0) return ret; @@ -2585,11 +2586,6 @@ union device_attr_group_devres { 
const struct attribute_group **groups; }; -static int devm_attr_group_match(struct device *dev, void *res, void *data) -{ - return ((union device_attr_group_devres *)res)->group == data; -} - static void devm_attr_group_remove(struct device *dev, void *res) { union device_attr_group_devres *devres = res; @@ -2640,23 +2636,6 @@ int devm_device_add_group(struct device *dev, const struct attribute_group *grp) } EXPORT_SYMBOL_GPL(devm_device_add_group); -/** - * devm_device_remove_group: remove a managed group from a device - * @dev: device to remove the group from - * @grp: group to remove - * - * This function removes a group of attributes from a device. The attributes - * previously have to have been created for this group, otherwise it will fail. - */ -void devm_device_remove_group(struct device *dev, - const struct attribute_group *grp) -{ - WARN_ON(devres_release(dev, devm_attr_group_remove, - devm_attr_group_match, - /* cast away const */ (void *)grp)); -} -EXPORT_SYMBOL_GPL(devm_device_remove_group); - /** * devm_device_add_groups - create a bunch of managed attribute groups * @dev: The device to create the group for @@ -2693,23 +2672,6 @@ int devm_device_add_groups(struct device *dev, } EXPORT_SYMBOL_GPL(devm_device_add_groups); -/** - * devm_device_remove_groups - remove a list of managed groups - * - * @dev: The device for the groups to be removed from - * @groups: NULL terminated list of groups to be removed - * - * If groups is not NULL, remove the specified groups from the device. - */ -void devm_device_remove_groups(struct device *dev, - const struct attribute_group **groups) -{ - WARN_ON(devres_release(dev, devm_attr_groups_remove, - devm_attr_group_match, - /* cast away const */ (void *)groups)); -} -EXPORT_SYMBOL_GPL(devm_device_remove_groups); - static int device_add_attrs(struct device *dev) { struct class *class = dev->class; @@ -3024,9 +2986,9 @@ static void class_dir_release(struct kobject *kobj) } static const -struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj) +struct kobj_ns_type_operations *class_dir_child_ns_type(const struct kobject *kobj) { - struct class_dir *dir = to_class_dir(kobj); + const struct class_dir *dir = to_class_dir(kobj); return dir->class->ns_type; } diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 3dda62503102f9ee77637d396031c5b133482b6f..e9b2f9c25efe48c92cd2986e6538d48a2db79dfa 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -843,7 +843,7 @@ static int __init save_async_options(char *buf) } __setup("driver_async_probe=", save_async_options); -bool driver_allows_async_probing(struct device_driver *drv) +static bool driver_allows_async_probing(struct device_driver *drv) { switch (drv->probe_type) { case PROBE_PREFER_ASYNCHRONOUS: @@ -1162,7 +1162,11 @@ static int __driver_attach(struct device *dev, void *data) return 0; } else if (ret < 0) { dev_dbg(dev, "Bus failed to match device: %d\n", ret); - return ret; + /* + * Driver could not match with device, but may match with + * another device on the bus. + */ + return 0; } /* ret > 0 means positive match */ if (driver_allows_async_probing(drv)) { diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 4ab2b50ee38f49a016f9581d385eba41b925d729..c0e100074aa395425cc6191020174518bf9e4578 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -101,6 +101,9 @@ static bool check_dr_size(size_t size, size_t *tot_size) size, tot_size))) return false; + /* Actually allocate the full kmalloc bucket size. 
*/ + *tot_size = kmalloc_size_roundup(*tot_size); + return true; } diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h index fe77e91c38a21424e105dc64c8c12b8e2b4d9891..bf549d6500d7bc8e177ae90aae8d149983b22b4e 100644 --- a/drivers/base/firmware_loader/firmware.h +++ b/drivers/base/firmware_loader/firmware.h @@ -9,8 +9,6 @@ #include #include -#include - /** * enum fw_opt - options to control firmware loading behaviour * diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 7c3590fd97c28d8977f2be0c6789264ff619c07a..017c4cdb219eb115e0bc5ce93269c24ff2ff4a70 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -821,7 +821,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name, * called by a driver when serving an unrelated request from userland, we use * the kernel credentials to read the file. */ - kern_cred = prepare_kernel_cred(NULL); + kern_cred = prepare_kernel_cred(&init_task); if (!kern_cred) { ret = -ENOMEM; goto out; diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c index 5b66b3d1fa16b8126946d67daeaf6c711e7440d9..56911d75b90a1f8355b8f6ceadf2ed14d32aa00f 100644 --- a/drivers/base/firmware_loader/sysfs.c +++ b/drivers/base/firmware_loader/sysfs.c @@ -64,7 +64,7 @@ static struct attribute *firmware_class_attrs[] = { }; ATTRIBUTE_GROUPS(firmware_class); -static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env) +static int do_firmware_uevent(const struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env) { if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name)) return -ENOMEM; @@ -76,9 +76,9 @@ static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env return 0; } -static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) +static int firmware_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); + const struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); int err = 0; mutex_lock(&fw_lock); diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h index df1d5add698f1d93e69f7f3a05d15745d6ec0a3c..2060add8ef81b7e27eeb3a27b0b96d2e46d2f93b 100644 --- a/drivers/base/firmware_loader/sysfs.h +++ b/drivers/base/firmware_loader/sysfs.h @@ -80,11 +80,7 @@ struct fw_sysfs { struct firmware *fw; void *fw_upload_priv; }; - -static inline struct fw_sysfs *to_fw_sysfs(struct device *dev) -{ - return container_of(dev, struct fw_sysfs, dev); -} +#define to_fw_sysfs(__dev) container_of_const(__dev, struct fw_sysfs, dev) void __fw_load_abort(struct fw_priv *fw_priv); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 9aa0da991cfb910955ece83be4be1970eced2749..fe98fb8d94e51c7749f3fa75f0f014ca804adfee 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -175,6 +175,15 @@ int memory_notify(unsigned long val, void *v) return blocking_notifier_call_chain(&memory_chain, val, v); } +#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG) +static unsigned long memblk_nr_poison(struct memory_block *mem); +#else +static inline unsigned long memblk_nr_poison(struct memory_block *mem) +{ + return 0; +} +#endif + static int memory_block_online(struct memory_block *mem) { unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); @@ -183,6 +192,9 @@ static int memory_block_online(struct memory_block *mem) struct zone *zone; int ret; + if 
(memblk_nr_poison(mem)) + return -EHWPOISON; + zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group, start_pfn, nr_pages); @@ -864,6 +876,7 @@ void remove_memory_block_devices(unsigned long start, unsigned long size) mem = find_memory_block_by_id(block_id); if (WARN_ON_ONCE(!mem)) continue; + num_poisoned_pages_sub(-1UL, memblk_nr_poison(mem)); unregister_memory_block_under_nodes(mem); remove_memory_block(mem); } @@ -1164,3 +1177,28 @@ int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func, } return ret; } + +#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG) +void memblk_nr_poison_inc(unsigned long pfn) +{ + const unsigned long block_id = pfn_to_block_id(pfn); + struct memory_block *mem = find_memory_block_by_id(block_id); + + if (mem) + atomic_long_inc(&mem->nr_hwpoison); +} + +void memblk_nr_poison_sub(unsigned long pfn, long i) +{ + const unsigned long block_id = pfn_to_block_id(pfn); + struct memory_block *mem = find_memory_block_by_id(block_id); + + if (mem) + atomic_long_sub(i, &mem->nr_hwpoison); +} + +static unsigned long memblk_nr_poison(struct memory_block *mem) +{ + return atomic_long_read(&mem->nr_hwpoison); +} +#endif diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 51bb2289865c7222c0a2d1d6f4e8ad02225f9105..968f3d71eeab2e9fe5cdf552908c79bd4cdad580 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -441,8 +441,8 @@ static int __platform_get_irq_byname(struct platform_device *dev, struct resource *r; int ret; - if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { - ret = of_irq_get_byname(dev->dev.of_node, name); + if (!dev->dev.of_node || IS_ENABLED(CONFIG_OF_IRQ)) { + ret = fwnode_irq_get_byname(dev_fwnode(&dev->dev), name); if (ret > 0 || ret == -EPROBE_DEFER) return ret; } diff --git a/drivers/base/property.c b/drivers/base/property.c index 2a5a37fcd99871bafb43684157dda53da35dd023..bbb3e499ff4a5db3f7238959668f8642429b6487 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -17,12 +17,19 @@ #include #include -struct fwnode_handle *dev_fwnode(const struct device *dev) +struct fwnode_handle *__dev_fwnode(struct device *dev) { return IS_ENABLED(CONFIG_OF) && dev->of_node ? of_fwnode_handle(dev->of_node) : dev->fwnode; } -EXPORT_SYMBOL_GPL(dev_fwnode); +EXPORT_SYMBOL_GPL(__dev_fwnode); + +const struct fwnode_handle *__dev_fwnode_const(const struct device *dev) +{ + return IS_ENABLED(CONFIG_OF) && dev->of_node ? + of_fwnode_handle(dev->of_node) : dev->fwnode; +} +EXPORT_SYMBOL_GPL(__dev_fwnode_const); /** * device_property_present - check if a property of a device is present @@ -475,12 +482,13 @@ int fwnode_property_match_string(const struct fwnode_handle *fwnode, ret = fwnode_property_read_string_array(fwnode, propname, values, nval); if (ret < 0) - goto out; + goto out_free; ret = match_string(values, nval, string); if (ret < 0) ret = -ENODATA; -out: + +out_free: kfree(values); return ret; } @@ -601,7 +609,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_parent); * node's parents. * * Returns a node pointer with refcount incremented, use - * fwnode_handle_node() on it when done. + * fwnode_handle_put() on it when done. */ struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode) { @@ -756,7 +764,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node); * @dev: Device to find the next child node for. * @child: Handle to one of the device's child nodes or a null handle. 
*/ -struct fwnode_handle *device_get_next_child_node(struct device *dev, +struct fwnode_handle *device_get_next_child_node(const struct device *dev, struct fwnode_handle *child) { const struct fwnode_handle *fwnode = dev_fwnode(dev); @@ -793,7 +801,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_named_child_node); * @dev: Device to find the named child node for. * @childname: String to match child node name against. */ -struct fwnode_handle *device_get_named_child_node(struct device *dev, +struct fwnode_handle *device_get_named_child_node(const struct device *dev, const char *childname) { return fwnode_get_named_child_node(dev_fwnode(dev), childname); @@ -852,7 +860,7 @@ EXPORT_SYMBOL_GPL(fwnode_device_is_available); * device_get_child_node_count - return the number of child nodes for device * @dev: Device to cound the child nodes for */ -unsigned int device_get_child_node_count(struct device *dev) +unsigned int device_get_child_node_count(const struct device *dev) { struct fwnode_handle *child; unsigned int count = 0; @@ -864,13 +872,13 @@ unsigned int device_get_child_node_count(struct device *dev) } EXPORT_SYMBOL_GPL(device_get_child_node_count); -bool device_dma_supported(struct device *dev) +bool device_dma_supported(const struct device *dev) { return fwnode_call_bool_op(dev_fwnode(dev), device_dma_supported); } EXPORT_SYMBOL_GPL(device_dma_supported); -enum dev_dma_attr device_get_dma_attr(struct device *dev) +enum dev_dma_attr device_get_dma_attr(const struct device *dev) { if (!fwnode_has_op(dev_fwnode(dev), device_get_dma_attr)) return DEV_DMA_NOT_SUPPORTED; @@ -1206,7 +1214,7 @@ const void *device_get_match_data(const struct device *dev) } EXPORT_SYMBOL_GPL(device_get_match_data); -static unsigned int fwnode_graph_devcon_matches(struct fwnode_handle *fwnode, +static unsigned int fwnode_graph_devcon_matches(const struct fwnode_handle *fwnode, const char *con_id, void *data, devcon_match_fn_t match, void **matches, @@ -1240,7 +1248,7 @@ static unsigned int fwnode_graph_devcon_matches(struct fwnode_handle *fwnode, return count; } -static unsigned int fwnode_devcon_matches(struct fwnode_handle *fwnode, +static unsigned int fwnode_devcon_matches(const struct fwnode_handle *fwnode, const char *con_id, void *data, devcon_match_fn_t match, void **matches, @@ -1282,7 +1290,7 @@ static unsigned int fwnode_devcon_matches(struct fwnode_handle *fwnode, * device node. @match will be used to convert the connection description to * data the caller is expecting to be returned. */ -void *fwnode_connection_find_match(struct fwnode_handle *fwnode, +void *fwnode_connection_find_match(const struct fwnode_handle *fwnode, const char *con_id, void *data, devcon_match_fn_t match) { @@ -1319,7 +1327,7 @@ EXPORT_SYMBOL_GPL(fwnode_connection_find_match); * * Return: Number of matches resolved, or negative errno. */ -int fwnode_connection_find_matches(struct fwnode_handle *fwnode, +int fwnode_connection_find_matches(const struct fwnode_handle *fwnode, const char *con_id, void *data, devcon_match_fn_t match, void **matches, unsigned int matches_len) diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig index 2f3fa31a948e2e86f6e9753a304443110a29fcc6..610a1ba7a467e8236ba4fb924f7004172385e15c 100644 --- a/drivers/base/test/Kconfig +++ b/drivers/base/test/Kconfig @@ -8,6 +8,7 @@ config TEST_ASYNC_DRIVER_PROBE The module name will be test_async_driver_probe.ko If unsure say N. 
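Editor's aside: the property.c hunks above constify dev_fwnode() and the child-node helpers so that purely read-only consumers can take a const struct device *. A small sketch of the kind of caller these signatures serve; the helper name is hypothetical, and it is essentially what device_get_child_node_count() does internally:

    #include <linux/property.h>

    /* Count a device's firmware child nodes; dev may now be const. */
    static unsigned int my_count_children(const struct device *dev)
    {
        struct fwnode_handle *child = NULL;
        unsigned int n = 0;

        while ((child = device_get_next_child_node(dev, child)))
            n++;

        return n;
    }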
+ config DRIVER_PE_KUNIT_TEST bool "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS depends on KUNIT=y diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c index 8eea2529da20a7ec76efade7937ad91b6e0aee7c..7a368c90467d1cd038c896640e82dada88007956 100644 --- a/drivers/block/aoe/aoechr.c +++ b/drivers/block/aoe/aoechr.c @@ -273,7 +273,7 @@ static const struct file_operations aoe_fops = { .llseek = noop_llseek, }; -static char *aoe_devnode(struct device *dev, umode_t *mode) +static char *aoe_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev)); } diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 0e58a3187345e73b6a386d1c2888f6c441f0ac3e..757f4692b5bd8034909b32334b27f5e952bb935e 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1030,6 +1030,9 @@ randomize: sock.socket->sk->sk_allocation = GFP_NOIO; msock.socket->sk->sk_allocation = GFP_NOIO; + sock.socket->sk->sk_use_task_frag = false; + msock.socket->sk->sk_use_task_frag = false; + sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK; msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index e379ccc63c520788c93c9adfab077fb41dda8701..592cfa8b765a578bd69a7c5cf33f49964d565e5f 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -512,6 +512,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, noreclaim_flag = memalloc_noreclaim_save(); do { sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC; + sock->sk->sk_use_task_frag = false; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig index d4100b0c083ec44e8235cd58787e6b00ebdc0599..0386b7da02aa3ba46d187358d5fe3a0302b97a8d 100644 --- a/drivers/block/zram/Kconfig +++ b/drivers/block/zram/Kconfig @@ -78,3 +78,12 @@ config ZRAM_MEMORY_TRACKING /sys/kernel/debug/zram/zramX/block_state. See Documentation/admin-guide/blockdev/zram.rst for more information. + +config ZRAM_MULTI_COMP + bool "Enable multiple compression streams" + depends on ZRAM + help + This will enable multi-compression streams, so that ZRAM can + re-compress pages using a potentially slower but more effective + compression algorithm. Note that IDLE page recompression + requires ZRAM_MEMORY_TRACKING. diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index 0916de952e091e2e38116bb548d84b127fcc0569..55af4efd79835666a857884a992ba37c2d7d3a08 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -206,7 +206,7 @@ void zcomp_destroy(struct zcomp *comp) * case of allocation error, or any other error potentially * returned by zcomp_init(). */ -struct zcomp *zcomp_create(const char *compress) +struct zcomp *zcomp_create(const char *alg) { struct zcomp *comp; int error; @@ -216,14 +216,14 @@ struct zcomp *zcomp_create(const char *compress) * is not loaded yet. We must do it here, otherwise we are about to * call /sbin/modprobe under CPU hot-plug lock.
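Editor's aside: the drbd and nbd hunks above clear sk_use_task_frag on sockets that kernel threads may drive on behalf of arbitrary processes, so the network stack will not cache page fragments in whichever task happens to be current. A hedged sketch of the same opt-out for a hypothetical in-kernel socket user:

    #include <linux/net.h>
    #include <net/sock.h>

    /* Prepare a kernel-owned socket that is used from many task contexts. */
    static void my_prepare_socket(struct socket *sock)
    {
        sock->sk->sk_allocation = GFP_NOIO;  /* no reclaim recursion into I/O */
        sock->sk->sk_use_task_frag = false;  /* socket outlives any one task */
    }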
- if (!zcomp_available_algorithm(compress)) + if (!zcomp_available_algorithm(alg)) return ERR_PTR(-EINVAL); comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL); if (!comp) return ERR_PTR(-ENOMEM); - comp->name = compress; + comp->name = alg; error = zcomp_init(comp); if (error) { kfree(comp); diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h index 40f6420f4b2e92d6129e4ba37c9daa5b3e229810..cdefdef93da8c00d3086e10c0980f7c5462694e8 100644 --- a/drivers/block/zram/zcomp.h +++ b/drivers/block/zram/zcomp.h @@ -27,7 +27,7 @@ int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node); ssize_t zcomp_available_show(const char *comp, char *buf); bool zcomp_available_algorithm(const char *comp); -struct zcomp *zcomp_create(const char *comp); +struct zcomp *zcomp_create(const char *alg); void zcomp_destroy(struct zcomp *comp); struct zcomp_strm *zcomp_stream_get(struct zcomp *comp); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 966aab902d19a6c1a4720fc78138761e267b23af..e290d6d970474eeba24d913a168242990a10c5f7 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -155,6 +155,25 @@ static inline bool is_partial_io(struct bio_vec *bvec) } #endif +static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio) +{ + prio &= ZRAM_COMP_PRIORITY_MASK; + /* + * Clear previous priority value first, in case we recompress + * an already recompressed page further + */ + zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK << + ZRAM_COMP_PRIORITY_BIT1); + zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1); +} + +static inline u32 zram_get_priority(struct zram *zram, u32 index) +{ + u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1; + + return prio & ZRAM_COMP_PRIORITY_MASK; +} + /* * Check if request is within bounds and aligned on zram logical blocks.
*/ @@ -188,16 +207,13 @@ static void update_position(u32 *index, int *offset, struct bio_vec *bvec) static inline void update_used_max(struct zram *zram, const unsigned long pages) { - unsigned long old_max, cur_max; - - old_max = atomic_long_read(&zram->stats.max_used_pages); + unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages); do { - cur_max = old_max; - if (pages > cur_max) - old_max = atomic_long_cmpxchg( - &zram->stats.max_used_pages, cur_max, pages); - } while (old_max != cur_max); + if (cur_max >= pages) + return; + } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages, + &cur_max, pages)); } static inline void zram_fill_page(void *ptr, unsigned long len, @@ -629,10 +645,10 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec, #define PAGE_WB_SIG "page_index=" -#define PAGE_WRITEBACK 0 -#define HUGE_WRITEBACK (1<<0) -#define IDLE_WRITEBACK (1<<1) - +#define PAGE_WRITEBACK 0 +#define HUGE_WRITEBACK (1<<0) +#define IDLE_WRITEBACK (1<<1) +#define INCOMPRESSIBLE_WRITEBACK (1<<2) static ssize_t writeback_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) @@ -653,6 +669,8 @@ static ssize_t writeback_store(struct device *dev, mode = HUGE_WRITEBACK; else if (sysfs_streq(buf, "huge_idle")) mode = IDLE_WRITEBACK | HUGE_WRITEBACK; + else if (sysfs_streq(buf, "incompressible")) + mode = INCOMPRESSIBLE_WRITEBACK; else { if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1)) return -EINVAL; @@ -715,11 +733,15 @@ static ssize_t writeback_store(struct device *dev, goto next; if (mode & IDLE_WRITEBACK && - !zram_test_flag(zram, index, ZRAM_IDLE)) + !zram_test_flag(zram, index, ZRAM_IDLE)) goto next; if (mode & HUGE_WRITEBACK && - !zram_test_flag(zram, index, ZRAM_HUGE)) + !zram_test_flag(zram, index, ZRAM_HUGE)) + goto next; + if (mode & INCOMPRESSIBLE_WRITEBACK && + !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) goto next; + /* * Clearing ZRAM_UNDER_WB is duty of caller. * IOW, zram_free_page never clear it. @@ -753,8 +775,12 @@ static ssize_t writeback_store(struct device *dev, zram_clear_flag(zram, index, ZRAM_IDLE); zram_slot_unlock(zram, index); /* - * Return last IO error unless every IO were - * not suceeded. + * BIO errors are not fatal, we continue and simply + * attempt to writeback the remaining objects (pages). + * At the same time we need to signal user-space that + * some writes (at least one, but also could be all of + * them) were not successful and we do so by returning + * the most recent BIO error. */ ret = err; continue; @@ -920,13 +946,16 @@ static ssize_t read_block_state(struct file *file, char __user *buf, ts = ktime_to_timespec64(zram->table[index].ac_time); copied = snprintf(kbuf + written, count, - "%12zd %12lld.%06lu %c%c%c%c\n", + "%12zd %12lld.%06lu %c%c%c%c%c%c\n", index, (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC, zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.', zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.', zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.', - zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.'); + zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.', + zram_get_priority(zram, index) ? 'r' : '.', + zram_test_flag(zram, index, + ZRAM_INCOMPRESSIBLE) ? 
'n' : '.'); if (count <= copied) { zram_slot_unlock(zram, index); @@ -1000,46 +1029,143 @@ static ssize_t max_comp_streams_store(struct device *dev, return len; } -static ssize_t comp_algorithm_show(struct device *dev, - struct device_attribute *attr, char *buf) +static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg) { - size_t sz; - struct zram *zram = dev_to_zram(dev); + /* Do not free statically defined compression algorithms */ + if (zram->comp_algs[prio] != default_compressor) + kfree(zram->comp_algs[prio]); + + zram->comp_algs[prio] = alg; +} + +static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf) +{ + ssize_t sz; down_read(&zram->init_lock); - sz = zcomp_available_show(zram->compressor, buf); + sz = zcomp_available_show(zram->comp_algs[prio], buf); up_read(&zram->init_lock); return sz; } -static ssize_t comp_algorithm_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) +static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf) { - struct zram *zram = dev_to_zram(dev); - char compressor[ARRAY_SIZE(zram->compressor)]; + char *compressor; size_t sz; - strscpy(compressor, buf, sizeof(compressor)); + sz = strlen(buf); + if (sz >= CRYPTO_MAX_ALG_NAME) + return -E2BIG; + + compressor = kstrdup(buf, GFP_KERNEL); + if (!compressor) + return -ENOMEM; + /* ignore trailing newline */ - sz = strlen(compressor); if (sz > 0 && compressor[sz - 1] == '\n') compressor[sz - 1] = 0x00; - if (!zcomp_available_algorithm(compressor)) + if (!zcomp_available_algorithm(compressor)) { + kfree(compressor); return -EINVAL; + } down_write(&zram->init_lock); if (init_done(zram)) { up_write(&zram->init_lock); + kfree(compressor); pr_info("Can't change algorithm for initialized device\n"); return -EBUSY; } - strcpy(zram->compressor, compressor); + comp_algorithm_set(zram, prio, compressor); up_write(&zram->init_lock); - return len; + return 0; +} + +static ssize_t comp_algorithm_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct zram *zram = dev_to_zram(dev); + + return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf); +} + +static ssize_t comp_algorithm_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + struct zram *zram = dev_to_zram(dev); + int ret; + + ret = __comp_algorithm_store(zram, ZRAM_PRIMARY_COMP, buf); + return ret ? 
ret : len; +} + +#ifdef CONFIG_ZRAM_MULTI_COMP +static ssize_t recomp_algorithm_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct zram *zram = dev_to_zram(dev); + ssize_t sz = 0; + u32 prio; + + for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) { + if (!zram->comp_algs[prio]) + continue; + + sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "#%d: ", prio); + sz += __comp_algorithm_show(zram, prio, buf + sz); + } + + return sz; +} + +static ssize_t recomp_algorithm_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + struct zram *zram = dev_to_zram(dev); + int prio = ZRAM_SECONDARY_COMP; + char *args, *param, *val; + char *alg = NULL; + int ret; + + args = skip_spaces(buf); + while (*args) { + args = next_arg(args, ¶m, &val); + + if (!*val) + return -EINVAL; + + if (!strcmp(param, "algo")) { + alg = val; + continue; + } + + if (!strcmp(param, "priority")) { + ret = kstrtoint(val, 10, &prio); + if (ret) + return ret; + continue; + } + } + + if (!alg) + return -EINVAL; + + if (prio < ZRAM_SECONDARY_COMP || prio >= ZRAM_MAX_COMPS) + return -EINVAL; + + ret = __comp_algorithm_store(zram, prio, alg); + return ret ? ret : len; } +#endif static ssize_t compact_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) @@ -1210,6 +1336,11 @@ static void zram_free_page(struct zram *zram, size_t index) atomic64_dec(&zram->stats.huge_pages); } + if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) + zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE); + + zram_set_priority(zram, index, 0); + if (zram_test_flag(zram, index, ZRAM_WB)) { zram_clear_flag(zram, index, ZRAM_WB); free_block_bdev(zram, zram_get_element(zram, index)); @@ -1242,32 +1373,37 @@ out: ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB)); } -static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, - struct bio *bio, bool partial_io) +/* + * Reads a page from the writeback devices. Corresponding ZRAM slot + * should be unlocked. + */ +static int zram_bvec_read_from_bdev(struct zram *zram, struct page *page, + u32 index, struct bio *bio, bool partial_io) +{ + struct bio_vec bvec = { + .bv_page = page, + .bv_len = PAGE_SIZE, + .bv_offset = 0, + }; + + return read_from_bdev(zram, &bvec, zram_get_element(zram, index), bio, + partial_io); +} + +/* + * Reads (decompresses if needed) a page from zspool (zsmalloc). + * Corresponding ZRAM slot should be locked. 
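Editor's aside: zram_set_priority()/zram_get_priority() above pack a two-bit compressor index into the per-slot flags word. The same bit arithmetic in a self-contained user-space form; the mask value is taken from the patch, while the shift position is a hypothetical stand-in for ZRAM_COMP_PRIORITY_BIT1:

    #include <assert.h>

    #define PRIO_MASK  0x3UL   /* two bits -> priorities 0..3 */
    #define PRIO_SHIFT 5       /* stand-in for ZRAM_COMP_PRIORITY_BIT1 */

    static unsigned long set_priority(unsigned long flags, unsigned long prio)
    {
        flags &= ~(PRIO_MASK << PRIO_SHIFT);   /* clear the previous value */
        return flags | ((prio & PRIO_MASK) << PRIO_SHIFT);
    }

    static unsigned long get_priority(unsigned long flags)
    {
        return (flags >> PRIO_SHIFT) & PRIO_MASK;
    }

    int main(void)
    {
        unsigned long flags = 0;

        flags = set_priority(flags, 2);
        assert(get_priority(flags) == 2);
        flags = set_priority(flags, 1);    /* re-setting clears the old bits */
        assert(get_priority(flags) == 1);
        return 0;
    }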
+ */ +static int zram_read_from_zspool(struct zram *zram, struct page *page, + u32 index) { struct zcomp_strm *zstrm; unsigned long handle; unsigned int size; void *src, *dst; + u32 prio; int ret; - zram_slot_lock(zram, index); - if (zram_test_flag(zram, index, ZRAM_WB)) { - struct bio_vec bvec; - - zram_slot_unlock(zram, index); - /* A null bio means rw_page was used, we must fallback to bio */ - if (!bio) - return -EOPNOTSUPP; - - bvec.bv_page = page; - bvec.bv_len = PAGE_SIZE; - bvec.bv_offset = 0; - return read_from_bdev(zram, &bvec, - zram_get_element(zram, index), - bio, partial_io); - } - handle = zram_get_handle(zram, index); if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) { unsigned long value; @@ -1277,14 +1413,15 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, mem = kmap_atomic(page); zram_fill_page(mem, PAGE_SIZE, value); kunmap_atomic(mem); - zram_slot_unlock(zram, index); return 0; } size = zram_get_obj_size(zram, index); - if (size != PAGE_SIZE) - zstrm = zcomp_stream_get(zram->comp); + if (size != PAGE_SIZE) { + prio = zram_get_priority(zram, index); + zstrm = zcomp_stream_get(zram->comps[prio]); + } src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); if (size == PAGE_SIZE) { @@ -1296,20 +1433,43 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, dst = kmap_atomic(page); ret = zcomp_decompress(zstrm, src, size, dst); kunmap_atomic(dst); - zcomp_stream_put(zram->comp); + zcomp_stream_put(zram->comps[prio]); } zs_unmap_object(zram->mem_pool, handle); - zram_slot_unlock(zram, index); + return ret; +} + +static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, + struct bio *bio, bool partial_io) +{ + int ret; + + zram_slot_lock(zram, index); + if (!zram_test_flag(zram, index, ZRAM_WB)) { + /* Slot should be locked throughout the function call */ + ret = zram_read_from_zspool(zram, page, index); + zram_slot_unlock(zram, index); + } else { + /* Slot should be unlocked before the function call */ + zram_slot_unlock(zram, index); + + /* A null bio means rw_page was used, we must fallback to bio */ + if (!bio) + return -EOPNOTSUPP; + + ret = zram_bvec_read_from_bdev(zram, page, index, bio, + partial_io); + } /* Should NEVER happen. Return bio error if it does. */ - if (WARN_ON(ret)) + if (WARN_ON(ret < 0)) pr_err("Decompression failed! err=%d, page=%u\n", ret, index); return ret; } static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, - u32 index, int offset, struct bio *bio) + u32 index, int offset, struct bio *bio) { int ret; struct page *page; @@ -1363,13 +1523,13 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, kunmap_atomic(mem); compress_again: - zstrm = zcomp_stream_get(zram->comp); + zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]); src = kmap_atomic(page); ret = zcomp_compress(zstrm, src, &comp_len); kunmap_atomic(src); if (unlikely(ret)) { - zcomp_stream_put(zram->comp); + zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); pr_err("Compression failed! err=%d\n", ret); zs_free(zram->mem_pool, handle); return ret; @@ -1390,19 +1550,19 @@ compress_again: * if we have a 'non-null' handle here then we are coming * from the slow path and handle has already been allocated.
*/ - if (IS_ERR((void *)handle)) + if (IS_ERR_VALUE(handle)) handle = zs_malloc(zram->mem_pool, comp_len, __GFP_KSWAPD_RECLAIM | __GFP_NOWARN | __GFP_HIGHMEM | __GFP_MOVABLE); - if (IS_ERR((void *)handle)) { - zcomp_stream_put(zram->comp); + if (IS_ERR_VALUE(handle)) { + zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); atomic64_inc(&zram->stats.writestall); handle = zs_malloc(zram->mem_pool, comp_len, GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE); - if (IS_ERR((void *)handle)) + if (IS_ERR_VALUE(handle)) return PTR_ERR((void *)handle); if (comp_len != PAGE_SIZE) @@ -1414,14 +1574,14 @@ compress_again: * zstrm buffer back. It is necessary that the dereferencing * of the zstrm variable below occurs correctly. */ - zstrm = zcomp_stream_get(zram->comp); + zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]); } alloced_pages = zs_get_total_pages(zram->mem_pool); update_used_max(zram, alloced_pages); if (zram->limit_pages && alloced_pages > zram->limit_pages) { - zcomp_stream_put(zram->comp); + zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); zs_free(zram->mem_pool, handle); return -ENOMEM; } @@ -1435,7 +1595,7 @@ compress_again: if (comp_len == PAGE_SIZE) kunmap_atomic(src); - zcomp_stream_put(zram->comp); + zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); zs_unmap_object(zram->mem_pool, handle); atomic64_add(comp_len, &zram->stats.compr_data_size); out: @@ -1504,6 +1664,274 @@ out: return ret; } +#ifdef CONFIG_ZRAM_MULTI_COMP +/* + * This function will decompress (unless it's ZRAM_HUGE) the page and then + * attempt to compress it using provided compression algorithm priority + * (which is potentially more effective). + * + * Corresponding ZRAM slot should be locked. + */ +static int zram_recompress(struct zram *zram, u32 index, struct page *page, + u32 threshold, u32 prio, u32 prio_max) +{ + struct zcomp_strm *zstrm = NULL; + unsigned long handle_old; + unsigned long handle_new; + unsigned int comp_len_old; + unsigned int comp_len_new; + unsigned int class_index_old; + unsigned int class_index_new; + u32 num_recomps = 0; + void *src, *dst; + int ret; + + handle_old = zram_get_handle(zram, index); + if (!handle_old) + return -EINVAL; + + comp_len_old = zram_get_obj_size(zram, index); + /* + * Do not recompress objects that are already "small enough". + */ + if (comp_len_old < threshold) + return 0; + + ret = zram_read_from_zspool(zram, page, index); + if (ret) + return ret; + + class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old); + /* + * Iterate the secondary comp algorithms list (in order of priority) + * and try to recompress the page. + */ + for (; prio < prio_max; prio++) { + if (!zram->comps[prio]) + continue; + + /* + * Skip if the object is already re-compressed with a higher + * priority algorithm (or same algorithm). + */ + if (prio <= zram_get_priority(zram, index)) + continue; + + num_recomps++; + zstrm = zcomp_stream_get(zram->comps[prio]); + src = kmap_atomic(page); + ret = zcomp_compress(zstrm, src, &comp_len_new); + kunmap_atomic(src); + + if (ret) { + zcomp_stream_put(zram->comps[prio]); + return ret; + } + + class_index_new = zs_lookup_class_index(zram->mem_pool, + comp_len_new); + + /* Continue until we make progress */ + if (class_index_new >= class_index_old || + (threshold && comp_len_new >= threshold)) { + zcomp_stream_put(zram->comps[prio]); + continue; + } + + /* Recompression was successful so break out */ + break; + } + + /* + * We did not try to recompress, e.g. 
when we have only one + * secondary algorithm and the page is already recompressed + * using that algorithm + */ + if (!zstrm) + return 0; + + if (class_index_new >= class_index_old) { + /* + * Secondary algorithms failed to re-compress the page + * in a way that would save memory, mark the object as + * incompressible so that we will not try to compress + * it again. + * + * We need to make sure that all secondary algorithms have + * failed, so we test if the number of recompressions matches + * the number of active secondary algorithms. + */ + if (num_recomps == zram->num_active_comps - 1) + zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE); + return 0; + } + + /* Successful recompression but above threshold */ + if (threshold && comp_len_new >= threshold) + return 0; + + /* + * No direct reclaim (slow path) for handle allocation and no + * re-compression attempt (unlike in __zram_bvec_write()) since + * we already have stored that object in zsmalloc. If we cannot + * alloc memory for recompressed object then we bail out and + * simply keep the old (existing) object in zsmalloc. + */ + handle_new = zs_malloc(zram->mem_pool, comp_len_new, + __GFP_KSWAPD_RECLAIM | + __GFP_NOWARN | + __GFP_HIGHMEM | + __GFP_MOVABLE); + if (IS_ERR_VALUE(handle_new)) { + zcomp_stream_put(zram->comps[prio]); + return PTR_ERR((void *)handle_new); + } + + dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO); + memcpy(dst, zstrm->buffer, comp_len_new); + zcomp_stream_put(zram->comps[prio]); + + zs_unmap_object(zram->mem_pool, handle_new); + + zram_free_page(zram, index); + zram_set_handle(zram, index, handle_new); + zram_set_obj_size(zram, index, comp_len_new); + zram_set_priority(zram, index, prio); + + atomic64_add(comp_len_new, &zram->stats.compr_data_size); + atomic64_inc(&zram->stats.pages_stored); + + return 0; +} + +#define RECOMPRESS_IDLE (1 << 0) +#define RECOMPRESS_HUGE (1 << 1) + +static ssize_t recompress_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS; + struct zram *zram = dev_to_zram(dev); + unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; + char *args, *param, *val, *algo = NULL; + u32 mode = 0, threshold = 0; + unsigned long index; + struct page *page; + ssize_t ret; + + args = skip_spaces(buf); + while (*args) { + args = next_arg(args, ¶m, &val); + + if (!*val) + return -EINVAL; + + if (!strcmp(param, "type")) { + if (!strcmp(val, "idle")) + mode = RECOMPRESS_IDLE; + if (!strcmp(val, "huge")) + mode = RECOMPRESS_HUGE; + if (!strcmp(val, "huge_idle")) + mode = RECOMPRESS_IDLE | RECOMPRESS_HUGE; + continue; + } + + if (!strcmp(param, "threshold")) { + /* + * We will re-compress only idle objects equal or + * greater in size than watermark. 
+ */ + ret = kstrtouint(val, 10, &threshold); + if (ret) + return ret; + continue; + } + + if (!strcmp(param, "algo")) { + algo = val; + continue; + } + } + + if (threshold >= PAGE_SIZE) + return -EINVAL; + + down_read(&zram->init_lock); + if (!init_done(zram)) { + ret = -EINVAL; + goto release_init_lock; + } + + if (algo) { + bool found = false; + + for (; prio < ZRAM_MAX_COMPS; prio++) { + if (!zram->comp_algs[prio]) + continue; + + if (!strcmp(zram->comp_algs[prio], algo)) { + prio_max = min(prio + 1, ZRAM_MAX_COMPS); + found = true; + break; + } + } + + if (!found) { + ret = -EINVAL; + goto release_init_lock; + } + } + + page = alloc_page(GFP_KERNEL); + if (!page) { + ret = -ENOMEM; + goto release_init_lock; + } + + ret = len; + for (index = 0; index < nr_pages; index++) { + int err = 0; + + zram_slot_lock(zram, index); + + if (!zram_allocated(zram, index)) + goto next; + + if (mode & RECOMPRESS_IDLE && + !zram_test_flag(zram, index, ZRAM_IDLE)) + goto next; + + if (mode & RECOMPRESS_HUGE && + !zram_test_flag(zram, index, ZRAM_HUGE)) + goto next; + + if (zram_test_flag(zram, index, ZRAM_WB) || + zram_test_flag(zram, index, ZRAM_UNDER_WB) || + zram_test_flag(zram, index, ZRAM_SAME) || + zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) + goto next; + + err = zram_recompress(zram, index, page, threshold, + prio, prio_max); +next: + zram_slot_unlock(zram, index); + if (err) { + ret = err; + break; + } + + cond_resched(); + } + + __free_page(page); + +release_init_lock: + up_read(&zram->init_lock); + return ret; +} +#endif + /* * zram_bio_discard - handler on discard request * @index: physical block index in PAGE_SIZE units @@ -1553,11 +1981,9 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, int ret; if (!op_is_write(op)) { - atomic64_inc(&zram->stats.num_reads); ret = zram_bvec_read(zram, bvec, index, offset, bio); flush_dcache_page(bvec->bv_page); } else { - atomic64_inc(&zram->stats.num_writes); ret = zram_bvec_write(zram, bvec, index, offset, bio); } @@ -1710,6 +2136,21 @@ out: return ret; } +static void zram_destroy_comps(struct zram *zram) +{ + u32 prio; + + for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) { + struct zcomp *comp = zram->comps[prio]; + + zram->comps[prio] = NULL; + if (!comp) + continue; + zcomp_destroy(comp); + zram->num_active_comps--; + } +} + static void zram_reset_device(struct zram *zram) { down_write(&zram->init_lock); @@ -1727,11 +2168,11 @@ static void zram_reset_device(struct zram *zram) /* I/O operation under all of CPU are done so let's free */ zram_meta_free(zram, zram->disksize); zram->disksize = 0; + zram_destroy_comps(zram); memset(&zram->stats, 0, sizeof(zram->stats)); - zcomp_destroy(zram->comp); - zram->comp = NULL; reset_bdev(zram); + comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor); up_write(&zram->init_lock); } @@ -1742,6 +2183,7 @@ static ssize_t disksize_store(struct device *dev, struct zcomp *comp; struct zram *zram = dev_to_zram(dev); int err; + u32 prio; disksize = memparse(buf, NULL); if (!disksize) @@ -1760,22 +2202,29 @@ static ssize_t disksize_store(struct device *dev, goto out_unlock; } - comp = zcomp_create(zram->compressor); - if (IS_ERR(comp)) { - pr_err("Cannot initialise %s compressing backend\n", - zram->compressor); - err = PTR_ERR(comp); - goto out_free_meta; - } + for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) { + if (!zram->comp_algs[prio]) + continue; + + comp = zcomp_create(zram->comp_algs[prio]); + if (IS_ERR(comp)) { + pr_err("Cannot initialise %s compressing backend\n", + 
zram->comp_algs[prio]); + err = PTR_ERR(comp); + goto out_free_comps; + } - zram->comp = comp; + zram->comps[prio] = comp; + zram->num_active_comps++; + } zram->disksize = disksize; set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT); up_write(&zram->init_lock); return len; -out_free_meta: +out_free_comps: + zram_destroy_comps(zram); zram_meta_free(zram, disksize); out_unlock: up_write(&zram->init_lock); @@ -1860,6 +2309,10 @@ static DEVICE_ATTR_WO(writeback); static DEVICE_ATTR_RW(writeback_limit); static DEVICE_ATTR_RW(writeback_limit_enable); #endif +#ifdef CONFIG_ZRAM_MULTI_COMP +static DEVICE_ATTR_RW(recomp_algorithm); +static DEVICE_ATTR_WO(recompress); +#endif static struct attribute *zram_disk_attrs[] = { &dev_attr_disksize.attr, @@ -1883,6 +2336,10 @@ static struct attribute *zram_disk_attrs[] = { &dev_attr_bd_stat.attr, #endif &dev_attr_debug_stat.attr, +#ifdef CONFIG_ZRAM_MULTI_COMP + &dev_attr_recomp_algorithm.attr, + &dev_attr_recompress.attr, +#endif NULL, }; @@ -1962,7 +2419,7 @@ static int zram_add(void) if (ret) goto out_cleanup_disk; - strscpy(zram->compressor, default_compressor, sizeof(zram->compressor)); + comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor); zram_debugfs_register(zram); pr_info("Added device: %s\n", zram->disk->disk_name); diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index a2bda53020fddeb42d8b053abd68da8b91cf8f86..c5254626f051faebcf06453242d8711467b68981 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -40,6 +40,9 @@ */ #define ZRAM_FLAG_SHIFT (PAGE_SHIFT + 1) +/* Only 2 bits are allowed for comp priority index */ +#define ZRAM_COMP_PRIORITY_MASK 0x3 + /* Flags for zram pages (table[page_no].flags) */ enum zram_pageflags { /* zram slot is locked */ @@ -49,6 +52,10 @@ enum zram_pageflags { ZRAM_UNDER_WB, /* page is under writeback */ ZRAM_HUGE, /* Incompressible page */ ZRAM_IDLE, /* not accessed page since last idle marking */ + ZRAM_INCOMPRESSIBLE, /* none of the algorithms could compress it */ + + ZRAM_COMP_PRIORITY_BIT1, /* First bit of comp priority index */ + ZRAM_COMP_PRIORITY_BIT2, /* Second bit of comp priority index */ __NR_ZRAM_PAGEFLAGS, }; @@ -69,8 +76,6 @@ struct zram_table_entry { struct zram_stats { atomic64_t compr_data_size; /* compressed size of pages stored */ - atomic64_t num_reads; /* failed + successful */ - atomic64_t num_writes; /* --do-- */ atomic64_t failed_reads; /* can happen when memory is too low */ atomic64_t failed_writes; /* can happen when memory is too low */ atomic64_t invalid_io; /* non-page-aligned I/O requests */ @@ -89,10 +94,20 @@ struct zram_stats { #endif }; +#ifdef CONFIG_ZRAM_MULTI_COMP +#define ZRAM_PRIMARY_COMP 0U +#define ZRAM_SECONDARY_COMP 1U +#define ZRAM_MAX_COMPS 4U +#else +#define ZRAM_PRIMARY_COMP 0U +#define ZRAM_SECONDARY_COMP 0U +#define ZRAM_MAX_COMPS 1U +#endif + struct zram { struct zram_table_entry *table; struct zs_pool *mem_pool; - struct zcomp *comp; + struct zcomp *comps[ZRAM_MAX_COMPS]; struct gendisk *disk; /* Prevent concurrent execution of device init */ struct rw_semaphore init_lock; @@ -107,7 +122,8 @@ struct zram { * we can store in a disk. 
*/ u64 disksize; /* bytes */ - char compressor[CRYPTO_MAX_ALG_NAME]; + const char *comp_algs[ZRAM_MAX_COMPS]; + s8 num_active_comps; /* * zram is claimed so open request will be failed */ diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c index 26d0eddb147718af0f69f5ca5b4a6b92f0d679cd..1c69feee1703005db11c911d86e7dff0d4cb3a7a 100644 --- a/drivers/bus/mhi/host/boot.c +++ b/drivers/bus/mhi/host/boot.c @@ -118,9 +118,7 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) /* Hardware reset so force device to enter RDDM */ dev_dbg(dev, "Did not enter RDDM, do a host req reset\n"); - mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, - MHI_SOC_RESET_REQ_OFFSET, - MHI_SOC_RESET_REQ); + mhi_soc_reset(mhi_cntrl); udelay(delayus); } diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index caa4ce28cf9e553af7c8404f16d2d9013eaf2fc8..f39657f71483cb9d814a62bdbb51a1a3a489af08 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -24,6 +24,10 @@ #define HEALTH_CHECK_PERIOD (HZ * 2) +/* PCI VID definitions */ +#define PCI_VENDOR_ID_THALES 0x1269 +#define PCI_VENDOR_ID_QUECTEL 0x1eac + /** * struct mhi_pci_dev_info - MHI PCI device specific information * @config: MHI controller configuration @@ -340,6 +344,8 @@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = { MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1), + MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1), MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), }; @@ -542,6 +548,8 @@ static const struct mhi_pci_dev_info mhi_telit_fn990_info = { static const struct pci_device_id mhi_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200), .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info }, @@ -555,11 +563,11 @@ static const struct pci_device_id mhi_pci_id_table[] = { .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, - { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */ + { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, - { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */ + { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, - { PCI_DEVICE(0x1eac, 0x2001), /* EM120R-GL for FCCL (sdx24) */ + { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x2001), /* EM120R-GL for FCCL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, /* T99W175 (sdx55), Both for eSIM and Non-eSIM */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab), @@ -583,17 +591,20 @@ static const struct pci_device_id mhi_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9), .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info }, /* MV31-W (Cinterion) */ - { PCI_DEVICE(0x1269, 0x00b3), + { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3), .driver_data = (kernel_ulong_t) &mhi_mv31_info }, /* MV31-W (Cinterion), based on new baseline 
*/ - { PCI_DEVICE(0x1269, 0x00b4), + { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b4), .driver_data = (kernel_ulong_t) &mhi_mv31_info }, /* MV32-WA (Cinterion) */ - { PCI_DEVICE(0x1269, 0x00ba), + { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00ba), .driver_data = (kernel_ulong_t) &mhi_mv32_info }, /* MV32-WB (Cinterion) */ - { PCI_DEVICE(0x1269, 0x00bb), + { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00bb), .driver_data = (kernel_ulong_t) &mhi_mv32_info }, + /* T99W175 (sdx55), HP variant */ + { PCI_DEVICE(0x03f0, 0x0a6c), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, { } }; MODULE_DEVICE_TABLE(pci, mhi_pci_id_table); diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c index 4a42186ff1112ef169b227e31af283d53fdfcc3c..083459028a4b81e35d01179265af95a8711e4835 100644 --- a/drivers/bus/mhi/host/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -301,7 +301,8 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) read_lock_irq(&mhi_chan->lock); /* Only ring DB if ring is not empty */ - if (tre_ring->base && tre_ring->wp != tre_ring->rp) + if (tre_ring->base && tre_ring->wp != tre_ring->rp && + mhi_chan->ch_state == MHI_CH_STATE_ENABLED) mhi_ring_chan_db(mhi_cntrl, mhi_chan); read_unlock_irq(&mhi_chan->lock); } diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index 2b2095542816b84fcfbb0333a3efa80d7fd9ea8b..55397ba765d234ec43019d8d412e32f50c2ec7b8 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c @@ -488,26 +488,11 @@ static void agp_amdk7_remove(struct pci_dev *pdev) agp_put_bridge(bridge); } -#ifdef CONFIG_PM - -static int agp_amdk7_suspend(struct pci_dev *pdev, pm_message_t state) +static int agp_amdk7_resume(struct device *dev) { - pci_save_state(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return 0; -} - -static int agp_amdk7_resume(struct pci_dev *pdev) -{ - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - return amd_irongate_driver.configure(); } -#endif /* CONFIG_PM */ - /* must be the same order as name table above */ static const struct pci_device_id agp_amdk7_pci_table[] = { { @@ -539,15 +524,14 @@ static const struct pci_device_id agp_amdk7_pci_table[] = { MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table); +static DEFINE_SIMPLE_DEV_PM_OPS(agp_amdk7_pm_ops, NULL, agp_amdk7_resume); + static struct pci_driver agp_amdk7_pci_driver = { .name = "agpgart-amdk7", .id_table = agp_amdk7_pci_table, .probe = agp_amdk7_probe, .remove = agp_amdk7_remove, -#ifdef CONFIG_PM - .suspend = agp_amdk7_suspend, - .resume = agp_amdk7_resume, -#endif + .driver.pm = &agp_amdk7_pm_ops, }; static int __init agp_amdk7_init(void) diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 84a4aa9312cf8d845990f7b06ebc328b41064c7f..ce8651436609fcefa2259cf7ae1704bc1e933b0c 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -588,9 +588,7 @@ static void agp_amd64_remove(struct pci_dev *pdev) agp_bridges_found--; } -#define agp_amd64_suspend NULL - -static int __maybe_unused agp_amd64_resume(struct device *dev) +static int agp_amd64_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -727,7 +725,7 @@ static const struct pci_device_id agp_amd64_pci_promisc_table[] = { { } }; -static SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, agp_amd64_suspend, agp_amd64_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, NULL, agp_amd64_resume); static struct pci_driver agp_amd64_pci_driver = { .name = "agpgart-amd64", diff --git a/drivers/char/agp/ati-agp.c 
b/drivers/char/agp/ati-agp.c index 6f5530482d8332496fc48757f2d2a45ed26f96ea..3c1fce48aabe764d91b1381559d7a5c916c4fb53 100644 --- a/drivers/char/agp/ati-agp.c +++ b/drivers/char/agp/ati-agp.c @@ -238,23 +238,10 @@ static int ati_configure(void) } -#ifdef CONFIG_PM -static int agp_ati_suspend(struct pci_dev *dev, pm_message_t state) +static int agp_ati_resume(struct device *dev) { - pci_save_state(dev); - pci_set_power_state(dev, PCI_D3hot); - - return 0; -} - -static int agp_ati_resume(struct pci_dev *dev) -{ - pci_set_power_state(dev, PCI_D0); - pci_restore_state(dev); - return ati_configure(); } -#endif /* *Since we don't need contiguous memory we just try @@ -559,15 +546,14 @@ static const struct pci_device_id agp_ati_pci_table[] = { MODULE_DEVICE_TABLE(pci, agp_ati_pci_table); +static DEFINE_SIMPLE_DEV_PM_OPS(agp_ati_pm_ops, NULL, agp_ati_resume); + static struct pci_driver agp_ati_pci_driver = { .name = "agpgart-ati", .id_table = agp_ati_pci_table, .probe = agp_ati_probe, .remove = agp_ati_remove, -#ifdef CONFIG_PM - .suspend = agp_ati_suspend, - .resume = agp_ati_resume, -#endif + .driver.pm = &agp_ati_pm_ops, }; static int __init agp_ati_init(void) diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c index c53f0f9ef5b0b2db8bcd835163ef30e0264b6c37..f28d42319269579a0e05110486475104816fae16 100644 --- a/drivers/char/agp/efficeon-agp.c +++ b/drivers/char/agp/efficeon-agp.c @@ -412,18 +412,11 @@ static void agp_efficeon_remove(struct pci_dev *pdev) agp_put_bridge(bridge); } -#ifdef CONFIG_PM -static int agp_efficeon_suspend(struct pci_dev *dev, pm_message_t state) -{ - return 0; -} - -static int agp_efficeon_resume(struct pci_dev *pdev) +static int agp_efficeon_resume(struct device *dev) { printk(KERN_DEBUG PFX "agp_efficeon_resume()\n"); return efficeon_configure(); } -#endif static const struct pci_device_id agp_efficeon_pci_table[] = { { @@ -437,6 +430,8 @@ static const struct pci_device_id agp_efficeon_pci_table[] = { { } }; +static DEFINE_SIMPLE_DEV_PM_OPS(agp_efficeon_pm_ops, NULL, agp_efficeon_resume); + MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table); static struct pci_driver agp_efficeon_pci_driver = { @@ -444,10 +439,7 @@ static struct pci_driver agp_efficeon_pci_driver = { .id_table = agp_efficeon_pci_table, .probe = agp_efficeon_probe, .remove = agp_efficeon_remove, -#ifdef CONFIG_PM - .suspend = agp_efficeon_suspend, - .resume = agp_efficeon_resume, -#endif + .driver.pm = &agp_efficeon_pm_ops, }; static int __init agp_efficeon_init(void) diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 9e4f27a6cb5afad0034f1636a12a2b775cb36663..c518b3a9db04d6eca1c529d6790f0b9116965fb7 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -817,16 +817,15 @@ static void agp_intel_remove(struct pci_dev *pdev) agp_put_bridge(bridge); } -#ifdef CONFIG_PM -static int agp_intel_resume(struct pci_dev *pdev) +static int agp_intel_resume(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct agp_bridge_data *bridge = pci_get_drvdata(pdev); bridge->driver->configure(); return 0; } -#endif static const struct pci_device_id agp_intel_pci_table[] = { #define ID(x) \ @@ -895,14 +894,14 @@ static const struct pci_device_id agp_intel_pci_table[] = { MODULE_DEVICE_TABLE(pci, agp_intel_pci_table); +static DEFINE_SIMPLE_DEV_PM_OPS(agp_intel_pm_ops, NULL, agp_intel_resume); + static struct pci_driver agp_intel_pci_driver = { .name = "agpgart-intel", .id_table = agp_intel_pci_table, .probe = agp_intel_probe, .remove = 
agp_intel_remove, -#ifdef CONFIG_PM - .resume = agp_intel_resume, -#endif + .driver.pm = &agp_intel_pm_ops, }; static int __init agp_intel_init(void) diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c index 826dbd06f6bbb1eaac6c8fff2a770fb5b577d5cd..dbcbc06cc20293b71ea9088b1297178347233ce1 100644 --- a/drivers/char/agp/nvidia-agp.c +++ b/drivers/char/agp/nvidia-agp.c @@ -404,28 +404,13 @@ static void agp_nvidia_remove(struct pci_dev *pdev) agp_put_bridge(bridge); } -#ifdef CONFIG_PM -static int agp_nvidia_suspend(struct pci_dev *pdev, pm_message_t state) +static int agp_nvidia_resume(struct device *dev) { - pci_save_state(pdev); - pci_set_power_state(pdev, PCI_D3hot); - - return 0; -} - -static int agp_nvidia_resume(struct pci_dev *pdev) -{ - /* set power state 0 and restore PCI space */ - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* reconfigure AGP hardware again */ nvidia_configure(); return 0; } -#endif - static const struct pci_device_id agp_nvidia_pci_table[] = { { @@ -449,15 +434,14 @@ static const struct pci_device_id agp_nvidia_pci_table[] = { MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table); +static DEFINE_SIMPLE_DEV_PM_OPS(agp_nvidia_pm_ops, NULL, agp_nvidia_resume); + static struct pci_driver agp_nvidia_pci_driver = { .name = "agpgart-nvidia", .id_table = agp_nvidia_pci_table, .probe = agp_nvidia_probe, .remove = agp_nvidia_remove, -#ifdef CONFIG_PM - .suspend = agp_nvidia_suspend, - .resume = agp_nvidia_resume, -#endif + .driver.pm = &agp_nvidia_pm_ops, }; static int __init agp_nvidia_init(void) diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c index f8a02f4bef1bf3b97c700136f0c8a58e927cdd5d..484bb101c53b524f8540186251c43edda009ec77 100644 --- a/drivers/char/agp/sis-agp.c +++ b/drivers/char/agp/sis-agp.c @@ -217,10 +217,7 @@ static void agp_sis_remove(struct pci_dev *pdev) agp_put_bridge(bridge); } -#define agp_sis_suspend NULL - -static int __maybe_unused agp_sis_resume( - __attribute__((unused)) struct device *dev) +static int agp_sis_resume(__attribute__((unused)) struct device *dev) { return sis_driver.configure(); } @@ -407,7 +404,7 @@ static const struct pci_device_id agp_sis_pci_table[] = { MODULE_DEVICE_TABLE(pci, agp_sis_pci_table); -static SIMPLE_DEV_PM_OPS(agp_sis_pm_ops, agp_sis_suspend, agp_sis_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(agp_sis_pm_ops, NULL, agp_sis_resume); static struct pci_driver agp_sis_pci_driver = { .name = "agpgart-sis", diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c index b2f484f527fb786bd54908849f141f1d9ea9b53f..bc5140af2dcbe752532e1d8360d279308a18c069 100644 --- a/drivers/char/agp/via-agp.c +++ b/drivers/char/agp/via-agp.c @@ -489,9 +489,7 @@ static void agp_via_remove(struct pci_dev *pdev) agp_put_bridge(bridge); } -#define agp_via_suspend NULL - -static int __maybe_unused agp_via_resume(struct device *dev) +static int agp_via_resume(struct device *dev) { struct agp_bridge_data *bridge = dev_get_drvdata(dev); @@ -551,7 +549,7 @@ static const struct pci_device_id agp_via_pci_table[] = { MODULE_DEVICE_TABLE(pci, agp_via_pci_table); -static SIMPLE_DEV_PM_OPS(agp_via_pm_ops, agp_via_suspend, agp_via_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(agp_via_pm_ops, NULL, agp_via_resume); static struct pci_driver agp_via_pci_driver = { .name = "agpgart-via", diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index c22d4184bb612a61c83762e0b0feaf21f118fe15..0555e3838bce1fbead6536a0f8a841aecbec5207 100644 --- a/drivers/char/hw_random/amd-rng.c 
+++ b/drivers/char/hw_random/amd-rng.c @@ -143,15 +143,19 @@ static int __init amd_rng_mod_init(void) found: err = pci_read_config_dword(pdev, 0x58, &pmbase); if (err) - return err; + goto put_dev; pmbase &= 0x0000FF00; - if (pmbase == 0) - return -EIO; + if (pmbase == 0) { + err = -EIO; + goto put_dev; + } priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + if (!priv) { + err = -ENOMEM; + goto put_dev; + } if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) { dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", @@ -185,6 +189,8 @@ err_iomap: release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE); out: kfree(priv); +put_dev: + pci_dev_put(pdev); return err; } @@ -200,6 +206,8 @@ static void __exit amd_rng_mod_exit(void) release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE); + pci_dev_put(priv->pcidev); + kfree(priv); } diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c index 7c55f4cf4a8ba69b27185fc27432c363e6d8acc9..c99c54cd99c67675bd6b679dbd57986feaeb40a7 100644 --- a/drivers/char/hw_random/cavium-rng-vf.c +++ b/drivers/char/hw_random/cavium-rng-vf.c @@ -225,7 +225,6 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev, return -ENOMEM; rng->ops.read = cavium_rng_read; - rng->ops.quality = 1000; pci_set_drvdata(pdev, rng); diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c index a01e9307737c5e11eea9ccd9dd5c10fe724ac04b..c1193f85982c36cae1cd1bf57db09f9ceb99b9a6 100644 --- a/drivers/char/hw_random/cn10k-rng.c +++ b/drivers/char/hw_random/cn10k-rng.c @@ -145,7 +145,6 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; rng->ops.read = cn10k_rng_read; - rng->ops.quality = 1000; rng->ops.priv = (unsigned long)rng; reset_rng_health_state(rng); diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 63a0a8e4505d2b232714b92af6c71d3d23fc9cea..f34d356fe2c06104c8a9cde73f0b0aa88a4d6671 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -41,14 +41,14 @@ static DEFINE_MUTEX(reading_mutex); static int data_avail; static u8 *rng_buffer, *rng_fillbuf; static unsigned short current_quality; -static unsigned short default_quality; /* = 0; default to "off" */ +static unsigned short default_quality = 1024; /* default to maximum */ module_param(current_quality, ushort, 0644); MODULE_PARM_DESC(current_quality, "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead"); module_param(default_quality, ushort, 0644); MODULE_PARM_DESC(default_quality, - "default entropy content of hwrng per 1024 bits of input"); + "default maximum entropy content of hwrng per 1024 bits of input"); static void drop_current_rng(void); static int hwrng_init(struct hwrng *rng); @@ -172,10 +172,7 @@ static int hwrng_init(struct hwrng *rng) reinit_completion(&rng->cleanup_done); skip_init: - if (!rng->quality) - rng->quality = default_quality; - if (rng->quality > 1024) - rng->quality = 1024; + rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024); current_quality = rng->quality; /* obsolete */ return 0; diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index 138ce434f86b20b7552ec962cb8267115b97c059..12fbe809183190209c371a3983b1270e2f3871f1 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c @@ -51,6 +51,10 @@ static const struct pci_device_id pci_tbl[] = { }; 
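/*
 * A minimal sketch of the quality policy that results from the
 * hwrng_init() change in drivers/char/hw_random/core.c above;
 * hwrng_effective_quality() is a hypothetical name, not a function
 * this diff adds:
 *
 *	static u16 hwrng_effective_quality(u16 driver_quality,
 *					   u16 default_quality)
 *	{
 *		return min_t(u16, min_t(u16, default_quality, 1024),
 *			     driver_quality ?: 1024);
 *	}
 *
 * A driver that leaves ->quality at 0 now gets the 1024 maximum by
 * default, which is why the explicit "quality = 1000" and
 * "quality = 1024" assignments are dropped from the hw_random and
 * crypto drivers elsewhere in this diff; a non-zero driver value or a
 * smaller default_quality module parameter still lowers the estimate.
 */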
MODULE_DEVICE_TABLE(pci, pci_tbl); +struct amd_geode_priv { + struct pci_dev *pcidev; + void __iomem *membase; +}; static int geode_rng_data_read(struct hwrng *rng, u32 *data) { @@ -90,6 +94,7 @@ static int __init geode_rng_init(void) const struct pci_device_id *ent; void __iomem *mem; unsigned long rng_base; + struct amd_geode_priv *priv; for_each_pci_dev(pdev) { ent = pci_match_id(pci_tbl, pdev); @@ -97,17 +102,26 @@ static int __init geode_rng_init(void) goto found; } /* Device not found. */ - goto out; + return err; found: + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + err = -ENOMEM; + goto put_dev; + } + rng_base = pci_resource_start(pdev, 0); if (rng_base == 0) - goto out; + goto free_priv; err = -ENOMEM; mem = ioremap(rng_base, 0x58); if (!mem) - goto out; - geode_rng.priv = (unsigned long)mem; + goto free_priv; + + geode_rng.priv = (unsigned long)priv; + priv->membase = mem; + priv->pcidev = pdev; pr_info("AMD Geode RNG detected\n"); err = hwrng_register(&geode_rng); @@ -116,20 +130,26 @@ found: err); goto err_unmap; } -out: return err; err_unmap: iounmap(mem); - goto out; +free_priv: + kfree(priv); +put_dev: + pci_dev_put(pdev); + return err; } static void __exit geode_rng_exit(void) { - void __iomem *mem = (void __iomem *)geode_rng.priv; + struct amd_geode_priv *priv; + priv = (struct amd_geode_priv *)geode_rng.priv; hwrng_unregister(&geode_rng); - iounmap(mem); + iounmap(priv->membase); + pci_dev_put(priv->pcidev); + kfree(priv); } module_init(geode_rng_init); diff --git a/drivers/char/hw_random/mpfs-rng.c b/drivers/char/hw_random/mpfs-rng.c index 5813da617a485e9afb5b4840a691b34e23b4bd16..c6972734ae62e8e31b734b10beaa6ceed13af326 100644 --- a/drivers/char/hw_random/mpfs-rng.c +++ b/drivers/char/hw_random/mpfs-rng.c @@ -78,7 +78,6 @@ static int mpfs_rng_probe(struct platform_device *pdev) rng_priv->rng.read = mpfs_rng_read; rng_priv->rng.name = pdev->name; - rng_priv->rng.quality = 1024; platform_set_drvdata(pdev, rng_priv); diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c index 6c00ea0085553a60c246837f626e3a69c5ae01cc..aa993753ab120bcfaff7b14fbdd36524e7a6fc0d 100644 --- a/drivers/char/hw_random/mtk-rng.c +++ b/drivers/char/hw_random/mtk-rng.c @@ -22,7 +22,7 @@ #define RNG_AUTOSUSPEND_TIMEOUT 100 #define USEC_POLL 2 -#define TIMEOUT_POLL 20 +#define TIMEOUT_POLL 60 #define RNG_CTRL 0x00 #define RNG_EN BIT(0) @@ -77,7 +77,7 @@ static bool mtk_rng_wait_ready(struct hwrng *rng, bool wait) readl_poll_timeout_atomic(priv->base + RNG_CTRL, ready, ready & RNG_READY, USEC_POLL, TIMEOUT_POLL); - return !!ready; + return !!(ready & RNG_READY); } static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) @@ -179,6 +179,7 @@ static const struct dev_pm_ops mtk_rng_pm_ops = { #endif /* CONFIG_PM */ static const struct of_device_id mtk_rng_match[] = { + { .compatible = "mediatek,mt7986-rng" }, { .compatible = "mediatek,mt7623-rng" }, {}, }; diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c index 1ec5f267a6566d10ec536cecd367e671ae5a78c0..9903d0357e06ea425e915b0b66a7e2820dd3a1f2 100644 --- a/drivers/char/hw_random/npcm-rng.c +++ b/drivers/char/hw_random/npcm-rng.c @@ -13,11 +13,13 @@ #include #include #include +#include #define NPCM_RNGCS_REG 0x00 /* Control and status register */ #define NPCM_RNGD_REG 0x04 /* Data register */ #define NPCM_RNGMODE_REG 0x08 /* Mode register */ +#define NPCM_RNG_CLK_SET_62_5MHZ BIT(2) /* 60-80 MHz */ #define NPCM_RNG_CLK_SET_25MHZ GENMASK(4, 3) /* 20-25 MHz */ #define 
NPCM_RNG_DATA_VALID BIT(1) #define NPCM_RNG_ENABLE BIT(0) @@ -31,14 +33,14 @@ struct npcm_rng { void __iomem *base; struct hwrng rng; + u32 clkp; }; static int npcm_rng_init(struct hwrng *rng) { struct npcm_rng *priv = to_npcm_rng(rng); - writel(NPCM_RNG_CLK_SET_25MHZ | NPCM_RNG_ENABLE, - priv->base + NPCM_RNGCS_REG); + writel(priv->clkp | NPCM_RNG_ENABLE, priv->base + NPCM_RNGCS_REG); return 0; } @@ -47,7 +49,7 @@ static void npcm_rng_cleanup(struct hwrng *rng) { struct npcm_rng *priv = to_npcm_rng(rng); - writel(NPCM_RNG_CLK_SET_25MHZ, priv->base + NPCM_RNGCS_REG); + writel(priv->clkp, priv->base + NPCM_RNGCS_REG); } static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) @@ -109,7 +111,7 @@ static int npcm_rng_probe(struct platform_device *pdev) priv->rng.name = pdev->name; priv->rng.read = npcm_rng_read; priv->rng.priv = (unsigned long)&pdev->dev; - priv->rng.quality = 1000; + priv->clkp = (u32)(uintptr_t)of_device_get_match_data(&pdev->dev); writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG); @@ -162,7 +164,10 @@ static const struct dev_pm_ops npcm_rng_pm_ops = { }; static const struct of_device_id rng_dt_id[] __maybe_unused = { - { .compatible = "nuvoton,npcm750-rng", }, + { .compatible = "nuvoton,npcm750-rng", + .data = (void *)NPCM_RNG_CLK_SET_25MHZ }, + { .compatible = "nuvoton,npcm845-rng", + .data = (void *)NPCM_RNG_CLK_SET_62_5MHZ }, {}, }; MODULE_DEVICE_TABLE(of, rng_dt_id); diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c index 429e956f34e1c2b225e0e781a4434ab8ceb8d888..47b88de029f2deec8c84be3d6c5af3aa413f8dee 100644 --- a/drivers/char/hw_random/powernv-rng.c +++ b/drivers/char/hw_random/powernv-rng.c @@ -11,6 +11,7 @@ #include #include #include +#include static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c index 795853dfc46b70a0f38b4a803780971b8900fb6a..d27e32e9bfee11d528248d0f1360c5154a369dca 100644 --- a/drivers/char/hw_random/s390-trng.c +++ b/drivers/char/hw_random/s390-trng.c @@ -23,6 +23,7 @@ #include #include #include +#include MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("IBM Corporation"); @@ -191,7 +192,6 @@ static struct hwrng trng_hwrng_dev = { .name = "s390-trng", .data_read = trng_hwrng_data_read, .read = trng_hwrng_read, - .quality = 1024, }; diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c index bc22178f83e83ac80cd50677a63da2ef07995136..a6731cf0627ac06c7202ce1d2bca604cfae1ca07 100644 --- a/drivers/char/hw_random/stm32-rng.c +++ b/drivers/char/hw_random/stm32-rng.c @@ -44,16 +44,18 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) pm_runtime_get_sync((struct device *) priv->rng.priv); - while (max > sizeof(u32)) { + while (max >= sizeof(u32)) { sr = readl_relaxed(priv->base + RNG_SR); /* Manage timeout which is based on timer and take */ /* care of initial delay time when enabling rng */ if (!sr && wait) { - retval = readl_relaxed_poll_timeout_atomic(priv->base + int err; + + err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, sr, sr, 10, 50000); - if (retval) + if (err) dev_err((struct device *)priv->rng.priv, "%s: timeout %x!\n", __func__, sr); } diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c index 8ea1fc831eb7bac773848fc39ea9f63742217ba3..26f322d19a883f9672473c2c86aa733432ca8113 100644 --- a/drivers/char/hw_random/timeriomem-rng.c +++ 
b/drivers/char/hw_random/timeriomem-rng.c @@ -145,8 +145,6 @@ static int timeriomem_rng_probe(struct platform_device *pdev) if (!of_property_read_u32(pdev->dev.of_node, "quality", &i)) priv->rng_ops.quality = i; - else - priv->rng_ops.quality = 0; } else { period = pdata->period; priv->rng_ops.quality = pdata->quality; diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index a6f3a8a2aca6d1c9091a21da56d3efea38ba74e9..f7690e0f92ede2f1ed29afc7de5943443c4b0988 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -148,7 +148,6 @@ static int probe_common(struct virtio_device *vdev) .cleanup = virtio_cleanup, .priv = (unsigned long)vi, .name = vi->name, - .quality = 1000, }; vdev->priv = vi; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 5611d127363e470ec8185b42dd025196b4e076b8..83bf2a4dcb57ed848c9b6b61f56b57a95925f94a 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -746,7 +746,7 @@ static const struct file_operations memory_fops = { .llseek = noop_llseek, }; -static char *mem_devnode(struct device *dev, umode_t *mode) +static char *mem_devnode(const struct device *dev, umode_t *mode) { if (mode && devlist[MINOR(dev->devt)].mode) *mode = devlist[MINOR(dev->devt)].mode; diff --git a/drivers/char/misc.c b/drivers/char/misc.c index cba19bfdc44ddb5c082e681d7526dce723b43ac6..7a1388b0572b9941e0be4c56c3170e1ca527c71c 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -61,7 +61,29 @@ static DEFINE_MUTEX(misc_mtx); * Assigned numbers, used for dynamic minors */ #define DYNAMIC_MINORS 128 /* like dynamic majors */ -static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS); +static DEFINE_IDA(misc_minors_ida); + +static int misc_minor_alloc(void) +{ + int ret; + + ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL); + if (ret >= 0) { + ret = DYNAMIC_MINORS - ret - 1; + } else { + ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1, + MINORMASK, GFP_KERNEL); + } + return ret; +} + +static void misc_minor_free(int minor) +{ + if (minor < DYNAMIC_MINORS) + ida_free(&misc_minors_ida, DYNAMIC_MINORS - minor - 1); + else if (minor > MISC_DYNAMIC_MINOR) + ida_free(&misc_minors_ida, minor); +} #ifdef CONFIG_PROC_FS static void *misc_seq_start(struct seq_file *seq, loff_t *pos) @@ -183,14 +205,13 @@ int misc_register(struct miscdevice *misc) mutex_lock(&misc_mtx); if (is_dynamic) { - int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS); + int i = misc_minor_alloc(); - if (i >= DYNAMIC_MINORS) { + if (i < 0) { err = -EBUSY; goto out; } - misc->minor = DYNAMIC_MINORS - i - 1; - set_bit(i, misc_minors); + misc->minor = i; } else { struct miscdevice *c; @@ -209,10 +230,7 @@ int misc_register(struct miscdevice *misc) misc, misc->groups, "%s", misc->name); if (IS_ERR(misc->this_device)) { if (is_dynamic) { - int i = DYNAMIC_MINORS - misc->minor - 1; - - if (i < DYNAMIC_MINORS && i >= 0) - clear_bit(i, misc_minors); + misc_minor_free(misc->minor); misc->minor = MISC_DYNAMIC_MINOR; } err = PTR_ERR(misc->this_device); @@ -240,23 +258,20 @@ EXPORT_SYMBOL(misc_register); void misc_deregister(struct miscdevice *misc) { - int i = DYNAMIC_MINORS - misc->minor - 1; - if (WARN_ON(list_empty(&misc->list))) return; mutex_lock(&misc_mtx); list_del(&misc->list); device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor)); - if (i < DYNAMIC_MINORS && i >= 0) - clear_bit(i, misc_minors); + misc_minor_free(misc->minor); mutex_unlock(&misc_mtx); } EXPORT_SYMBOL(misc_deregister); -static char 
*misc_devnode(struct device *dev, umode_t *mode) +static char *misc_devnode(const struct device *dev, umode_t *mode) { - struct miscdevice *c = dev_get_drvdata(dev); + const struct miscdevice *c = dev_get_drvdata(dev); if (mode && c->mode) *mode = c->mode; diff --git a/drivers/char/random.c b/drivers/char/random.c index 5885ed574c6aae34cad8d428909d82b803fe15c8..ce3ccd172cc868d9f89ea2c5489a600ea08e2a0d 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 9fa3c76a267f5047bbe0d9f4fd68c23242118f03..6a821118d55300413484ca518e43bc840538ed46 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -48,22 +49,11 @@ struct ports_driver_data { /* List of all the devices we're handling */ struct list_head portdevs; - /* - * This is used to keep track of the number of hvc consoles - * spawned by this driver. This number is given as the first - * argument to hvc_alloc(). To correctly map an initial - * console spawned via hvc_instantiate to the console being - * hooked up via hvc_alloc, we need to pass the same vtermno. - * - * We also just assume the first console being initialised was - * the first one that got used as the initial console. - */ - unsigned int next_vtermno; - /* All the console devices handled by this driver */ struct list_head consoles; }; -static struct ports_driver_data pdrvdata = { .next_vtermno = 1}; + +static struct ports_driver_data pdrvdata; static DEFINE_SPINLOCK(pdrvdata_lock); static DECLARE_COMPLETION(early_console_added); @@ -89,6 +79,8 @@ struct console { u32 vtermno; }; +static DEFINE_IDA(vtermno_ida); + struct port_buffer { char *buf; @@ -1244,18 +1236,21 @@ static int init_port_console(struct port *port) * pointers. The final argument is the output buffer size: we * can do any size, so we put PAGE_SIZE here. 
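 * (With this change the vtermno comes from vtermno_ida via
 * ida_alloc_min(&vtermno_ida, 1, GFP_KERNEL), so numbering still
 * starts at 1 as it did with the old next_vtermno counter, and a
 * port's number is recycled through ida_free() when the port is
 * unplugged or when hvc_alloc() fails.)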
*/ - port->cons.vtermno = pdrvdata.next_vtermno; + ret = ida_alloc_min(&vtermno_ida, 1, GFP_KERNEL); + if (ret < 0) + return ret; + port->cons.vtermno = ret; port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE); if (IS_ERR(port->cons.hvc)) { ret = PTR_ERR(port->cons.hvc); dev_err(port->dev, "error %d allocating hvc for port\n", ret); port->cons.hvc = NULL; + ida_free(&vtermno_ida, port->cons.vtermno); return ret; } spin_lock_irq(&pdrvdata_lock); - pdrvdata.next_vtermno++; list_add_tail(&port->cons.list, &pdrvdata.consoles); spin_unlock_irq(&pdrvdata_lock); port->guest_connected = true; @@ -1532,6 +1527,7 @@ static void unplug_port(struct port *port) list_del(&port->cons.list); spin_unlock_irq(&pdrvdata_lock); hvc_remove(port->cons.hvc); + ida_free(&vtermno_ida, port->cons.vtermno); } remove_port_data(port); diff --git a/drivers/char/xillybus/xillybus_class.c b/drivers/char/xillybus/xillybus_class.c index 0f238648dcfe2ddc7147cf91d7afc49c743e742c..e9a288e61c15661e76ddf1b8b95dbaa5e47a62d3 100644 --- a/drivers/char/xillybus/xillybus_class.c +++ b/drivers/char/xillybus/xillybus_class.c @@ -227,14 +227,15 @@ int xillybus_find_inode(struct inode *inode, break; } - mutex_unlock(&unit_mutex); - - if (!unit) + if (!unit) { + mutex_unlock(&unit_mutex); return -ENODEV; + } *private_data = unit->private_data; *index = minor - unit->lowest_minor; + mutex_unlock(&unit_mutex); return 0; } EXPORT_SYMBOL(xillybus_find_inode); diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c index 39bcbfd908b462a9271cff2d200a87e8e1ee3f9a..5a5afa14ca8cb8267e27a26d226fb7d1c3515c11 100644 --- a/drivers/char/xillybus/xillyusb.c +++ b/drivers/char/xillybus/xillyusb.c @@ -184,6 +184,14 @@ struct xillyusb_dev { struct mutex process_in_mutex; /* synchronize wakeup_all() */ }; +/* + * kref_mutex is used in xillyusb_open() to prevent the xillyusb_dev + * struct from being freed during the gap between being found by + * xillybus_find_inode() and having its reference count incremented. 
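+ * Concretely: without this lock, xillyusb_disconnect() could drop
+ * the last reference (kref_put() -> cleanup_dev()) in the window
+ * after xillybus_find_inode() has returned the struct but before
+ * xillyusb_open() has called kref_get(), leaving open() with a
+ * dangling pointer.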
+ */ + +static DEFINE_MUTEX(kref_mutex); + /* FPGA to host opcodes */ enum { OPCODE_DATA = 0, @@ -1237,9 +1245,16 @@ static int xillyusb_open(struct inode *inode, struct file *filp) int rc; int index; + mutex_lock(&kref_mutex); + rc = xillybus_find_inode(inode, (void **)&xdev, &index); - if (rc) + if (rc) { + mutex_unlock(&kref_mutex); return rc; + } + + kref_get(&xdev->kref); + mutex_unlock(&kref_mutex); chan = &xdev->channels[index]; filp->private_data = chan; @@ -1275,8 +1290,6 @@ static int xillyusb_open(struct inode *inode, struct file *filp) ((filp->f_mode & FMODE_WRITE) && chan->open_for_write)) goto unmutex_fail; - kref_get(&xdev->kref); - if (filp->f_mode & FMODE_READ) chan->open_for_read = 1; @@ -1413,6 +1426,7 @@ unopen: return rc; unmutex_fail: + kref_put(&xdev->kref, cleanup_dev); mutex_unlock(&chan->lock); return rc; } @@ -2227,7 +2241,9 @@ static void xillyusb_disconnect(struct usb_interface *interface) xdev->dev = NULL; + mutex_lock(&kref_mutex); kref_put(&xdev->kref, cleanup_dev); + mutex_unlock(&kref_mutex); } static struct usb_driver xillyusb_driver = { diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c index d6b80b6dfc28796a1a710c93f97689defef65b4d..8439755559b2195d82196b0c98b32d32320c89a3 100644 --- a/drivers/counter/stm32-lptimer-cnt.c +++ b/drivers/counter/stm32-lptimer-cnt.c @@ -69,7 +69,7 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv, /* ensure CMP & ARR registers are properly written */ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, - (val & STM32_LPTIM_CMPOK_ARROK), + (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK, 100, 1000); if (ret) return ret; diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index 0b5461b3d7dd4236b59d78ec801cf91da2b3e6e0..9ebedd972df0bb8ebd139fde57597f47e6055da6 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -76,6 +76,7 @@ static int snooze_loop(struct cpuidle_device *dev, local_irq_enable(); snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index); + dev->poll_time_limit = false; ppc64_runlatch_off(); HMT_very_low(); while (!need_resched()) { @@ -86,6 +87,7 @@ static int snooze_loop(struct cpuidle_device *dev, * cleared to order subsequent test of need_resched(). */ clear_thread_flag(TIF_POLLING_NRFLAG); + dev->poll_time_limit = true; smp_mb(); break; } @@ -155,7 +157,8 @@ static struct cpuidle_state powernv_states[CPUIDLE_STATE_MAX] = { .desc = "snooze", .exit_latency = 0, .target_residency = 0, - .enter = snooze_loop }, + .enter = snooze_loop, + .flags = CPUIDLE_FLAG_POLLING }, }; static int powernv_cpuidle_cpu_online(unsigned int cpu) diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c index 7e7ab5597d7acb245681b6e694c0e38640bbdc8a..1bad4d2b7be33da3152f825e344febb0457eef74 100644 --- a/drivers/cpuidle/cpuidle-pseries.c +++ b/drivers/cpuidle/cpuidle-pseries.c @@ -44,6 +44,7 @@ static int snooze_loop(struct cpuidle_device *dev, pseries_idle_prolog(); local_irq_enable(); snooze_exit_time = get_tb() + snooze_timeout; + dev->poll_time_limit = false; while (!need_resched()) { HMT_low(); @@ -54,6 +55,7 @@ static int snooze_loop(struct cpuidle_device *dev, * loop anyway. Require a barrier after polling is * cleared to order subsequent test of need_resched(). 
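 * (dev->poll_time_limit is set below only on this timed-out exit,
 * so the cpuidle core can tell a poll that ran out of time apart
 * from one that stopped because need_resched() became true; the new
 * CPUIDLE_FLAG_POLLING on the snooze states goes hand in hand with
 * this.)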
*/ + dev->poll_time_limit = true; clear_thread_flag(TIF_POLLING_NRFLAG); smp_mb(); break; @@ -268,7 +270,8 @@ static struct cpuidle_state dedicated_states[NR_DEDICATED_STATES] = { .desc = "snooze", .exit_latency = 0, .target_residency = 0, - .enter = &snooze_loop }, + .enter = &snooze_loop, + .flags = CPUIDLE_FLAG_POLLING }, { /* CEDE */ .name = "CEDE", .desc = "CEDE", @@ -286,7 +289,8 @@ static struct cpuidle_state shared_states[] = { .desc = "snooze", .exit_latency = 0, .target_residency = 0, - .enter = &snooze_loop }, + .enter = &snooze_loop, + .flags = CPUIDLE_FLAG_POLLING }, { /* Shared Cede */ .name = "Shared Cede", .desc = "Shared Cede", diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 55e75fbb658ee1b5fd46a10fef0dde1e6dd3125d..dfb103f81a64b35f4f9e4a8034ab75169dcc18d6 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -669,7 +669,12 @@ config CRYPTO_DEV_IMGTEC_HASH config CRYPTO_DEV_ROCKCHIP tristate "Rockchip's Cryptographic Engine driver" depends on OF && ARCH_ROCKCHIP + depends on PM + select CRYPTO_ECB + select CRYPTO_CBC + select CRYPTO_DES select CRYPTO_AES + select CRYPTO_ENGINE select CRYPTO_LIB_DES select CRYPTO_MD5 select CRYPTO_SHA1 @@ -681,6 +686,16 @@ config CRYPTO_DEV_ROCKCHIP This driver interfaces with the hardware crypto accelerator. Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode. +config CRYPTO_DEV_ROCKCHIP_DEBUG + bool "Enable Rockchip crypto stats" + depends on CRYPTO_DEV_ROCKCHIP + depends on DEBUG_FS + help + Say y to enable Rockchip crypto debug stats. + This will create /sys/kernel/debug/rk3288_crypto/stats for displaying + the number of requests per algorithm and other internal stats. + + config CRYPTO_DEV_ZYNQMP_AES tristate "Support for Xilinx ZynqMP AES hw accelerator" depends on ZYNQMP_FIRMWARE || COMPILE_TEST @@ -785,8 +800,8 @@ config CRYPTO_DEV_CCREE select CRYPTO_ECB select CRYPTO_CTR select CRYPTO_XTS - select CRYPTO_SM4 - select CRYPTO_SM3 + select CRYPTO_SM4_GENERIC + select CRYPTO_SM3_GENERIC help Say 'Y' to enable a driver for the REE interface of the Arm TrustZone CryptoCell family of processors. 
Currently the diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 116de173a66c595cfb536af3c83a3f8b492146da..fa8bf1be1a8cded5ed69fd66630f9af25abf1fe6 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -41,7 +41,7 @@ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o obj-$(CONFIG_CRYPTO_DEV_SA2UL) += sa2ul.o obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o obj-$(CONFIG_CRYPTO_DEV_SL3516) += gemini/ -obj-$(CONFIG_ARCH_STM32) += stm32/ +obj-y += stm32/ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/ diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c index c4b0a8b588429b7f62fdfb37d2710c75ace7d9e6..e2b9b9104694172fa264e275eae21de8b69b0ee3 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c @@ -108,7 +108,6 @@ int sun8i_ce_hwrng_register(struct sun8i_ce_dev *ce) } ce->trng.name = "sun8i Crypto Engine TRNG"; ce->trng.read = sun8i_ce_trng_read; - ce->trng.quality = 1000; ret = hwrng_register(&ce->trng); if (ret) diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 910d6751644cf673ea218cb346d2cf6e7ba6ac8a..902f6be057ec6cd7db03b433d2301dae542aa2ab 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -124,7 +124,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq) unsigned int ivsize = crypto_skcipher_ivsize(tfm); struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; int i = 0; - u32 a; + dma_addr_t a; int err; rctx->ivlen = ivsize; diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c index 6e7ae896717cd2a21600f4282fc27145c3e18da3..937187027ad574acd8eeaf62f34680e587b60e02 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -237,7 +237,6 @@ static int meson_crypto_probe(struct platform_device *pdev) return err; } - mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL); for (i = 0; i < MAXFLOW; i++) { mc->irqs[i] = platform_get_irq(pdev, i); if (mc->irqs[i] < 0) diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h index dc0f142324a3c042c5076fc438c900b1221dbac8..8c0746a1d6d43c30e34bb2718e6eb0e938247d86 100644 --- a/drivers/crypto/amlogic/amlogic-gxl.h +++ b/drivers/crypto/amlogic/amlogic-gxl.h @@ -95,7 +95,7 @@ struct meson_dev { struct device *dev; struct meson_flow *chanlist; atomic_t flow; - int *irqs; + int irqs[MAXFLOW]; #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG struct dentry *dbgfs_dir; #endif diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c index 82bf15d4956144710d95124602d0a574d396b9f3..53100fb9b07bdb9fe360916f93d76c46c46cb4cc 100644 --- a/drivers/crypto/atmel-ecc.c +++ b/drivers/crypto/atmel-ecc.c @@ -311,9 +311,9 @@ static struct kpp_alg atmel_ecdh_nist_p256 = { }, }; -static int atmel_ecc_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int atmel_ecc_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct atmel_i2c_client_priv *i2c_priv; int ret; @@ -390,7 +390,7 @@ static struct i2c_driver atmel_ecc_driver = { .name = "atmel-ecc", .of_match_table = of_match_ptr(atmel_ecc_dt_ids), }, - .probe = atmel_ecc_probe, + .probe_new = atmel_ecc_probe, .remove = 
atmel_ecc_remove, .id_table = atmel_ecc_id, }; diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c index a84b657598c6e854053c83141634484af0e1220a..272a06f0b5886f8bb19784d0ef11c5a7a86b6252 100644 --- a/drivers/crypto/atmel-sha204a.c +++ b/drivers/crypto/atmel-sha204a.c @@ -91,9 +91,9 @@ static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max, return max; } -static int atmel_sha204a_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int atmel_sha204a_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct atmel_i2c_client_priv *i2c_priv; int ret; @@ -107,7 +107,6 @@ static int atmel_sha204a_probe(struct i2c_client *client, i2c_priv->hwrng.name = dev_name(&client->dev); i2c_priv->hwrng.read = atmel_sha204a_rng_read; - i2c_priv->hwrng.quality = 1024; ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng); if (ret) @@ -143,7 +142,7 @@ static const struct i2c_device_id atmel_sha204a_id[] = { MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id); static struct i2c_driver atmel_sha204a_driver = { - .probe = atmel_sha204a_probe, + .probe_new = atmel_sha204a_probe, .remove = atmel_sha204a_remove, .id_table = atmel_sha204a_id, diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c index 6345c7269eb030393dc45459786d21bb948e176f..1f65df48984783a992d8e6d77dcdd7e6d803ff0d 100644 --- a/drivers/crypto/caam/blob_gen.c +++ b/drivers/crypto/caam/blob_gen.c @@ -6,6 +6,7 @@ #define pr_fmt(fmt) "caam blob_gen: " fmt +#include #include #include @@ -61,12 +62,14 @@ static void caam_blob_job_done(struct device *dev, u32 *desc, u32 err, void *con int caam_process_blob(struct caam_blob_priv *priv, struct caam_blob_info *info, bool encap) { + const struct caam_drv_private *ctrlpriv; struct caam_blob_job_result testres; struct device *jrdev = &priv->jrdev; dma_addr_t dma_in, dma_out; int op = OP_PCLID_BLOB; size_t output_len; u32 *desc; + u32 moo; int ret; if (info->key_mod_len > CAAM_BLOB_KEYMOD_LENGTH) @@ -100,6 +103,12 @@ int caam_process_blob(struct caam_blob_priv *priv, goto out_unmap_in; } + ctrlpriv = dev_get_drvdata(jrdev->parent); + moo = FIELD_GET(CSTA_MOO, ioread32(&ctrlpriv->ctrl->perfmon.status)); + if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED) + dev_warn(jrdev, + "using insecure test key, enable HAB to use unique device key!\n"); + /* * A data blob is encrypted using a blob key (BK); a random number. * The BK is used as an AES-CCM key. 
The initial block (B0) and the diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index d3d8bb0a6990022f874820c6bbbe73264df277a5..ecc15bc521db1c63702cae077f183e6b9bdbc970 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -131,7 +131,7 @@ struct caam_aead_req_ctx { static int aead_null_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); u32 *desc; @@ -184,7 +184,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), struct caam_aead_alg, aead); unsigned int ivsize = crypto_aead_ivsize(aead); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); u32 ctx1_iv_off = 0; @@ -312,7 +312,7 @@ skip_givenc: static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); ctx->authsize = authsize; aead_set_sh_desc(authenc); @@ -322,7 +322,7 @@ static int aead_setauthsize(struct crypto_aead *authenc, static int gcm_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; unsigned int ivsize = crypto_aead_ivsize(aead); u32 *desc; @@ -372,7 +372,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); int err; err = crypto_gcm_check_authsize(authsize); @@ -387,7 +387,7 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) static int rfc4106_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; unsigned int ivsize = crypto_aead_ivsize(aead); u32 *desc; @@ -440,7 +440,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) static int rfc4106_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); int err; err = crypto_rfc4106_check_authsize(authsize); @@ -455,7 +455,7 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc, static int rfc4543_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; unsigned int ivsize = crypto_aead_ivsize(aead); u32 *desc; @@ -508,7 +508,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) static int rfc4543_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); if (authsize != 16) return -EINVAL; @@ -521,7 +521,7 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc, static int chachapoly_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; unsigned int 
ivsize = crypto_aead_ivsize(aead); u32 *desc; @@ -547,7 +547,7 @@ static int chachapoly_set_sh_desc(struct crypto_aead *aead) static int chachapoly_setauthsize(struct crypto_aead *aead, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); if (authsize != POLY1305_DIGEST_SIZE) return -EINVAL; @@ -559,7 +559,7 @@ static int chachapoly_setauthsize(struct crypto_aead *aead, static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; @@ -575,7 +575,7 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, static int aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); struct crypto_authenc_keys keys; @@ -656,7 +656,7 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, static int gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; int err; @@ -677,7 +677,7 @@ static int gcm_setkey(struct crypto_aead *aead, static int rfc4106_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; int err; @@ -703,7 +703,7 @@ static int rfc4106_setkey(struct crypto_aead *aead, static int rfc4543_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; int err; @@ -729,7 +729,7 @@ static int rfc4543_setkey(struct crypto_aead *aead, static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen, const u32 ctx1_iv_off) { - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct caam_skcipher_alg *alg = container_of(crypto_skcipher_alg(skcipher), typeof(*alg), skcipher); @@ -832,7 +832,7 @@ static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *jrdev = ctx->jrdev; struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); u32 *desc; @@ -1057,7 +1057,7 @@ static void init_aead_job(struct aead_request *req, bool all_contig, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); int authsize = ctx->authsize; u32 *desc = edesc->hw_desc; u32 out_options, in_options; @@ -1118,7 +1118,7 @@ static void init_gcm_job(struct aead_request *req, bool all_contig, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = 
crypto_aead_ivsize(aead); u32 *desc = edesc->hw_desc; bool generic_gcm = (ivsize == GCM_AES_IV_SIZE); @@ -1185,7 +1185,7 @@ static void init_authenc_job(struct aead_request *req, struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), struct caam_aead_alg, aead); unsigned int ivsize = crypto_aead_ivsize(aead); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CTR_MOD128); @@ -1234,7 +1234,7 @@ static void init_skcipher_job(struct skcipher_request *req, const bool encrypt) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *jrdev = ctx->jrdev; int ivsize = crypto_skcipher_ivsize(skcipher); u32 *desc = edesc->hw_desc; @@ -1290,7 +1290,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; struct caam_aead_req_ctx *rctx = aead_request_ctx(req); gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? @@ -1457,7 +1457,7 @@ static inline int chachapoly_crypt(struct aead_request *req, bool encrypt) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; bool all_contig; u32 *desc; @@ -1491,7 +1491,7 @@ static inline int aead_crypt(struct aead_request *req, bool encrypt) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; bool all_contig; @@ -1524,7 +1524,7 @@ static int aead_decrypt(struct aead_request *req) static int aead_do_one_req(struct crypto_engine *engine, void *areq) { struct aead_request *req = aead_request_cast(areq); - struct caam_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct caam_ctx *ctx = crypto_aead_ctx_dma(crypto_aead_reqtfm(req)); struct caam_aead_req_ctx *rctx = aead_request_ctx(req); u32 *desc = rctx->edesc->hw_desc; int ret; @@ -1550,7 +1550,7 @@ static inline int gcm_crypt(struct aead_request *req, bool encrypt) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; bool all_contig; @@ -1597,7 +1597,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, int desc_bytes) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); struct device *jrdev = ctx->jrdev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
@@ -1756,7 +1756,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, static int skcipher_do_one_req(struct crypto_engine *engine, void *areq) { struct skcipher_request *req = skcipher_request_cast(areq); - struct caam_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(crypto_skcipher_reqtfm(req)); struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); u32 *desc = rctx->edesc->hw_desc; int ret; @@ -1790,7 +1790,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) { struct skcipher_edesc *edesc; struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *jrdev = ctx->jrdev; struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); @@ -3397,7 +3397,7 @@ static int caam_cra_init(struct crypto_skcipher *tfm) struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct caam_skcipher_alg *caam_alg = container_of(alg, typeof(*caam_alg), skcipher); - struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; int ret = 0; @@ -3434,7 +3434,7 @@ static int caam_aead_init(struct crypto_aead *tfm) struct aead_alg *alg = crypto_aead_alg(tfm); struct caam_aead_alg *caam_alg = container_of(alg, struct caam_aead_alg, aead); - struct caam_ctx *ctx = crypto_aead_ctx(tfm); + struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm); crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx)); @@ -3454,7 +3454,7 @@ static void caam_exit_common(struct caam_ctx *ctx) static void caam_cra_exit(struct crypto_skcipher *tfm) { - struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); if (ctx->fallback) crypto_free_skcipher(ctx->fallback); @@ -3463,7 +3463,7 @@ static void caam_cra_exit(struct crypto_skcipher *tfm) static void caam_aead_exit(struct crypto_aead *tfm) { - caam_exit_common(crypto_aead_ctx(tfm)); + caam_exit_common(crypto_aead_ctx_dma(tfm)); } void caam_algapi_exit(void) @@ -3491,7 +3491,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; - alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY); @@ -3505,7 +3505,7 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; - alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY; diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 189a7438b29c46166cc80985d04a4dc9efb830d9..c37b67be0492d734e7a27bb4376038aff82ffded 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -81,7 +81,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) { struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), typeof(*alg), aead); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = 
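Note on the pattern above: every caamalg.c hunk makes the same two-part change, repeated in the QI and QI2 variants that follow. Context reads switch from crypto_aead_ctx()/crypto_skcipher_ctx() to the _dma accessors, and cra_ctxsize grows by crypto_dma_padding() so those accessors have slack from which to return a DMA-aligned pointer. A minimal sketch of the pairing, using hypothetical mydrv_* names (not from this patch) and assuming the 6.2 helpers from <crypto/internal/aead.h> and <linux/crypto.h>:

  #include <crypto/internal/aead.h>
  #include <linux/string.h>

  struct mydrv_ctx {
          dma_addr_t key_dma;
          u8 key[64];                /* the device DMAs from this buffer */
  };

  static int mydrv_setkey(struct crypto_aead *aead, const u8 *key,
                          unsigned int keylen)
  {
          /* _dma accessor: the returned pointer is aligned for device access */
          struct mydrv_ctx *ctx = crypto_aead_ctx_dma(aead);

          if (keylen > sizeof(ctx->key))
                  return -EINVAL;
          memcpy(ctx->key, key, keylen);
          return 0;
  }

  static void mydrv_alg_init(struct aead_alg *alg)
  {
          /* reserve the worst-case padding the _dma accessor may skip over */
          alg->base.cra_ctxsize = sizeof(struct mydrv_ctx) +
                                  crypto_dma_padding();
  }

The two halves must move together: padding cra_ctxsize without switching the accessor wastes the slack, while switching the accessor without the padding lets the aligned pointer run past the end of the allocation.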
crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); u32 ctx1_iv_off = 0; u32 *nonce = NULL; @@ -184,7 +184,7 @@ skip_givenc: static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); ctx->authsize = authsize; aead_set_sh_desc(authenc); @@ -195,7 +195,7 @@ static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) static int aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); struct crypto_authenc_keys keys; @@ -299,7 +299,7 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, static int gcm_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - ctx->cdata.keylen; @@ -342,7 +342,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); int err; err = crypto_gcm_check_authsize(authsize); @@ -358,7 +358,7 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) static int gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; int ret; @@ -402,7 +402,7 @@ static int gcm_setkey(struct crypto_aead *aead, static int rfc4106_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - ctx->cdata.keylen; @@ -446,7 +446,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) static int rfc4106_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); int err; err = crypto_rfc4106_check_authsize(authsize); @@ -462,7 +462,7 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc, static int rfc4106_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; int ret; @@ -510,7 +510,7 @@ static int rfc4106_setkey(struct crypto_aead *aead, static int rfc4543_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - ctx->cdata.keylen; @@ -554,7 +554,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) static int rfc4543_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); if (authsize != 16) return -EINVAL; @@ -568,7 +568,7 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc, static 
int rfc4543_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; int ret; @@ -617,7 +617,7 @@ static int rfc4543_setkey(struct crypto_aead *aead, static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen, const u32 ctx1_iv_off) { - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct caam_skcipher_alg *alg = container_of(crypto_skcipher_alg(skcipher), typeof(*alg), skcipher); @@ -731,7 +731,7 @@ static int des_skcipher_setkey(struct crypto_skcipher *skcipher, static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *jrdev = ctx->jrdev; struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); int ret = 0; @@ -915,7 +915,7 @@ static void aead_done(struct caam_drv_req *drv_req, u32 status) struct aead_edesc *edesc; struct aead_request *aead_req = drv_req->app_ctx; struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); - struct caam_ctx *caam_ctx = crypto_aead_ctx(aead); + struct caam_ctx *caam_ctx = crypto_aead_ctx_dma(aead); int ecode = 0; qidev = caam_ctx->qidev; @@ -937,7 +937,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), typeof(*alg), aead); struct device *qidev = ctx->qidev; @@ -1157,7 +1157,7 @@ static inline int aead_crypt(struct aead_request *req, bool encrypt) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); int ret; if (unlikely(caam_congested)) @@ -1207,7 +1207,7 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status) struct skcipher_edesc *edesc; struct skcipher_request *req = drv_req->app_ctx; struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *caam_ctx = crypto_skcipher_ctx_dma(skcipher); struct device *qidev = caam_ctx->qidev; int ivsize = crypto_skcipher_ivsize(skcipher); int ecode = 0; @@ -1245,7 +1245,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *qidev = ctx->qidev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; @@ -1405,7 +1405,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) { struct skcipher_edesc *edesc; struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); int ret; @@ -2491,7 +2491,7 @@ static int caam_cra_init(struct crypto_skcipher *tfm) struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct caam_skcipher_alg *caam_alg = container_of(alg, typeof(*caam_alg), skcipher); - struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; int ret = 0; @@ -2524,7 +2524,7 @@ static int caam_aead_init(struct crypto_aead *tfm) struct aead_alg *alg = crypto_aead_alg(tfm); struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg), aead); - struct caam_ctx *ctx = crypto_aead_ctx(tfm); + struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm); return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp); } @@ -2542,7 +2542,7 @@ static void caam_exit_common(struct caam_ctx *ctx) static void caam_cra_exit(struct crypto_skcipher *tfm) { - struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); if (ctx->fallback) crypto_free_skcipher(ctx->fallback); @@ -2551,7 +2551,7 @@ static void caam_cra_exit(struct crypto_skcipher *tfm) static void caam_aead_exit(struct crypto_aead *tfm) { - caam_exit_common(crypto_aead_ctx(tfm)); + caam_exit_common(crypto_aead_ctx_dma(tfm)); } void caam_qi_algapi_exit(void) @@ -2579,7 +2579,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; - alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY); @@ -2593,7 +2593,7 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; - alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY; diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 4482cb145d051d3304f58b722abcbc8cce8eac77..1b0dd742c53f98673eab633b911c3217f4bd6d17 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -134,12 +134,12 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq) { switch (crypto_tfm_alg_type(areq->tfm)) { case CRYPTO_ALG_TYPE_SKCIPHER: - return skcipher_request_ctx(skcipher_request_cast(areq)); + return skcipher_request_ctx_dma(skcipher_request_cast(areq)); case CRYPTO_ALG_TYPE_AEAD: - return aead_request_ctx(container_of(areq, struct aead_request, - base)); + return aead_request_ctx_dma( + container_of(areq, struct aead_request, base)); case CRYPTO_ALG_TYPE_AHASH: - return ahash_request_ctx(ahash_request_cast(areq)); + return ahash_request_ctx_dma(ahash_request_cast(areq)); default: return ERR_PTR(-EINVAL); } @@ -171,7 +171,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) { struct caam_aead_alg *alg = 
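The to_caam_req() hunk at the top of the caamalg_qi2.c diff is the request-side mirror of that rule: once the per-request context is declared through a _dma reqsize setter, every read of it, including type-dispatched helpers like to_caam_req(), must go through the matching _dma getter, because the plain and _dma accessors return pointers that differ by the alignment skip. A hedged sketch of the pairing for a skcipher (hypothetical names, same 6.2 helper assumptions):

  #include <crypto/internal/skcipher.h>

  struct mydrv_req_ctx {
          dma_addr_t desc_dma;       /* descriptor handed to the device */
          u32 desc[16];
  };

  static int mydrv_init_tfm(struct crypto_skcipher *tfm)
  {
          /* declare the per-request state with the _dma setter... */
          crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct mydrv_req_ctx));
          return 0;
  }

  static int mydrv_encrypt(struct skcipher_request *req)
  {
          /* ...and always fetch it with the matching _dma getter */
          struct mydrv_req_ctx *rctx = skcipher_request_ctx_dma(req);

          rctx->desc[0] = 0;         /* descriptor would be built here */
          return -EINPROGRESS;
  }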
container_of(crypto_aead_alg(aead), typeof(*alg), aead); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); struct device *dev = ctx->dev; struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); @@ -276,7 +276,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); ctx->authsize = authsize; aead_set_sh_desc(authenc); @@ -287,7 +287,7 @@ static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) static int aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; struct crypto_authenc_keys keys; @@ -350,10 +350,10 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_request *req_ctx = aead_request_ctx(req); + struct caam_request *req_ctx = aead_request_ctx_dma(req); struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), typeof(*alg), aead); struct device *dev = ctx->dev; @@ -587,7 +587,7 @@ skip_out_fle: static int chachapoly_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); struct device *dev = ctx->dev; struct caam_flc *flc; @@ -620,7 +620,7 @@ static int chachapoly_set_sh_desc(struct crypto_aead *aead) static int chachapoly_setauthsize(struct crypto_aead *aead, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); if (authsize != POLY1305_DIGEST_SIZE) return -EINVAL; @@ -632,7 +632,7 @@ static int chachapoly_setauthsize(struct crypto_aead *aead, static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; @@ -647,7 +647,7 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, static int gcm_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_flc *flc; @@ -704,7 +704,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); int err; err = crypto_gcm_check_authsize(authsize); @@ -720,7 +720,7 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) static int gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; int ret; @@ -739,7 +739,7 @@ static 
int gcm_setkey(struct crypto_aead *aead, static int rfc4106_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_flc *flc; @@ -799,7 +799,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) static int rfc4106_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); int err; err = crypto_rfc4106_check_authsize(authsize); @@ -815,7 +815,7 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc, static int rfc4106_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; int ret; @@ -840,7 +840,7 @@ static int rfc4106_setkey(struct crypto_aead *aead, static int rfc4543_set_sh_desc(struct crypto_aead *aead) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_flc *flc; @@ -900,7 +900,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) static int rfc4543_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - struct caam_ctx *ctx = crypto_aead_ctx(authenc); + struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); if (authsize != 16) return -EINVAL; @@ -914,7 +914,7 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc, static int rfc4543_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; int ret; @@ -940,7 +940,7 @@ static int rfc4543_setkey(struct crypto_aead *aead, static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen, const u32 ctx1_iv_off) { - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct caam_skcipher_alg *alg = container_of(crypto_skcipher_alg(skcipher), struct caam_skcipher_alg, skcipher); @@ -1059,7 +1059,7 @@ static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *dev = ctx->dev; struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); struct caam_flc *flc; @@ -1109,10 +1109,10 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_request *req_ctx = skcipher_request_ctx(req); + struct caam_request *req_ctx = skcipher_request_ctx_dma(req); struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *dev = ctx->dev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; @@ -1286,7 +1286,7 @@ static void aead_encrypt_done(void *cbk_ctx, u32 status) struct caam_request *req_ctx = to_caam_req(areq); struct aead_edesc *edesc = req_ctx->edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); int ecode = 0; dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); @@ -1307,7 +1307,7 @@ static void aead_decrypt_done(void *cbk_ctx, u32 status) struct caam_request *req_ctx = to_caam_req(areq); struct aead_edesc *edesc = req_ctx->edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); int ecode = 0; dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); @@ -1324,8 +1324,8 @@ static int aead_encrypt(struct aead_request *req) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); - struct caam_request *caam_req = aead_request_ctx(req); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); + struct caam_request *caam_req = aead_request_ctx_dma(req); int ret; /* allocate extended descriptor */ @@ -1352,8 +1352,8 @@ static int aead_decrypt(struct aead_request *req) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); - struct caam_request *caam_req = aead_request_ctx(req); + struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); + struct caam_request *caam_req = aead_request_ctx_dma(req); int ret; /* allocate extended descriptor */ @@ -1392,7 +1392,7 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status) struct skcipher_request *req = skcipher_request_cast(areq); struct caam_request *req_ctx = to_caam_req(areq); struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct skcipher_edesc *edesc = req_ctx->edesc; int ecode = 0; int ivsize = crypto_skcipher_ivsize(skcipher); @@ -1430,7 +1430,7 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status) struct skcipher_request *req = skcipher_request_cast(areq); struct caam_request *req_ctx = to_caam_req(areq); struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct skcipher_edesc *edesc = req_ctx->edesc; int ecode = 0; int ivsize = crypto_skcipher_ivsize(skcipher); @@ -1474,8 +1474,8 @@ static int skcipher_encrypt(struct skcipher_request *req) { struct skcipher_edesc *edesc; struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); - struct caam_request *caam_req = skcipher_request_ctx(req); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); + struct caam_request *caam_req = skcipher_request_ctx_dma(req); struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); int ret; @@ -1524,8 +1524,8 @@ static int skcipher_decrypt(struct skcipher_request *req) { struct skcipher_edesc *edesc; struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); - struct caam_request *caam_req = skcipher_request_ctx(req); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); + struct caam_request *caam_req = skcipher_request_ctx_dma(req); struct 
dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); int ret; @@ -1603,7 +1603,7 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm) struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct caam_skcipher_alg *caam_alg = container_of(alg, typeof(*caam_alg), skcipher); - struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; int ret = 0; @@ -1621,10 +1621,12 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm) } ctx->fallback = fallback; - crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) + - crypto_skcipher_reqsize(fallback)); + crypto_skcipher_set_reqsize_dma( + tfm, sizeof(struct caam_request) + + crypto_skcipher_reqsize(fallback)); } else { - crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request)); + crypto_skcipher_set_reqsize_dma(tfm, + sizeof(struct caam_request)); } ret = caam_cra_init(ctx, &caam_alg->caam, false); @@ -1640,8 +1642,8 @@ static int caam_cra_init_aead(struct crypto_aead *tfm) struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg), aead); - crypto_aead_set_reqsize(tfm, sizeof(struct caam_request)); - return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam, + crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request)); + return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam, !caam_alg->caam.nodkp); } @@ -1654,7 +1656,7 @@ static void caam_exit_common(struct caam_ctx *ctx) static void caam_cra_exit(struct crypto_skcipher *tfm) { - struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); if (ctx->fallback) crypto_free_skcipher(ctx->fallback); @@ -1663,7 +1665,7 @@ static void caam_cra_exit(struct crypto_skcipher *tfm) static void caam_cra_exit_aead(struct crypto_aead *tfm) { - caam_exit_common(crypto_aead_ctx(tfm)); + caam_exit_common(crypto_aead_ctx_dma(tfm)); } static struct caam_skcipher_alg driver_algs[] = { @@ -3008,7 +3010,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; - alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY); @@ -3022,7 +3024,7 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; - alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY; @@ -3132,7 +3134,7 @@ static inline int ctx_map_to_qm_sg(struct device *dev, static int ahash_set_sh_desc(struct crypto_ahash *ahash) { - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); struct caam_flc *flc; @@ -3305,7 +3307,7 @@ err_flc: static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base); unsigned int digestsize = crypto_ahash_digestsize(ahash); int ret; @@ -3356,7 +3358,7 @@ 
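One subtlety in the caam_cra_init_skcipher() hunk above: when a fallback tfm is stacked, the size passed to crypto_skcipher_set_reqsize_dma() must also cover the fallback's request context, which in this driver's layout trails the parent's own state (an assumption carried over from the visible hunk, not spelled out in the patch itself). A sketch of that arithmetic with hypothetical names:

  #include <crypto/internal/skcipher.h>

  struct mydrv_req_ctx {
          u32 desc[16];
          /* the fallback's request context is laid out after this */
  };

  static void mydrv_reserve_reqsize(struct crypto_skcipher *tfm,
                                    struct crypto_skcipher *fallback)
  {
          /* own state + fallback request context + DMA alignment slack */
          crypto_skcipher_set_reqsize_dma(tfm,
                                          sizeof(struct mydrv_req_ctx) +
                                          crypto_skcipher_reqsize(fallback));
  }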
bad_free_key: static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); if (edesc->src_nents) dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); @@ -3376,7 +3378,7 @@ static inline void ahash_unmap_ctx(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req, u32 flag) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); if (state->ctx_dma) { dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); @@ -3390,9 +3392,9 @@ static void ahash_done(void *cbk_ctx, u32 status) struct crypto_async_request *areq = cbk_ctx; struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct ahash_edesc *edesc = state->caam_req.edesc; - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); int ecode = 0; @@ -3417,9 +3419,9 @@ static void ahash_done_bi(void *cbk_ctx, u32 status) struct crypto_async_request *areq = cbk_ctx; struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct ahash_edesc *edesc = state->caam_req.edesc; - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int ecode = 0; dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); @@ -3455,9 +3457,9 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status) struct crypto_async_request *areq = cbk_ctx; struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct ahash_edesc *edesc = state->caam_req.edesc; - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); int ecode = 0; @@ -3482,9 +3484,9 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status) struct crypto_async_request *areq = cbk_ctx; struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct ahash_edesc *edesc = state->caam_req.edesc; - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int ecode = 0; dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); @@ -3518,8 +3520,8 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status) static int ahash_update_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = 
&req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -3637,8 +3639,8 @@ unmap_ctx: static int ahash_final_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -3708,8 +3710,8 @@ unmap_ctx: static int ahash_finup_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -3802,8 +3804,8 @@ unmap_ctx: static int ahash_digest(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -3897,8 +3899,8 @@ unmap: static int ahash_final_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -3970,8 +3972,8 @@ unmap: static int ahash_update_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -4091,8 +4093,8 @@ unmap_ctx: static int ahash_finup_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -4187,8 +4189,8 @@ unmap: static int ahash_update_first(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct 
caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; @@ -4320,7 +4322,7 @@ static int ahash_finup_first(struct ahash_request *req) static int ahash_init(struct ahash_request *req) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); state->update = ahash_update_first; state->finup = ahash_finup_first; @@ -4337,28 +4339,28 @@ static int ahash_init(struct ahash_request *req) static int ahash_update(struct ahash_request *req) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->update(req); } static int ahash_finup(struct ahash_request *req) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->finup(req); } static int ahash_final(struct ahash_request *req) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->final(req); } static int ahash_export(struct ahash_request *req, void *out) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_export_state *export = out; u8 *buf = state->buf; int len = state->buflen; @@ -4375,7 +4377,7 @@ static int ahash_export(struct ahash_request *req, void *out) static int ahash_import(struct ahash_request *req, const void *in) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); const struct caam_export_state *export = in; memset(state, 0, sizeof(*state)); @@ -4547,7 +4549,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) container_of(halg, struct ahash_alg, halg); struct caam_hash_alg *caam_hash = container_of(alg, struct caam_hash_alg, ahash_alg); - struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, HASH_MSG_LEN + SHA1_DIGEST_SIZE, @@ -4594,8 +4596,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT]; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct caam_hash_state)); + crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state)); /* * For keyed hash algorithms shared descriptors @@ -4606,7 +4607,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) static void caam_hash_cra_exit(struct crypto_tfm *tfm) { - struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc), DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); @@ -4646,7 +4647,7 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev, alg->cra_module = THIS_MODULE; alg->cra_init = caam_hash_cra_init; alg->cra_exit = caam_hash_cra_exit; - alg->cra_ctxsize = sizeof(struct caam_hash_ctx); + alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding(); alg->cra_priority = CAAM_CRA_PRIORITY; alg->cra_blocksize = template->blocksize; alg->cra_alignmask = 0; diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 
36ef738e4a181960b2c5e786e3933f478d50d0cd..1050e965a43857c9bf0f73b2802a050ab0f6adb0 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -199,7 +199,7 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev, static int ahash_set_sh_desc(struct crypto_ahash *ahash) { - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); struct device *jrdev = ctx->jrdev; struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); @@ -255,7 +255,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) static int axcbc_set_sh_desc(struct crypto_ahash *ahash) { - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); struct device *jrdev = ctx->jrdev; u32 *desc; @@ -307,7 +307,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash) static int acmac_set_sh_desc(struct crypto_ahash *ahash) { - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); struct device *jrdev = ctx->jrdev; u32 *desc; @@ -421,7 +421,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct device *jrdev = ctx->jrdev; int blocksize = crypto_tfm_alg_blocksize(&ahash->base); int digestsize = crypto_ahash_digestsize(ahash); @@ -484,7 +484,7 @@ static int ahash_setkey(struct crypto_ahash *ahash, static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct device *jrdev = ctx->jrdev; if (keylen != AES_KEYSIZE_128) @@ -504,7 +504,7 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int err; err = aes_check_keylen(keylen); @@ -543,7 +543,7 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req, int dst_len) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); if (edesc->src_nents) dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); @@ -563,7 +563,7 @@ static inline void ahash_unmap_ctx(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req, int dst_len, u32 flag) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); if (state->ctx_dma) { dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); @@ -580,8 +580,8 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err, struct ahash_edesc *edesc; struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); int digestsize = crypto_ahash_digestsize(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int ecode = 0; 
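For ahash the same split shows up twice in the surrounding hunks: the tfm-wide caam_hash_ctx moves to crypto_ahash_ctx_dma()/crypto_tfm_ctx_dma(), the per-request caam_hash_state moves to ahash_request_ctx_dma(), and cra_init switches to crypto_ahash_set_reqsize_dma(). A compact sketch of the three touch points (hypothetical names, helpers from <crypto/internal/hash.h>):

  #include <crypto/internal/hash.h>
  #include <linux/string.h>

  struct mydrv_hash_state {
          dma_addr_t ctx_dma;        /* running digest, device-visible */
          u8 ctx[64];
  };

  static int mydrv_hash_init(struct ahash_request *req)
  {
          /* per-request state via the _dma getter */
          struct mydrv_hash_state *state = ahash_request_ctx_dma(req);

          memset(state, 0, sizeof(*state));
          return 0;
  }

  static int mydrv_hash_cra_init(struct crypto_tfm *tfm)
  {
          struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

          /* replaces crypto_ahash_set_reqsize() on the plain layout */
          crypto_ahash_set_reqsize_dma(ahash,
                                       sizeof(struct mydrv_hash_state));
          return 0;
  }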
bool has_bklog; @@ -630,8 +630,8 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err, struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); struct ahash_edesc *edesc; struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); int digestsize = crypto_ahash_digestsize(ahash); int ecode = 0; bool has_bklog; @@ -695,8 +695,8 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req, dma_addr_t sh_desc_dma) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; struct ahash_edesc *edesc; @@ -755,8 +755,8 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, static int ahash_do_one_req(struct crypto_engine *engine, void *areq) { struct ahash_request *req = ahash_request_cast(areq); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req)); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; u32 *desc = state->edesc->hw_desc; int ret; @@ -785,7 +785,7 @@ static int ahash_enqueue_req(struct device *jrdev, int dst_len, enum dma_data_direction dir) { struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct ahash_edesc *edesc = state->edesc; u32 *desc = edesc->hw_desc; int ret; @@ -815,8 +815,8 @@ static int ahash_enqueue_req(struct device *jrdev, static int ahash_update_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; u8 *buf = state->buf; int *buflen = &state->buflen; @@ -940,8 +940,8 @@ unmap_ctx: static int ahash_final_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; int buflen = state->buflen; u32 *desc; @@ -1001,8 +1001,8 @@ static int ahash_final_ctx(struct ahash_request *req) static int ahash_finup_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; int buflen = state->buflen; u32 *desc; @@ -1075,8 +1075,8 @@ static int ahash_finup_ctx(struct ahash_request *req) static int 
ahash_digest(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; u32 *desc; int digestsize = crypto_ahash_digestsize(ahash); @@ -1142,8 +1142,8 @@ static int ahash_digest(struct ahash_request *req) static int ahash_final_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; u8 *buf = state->buf; int buflen = state->buflen; @@ -1191,8 +1191,8 @@ static int ahash_final_no_ctx(struct ahash_request *req) static int ahash_update_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; u8 *buf = state->buf; int *buflen = &state->buflen; @@ -1312,8 +1312,8 @@ static int ahash_update_no_ctx(struct ahash_request *req) static int ahash_finup_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; int buflen = state->buflen; u32 *desc; @@ -1388,8 +1388,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req) static int ahash_update_first(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; u8 *buf = state->buf; int *buflen = &state->buflen; @@ -1498,7 +1498,7 @@ static int ahash_finup_first(struct ahash_request *req) static int ahash_init(struct ahash_request *req) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); state->update = ahash_update_first; state->finup = ahash_finup_first; @@ -1515,28 +1515,28 @@ static int ahash_init(struct ahash_request *req) static int ahash_update(struct ahash_request *req) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->update(req); } static int ahash_finup(struct ahash_request *req) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->finup(req); } static int ahash_final(struct ahash_request *req) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->final(req); } static int ahash_export(struct ahash_request *req, void *out) { - struct caam_hash_state *state = 
ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_export_state *export = out; u8 *buf = state->buf; int len = state->buflen; @@ -1553,7 +1553,7 @@ static int ahash_export(struct ahash_request *req, void *out) static int ahash_import(struct ahash_request *req, const void *in) { - struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_state *state = ahash_request_ctx_dma(req); const struct caam_export_state *export = in; memset(state, 0, sizeof(*state)); @@ -1762,7 +1762,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) container_of(halg, struct ahash_alg, halg); struct caam_hash_alg *caam_hash = container_of(alg, struct caam_hash_alg, ahash_alg); - struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, HASH_MSG_LEN + SHA1_DIGEST_SIZE, @@ -1854,8 +1854,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) ctx->enginectx.op.do_one_request = ahash_do_one_req; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct caam_hash_state)); + crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state)); /* * For keyed hash algorithms shared descriptors @@ -1866,7 +1865,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) static void caam_hash_cra_exit(struct crypto_tfm *tfm) { - struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, offsetof(struct caam_hash_ctx, key) - @@ -1926,7 +1925,7 @@ caam_hash_alloc(struct caam_hash_template *template, alg->cra_module = THIS_MODULE; alg->cra_init = caam_hash_cra_init; alg->cra_exit = caam_hash_cra_exit; - alg->cra_ctxsize = sizeof(struct caam_hash_ctx); + alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding(); alg->cra_priority = CAAM_CRA_PRIORITY; alg->cra_blocksize = template->blocksize; alg->cra_alignmask = 0; diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 88672757671012023ff08c29cde3ea61df066cba..aef031946f337b65dbca4b045ecb21e40efef318 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -57,7 +57,7 @@ static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc, struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct rsa_pub_pdb *pdb = &edesc->pdb.pub; @@ -69,7 +69,7 @@ static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc, struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1; @@ -81,7 +81,7 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; size_t p_sz = key->p_sz; @@ -98,7 +98,7 @@ static void rsa_priv_f3_unmap(struct device 
*dev, struct rsa_edesc *edesc, struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; size_t p_sz = key->p_sz; @@ -149,7 +149,7 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err, struct akcipher_request *req = context; struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct rsa_edesc *edesc; @@ -242,7 +242,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, size_t desclen) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct device *dev = ctx->dev; struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct caam_rsa_key *key = &ctx->key; @@ -371,7 +371,7 @@ static int akcipher_do_one_req(struct crypto_engine *engine, void *areq) base); struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct device *jrdev = ctx->dev; u32 *desc = req_ctx->edesc->hw_desc; int ret; @@ -399,7 +399,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req, { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct device *dev = ctx->dev; struct rsa_pub_pdb *pdb = &edesc->pdb.pub; @@ -444,7 +444,7 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req, struct rsa_edesc *edesc) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct device *dev = ctx->dev; struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1; @@ -491,7 +491,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, struct rsa_edesc *edesc) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct device *dev = ctx->dev; struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; @@ -568,7 +568,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, struct rsa_edesc *edesc) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct device *dev = ctx->dev; struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; @@ -664,7 +664,7 @@ static int akcipher_enqueue_req(struct device *jrdev, { struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct 
caam_rsa_key *key = &ctx->key; struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct rsa_edesc *edesc = req_ctx->edesc; @@ -707,7 +707,7 @@ static int akcipher_enqueue_req(struct device *jrdev, static int caam_rsa_enc(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct device *jrdev = ctx->dev; struct rsa_edesc *edesc; @@ -746,7 +746,7 @@ init_fail: static int caam_rsa_dec_priv_f1(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct device *jrdev = ctx->dev; struct rsa_edesc *edesc; int ret; @@ -775,7 +775,7 @@ init_fail: static int caam_rsa_dec_priv_f2(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct device *jrdev = ctx->dev; struct rsa_edesc *edesc; int ret; @@ -804,7 +804,7 @@ init_fail: static int caam_rsa_dec_priv_f3(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct device *jrdev = ctx->dev; struct rsa_edesc *edesc; int ret; @@ -833,7 +833,7 @@ init_fail: static int caam_rsa_dec(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; int ret; @@ -936,7 +936,7 @@ static int caam_rsa_check_key_length(unsigned int len) static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct rsa_key raw_key = {NULL}; struct caam_rsa_key *rsa_key = &ctx->key; int ret; @@ -1038,7 +1038,7 @@ free_p: static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct rsa_key raw_key = {NULL}; struct caam_rsa_key *rsa_key = &ctx->key; int ret; @@ -1089,7 +1089,7 @@ err: static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm) { - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); return ctx->key.n_sz; } @@ -1097,7 +1097,9 @@ static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm) /* Per session pkc's driver context creation function */ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm) { - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); + + akcipher_set_reqsize(tfm, sizeof(struct caam_rsa_req_ctx)); ctx->dev = caam_jr_alloc(); @@ -1123,7 +1125,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm) /* Per session pkc's driver context cleanup function */ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm) { - struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE 
- @@ -1141,13 +1143,13 @@ static struct caam_akcipher_alg caam_rsa = { .max_size = caam_rsa_max_size, .init = caam_rsa_init_tfm, .exit = caam_rsa_exit_tfm, - .reqsize = sizeof(struct caam_rsa_req_ctx), .base = { .cra_name = "rsa", .cra_driver_name = "rsa-caam", .cra_priority = 3000, .cra_module = THIS_MODULE, - .cra_ctxsize = sizeof(struct caam_rsa_ctx), + .cra_ctxsize = sizeof(struct caam_rsa_ctx) + + CRYPTO_DMA_PADDING, }, } }; diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 77d048dfe5d061923a1e1422c00a525aa10c5908..1f0e820509767358b4bda23ff3f6647848d8865c 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -246,7 +246,6 @@ int caam_rng_init(struct device *ctrldev) ctx->rng.cleanup = caam_cleanup; ctx->rng.read = caam_read; ctx->rng.priv = (unsigned long)ctx; - ctx->rng.quality = 1024; dev_info(ctrldev, "registering rng-caam\n"); diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 66d6dad841bb2aa5d4f2871c4dfe990cbd9e60f0..66928f8a0c4b1e8baa4507eb70a1b5e6bce41c33 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -426,6 +426,9 @@ struct caam_perfmon { u32 rsvd2; #define CSTA_PLEND BIT(10) #define CSTA_ALT_PLEND BIT(18) +#define CSTA_MOO GENMASK(9, 8) +#define CSTA_MOO_SECURE 1 +#define CSTA_MOO_TRUSTED 2 u32 status; /* CSTA - CAAM Status */ u64 rsvd3; diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index ce3b91c612f0f83db1c18ee8645a0b746d813e38..9eca0c30218669e64b8a6e83386ce587aace4169 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -97,7 +97,7 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(tfm); - struct cvm_req_ctx *rctx = skcipher_request_ctx(req); + struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req); struct fc_context *fctx = &rctx->fctx; u32 enc_iv_len = crypto_skcipher_ivsize(tfm); struct cpt_request_info *req_info = &rctx->cpt_req; @@ -151,7 +151,7 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, static inline u32 create_input_list(struct skcipher_request *req, u32 enc, u32 enc_iv_len) { - struct cvm_req_ctx *rctx = skcipher_request_ctx(req); + struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req); struct cpt_request_info *req_info = &rctx->cpt_req; u32 argcnt = 0; @@ -173,7 +173,7 @@ static inline void store_cb_info(struct skcipher_request *req, static inline void create_output_list(struct skcipher_request *req, u32 enc_iv_len) { - struct cvm_req_ctx *rctx = skcipher_request_ctx(req); + struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req); struct cpt_request_info *req_info = &rctx->cpt_req; u32 argcnt = 0; @@ -193,7 +193,7 @@ static inline void create_output_list(struct skcipher_request *req, static inline int cvm_enc_dec(struct skcipher_request *req, u32 enc) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cvm_req_ctx *rctx = skcipher_request_ctx(req); + struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req); u32 enc_iv_len = crypto_skcipher_ivsize(tfm); struct fc_context *fctx = &rctx->fctx; struct cpt_request_info *req_info = &rctx->cpt_req; @@ -335,7 +335,7 @@ static int cvm_ecb_des3_setkey(struct crypto_skcipher *cipher, const u8 *key, static int cvm_enc_dec_init(struct crypto_skcipher *tfm) { - crypto_skcipher_set_reqsize(tfm, sizeof(struct cvm_req_ctx)); + 
crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct cvm_req_ctx)); return 0; } diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c index c93c4e41d267870d3e341b4821a6017c405772e6..0653484df23f70a5b0b015a697376bc0e2e4ddf3 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_aead.c +++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c @@ -392,7 +392,7 @@ static int nitrox_rfc4106_setauthsize(struct crypto_aead *aead, static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq) { - struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq); + struct nitrox_rfc4106_rctx *rctx = aead_request_ctx_dma(areq); struct nitrox_aead_rctx *aead_rctx = &rctx->base; unsigned int assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE; struct scatterlist *sg; @@ -424,7 +424,7 @@ static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq) static void nitrox_rfc4106_callback(void *arg, int err) { struct aead_request *areq = arg; - struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq); + struct nitrox_rfc4106_rctx *rctx = aead_request_ctx_dma(areq); struct nitrox_kcrypt_request *nkreq = &rctx->base.nkreq; free_src_sglist(nkreq); @@ -441,7 +441,7 @@ static int nitrox_rfc4106_enc(struct aead_request *areq) { struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); - struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq); + struct nitrox_rfc4106_rctx *rctx = aead_request_ctx_dma(areq); struct nitrox_aead_rctx *aead_rctx = &rctx->base; struct se_crypto_request *creq = &aead_rctx->nkreq.creq; int ret; @@ -472,7 +472,7 @@ static int nitrox_rfc4106_enc(struct aead_request *areq) static int nitrox_rfc4106_dec(struct aead_request *areq) { struct crypto_aead *aead = crypto_aead_reqtfm(areq); - struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx_dma(aead); struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq); struct nitrox_aead_rctx *aead_rctx = &rctx->base; struct se_crypto_request *creq = &aead_rctx->nkreq.creq; @@ -510,8 +510,8 @@ static int nitrox_rfc4106_init(struct crypto_aead *aead) if (ret) return ret; - crypto_aead_set_reqsize(aead, sizeof(struct aead_request) + - sizeof(struct nitrox_rfc4106_rctx)); + crypto_aead_set_reqsize_dma(aead, sizeof(struct aead_request) + + sizeof(struct nitrox_rfc4106_rctx)); return 0; } diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c index 9e7308e39b304c80d3c67dca856c836d1bf229dd..d4e06999af9b7e3ff6d5b5d80d82e92e277e8212 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c +++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c @@ -195,6 +195,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev) ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0); if (!ndev->iov.pf2vf_wq) { kfree(ndev->iov.vfdev); + ndev->iov.vfdev = NULL; return -ENOMEM; } /* enable pf2vf mailbox interrupts */ diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c index 11a305fa19e67c29875de647d70bde87bafe9136..d8426bdf319082720fc7b4b18f88e71ec37c0fa0 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c @@ -25,7 +25,7 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req, { struct ahash_request *req = ahash_request_cast(async_req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_aes_cmac_req_ctx *rctx = 
ahash_request_ctx_dma(req); unsigned int digest_size = crypto_ahash_digestsize(tfm); if (ret) @@ -56,8 +56,8 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes, unsigned int final) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); - struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm); + struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req); struct scatterlist *sg, *cmac_key_sg = NULL; unsigned int block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); @@ -182,7 +182,7 @@ e_free: static int ccp_aes_cmac_init(struct ahash_request *req) { - struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req); memset(rctx, 0, sizeof(*rctx)); @@ -219,7 +219,7 @@ static int ccp_aes_cmac_digest(struct ahash_request *req) static int ccp_aes_cmac_export(struct ahash_request *req, void *out) { - struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req); struct ccp_aes_cmac_exp_ctx state; /* Don't let anything leak to 'out' */ @@ -238,7 +238,7 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out) static int ccp_aes_cmac_import(struct ahash_request *req, const void *in) { - struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req); struct ccp_aes_cmac_exp_ctx state; /* 'in' may not be aligned so memcpy to local variable */ @@ -256,7 +256,7 @@ static int ccp_aes_cmac_import(struct ahash_request *req, const void *in) static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int key_len) { - struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm)); u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo; @@ -334,13 +334,14 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm) { - struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct ccp_ctx *ctx = crypto_tfm_ctx_dma(tfm); struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); ctx->complete = ccp_aes_cmac_complete; ctx->u.aes.key_len = 0; - crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx)); + crypto_ahash_set_reqsize_dma(ahash, + sizeof(struct ccp_aes_cmac_req_ctx)); return 0; } @@ -382,7 +383,7 @@ int ccp_register_aes_cmac_algs(struct list_head *head) CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK; base->cra_blocksize = AES_BLOCK_SIZE; - base->cra_ctxsize = sizeof(struct ccp_ctx); + base->cra_ctxsize = sizeof(struct ccp_ctx) + crypto_dma_padding(); base->cra_priority = CCP_CRA_PRIORITY; base->cra_init = ccp_aes_cmac_cra_init; base->cra_module = THIS_MODULE; diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c index 1c1c939f5c39bb93c7fb3cfe3c9d18948a90b46b..b1dbb8cea559e82f3bca9000385471062fb42334 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c @@ -29,7 +29,7 @@ static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret) static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int key_len) { - struct ccp_ctx *ctx = crypto_aead_ctx(tfm); + struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm); switch (key_len) { case 
AES_KEYSIZE_128: @@ -76,8 +76,8 @@ static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm, static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct ccp_ctx *ctx = crypto_aead_ctx(tfm); - struct ccp_aes_req_ctx *rctx = aead_request_ctx(req); + struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm); + struct ccp_aes_req_ctx *rctx = aead_request_ctx_dma(req); struct scatterlist *iv_sg = NULL; unsigned int iv_len = 0; int i; @@ -148,12 +148,12 @@ static int ccp_aes_gcm_decrypt(struct aead_request *req) static int ccp_aes_gcm_cra_init(struct crypto_aead *tfm) { - struct ccp_ctx *ctx = crypto_aead_ctx(tfm); + struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm); ctx->complete = ccp_aes_gcm_complete; ctx->u.aes.key_len = 0; - crypto_aead_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx)); + crypto_aead_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx)); return 0; } @@ -176,7 +176,7 @@ static struct aead_alg ccp_aes_gcm_defaults = { CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct ccp_ctx), + .cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING, .cra_priority = CCP_CRA_PRIORITY, .cra_exit = ccp_aes_gcm_cra_exit, .cra_module = THIS_MODULE, diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index 6849261ca47d037c7dc9f2c78e6a0cf9d25c09d5..93f735d6b02b280f9a65cf2c392305112e3701af 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c @@ -62,7 +62,7 @@ static struct ccp_unit_size_map xts_unit_sizes[] = { static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret) { struct skcipher_request *req = skcipher_request_cast(async_req); - struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req); + struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req); if (ret) return ret; @@ -75,7 +75,7 @@ static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret) static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int key_len) { - struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); unsigned int ccpversion = ccp_version(); int ret; @@ -105,8 +105,8 @@ static int ccp_aes_xts_crypt(struct skcipher_request *req, unsigned int encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); - struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); + struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req); unsigned int ccpversion = ccp_version(); unsigned int fallback = 0; unsigned int unit; @@ -196,7 +196,7 @@ static int ccp_aes_xts_decrypt(struct skcipher_request *req) static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm) { - struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); struct crypto_skcipher *fallback_tfm; ctx->complete = ccp_aes_xts_complete; @@ -210,15 +210,16 @@ static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm) } ctx->u.aes.tfm_skcipher = fallback_tfm; - crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx) + - crypto_skcipher_reqsize(fallback_tfm)); + crypto_skcipher_set_reqsize_dma(tfm, + sizeof(struct ccp_aes_req_ctx) + + crypto_skcipher_reqsize(fallback_tfm)); return 0; } static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm) { - struct ccp_ctx *ctx = 
crypto_skcipher_ctx(tfm); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); crypto_free_skcipher(ctx->u.aes.tfm_skcipher); } @@ -246,7 +247,8 @@ static int ccp_register_aes_xts_alg(struct list_head *head, CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK; alg->base.cra_blocksize = AES_BLOCK_SIZE; - alg->base.cra_ctxsize = sizeof(struct ccp_ctx); + alg->base.cra_ctxsize = sizeof(struct ccp_ctx) + + crypto_dma_padding(); alg->base.cra_priority = CCP_CRA_PRIORITY; alg->base.cra_module = THIS_MODULE; diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c index bed331953ff94ce4ae59bb4c711d048cd0d577bc..918e223f21b651f80bc0e031d487c07d98f85704 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes.c +++ b/drivers/crypto/ccp/ccp-crypto-aes.c @@ -22,8 +22,9 @@ static int ccp_aes_complete(struct crypto_async_request *async_req, int ret) { struct skcipher_request *req = skcipher_request_cast(async_req); - struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); - struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma( + crypto_skcipher_reqtfm(req)); + struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req); if (ret) return ret; @@ -38,7 +39,7 @@ static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int key_len) { struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm); - struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); switch (key_len) { case AES_KEYSIZE_128: @@ -65,8 +66,8 @@ static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); - struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); + struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req); struct scatterlist *iv_sg = NULL; unsigned int iv_len = 0; @@ -118,7 +119,7 @@ static int ccp_aes_decrypt(struct skcipher_request *req) static int ccp_aes_init_tfm(struct crypto_skcipher *tfm) { - struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); ctx->complete = ccp_aes_complete; ctx->u.aes.key_len = 0; @@ -132,7 +133,7 @@ static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req, int ret) { struct skcipher_request *req = skcipher_request_cast(async_req); - struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req); + struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req); /* Restore the original pointer */ req->iv = rctx->rfc3686_info; @@ -143,7 +144,7 @@ static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req, static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int key_len) { - struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); if (key_len < CTR_RFC3686_NONCE_SIZE) return -EINVAL; @@ -157,8 +158,8 @@ static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key, static int ccp_aes_rfc3686_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); - struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); + struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req); u8 *iv; /* Initialize 
the CTR block */ @@ -190,12 +191,12 @@ static int ccp_aes_rfc3686_decrypt(struct skcipher_request *req) static int ccp_aes_rfc3686_init_tfm(struct crypto_skcipher *tfm) { - struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); ctx->complete = ccp_aes_rfc3686_complete; ctx->u.aes.key_len = 0; - crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx)); + crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx)); return 0; } @@ -213,7 +214,7 @@ static const struct skcipher_alg ccp_aes_defaults = { CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct ccp_ctx), + .base.cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING, .base.cra_priority = CCP_CRA_PRIORITY, .base.cra_module = THIS_MODULE, }; @@ -231,7 +232,7 @@ static const struct skcipher_alg ccp_aes_rfc3686_defaults = { CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = CTR_RFC3686_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct ccp_ctx), + .base.cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING, .base.cra_priority = CCP_CRA_PRIORITY, .base.cra_module = THIS_MODULE, }; diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c index 278636ed251a5349c57faec8af89ff6048dfcf3d..afae30adb703f6a5cc3fd3bf3882881a8ea46300 100644 --- a/drivers/crypto/ccp/ccp-crypto-des3.c +++ b/drivers/crypto/ccp/ccp-crypto-des3.c @@ -21,8 +21,9 @@ static int ccp_des3_complete(struct crypto_async_request *async_req, int ret) { struct skcipher_request *req = skcipher_request_cast(async_req); - struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); - struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma( + crypto_skcipher_reqtfm(req)); + struct ccp_des3_req_ctx *rctx = skcipher_request_ctx_dma(req); if (ret) return ret; @@ -37,7 +38,7 @@ static int ccp_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int key_len) { struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm); - struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); int err; err = verify_skcipher_des3_key(tfm, key); @@ -60,8 +61,8 @@ static int ccp_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); - struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); + struct ccp_des3_req_ctx *rctx = skcipher_request_ctx_dma(req); struct scatterlist *iv_sg = NULL; unsigned int iv_len = 0; @@ -114,12 +115,12 @@ static int ccp_des3_decrypt(struct skcipher_request *req) static int ccp_des3_init_tfm(struct crypto_skcipher *tfm) { - struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); ctx->complete = ccp_des3_complete; ctx->u.des3.key_len = 0; - crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_des3_req_ctx)); + crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_des3_req_ctx)); return 0; } @@ -137,7 +138,7 @@ static const struct skcipher_alg ccp_des3_defaults = { CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct ccp_ctx), + .base.cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING, .base.cra_priority = 
CCP_CRA_PRIORITY, .base.cra_module = THIS_MODULE, }; diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 5976530c00a8a16e90c420925bc90c2e8b792c5c..73442a382f683d2e277bc1f1f466e257996b0d87 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -78,13 +78,6 @@ struct ccp_crypto_cmd { int ret; }; -struct ccp_crypto_cpu { - struct work_struct work; - struct completion completion; - struct ccp_crypto_cmd *crypto_cmd; - int err; -}; - static inline bool ccp_crypto_success(int err) { if (err && (err != -EINPROGRESS) && (err != -EBUSY)) @@ -146,7 +139,7 @@ static void ccp_crypto_complete(void *data, int err) struct ccp_crypto_cmd *crypto_cmd = data; struct ccp_crypto_cmd *held, *next, *backlog; struct crypto_async_request *req = crypto_cmd->req; - struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm); + struct ccp_ctx *ctx = crypto_tfm_ctx_dma(req->tfm); int ret; if (err == -EINPROGRESS) { @@ -190,7 +183,7 @@ static void ccp_crypto_complete(void *data, int err) break; /* Error occurred, report it and get the next entry */ - ctx = crypto_tfm_ctx(held->req->tfm); + ctx = crypto_tfm_ctx_dma(held->req->tfm); if (ctx->complete) ret = ctx->complete(held->req, ret); held->req->complete(held->req, ret); @@ -400,7 +393,7 @@ static void ccp_unregister_algs(void) } } -static int ccp_crypto_init(void) +static int __init ccp_crypto_init(void) { int ret; @@ -421,7 +414,7 @@ static int ccp_crypto_init(void) return ret; } -static void ccp_crypto_exit(void) +static void __exit ccp_crypto_exit(void) { ccp_unregister_algs(); } diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c index 1223ac70aea28714bb5cbbcbb773a231ef9cb176..a14f85512cf4f61a72870731dd7f5efc09b5ac12 100644 --- a/drivers/crypto/ccp/ccp-crypto-rsa.c +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c @@ -44,7 +44,7 @@ static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen, static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret) { struct akcipher_request *req = akcipher_request_cast(async_req); - struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx_dma(req); if (ret) return ret; @@ -56,7 +56,7 @@ static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret) static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm) { - struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm); return ctx->u.rsa.n_len; } @@ -64,8 +64,8 @@ static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm) static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); - struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm); + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx_dma(req); int ret = 0; memset(&rctx->cmd, 0, sizeof(rctx->cmd)); @@ -126,7 +126,7 @@ static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx) static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key, unsigned int keylen, bool private) { - struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct rsa_key raw_key; int ret; @@ -192,9 +192,9 @@ static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key, static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm) { - struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + 
struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm); - akcipher_set_reqsize(tfm, sizeof(struct ccp_rsa_req_ctx)); + akcipher_set_reqsize_dma(tfm, sizeof(struct ccp_rsa_req_ctx)); ctx->complete = ccp_rsa_complete; return 0; @@ -202,7 +202,7 @@ static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm) static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm) { - struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base); + struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm); ccp_rsa_free_key_bufs(ctx); } @@ -220,7 +220,7 @@ static struct akcipher_alg ccp_rsa_defaults = { .cra_driver_name = "rsa-ccp", .cra_priority = CCP_CRA_PRIORITY, .cra_module = THIS_MODULE, - .cra_ctxsize = 2 * sizeof(struct ccp_ctx), + .cra_ctxsize = 2 * sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING, }, }; diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 74fa5360e7226d82615e16a515454c8400389f20..fa3ae8e78f6f3e412fe89b230dc56d393238688f 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -28,7 +28,7 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret) { struct ahash_request *req = ahash_request_cast(async_req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req); unsigned int digest_size = crypto_ahash_digestsize(tfm); if (ret) @@ -59,8 +59,8 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes, unsigned int final) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); - struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm); + struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req); struct scatterlist *sg; unsigned int block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); @@ -182,8 +182,8 @@ e_free: static int ccp_sha_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); - struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm); + struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req); struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm)); unsigned int block_size = @@ -231,7 +231,7 @@ static int ccp_sha_digest(struct ahash_request *req) static int ccp_sha_export(struct ahash_request *req, void *out) { - struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req); struct ccp_sha_exp_ctx state; /* Don't let anything leak to 'out' */ @@ -252,7 +252,7 @@ static int ccp_sha_export(struct ahash_request *req, void *out) static int ccp_sha_import(struct ahash_request *req, const void *in) { - struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req); struct ccp_sha_exp_ctx state; /* 'in' may not be aligned so memcpy to local variable */ @@ -272,7 +272,7 @@ static int ccp_sha_import(struct ahash_request *req, const void *in) static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int key_len) { - struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct crypto_shash *shash = ctx->u.sha.hmac_tfm; unsigned int block_size = crypto_shash_blocksize(shash); unsigned int digest_size = crypto_shash_digestsize(shash); @@ -313,13 +313,13 @@ static int 
ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, static int ccp_sha_cra_init(struct crypto_tfm *tfm) { - struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct ccp_ctx *ctx = crypto_ahash_ctx_dma(ahash); ctx->complete = ccp_sha_complete; ctx->u.sha.key_len = 0; - crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx)); + crypto_ahash_set_reqsize_dma(ahash, sizeof(struct ccp_sha_req_ctx)); return 0; } @@ -330,7 +330,7 @@ static void ccp_sha_cra_exit(struct crypto_tfm *tfm) static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm) { - struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct ccp_ctx *ctx = crypto_tfm_ctx_dma(tfm); struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm); struct crypto_shash *hmac_tfm; @@ -348,7 +348,7 @@ static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm) static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm) { - struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct ccp_ctx *ctx = crypto_tfm_ctx_dma(tfm); if (ctx->u.sha.hmac_tfm) crypto_free_shash(ctx->u.sha.hmac_tfm); @@ -492,7 +492,7 @@ static int ccp_register_sha_alg(struct list_head *head, CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK; base->cra_blocksize = def->block_size; - base->cra_ctxsize = sizeof(struct ccp_ctx); + base->cra_ctxsize = sizeof(struct ccp_ctx) + crypto_dma_padding(); base->cra_priority = CCP_CRA_PRIORITY; base->cra_init = ccp_sha_cra_init; base->cra_exit = ccp_sha_cra_exit; diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 792d6da7f0c07aea57f8b169ca6ce398813c090b..084d052fddccbd63514efc15d7a3b95ceffba139 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -381,6 +381,15 @@ static const struct psp_vdata pspv3 = { .inten_reg = 0x10690, .intsts_reg = 0x10694, }; + +static const struct psp_vdata pspv4 = { + .sev = &sevv2, + .tee = &teev1, + .feature_reg = 0x109fc, + .inten_reg = 0x10690, + .intsts_reg = 0x10694, +}; + #endif static const struct sp_dev_vdata dev_vdata[] = { @@ -426,7 +435,7 @@ static const struct sp_dev_vdata dev_vdata[] = { { /* 5 */ .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_PSP - .psp_vdata = &pspv2, + .psp_vdata = &pspv4, #endif }, { /* 6 */ diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index 35794c7271fb614d4179d89aad6d362d25d2cf2a..109ffb375fc69510be8b7339fb73fda00e1515cc 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -138,7 +138,7 @@ static int cc_aead_init(struct crypto_aead *tfm) ctx->flow_mode = cc_alg->flow_mode; ctx->auth_mode = cc_alg->auth_mode; ctx->drvdata = cc_alg->drvdata; - crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx)); + crypto_aead_set_reqsize_dma(tfm, sizeof(struct aead_req_ctx)); /* Allocate key buffer, cache line aligned */ ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE, @@ -208,7 +208,7 @@ init_failed: static void cc_aead_complete(struct device *dev, void *cc_req, int err) { struct aead_request *areq = (struct aead_request *)cc_req; - struct aead_req_ctx *areq_ctx = aead_request_ctx(areq); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq); struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); @@ -723,7 +723,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode, { struct crypto_aead *tfm = crypto_aead_reqtfm(areq); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct aead_req_ctx *areq_ctx = aead_request_ctx(areq); + struct aead_req_ctx *areq_ctx 
= aead_request_ctx_dma(areq); enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type; unsigned int idx = *seq_size; struct device *dev = drvdata_to_dev(ctx->drvdata); @@ -762,7 +762,7 @@ static void cc_proc_authen_desc(struct aead_request *areq, struct cc_hw_desc desc[], unsigned int *seq_size, int direct) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(areq); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq); enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type; unsigned int idx = *seq_size; struct crypto_aead *tfm = crypto_aead_reqtfm(areq); @@ -827,7 +827,7 @@ static void cc_proc_cipher_desc(struct aead_request *areq, unsigned int *seq_size) { unsigned int idx = *seq_size; - struct aead_req_ctx *areq_ctx = aead_request_ctx(areq); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq); enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type; struct crypto_aead *tfm = crypto_aead_reqtfm(areq); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); @@ -873,7 +873,7 @@ static void cc_proc_digest_desc(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct aead_req_ctx *req_ctx = aead_request_ctx(req); + struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); unsigned int idx = *seq_size; unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ? DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256; @@ -923,7 +923,7 @@ static void cc_set_cipher_desc(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct aead_req_ctx *req_ctx = aead_request_ctx(req); + struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); unsigned int hw_iv_size = req_ctx->hw_iv_size; unsigned int idx = *seq_size; int direct = req_ctx->gen_ctx.op_type; @@ -965,7 +965,7 @@ static void cc_set_cipher_desc(struct aead_request *req, static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size, unsigned int data_flow_mode) { - struct aead_req_ctx *req_ctx = aead_request_ctx(req); + struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); int direct = req_ctx->gen_ctx.op_type; unsigned int idx = *seq_size; @@ -1082,7 +1082,7 @@ static void cc_proc_header_desc(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); unsigned int idx = *seq_size; /* Hash associated data */ @@ -1158,7 +1158,7 @@ static void cc_proc_scheme_desc(struct aead_request *req, static void cc_mlli_to_sram(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { - struct aead_req_ctx *req_ctx = aead_request_ctx(req); + struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); @@ -1212,7 +1212,7 @@ static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[], { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct aead_req_ctx *req_ctx = aead_request_ctx(req); + struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); int direct = req_ctx->gen_ctx.op_type; unsigned int data_flow_mode = cc_get_data_flow(direct, ctx->flow_mode, @@ -1265,7 +1265,7 @@ cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[], { struct crypto_aead *tfm = 
crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct aead_req_ctx *req_ctx = aead_request_ctx(req); + struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); int direct = req_ctx->gen_ctx.op_type; unsigned int data_flow_mode = cc_get_data_flow(direct, ctx->flow_mode, @@ -1312,7 +1312,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx, enum drv_crypto_direction direct, struct aead_request *req) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); struct device *dev = drvdata_to_dev(ctx->drvdata); unsigned int assoclen = areq_ctx->assoclen; unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ? @@ -1411,7 +1411,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[], { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct aead_req_ctx *req_ctx = aead_request_ctx(req); + struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); unsigned int idx = *seq_size; unsigned int cipher_flow_mode; dma_addr_t mac_result; @@ -1533,7 +1533,7 @@ static int config_ccm_adata(struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); - struct aead_req_ctx *req_ctx = aead_request_ctx(req); + struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); //unsigned int size_of_a = 0, rem_a_size = 0; unsigned int lp = req->iv[0]; /* Note: The code assume that req->iv[0] already contains the value @@ -1591,7 +1591,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); /* L' */ memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE); @@ -1615,7 +1615,7 @@ static void cc_set_ghash_desc(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct aead_req_ctx *req_ctx = aead_request_ctx(req); + struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); unsigned int idx = *seq_size; /* load key to AES*/ @@ -1693,7 +1693,7 @@ static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[], { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct aead_req_ctx *req_ctx = aead_request_ctx(req); + struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); unsigned int idx = *seq_size; /* load key to AES*/ @@ -1730,7 +1730,7 @@ static void cc_proc_gcm_result(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct aead_req_ctx *req_ctx = aead_request_ctx(req); + struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); dma_addr_t mac_result; unsigned int idx = *seq_size; @@ -1792,7 +1792,7 @@ static void cc_proc_gcm_result(struct aead_request *req, static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { - struct aead_req_ctx *req_ctx = aead_request_ctx(req); + struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); unsigned int cipher_flow_mode; //in RFC4543 no data to encrypt. just copy data from src to dest. 
@@ -1830,7 +1830,7 @@ static int config_gcm_context(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct aead_req_ctx *req_ctx = aead_request_ctx(req); + struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); struct device *dev = drvdata_to_dev(ctx->drvdata); unsigned int cryptlen = (req_ctx->gen_ctx.op_type == @@ -1879,7 +1879,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET, ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE); @@ -1896,7 +1896,7 @@ static int cc_proc_aead(struct aead_request *req, struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ]; struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); struct device *dev = drvdata_to_dev(ctx->drvdata); struct cc_crypto_req cc_req = {}; @@ -2019,7 +2019,7 @@ exit: static int cc_aead_encrypt(struct aead_request *req) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; memset(areq_ctx, 0, sizeof(*areq_ctx)); @@ -2039,7 +2039,7 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req) { /* Very similar to cc_aead_encrypt() above. */ - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; rc = crypto_ipsec_check_assoclen(req->assoclen); @@ -2063,7 +2063,7 @@ out: static int cc_aead_decrypt(struct aead_request *req) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; memset(areq_ctx, 0, sizeof(*areq_ctx)); @@ -2081,7 +2081,7 @@ static int cc_aead_decrypt(struct aead_request *req) static int cc_rfc4309_ccm_decrypt(struct aead_request *req) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; rc = crypto_ipsec_check_assoclen(req->assoclen); @@ -2193,7 +2193,7 @@ static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc, static int cc_rfc4106_gcm_encrypt(struct aead_request *req) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; rc = crypto_ipsec_check_assoclen(req->assoclen); @@ -2217,7 +2217,7 @@ out: static int cc_rfc4543_gcm_encrypt(struct aead_request *req) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; rc = crypto_ipsec_check_assoclen(req->assoclen); @@ -2244,7 +2244,7 @@ out: static int cc_rfc4106_gcm_decrypt(struct aead_request *req) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; rc = crypto_ipsec_check_assoclen(req->assoclen); @@ -2268,7 +2268,7 @@ out: static int cc_rfc4543_gcm_decrypt(struct aead_request *req) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; rc = crypto_ipsec_check_assoclen(req->assoclen); diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index 
9efd88f871d1232fdc5708c312de012347175152..bcca55bff910ef04832969d79efc49952d04d8c6 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -52,7 +52,7 @@ static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type) static void cc_copy_mac(struct device *dev, struct aead_request *req, enum cc_sg_cpy_direct dir) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); u32 skip = req->assoclen + req->cryptlen; cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src, @@ -456,7 +456,7 @@ cipher_exit: void cc_unmap_aead_request(struct device *dev, struct aead_request *req) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); unsigned int hw_iv_size = areq_ctx->hw_iv_size; struct cc_drvdata *drvdata = dev_get_drvdata(dev); int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL); @@ -546,7 +546,7 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata, struct buffer_array *sg_data, bool is_last, bool do_chain) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); unsigned int hw_iv_size = areq_ctx->hw_iv_size; struct device *dev = drvdata_to_dev(drvdata); gfp_t flags = cc_gfp_flags(&req->base); @@ -586,7 +586,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata, struct buffer_array *sg_data, bool is_last, bool do_chain) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc = 0; int mapped_nents = 0; struct device *dev = drvdata_to_dev(drvdata); @@ -652,7 +652,7 @@ chain_assoc_exit: static void cc_prepare_aead_data_dlli(struct aead_request *req, u32 *src_last_bytes, u32 *dst_last_bytes) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; unsigned int authsize = areq_ctx->req_authsize; struct scatterlist *sg; @@ -678,7 +678,7 @@ static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata, u32 *src_last_bytes, u32 *dst_last_bytes, bool is_last_table) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; unsigned int authsize = areq_ctx->req_authsize; struct device *dev = drvdata_to_dev(drvdata); @@ -790,7 +790,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, struct buffer_array *sg_data, bool is_last_table, bool do_chain) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); struct device *dev = drvdata_to_dev(drvdata); enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; unsigned int authsize = areq_ctx->req_authsize; @@ -895,7 +895,7 @@ chain_data_exit: static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata, struct aead_request *req) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); u32 curr_mlli_size = 0; if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) { @@ -945,7 +945,7 @@ static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata, int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) { - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); + struct aead_req_ctx *areq_ctx = 
aead_request_ctx_dma(req); struct mlli_params *mlli_params = &areq_ctx->mlli_params; struct device *dev = drvdata_to_dev(drvdata); struct buffer_array sg_data; diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c index 7083767602fcf98ad36051aa4279792aa9a0426c..8f008f024f8f1bee62b53339d1aa3327741940f7 100644 --- a/drivers/crypto/ccree/cc_debugfs.c +++ b/drivers/crypto/ccree/cc_debugfs.c @@ -55,7 +55,7 @@ void __init cc_debugfs_global_init(void) cc_debugfs_dir = debugfs_create_dir("ccree", NULL); } -void __exit cc_debugfs_global_fini(void) +void cc_debugfs_global_fini(void) { debugfs_remove(cc_debugfs_dir); } diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index cadead18b59e8d6cf0862b49b074ab32cafcbc9f..d489c6f808925796655d5838aa010d7000dbbdaa 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -651,9 +651,17 @@ static struct platform_driver ccree_driver = { static int __init ccree_init(void) { + int rc; + cc_debugfs_global_init(); - return platform_driver_register(&ccree_driver); + rc = platform_driver_register(&ccree_driver); + if (rc) { + cc_debugfs_global_fini(); + return rc; + } + + return 0; } module_init(ccree_init); diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c index 683c9a430e11be1d18503a7a8a7ab8ef17234d1d..f418162932fe394dce84ee9673204cfb58f24a2c 100644 --- a/drivers/crypto/ccree/cc_hash.c +++ b/drivers/crypto/ccree/cc_hash.c @@ -283,9 +283,9 @@ static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state, static void cc_update_complete(struct device *dev, void *cc_req, int err) { struct ahash_request *req = (struct ahash_request *)cc_req; - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); dev_dbg(dev, "req=%pK\n", req); @@ -301,9 +301,9 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err) static void cc_digest_complete(struct device *dev, void *cc_req, int err) { struct ahash_request *req = (struct ahash_request *)cc_req; - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); u32 digestsize = crypto_ahash_digestsize(tfm); dev_dbg(dev, "req=%pK\n", req); @@ -321,9 +321,9 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err) static void cc_hash_complete(struct device *dev, void *cc_req, int err) { struct ahash_request *req = (struct ahash_request *)cc_req; - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); u32 digestsize = crypto_ahash_digestsize(tfm); dev_dbg(dev, "req=%pK\n", req); @@ -341,9 +341,9 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err) static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req, int idx) { - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct 
cc_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); u32 digestsize = crypto_ahash_digestsize(tfm); /* Get final MAC result */ @@ -364,9 +364,9 @@ static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req, static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req, int idx) { - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); u32 digestsize = crypto_ahash_digestsize(tfm); /* store the hash digest result in the context */ @@ -417,9 +417,9 @@ static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req, static int cc_hash_digest(struct ahash_request *req) { - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); u32 digestsize = crypto_ahash_digestsize(tfm); struct scatterlist *src = req->src; unsigned int nbytes = req->nbytes; @@ -555,9 +555,9 @@ static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx, static int cc_hash_update(struct ahash_request *req) { - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base); struct scatterlist *src = req->src; unsigned int nbytes = req->nbytes; @@ -631,9 +631,9 @@ static int cc_hash_update(struct ahash_request *req) static int cc_do_finup(struct ahash_request *req, bool update) { - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); u32 digestsize = crypto_ahash_digestsize(tfm); struct scatterlist *src = req->src; unsigned int nbytes = req->nbytes; @@ -711,9 +711,9 @@ static int cc_hash_final(struct ahash_request *req) static int cc_hash_init(struct ahash_request *req) { - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); dev_dbg(dev, "===== init (%d) ====\n", req->nbytes); @@ -736,7 +736,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, u32 larval_addr; struct device *dev; - ctx = crypto_ahash_ctx(ahash); + ctx = crypto_ahash_ctx_dma(ahash); dev = drvdata_to_dev(ctx->drvdata); dev_dbg(dev, "start keylen: %d", keylen); @@ -922,7 +922,7 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { struct cc_crypto_req cc_req = {}; - struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct device *dev = drvdata_to_dev(ctx->drvdata); int rc = 0; unsigned int idx = 0; @@ -1007,7 +1007,7 @@ static int cc_xcbc_setkey(struct crypto_ahash 
*ahash, static int cc_cmac_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { - struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct device *dev = drvdata_to_dev(ctx->drvdata); dev_dbg(dev, "===== setkey (%d) ====\n", keylen); @@ -1109,7 +1109,7 @@ fail: static int cc_get_hash_len(struct crypto_tfm *tfm) { - struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); if (ctx->hash_mode == DRV_HASH_SM3) return CC_SM3_HASH_LEN_SIZE; @@ -1119,7 +1119,7 @@ static int cc_get_hash_len(struct crypto_tfm *tfm) static int cc_cra_init(struct crypto_tfm *tfm) { - struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); struct hash_alg_common *hash_alg_common = container_of(tfm->__crt_alg, struct hash_alg_common, base); struct ahash_alg *ahash_alg = @@ -1127,8 +1127,8 @@ static int cc_cra_init(struct crypto_tfm *tfm) struct cc_hash_alg *cc_alg = container_of(ahash_alg, struct cc_hash_alg, ahash_alg); - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_req_ctx)); + crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm), + sizeof(struct ahash_req_ctx)); ctx->hash_mode = cc_alg->hash_mode; ctx->hw_mode = cc_alg->hw_mode; @@ -1140,7 +1140,7 @@ static int cc_cra_init(struct crypto_tfm *tfm) static void cc_cra_exit(struct crypto_tfm *tfm) { - struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); dev_dbg(dev, "cc_cra_exit"); @@ -1149,9 +1149,9 @@ static void cc_cra_exit(struct crypto_tfm *tfm) static int cc_mac_update(struct ahash_request *req) { - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base); struct cc_crypto_req cc_req = {}; @@ -1217,9 +1217,9 @@ static int cc_mac_update(struct ahash_request *req) static int cc_mac_final(struct ahash_request *req) { - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); struct cc_crypto_req cc_req = {}; struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN]; @@ -1338,9 +1338,9 @@ static int cc_mac_final(struct ahash_request *req) static int cc_mac_finup(struct ahash_request *req) { - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); struct cc_crypto_req cc_req = {}; struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN]; @@ -1419,9 +1419,9 @@ static int cc_mac_finup(struct ahash_request *req) static int cc_mac_digest(struct ahash_request *req) { - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct cc_hash_ctx *ctx = 
crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); u32 digestsize = crypto_ahash_digestsize(tfm); struct cc_crypto_req cc_req = {}; @@ -1499,8 +1499,8 @@ static int cc_mac_digest(struct ahash_request *req) static int cc_hash_export(struct ahash_request *req, void *out) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); u8 *curr_buff = cc_hash_buf(state); u32 curr_buff_cnt = *cc_hash_buf_cnt(state); const u32 tmp = CC_EXPORT_MAGIC; @@ -1525,9 +1525,9 @@ static int cc_hash_export(struct ahash_request *req, void *out) static int cc_hash_import(struct ahash_request *req, const void *in) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct device *dev = drvdata_to_dev(ctx->drvdata); - struct ahash_req_ctx *state = ahash_request_ctx(req); + struct ahash_req_ctx *state = ahash_request_ctx_dma(req); u32 tmp; memcpy(&tmp, in, sizeof(u32)); @@ -1846,7 +1846,7 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template, template->driver_name); } alg->cra_module = THIS_MODULE; - alg->cra_ctxsize = sizeof(struct cc_hash_ctx); + alg->cra_ctxsize = sizeof(struct cc_hash_ctx) + crypto_dma_padding(); alg->cra_priority = CC_CRA_PRIO; alg->cra_blocksize = template->blocksize; alg->cra_alignmask = 0; @@ -2073,9 +2073,9 @@ static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[], unsigned int *seq_size) { unsigned int idx = *seq_size; - struct ahash_req_ctx *state = ahash_request_ctx(areq); + struct ahash_req_ctx *state = ahash_request_ctx_dma(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); /* Setup XCBC MAC K1 */ hw_desc_init(&desc[idx]); @@ -2130,9 +2130,9 @@ static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[], unsigned int *seq_size) { unsigned int idx = *seq_size; - struct ahash_req_ctx *state = ahash_request_ctx(areq); + struct ahash_req_ctx *state = ahash_request_ctx_dma(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); - struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); /* Setup CMAC Key */ hw_desc_init(&desc[idx]); diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig index f886401af13e7b271ccb94d62aa8c4a117ac0552..5dd3f6a4781a2c963c0bc1023686f643d933712b 100644 --- a/drivers/crypto/chelsio/Kconfig +++ b/drivers/crypto/chelsio/Kconfig @@ -3,11 +3,11 @@ config CRYPTO_DEV_CHELSIO tristate "Chelsio Crypto Co-processor Driver" depends on CHELSIO_T4 select CRYPTO_LIB_AES + select CRYPTO_LIB_GF128MUL select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 select CRYPTO_AUTHENC - select CRYPTO_GF128MUL help The Chelsio Crypto Co-processor driver for T6 adapters. 
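The ccree hunks above and the chelsio hunks below apply the same mechanical conversion: a context that the device reads or writes via DMA is now fetched through the crypto API's _dma accessor variants, and the context sizes are padded so the DMA-aligned pointer still fits inside the allocation. A minimal sketch of the pattern follows; the my_drv_* names are hypothetical stand-ins for illustration, not code from this patch:

```c
/*
 * Illustrative sketch only: my_drv_* is a hypothetical driver used to
 * show the conversion pattern applied in the hunks above, not part of
 * this patch.
 */
#include <crypto/internal/hash.h>

struct my_drv_ctx {
	u8 key[64];		/* read by the device via DMA */
	u32 key_len;
};

struct my_drv_req_ctx {
	u8 digest[64];		/* written back by the device via DMA */
};

static int my_drv_cra_init(struct crypto_tfm *tfm)
{
	/* _dma variant of crypto_tfm_ctx(): returns a DMA-aligned pointer */
	struct my_drv_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	/* request contexts get the same aligned sizing */
	crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
				     sizeof(struct my_drv_req_ctx));
	ctx->key_len = 0;
	return 0;
}

static int my_drv_register(struct ahash_alg *alg)
{
	/* leave room for the run-time alignment padding */
	alg->halg.base.cra_ctxsize = sizeof(struct my_drv_ctx) +
				     crypto_dma_padding();
	return crypto_register_ahash(alg);
}
```

Deriving the padding from crypto_dma_padding() and crypto_dma_align() rather than a hard-coded constant is also what lets the hpre driver further below replace its fixed HPRE_ALIGN_SZ-based reqsize with a value computed from the platform's DMA alignment.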
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 6933546f87b1a863e9eecc94470d09168bde5b51..68d65773ef2bdc8f0b24cf028c950201ab949e98 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -98,17 +98,17 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req, static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx) { - return ctx->crypto_ctx->aeadctx; + return &ctx->crypto_ctx->aeadctx; } static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx) { - return ctx->crypto_ctx->ablkctx; + return &ctx->crypto_ctx->ablkctx; } static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx) { - return ctx->crypto_ctx->hmacctx; + return &ctx->crypto_ctx->hmacctx; } static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx) @@ -210,7 +210,7 @@ static inline int chcr_handle_aead_resp(struct aead_request *req, unsigned char *input, int err) { - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_dev *dev = a_ctx(tfm)->dev; @@ -718,7 +718,7 @@ static inline int get_qidxs(struct crypto_async_request *req, { struct aead_request *aead_req = container_of(req, struct aead_request, base); - struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(aead_req); *txqidx = reqctx->txqidx; *rxqidx = reqctx->rxqidx; break; @@ -2362,7 +2362,7 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) inline void chcr_aead_common_exit(struct aead_request *req) { - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); @@ -2373,7 +2373,7 @@ static int chcr_aead_common_init(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); unsigned int authsize = crypto_aead_authsize(tfm); int error = -EINVAL; @@ -2417,7 +2417,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); - struct aead_request *subreq = aead_request_ctx(req); + struct aead_request *subreq = aead_request_ctx_dma(req); aead_request_set_tfm(subreq, aeadctx->sw_cipher); aead_request_set_callback(subreq, req->base.flags, @@ -2438,7 +2438,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, struct uld_ctx *u_ctx = ULD_CTX(ctx); struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct sk_buff *skb = NULL; struct chcr_wr *chcr_req; struct cpl_rx_phys_dsgl *phys_cpl; @@ -2576,7 +2576,7 @@ int chcr_aead_dma_map(struct device *dev, unsigned short op_type) { int error; - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); unsigned int authsize = crypto_aead_authsize(tfm); int src_len, dst_len; @@ -2637,7 +2637,7 @@ void chcr_aead_dma_unmap(struct device *dev, struct 
aead_request *req, unsigned short op_type) { - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); unsigned int authsize = crypto_aead_authsize(tfm); int src_len, dst_len; @@ -2678,7 +2678,7 @@ void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx) { struct ulptx_walk ulp_walk; - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); if (reqctx->imm) { u8 *buf = (u8 *)ulptx; @@ -2704,7 +2704,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req, struct cpl_rx_phys_dsgl *phys_cpl, unsigned short qid) { - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct dsgl_walk dsgl_walk; unsigned int authsize = crypto_aead_authsize(tfm); @@ -2894,7 +2894,7 @@ static int generate_b0(struct aead_request *req, u8 *ivptr, unsigned int l, lp, m; int rc; struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); u8 *b0 = reqctx->scratch_pad; m = crypto_aead_authsize(aead); @@ -2932,7 +2932,7 @@ static int ccm_format_packet(struct aead_request *req, unsigned short op_type, unsigned int assoclen) { - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); int rc = 0; @@ -2963,7 +2963,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, struct chcr_context *ctx = a_ctx(tfm); struct uld_ctx *u_ctx = ULD_CTX(ctx); struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; @@ -3036,7 +3036,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct sk_buff *skb = NULL; struct chcr_wr *chcr_req; struct cpl_rx_phys_dsgl *phys_cpl; @@ -3135,7 +3135,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, struct chcr_context *ctx = a_ctx(tfm); struct uld_ctx *u_ctx = ULD_CTX(ctx); struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct sk_buff *skb = NULL; struct chcr_wr *chcr_req; struct cpl_rx_phys_dsgl *phys_cpl; @@ -3255,9 +3255,10 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm) CRYPTO_ALG_ASYNC); if (IS_ERR(aeadctx->sw_cipher)) return PTR_ERR(aeadctx->sw_cipher); - crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx), - sizeof(struct aead_request) + - crypto_aead_reqsize(aeadctx->sw_cipher))); + crypto_aead_set_reqsize_dma( + tfm, max(sizeof(struct chcr_aead_reqctx), + sizeof(struct aead_request) + + crypto_aead_reqsize(aeadctx->sw_cipher))); return chcr_device_init(a_ctx(tfm)); } @@ -3735,7 +3736,7 @@ static int 
chcr_aead_op(struct aead_request *req, create_wr_t create_wr_fn) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct chcr_context *ctx = a_ctx(tfm); struct uld_ctx *u_ctx = ULD_CTX(ctx); struct sk_buff *skb; @@ -3785,7 +3786,7 @@ static int chcr_aead_op(struct aead_request *req, static int chcr_aead_encrypt(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct chcr_context *ctx = a_ctx(tfm); unsigned int cpu; @@ -3816,7 +3817,7 @@ static int chcr_aead_decrypt(struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_context *ctx = a_ctx(tfm); struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); int size; unsigned int cpu; diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index c7816c83e32461dd44f028a8904c5a982ca273e8..7f88ddb086313e5016936c960ade224ab8e14535 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -248,9 +248,9 @@ struct hmac_ctx { struct __crypto_ctx { union { - DECLARE_FLEX_ARRAY(struct hmac_ctx, hmacctx); - DECLARE_FLEX_ARRAY(struct ablk_ctx, ablkctx); - DECLARE_FLEX_ARRAY(struct chcr_aead_ctx, aeadctx); + struct hmac_ctx hmacctx; + struct ablk_ctx ablkctx; + struct chcr_aead_ctx aeadctx; }; }; diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 27e1fa91206390d62f4b65a52d75980f5001b279..743ce4fc3158c68b46a6b0ded41b8f21b9345dcb 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -26,7 +26,7 @@ config CRYPTO_DEV_HISI_SEC2 select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 - select CRYPTO_SM4 + select CRYPTO_SM4_GENERIC depends on PCI && PCI_MSI depends on UACCE || UACCE=n depends on ARM64 || (COMPILE_TEST && 64BIT) diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile index 1e89269a2e4b01192af6953ac12c9018a487d4a3..8595a5a5d22888d2e9e3d090f170057971f77fe8 100644 --- a/drivers/crypto/hisilicon/Makefile +++ b/drivers/crypto/hisilicon/Makefile @@ -3,6 +3,6 @@ obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hpre/ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/ obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o -hisi_qm-objs = qm.o sgl.o +hisi_qm-objs = qm.o sgl.o debugfs.o obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/ obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += trng/ diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..2cc1591949db7e078846115e5bd646e6afe2e37f --- /dev/null +++ b/drivers/crypto/hisilicon/debugfs.c @@ -0,0 +1,1147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 HiSilicon Limited. 
*/ +#include +#include "qm_common.h" + +#define QM_DFX_BASE 0x0100000 +#define QM_DFX_STATE1 0x0104000 +#define QM_DFX_STATE2 0x01040C8 +#define QM_DFX_COMMON 0x0000 +#define QM_DFX_BASE_LEN 0x5A +#define QM_DFX_STATE1_LEN 0x2E +#define QM_DFX_STATE2_LEN 0x11 +#define QM_DFX_COMMON_LEN 0xC3 +#define QM_DFX_REGS_LEN 4UL +#define QM_DBG_TMP_BUF_LEN 22 +#define CURRENT_FUN_MASK GENMASK(5, 0) +#define CURRENT_Q_MASK GENMASK(31, 16) +#define QM_SQE_ADDR_MASK GENMASK(7, 0) + +#define QM_DFX_MB_CNT_VF 0x104010 +#define QM_DFX_DB_CNT_VF 0x104020 +#define QM_DFX_SQE_CNT_VF_SQN 0x104030 +#define QM_DFX_CQE_CNT_VF_CQN 0x104040 +#define QM_DFX_QN_SHIFT 16 +#define QM_DFX_CNT_CLR_CE 0x100118 +#define QM_DBG_WRITE_LEN 1024 + +static const char * const qm_debug_file_name[] = { + [CURRENT_QM] = "current_qm", + [CURRENT_Q] = "current_q", + [CLEAR_ENABLE] = "clear_enable", +}; + +struct qm_dfx_item { + const char *name; + u32 offset; +}; + +struct qm_cmd_dump_item { + const char *cmd; + char *info_name; + int (*dump_fn)(struct hisi_qm *qm, char *cmd, char *info_name); +}; + +static struct qm_dfx_item qm_dfx_files[] = { + {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)}, + {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)}, + {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)}, + {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)}, + {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)}, +}; + +#define CNT_CYC_REGS_NUM 10 +static const struct debugfs_reg32 qm_dfx_regs[] = { + /* XXX_CNT are read-to-clear registers */ + {"QM_ECC_1BIT_CNT ", 0x104000ull}, + {"QM_ECC_MBIT_CNT ", 0x104008ull}, + {"QM_DFX_MB_CNT ", 0x104018ull}, + {"QM_DFX_DB_CNT ", 0x104028ull}, + {"QM_DFX_SQE_CNT ", 0x104038ull}, + {"QM_DFX_CQE_CNT ", 0x104048ull}, + {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull}, + {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull}, + {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull}, + {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull}, + {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull}, + {"QM_ECC_1BIT_INF ", 0x104004ull}, + {"QM_ECC_MBIT_INF ", 0x10400cull}, + {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull}, + {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull}, + {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull}, + {"QM_DFX_FF_ST0 ", 0x1040c8ull}, + {"QM_DFX_FF_ST1 ", 0x1040ccull}, + {"QM_DFX_FF_ST2 ", 0x1040d0ull}, + {"QM_DFX_FF_ST3 ", 0x1040d4ull}, + {"QM_DFX_FF_ST4 ", 0x1040d8ull}, + {"QM_DFX_FF_ST5 ", 0x1040dcull}, + {"QM_DFX_FF_ST6 ", 0x1040e0ull}, + {"QM_IN_IDLE_ST ", 0x1040e4ull}, +}; + +static const struct debugfs_reg32 qm_vf_dfx_regs[] = { + {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull}, +}; + +/* define the QM's dfx regs region and region length */ +static struct dfx_diff_registers qm_diff_regs[] = { + { + .reg_offset = QM_DFX_BASE, + .reg_len = QM_DFX_BASE_LEN, + }, { + .reg_offset = QM_DFX_STATE1, + .reg_len = QM_DFX_STATE1_LEN, + }, { + .reg_offset = QM_DFX_STATE2, + .reg_len = QM_DFX_STATE2_LEN, + }, { + .reg_offset = QM_DFX_COMMON, + .reg_len = QM_DFX_COMMON_LEN, + }, +}; + +static struct hisi_qm *file_to_qm(struct debugfs_file *file) +{ + struct qm_debug *debug = file->debug; + + return container_of(debug, struct hisi_qm, debug); +} + +static ssize_t qm_cmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *pos) +{ + char buf[QM_DBG_READ_LEN]; + int len; + + len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", + "Please echo help to cmd to get help information"); + + return simple_read_from_buffer(buffer, count, pos, buf, len); +} + +static void dump_show(struct hisi_qm *qm, void *info, + unsigned int info_size, char *info_name) +{ + struct device *dev =
&qm->pdev->dev; + u8 *info_curr = info; + u32 i; +#define BYTE_PER_DW 4 + + dev_info(dev, "%s DUMP\n", info_name); + for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) { + pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW, + *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr)); + } +} + +static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name) +{ + struct device *dev = &qm->pdev->dev; + struct qm_sqc *sqc, *sqc_curr; + dma_addr_t sqc_dma; + u32 qp_id; + int ret; + + if (!s) + return -EINVAL; + + ret = kstrtou32(s, 0, &qp_id); + if (ret || qp_id >= qm->qp_num) { + dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1); + return -EINVAL; + } + + sqc = hisi_qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma); + if (IS_ERR(sqc)) + return PTR_ERR(sqc); + + ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1); + if (ret) { + down_read(&qm->qps_lock); + if (qm->sqc) { + sqc_curr = qm->sqc + qp_id; + + dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC"); + } + up_read(&qm->qps_lock); + + goto free_ctx; + } + + dump_show(qm, sqc, sizeof(*sqc), name); + +free_ctx: + hisi_qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma); + return 0; +} + +static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name) +{ + struct device *dev = &qm->pdev->dev; + struct qm_cqc *cqc, *cqc_curr; + dma_addr_t cqc_dma; + u32 qp_id; + int ret; + + if (!s) + return -EINVAL; + + ret = kstrtou32(s, 0, &qp_id); + if (ret || qp_id >= qm->qp_num) { + dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1); + return -EINVAL; + } + + cqc = hisi_qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma); + if (IS_ERR(cqc)) + return PTR_ERR(cqc); + + ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 1); + if (ret) { + down_read(&qm->qps_lock); + if (qm->cqc) { + cqc_curr = qm->cqc + qp_id; + + dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC"); + } + up_read(&qm->qps_lock); + + goto free_ctx; + } + + dump_show(qm, cqc, sizeof(*cqc), name); + +free_ctx: + hisi_qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma); + return 0; +} + +static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name) +{ + struct device *dev = &qm->pdev->dev; + dma_addr_t xeqc_dma; + size_t size; + void *xeqc; + int ret; + u8 cmd; + + if (strsep(&s, " ")) { + dev_err(dev, "Please do not input extra characters!\n"); + return -EINVAL; + } + + if (!strcmp(name, "EQC")) { + cmd = QM_MB_CMD_EQC; + size = sizeof(struct qm_eqc); + } else { + cmd = QM_MB_CMD_AEQC; + size = sizeof(struct qm_aeqc); + } + + xeqc = hisi_qm_ctx_alloc(qm, size, &xeqc_dma); + if (IS_ERR(xeqc)) + return PTR_ERR(xeqc); + + ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1); + if (ret) + goto err_free_ctx; + + dump_show(qm, xeqc, size, name); + +err_free_ctx: + hisi_qm_ctx_free(qm, size, xeqc, &xeqc_dma); + return ret; +} + +static int q_dump_param_parse(struct hisi_qm *qm, char *s, + u32 *e_id, u32 *q_id, u16 q_depth) +{ + struct device *dev = &qm->pdev->dev; + unsigned int qp_num = qm->qp_num; + char *presult; + int ret; + + presult = strsep(&s, " "); + if (!presult) { + dev_err(dev, "Please input qp number!\n"); + return -EINVAL; + } + + ret = kstrtou32(presult, 0, q_id); + if (ret || *q_id >= qp_num) { + dev_err(dev, "Please input qp num (0-%u)", qp_num - 1); + return -EINVAL; + } + + presult = strsep(&s, " "); + if (!presult) { + dev_err(dev, "Please input sqe number!\n"); + return -EINVAL; + } + + ret = kstrtou32(presult, 0, e_id); + if (ret || *e_id >= q_depth) { + dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1); + return -EINVAL; + } + + if (strsep(&s, " ")) { + 
dev_err(dev, "Please do not input extra characters!\n"); + return -EINVAL; + } + + return 0; +} + +static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name) +{ + u16 sq_depth = qm->qp_array->cq_depth; + void *sqe, *sqe_curr; + struct hisi_qp *qp; + u32 qp_id, sqe_id; + int ret; + + ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth); + if (ret) + return ret; + + sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL); + if (!sqe) + return -ENOMEM; + + qp = &qm->qp_array[qp_id]; + memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth); + sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size); + memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, + qm->debug.sqe_mask_len); + + dump_show(qm, sqe_curr, qm->sqe_size, name); + + kfree(sqe); + + return 0; +} + +static int qm_cq_dump(struct hisi_qm *qm, char *s, char *name) +{ + struct qm_cqe *cqe_curr; + struct hisi_qp *qp; + u32 qp_id, cqe_id; + int ret; + + ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth); + if (ret) + return ret; + + qp = &qm->qp_array[qp_id]; + cqe_curr = qp->cqe + cqe_id; + dump_show(qm, cqe_curr, sizeof(struct qm_cqe), name); + + return 0; +} + +static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, char *name) +{ + struct device *dev = &qm->pdev->dev; + u16 xeq_depth; + size_t size; + void *xeqe; + u32 xeqe_id; + int ret; + + if (!s) + return -EINVAL; + + ret = kstrtou32(s, 0, &xeqe_id); + if (ret) + return -EINVAL; + + if (!strcmp(name, "EQE")) { + xeq_depth = qm->eq_depth; + size = sizeof(struct qm_eqe); + } else { + xeq_depth = qm->aeq_depth; + size = sizeof(struct qm_aeqe); + } + + if (xeqe_id >= xeq_depth) { + dev_err(dev, "Please input eqe or aeqe num (0-%u)", xeq_depth - 1); + return -EINVAL; + } + + down_read(&qm->qps_lock); + + if (qm->eqe && !strcmp(name, "EQE")) { + xeqe = qm->eqe + xeqe_id; + } else if (qm->aeqe && !strcmp(name, "AEQE")) { + xeqe = qm->aeqe + xeqe_id; + } else { + ret = -EINVAL; + goto err_unlock; + } + + dump_show(qm, xeqe, size, name); + +err_unlock: + up_read(&qm->qps_lock); + return ret; +} + +static int qm_dbg_help(struct hisi_qm *qm, char *s) +{ + struct device *dev = &qm->pdev->dev; + + if (strsep(&s, " ")) { + dev_err(dev, "Please do not input extra characters!\n"); + return -EINVAL; + } + + dev_info(dev, "available commands:\n"); + dev_info(dev, "sqc \n"); + dev_info(dev, "cqc \n"); + dev_info(dev, "eqc\n"); + dev_info(dev, "aeqc\n"); + dev_info(dev, "sq \n"); + dev_info(dev, "cq \n"); + dev_info(dev, "eq \n"); + dev_info(dev, "aeq \n"); + + return 0; +} + +static const struct qm_cmd_dump_item qm_cmd_dump_table[] = { + { + .cmd = "sqc", + .info_name = "SQC", + .dump_fn = qm_sqc_dump, + }, { + .cmd = "cqc", + .info_name = "CQC", + .dump_fn = qm_cqc_dump, + }, { + .cmd = "eqc", + .info_name = "EQC", + .dump_fn = qm_eqc_aeqc_dump, + }, { + .cmd = "aeqc", + .info_name = "AEQC", + .dump_fn = qm_eqc_aeqc_dump, + }, { + .cmd = "sq", + .info_name = "SQE", + .dump_fn = qm_sq_dump, + }, { + .cmd = "cq", + .info_name = "CQE", + .dump_fn = qm_cq_dump, + }, { + .cmd = "eq", + .info_name = "EQE", + .dump_fn = qm_eq_aeq_dump, + }, { + .cmd = "aeq", + .info_name = "AEQE", + .dump_fn = qm_eq_aeq_dump, + }, +}; + +static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf) +{ + struct device *dev = &qm->pdev->dev; + char *presult, *s, *s_tmp; + int table_size, i, ret; + + s = kstrdup(cmd_buf, GFP_KERNEL); + if (!s) + return -ENOMEM; + + s_tmp = s; + presult = strsep(&s, " "); + if (!presult) { + ret = -EINVAL; + goto err_buffer_free; + } + + if (!strcmp(presult,
"help")) { + ret = qm_dbg_help(qm, s); + goto err_buffer_free; + } + + table_size = ARRAY_SIZE(qm_cmd_dump_table); + for (i = 0; i < table_size; i++) { + if (!strcmp(presult, qm_cmd_dump_table[i].cmd)) { + ret = qm_cmd_dump_table[i].dump_fn(qm, s, + qm_cmd_dump_table[i].info_name); + break; + } + } + + if (i == table_size) { + dev_info(dev, "Please echo help\n"); + ret = -EINVAL; + } + +err_buffer_free: + kfree(s_tmp); + + return ret; +} + +static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *pos) +{ + struct hisi_qm *qm = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + int ret; + + if (*pos) + return 0; + + ret = hisi_qm_get_dfx_access(qm); + if (ret) + return ret; + + /* Check whether the instance is being reset. */ + if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) { + ret = 0; + goto put_dfx_access; + } + + if (count > QM_DBG_WRITE_LEN) { + ret = -ENOSPC; + goto put_dfx_access; + } + + cmd_buf = memdup_user_nul(buffer, count); + if (IS_ERR(cmd_buf)) { + ret = PTR_ERR(cmd_buf); + goto put_dfx_access; + } + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + ret = qm_cmd_write_dump(qm, cmd_buf); + if (ret) { + kfree(cmd_buf); + goto put_dfx_access; + } + + kfree(cmd_buf); + + ret = count; + +put_dfx_access: + hisi_qm_put_dfx_access(qm); + return ret; +} + +static const struct file_operations qm_cmd_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = qm_cmd_read, + .write = qm_cmd_write, +}; + +/** + * hisi_qm_regs_dump() - Dump registers' values. + * @s: debugfs file handle. + * @regset: accelerator registers information. + * + * Dump accelerator registers. + */ +void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset) +{ + struct pci_dev *pdev = to_pci_dev(regset->dev); + struct hisi_qm *qm = pci_get_drvdata(pdev); + const struct debugfs_reg32 *regs = regset->regs; + int regs_len = regset->nregs; + int i, ret; + u32 val; + + ret = hisi_qm_get_dfx_access(qm); + if (ret) + return; + + for (i = 0; i < regs_len; i++) { + val = readl(regset->base + regs[i].offset); + seq_printf(s, "%s= 0x%08x\n", regs[i].name, val); + } + + hisi_qm_put_dfx_access(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_regs_dump); + +static int qm_regs_show(struct seq_file *s, void *unused) +{ + struct hisi_qm *qm = s->private; + struct debugfs_regset32 regset; + + if (qm->fun_type == QM_HW_PF) { + regset.regs = qm_dfx_regs; + regset.nregs = ARRAY_SIZE(qm_dfx_regs); + } else { + regset.regs = qm_vf_dfx_regs; + regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs); + } + + regset.base = qm->io_base; + regset.dev = &qm->pdev->dev; + + hisi_qm_regs_dump(s, &regset); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(qm_regs); + +static u32 current_q_read(struct hisi_qm *qm) +{ + return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT; +} + +static int current_q_write(struct hisi_qm *qm, u32 val) +{ + u32 tmp; + + if (val >= qm->debug.curr_qm_qp_num) + return -EINVAL; + + tmp = val << QM_DFX_QN_SHIFT | + (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK); + writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + + tmp = val << QM_DFX_QN_SHIFT | + (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK); + writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + return 0; +} + +static u32 clear_enable_read(struct hisi_qm *qm) +{ + return readl(qm->io_base + QM_DFX_CNT_CLR_CE); +} + +/* rd_clr_ctrl: 1 enables read-to-clear, 0 disables it */ +static int
clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl) +{ + if (rd_clr_ctrl > 1) + return -EINVAL; + + writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE); + + return 0; +} + +static u32 current_qm_read(struct hisi_qm *qm) +{ + return readl(qm->io_base + QM_DFX_MB_CNT_VF); +} + +static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num) +{ + u32 remain_q_num, vfq_num; + u32 num_vfs = qm->vfs_num; + + vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs; + if (vfq_num >= qm->max_qp_num) + return qm->max_qp_num; + + remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs; + if (vfq_num + remain_q_num <= qm->max_qp_num) + return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num; + + /* + * If vfq_num + remain_q_num > max_qp_num, the last VFs get + * one extra queue each. + */ + return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num; +} + +static int current_qm_write(struct hisi_qm *qm, u32 val) +{ + u32 tmp; + + if (val > qm->vfs_num) + return -EINVAL; + + /* Calculate and store curr_qm_qp_num according to the PF or VF Dev ID */ + if (!val) + qm->debug.curr_qm_qp_num = qm->qp_num; + else + qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val); + + writel(val, qm->io_base + QM_DFX_MB_CNT_VF); + writel(val, qm->io_base + QM_DFX_DB_CNT_VF); + + tmp = val | + (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + + tmp = val | + (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + return 0; +} + +static ssize_t qm_debug_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct debugfs_file *file = filp->private_data; + enum qm_debug_file index = file->index; + struct hisi_qm *qm = file_to_qm(file); + char tbuf[QM_DBG_TMP_BUF_LEN]; + u32 val; + int ret; + + ret = hisi_qm_get_dfx_access(qm); + if (ret) + return ret; + + mutex_lock(&file->lock); + switch (index) { + case CURRENT_QM: + val = current_qm_read(qm); + break; + case CURRENT_Q: + val = current_q_read(qm); + break; + case CLEAR_ENABLE: + val = clear_enable_read(qm); + break; + default: + goto err_input; + } + mutex_unlock(&file->lock); + + hisi_qm_put_dfx_access(qm); + ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val); + return simple_read_from_buffer(buf, count, pos, tbuf, ret); + +err_input: + mutex_unlock(&file->lock); + hisi_qm_put_dfx_access(qm); + return -EINVAL; +} + +static ssize_t qm_debug_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct debugfs_file *file = filp->private_data; + enum qm_debug_file index = file->index; + struct hisi_qm *qm = file_to_qm(file); + unsigned long val; + char tbuf[QM_DBG_TMP_BUF_LEN]; + int len, ret; + + if (*pos != 0) + return 0; + + if (count >= QM_DBG_TMP_BUF_LEN) + return -ENOSPC; + + len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf, + count); + if (len < 0) + return len; + + tbuf[len] = '\0'; + if (kstrtoul(tbuf, 0, &val)) + return -EFAULT; + + ret = hisi_qm_get_dfx_access(qm); + if (ret) + return ret; + + mutex_lock(&file->lock); + switch (index) { + case CURRENT_QM: + ret = current_qm_write(qm, val); + break; + case CURRENT_Q: + ret = current_q_write(qm, val); + break; + case CLEAR_ENABLE: + ret = clear_enable_write(qm, val); + break; + default: + ret = -EINVAL; + } + mutex_unlock(&file->lock); + + hisi_qm_put_dfx_access(qm); + + if (ret) + return ret; + + return count; +} + +static const struct file_operations qm_debug_fops = { + .owner = THIS_MODULE, + .open =
simple_open, + .read = qm_debug_read, + .write = qm_debug_write, +}; + +static void dfx_regs_uninit(struct hisi_qm *qm, + struct dfx_diff_registers *dregs, int reg_len) +{ + int i; + + /* Set the pointers to NULL to prevent a double free */ + for (i = 0; i < reg_len; i++) { + kfree(dregs[i].regs); + dregs[i].regs = NULL; + } + kfree(dregs); +} + +static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm, + const struct dfx_diff_registers *cregs, u32 reg_len) +{ + struct dfx_diff_registers *diff_regs; + u32 j, base_offset; + int i; + + diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL); + if (!diff_regs) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < reg_len; i++) { + if (!cregs[i].reg_len) + continue; + + diff_regs[i].reg_offset = cregs[i].reg_offset; + diff_regs[i].reg_len = cregs[i].reg_len; + diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len, + GFP_KERNEL); + if (!diff_regs[i].regs) + goto alloc_error; + + for (j = 0; j < diff_regs[i].reg_len; j++) { + base_offset = diff_regs[i].reg_offset + + j * QM_DFX_REGS_LEN; + diff_regs[i].regs[j] = readl(qm->io_base + base_offset); + } + } + + return diff_regs; + +alloc_error: + while (i > 0) { + i--; + kfree(diff_regs[i].regs); + } + kfree(diff_regs); + return ERR_PTR(-ENOMEM); +} + +static int qm_diff_regs_init(struct hisi_qm *qm, + struct dfx_diff_registers *dregs, u32 reg_len) +{ + qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs)); + if (IS_ERR(qm->debug.qm_diff_regs)) + return PTR_ERR(qm->debug.qm_diff_regs); + + qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len); + if (IS_ERR(qm->debug.acc_diff_regs)) { + dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs)); + return PTR_ERR(qm->debug.acc_diff_regs); + } + + return 0; +} + +static void qm_last_regs_uninit(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + + if (qm->fun_type == QM_HW_VF || !debug->qm_last_words) + return; + + kfree(debug->qm_last_words); + debug->qm_last_words = NULL; +} + +static int qm_last_regs_init(struct hisi_qm *qm) +{ + int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs); + struct qm_debug *debug = &qm->debug; + int i; + + if (qm->fun_type == QM_HW_VF) + return 0; + + debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int), GFP_KERNEL); + if (!debug->qm_last_words) + return -ENOMEM; + + for (i = 0; i < dfx_regs_num; i++) { + debug->qm_last_words[i] = readl_relaxed(qm->io_base + + qm_dfx_regs[i].offset); + } + + return 0; +} + +static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len) +{ + dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len); + dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs)); +} + +/** + * hisi_qm_regs_debugfs_init() - Allocate memory for registers. + * @qm: device qm handle. + * @dregs: diff registers handle. + * @reg_len: diff registers region length. + */ +int hisi_qm_regs_debugfs_init(struct hisi_qm *qm, + struct dfx_diff_registers *dregs, u32 reg_len) +{ + int ret; + + if (!qm || !dregs) + return -EINVAL; + + if (qm->fun_type != QM_HW_PF) + return 0; + + ret = qm_last_regs_init(qm); + if (ret) { + dev_info(&qm->pdev->dev, "failed to init qm words memory!\n"); + return ret; + } + + ret = qm_diff_regs_init(qm, dregs, reg_len); + if (ret) { + qm_last_regs_uninit(qm); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_init); + +/** + * hisi_qm_regs_debugfs_uninit() - Free memory for registers. + * @qm: device qm handle. + * @reg_len: diff registers region length.
+ */ +void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len) +{ + if (!qm || qm->fun_type != QM_HW_PF) + return; + + qm_diff_regs_uninit(qm, reg_len); + qm_last_regs_uninit(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_uninit); + +/** + * hisi_qm_acc_diff_regs_dump() - Dump registers' values. + * @qm: device qm handle. + * @s: Debugfs file handle. + * @dregs: diff registers handle. + * @regs_len: diff registers region length. + */ +void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s, + struct dfx_diff_registers *dregs, u32 regs_len) +{ + u32 j, val, base_offset; + int i, ret; + + if (!qm || !s || !dregs) + return; + + ret = hisi_qm_get_dfx_access(qm); + if (ret) + return; + + down_read(&qm->qps_lock); + for (i = 0; i < regs_len; i++) { + if (!dregs[i].reg_len) + continue; + + for (j = 0; j < dregs[i].reg_len; j++) { + base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN; + val = readl(qm->io_base + base_offset); + if (val != dregs[i].regs[j]) + seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n", + base_offset, dregs[i].regs[j], val); + } + } + up_read(&qm->qps_lock); + + hisi_qm_put_dfx_access(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump); + +void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + struct pci_dev *pdev = qm->pdev; + u32 val; + int i; + + if (qm->fun_type == QM_HW_VF || !debug->qm_last_words) + return; + + for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) { + val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset); + if (debug->qm_last_words[i] != val) + pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n", + qm_dfx_regs[i].name, debug->qm_last_words[i], val); + } +} + +static int qm_diff_regs_show(struct seq_file *s, void *unused) +{ + struct hisi_qm *qm = s->private; + + hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs, + ARRAY_SIZE(qm_diff_regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(qm_diff_regs); + +static ssize_t qm_status_read(struct file *filp, char __user *buffer, + size_t count, loff_t *pos) +{ + struct hisi_qm *qm = filp->private_data; + char buf[QM_DBG_READ_LEN]; + int val, len; + + val = atomic_read(&qm->status.flags); + len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]); + + return simple_read_from_buffer(buffer, count, pos, buf, len); +} + +static const struct file_operations qm_status_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = qm_status_read, +}; + +static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, + enum qm_debug_file index) +{ + struct debugfs_file *file = qm->debug.files + index; + + debugfs_create_file(qm_debug_file_name[index], 0600, dir, file, + &qm_debug_fops); + + file->index = index; + mutex_init(&file->lock); + file->debug = &qm->debug; +} + +static int qm_debugfs_atomic64_set(void *data, u64 val) +{ + if (val) + return -EINVAL; + + atomic64_set((atomic64_t *)data, 0); + + return 0; +} + +static int qm_debugfs_atomic64_get(void *data, u64 *val) +{ + *val = atomic64_read((atomic64_t *)data); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get, + qm_debugfs_atomic64_set, "%llu\n"); + +/** + * hisi_qm_debug_init() - Initialize qm related debugfs files. + * @qm: The qm for which we want to add debugfs files. + * + * Create qm related debugfs files.
+ */ +void hisi_qm_debug_init(struct hisi_qm *qm) +{ + struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs; + struct qm_dfx *dfx = &qm->debug.dfx; + struct dentry *qm_d; + void *data; + int i; + + qm_d = debugfs_create_dir("qm", qm->debug.debug_root); + qm->debug.qm_d = qm_d; + + /* only show this in PF */ + if (qm->fun_type == QM_HW_PF) { + qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM); + for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++) + qm_create_debugfs_file(qm, qm->debug.qm_d, i); + } + + if (qm_regs) + debugfs_create_file("diff_regs", 0444, qm->debug.qm_d, + qm, &qm_diff_regs_fops); + + debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); + + debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops); + + debugfs_create_file("status", 0444, qm->debug.qm_d, qm, + &qm_status_fops); + for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) { + data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset); + debugfs_create_file(qm_dfx_files[i].name, + 0644, + qm_d, + data, + &qm_atomic64_ops); + } + + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) + hisi_qm_set_algqos_init(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_debug_init); + +/** + * hisi_qm_debug_regs_clear() - clear qm debug related registers. + * @qm: The qm for which we want to clear its debug registers. + */ +void hisi_qm_debug_regs_clear(struct hisi_qm *qm) +{ + const struct debugfs_reg32 *regs; + int i; + + /* clear current_qm */ + writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); + writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); + + /* clear current_q */ + writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + /* + * These registers are read-to-clear: enable clearing, then read + * them to reset the counters. + */ + writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE); + + regs = qm_dfx_regs; + for (i = 0; i < CNT_CYC_REGS_NUM; i++) { + readl(qm->io_base + regs->offset); + regs++; + } + + /* clear clear_enable */ + writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE); +} +EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear); diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index ef02dadd6217098fe963416973e82a00db3970c4..8ede77310dc528c542c1d12dea0377ed9157c11f 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -147,6 +147,16 @@ struct hpre_asym_request { struct timespec64 req_time; }; +static inline unsigned int hpre_align_sz(void) +{ + return ((crypto_dma_align() - 1) | (HPRE_ALIGN_SZ - 1)) + 1; +} + +static inline unsigned int hpre_align_pd(void) +{ + return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1); +} + static int hpre_alloc_req_id(struct hpre_ctx *ctx) { unsigned long flags; @@ -517,7 +527,7 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa) } tmp = akcipher_request_ctx(akreq); - h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + h_req = PTR_ALIGN(tmp, hpre_align_sz()); h_req->cb = hpre_rsa_cb; h_req->areq.rsa = akreq; msg = &h_req->req; @@ -531,7 +541,7 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa) } tmp = kpp_request_ctx(kreq); - h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + h_req = PTR_ALIGN(tmp, hpre_align_sz()); h_req->cb = hpre_dh_cb; h_req->areq.dh = kreq; msg = &h_req->req; @@ -582,7 +592,7 @@ static int hpre_dh_compute_value(struct kpp_request *req) struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); void *tmp = kpp_request_ctx(req); - struct hpre_asym_request
*hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz()); struct hpre_sqe *msg = &hpre_req->req; int ret; @@ -740,6 +750,8 @@ static int hpre_dh_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); } @@ -783,7 +795,7 @@ static int hpre_rsa_enc(struct akcipher_request *req) struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); void *tmp = akcipher_request_ctx(req); - struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz()); struct hpre_sqe *msg = &hpre_req->req; int ret; @@ -831,7 +843,7 @@ static int hpre_rsa_dec(struct akcipher_request *req) struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); void *tmp = akcipher_request_ctx(req); - struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz()); struct hpre_sqe *msg = &hpre_req->req; int ret; @@ -1165,6 +1177,9 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) return PTR_ERR(ctx->rsa.soft_tfm); } + akcipher_set_reqsize(tfm, sizeof(struct hpre_asym_request) + + hpre_align_pd()); + ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); if (ret) crypto_free_akcipher(ctx->rsa.soft_tfm); @@ -1485,7 +1500,7 @@ static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx, } tmp = kpp_request_ctx(req); - h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + h_req = PTR_ALIGN(tmp, hpre_align_sz()); h_req->cb = hpre_ecdh_cb; h_req->areq.ecdh = req; msg = &h_req->req; @@ -1566,7 +1581,7 @@ static int hpre_ecdh_compute_value(struct kpp_request *req) struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); struct device *dev = ctx->dev; void *tmp = kpp_request_ctx(req); - struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz()); struct hpre_sqe *msg = &hpre_req->req; int ret; @@ -1617,6 +1632,8 @@ static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm) ctx->curve_id = ECC_CURVE_NIST_P192; + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } @@ -1626,6 +1643,8 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) ctx->curve_id = ECC_CURVE_NIST_P256; + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } @@ -1635,6 +1654,8 @@ static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm) ctx->curve_id = ECC_CURVE_NIST_P384; + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } @@ -1791,7 +1812,7 @@ static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx, } tmp = kpp_request_ctx(req); - h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + h_req = PTR_ALIGN(tmp, hpre_align_sz()); h_req->cb = hpre_curve25519_cb; h_req->areq.curve25519 = req; msg = &h_req->req; @@ -1912,7 +1933,7 @@ static int hpre_curve25519_compute_value(struct kpp_request *req) struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); struct device *dev = ctx->dev; void *tmp = kpp_request_ctx(req); - struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz()); struct hpre_sqe *msg = 
&hpre_req->req; int ret; @@ -1961,6 +1982,8 @@ static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } @@ -1981,7 +2004,6 @@ static struct akcipher_alg rsa = { .max_size = hpre_rsa_max_size, .init = hpre_rsa_init_tfm, .exit = hpre_rsa_exit_tfm, - .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, .base = { .cra_ctxsize = sizeof(struct hpre_ctx), .cra_priority = HPRE_CRYPTO_ALG_PRI, @@ -1998,7 +2020,6 @@ static struct kpp_alg dh = { .max_size = hpre_dh_max_size, .init = hpre_dh_init_tfm, .exit = hpre_dh_exit_tfm, - .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, .base = { .cra_ctxsize = sizeof(struct hpre_ctx), .cra_priority = HPRE_CRYPTO_ALG_PRI, @@ -2016,7 +2037,6 @@ static struct kpp_alg ecdh_curves[] = { .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p192_init_tfm, .exit = hpre_ecdh_exit_tfm, - .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, .base = { .cra_ctxsize = sizeof(struct hpre_ctx), .cra_priority = HPRE_CRYPTO_ALG_PRI, @@ -2031,7 +2051,6 @@ static struct kpp_alg ecdh_curves[] = { .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p256_init_tfm, .exit = hpre_ecdh_exit_tfm, - .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, .base = { .cra_ctxsize = sizeof(struct hpre_ctx), .cra_priority = HPRE_CRYPTO_ALG_PRI, @@ -2046,7 +2065,6 @@ static struct kpp_alg ecdh_curves[] = { .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p384_init_tfm, .exit = hpre_ecdh_exit_tfm, - .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, .base = { .cra_ctxsize = sizeof(struct hpre_ctx), .cra_priority = HPRE_CRYPTO_ALG_PRI, @@ -2064,7 +2082,6 @@ static struct kpp_alg curve25519_alg = { .max_size = hpre_curve25519_max_size, .init = hpre_curve25519_init_tfm, .exit = hpre_curve25519_exit_tfm, - .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, .base = { .cra_ctxsize = sizeof(struct hpre_ctx), .cra_priority = HPRE_CRYPTO_ALG_PRI, diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 471e5ca720f5781551cffe39744deb0c800d15ac..923f9c2792654f7e2ed739e6041159ba46f6d250 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -1101,8 +1101,7 @@ static int hpre_debugfs_init(struct hisi_qm *qm) qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; - ret = hisi_qm_diff_regs_init(qm, hpre_diff_regs, - ARRAY_SIZE(hpre_diff_regs)); + ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs)); if (ret) { dev_warn(dev, "Failed to init HPRE diff regs!\n"); goto debugfs_remove; @@ -1121,7 +1120,7 @@ static int hpre_debugfs_init(struct hisi_qm *qm) return 0; failed_to_create: - hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); + hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); debugfs_remove: debugfs_remove_recursive(qm->debug.debug_root); return ret; @@ -1129,7 +1128,7 @@ debugfs_remove: static void hpre_debugfs_exit(struct hisi_qm *qm) { - hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); + hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); debugfs_remove_recursive(qm->debug.debug_root); } @@ -1437,18 +1436,12 @@ err_with_qm_init: static void hpre_remove(struct pci_dev *pdev) { struct hisi_qm *qm = pci_get_drvdata(pdev); - int ret; hisi_qm_pm_uninit(qm); 
hisi_qm_wait_task_finish(qm, &hpre_devices); hisi_qm_alg_unregister(qm, &hpre_devices); - if (qm->fun_type == QM_HW_PF && qm->vfs_num) { - ret = hisi_qm_sriov_disable(pdev, true); - if (ret) { - pci_err(pdev, "Disable SRIOV fail!\n"); - return; - } - } + if (qm->fun_type == QM_HW_PF && qm->vfs_num) + hisi_qm_sriov_disable(pdev, true); hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 8b387de69d229b7cb6059a6bc5094e277a4be16b..007ac7a69ce74759c2a973cf38c6d3894097b4b0 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -16,6 +16,7 @@ #include #include #include +#include "qm_common.h" /* eq/aeq irq enable */ #define QM_VF_AEQ_INT_SOURCE 0x0 @@ -119,8 +120,6 @@ #define QM_SQC_VFT_NUM_SHIFT_V2 45 #define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0) -#define QM_DFX_CNT_CLR_CE 0x100118 - #define QM_ABNORMAL_INT_SOURCE 0x100000 #define QM_ABNORMAL_INT_MASK 0x100004 #define QM_ABNORMAL_INT_MASK_VALUE 0x7fff @@ -187,14 +186,6 @@ #define QM_VF_RESET_WAIT_TIMEOUT_US \ (QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT) -#define QM_DFX_MB_CNT_VF 0x104010 -#define QM_DFX_DB_CNT_VF 0x104020 -#define QM_DFX_SQE_CNT_VF_SQN 0x104030 -#define QM_DFX_CQE_CNT_VF_CQN 0x104040 -#define QM_DFX_QN_SHIFT 16 -#define CURRENT_FUN_MASK GENMASK(5, 0) -#define CURRENT_Q_MASK GENMASK(31, 16) - #define POLL_PERIOD 10 #define POLL_TIMEOUT 1000 #define WAIT_PERIOD_US_MAX 200 @@ -211,19 +202,15 @@ #define QMC_ALIGN(sz) ALIGN(sz, 32) #define QM_DBG_READ_LEN 256 -#define QM_DBG_WRITE_LEN 1024 -#define QM_DBG_TMP_BUF_LEN 22 #define QM_PCI_COMMAND_INVALID ~0 #define QM_RESET_STOP_TX_OFFSET 1 #define QM_RESET_STOP_RX_OFFSET 2 #define WAIT_PERIOD 20 #define REMOVE_WAIT_DELAY 10 -#define QM_SQE_ADDR_MASK GENMASK(7, 0) #define QM_DRIVER_REMOVING 0 #define QM_RST_SCHED 1 -#define QM_RESETTING 2 #define QM_QOS_PARAM_NUM 2 #define QM_QOS_VAL_NUM 1 #define QM_QOS_BDF_PARAM_NUM 4 @@ -250,16 +237,6 @@ #define QM_QOS_MIN_CIR_B 100 #define QM_QOS_MAX_CIR_U 6 #define QM_QOS_MAX_CIR_S 11 -#define QM_QOS_VAL_MAX_LEN 32 -#define QM_DFX_BASE 0x0100000 -#define QM_DFX_STATE1 0x0104000 -#define QM_DFX_STATE2 0x01040C8 -#define QM_DFX_COMMON 0x0000 -#define QM_DFX_BASE_LEN 0x5A -#define QM_DFX_STATE1_LEN 0x2E -#define QM_DFX_STATE2_LEN 0x11 -#define QM_DFX_COMMON_LEN 0xC3 -#define QM_DFX_REGS_LEN 4UL #define QM_AUTOSUSPEND_DELAY 3000 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ @@ -359,7 +336,7 @@ static const struct hisi_qm_cap_info qm_cap_info_vf[] = { static const struct hisi_qm_cap_info qm_basic_info[] = { {QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400}, {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400}, - {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(15, 0), 0x800, 0x4000800, 0x4000800}, + {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800}, {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400}, {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000}, {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001}, @@ -369,73 +346,6 @@ static const struct hisi_qm_cap_info qm_basic_info[] = { {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3}, }; -struct qm_cqe { - __le32 rsvd0; - __le16 cmd_id; - __le16 rsvd1; - __le16 sq_head; - __le16 sq_num; - __le16 rsvd2; - __le16 w7; -}; - -struct qm_eqe { - __le32 dw0; -}; - -struct qm_aeqe { - __le32 dw0; -}; - -struct qm_sqc { - __le16 head; - __le16 tail; - __le32 
base_l; - __le32 base_h; - __le32 dw3; - __le16 w8; - __le16 rsvd0; - __le16 pasid; - __le16 w11; - __le16 cq_num; - __le16 w13; - __le32 rsvd1; -}; - -struct qm_cqc { - __le16 head; - __le16 tail; - __le32 base_l; - __le32 base_h; - __le32 dw3; - __le16 w8; - __le16 rsvd0; - __le16 pasid; - __le16 w11; - __le32 dw6; - __le32 rsvd1; -}; - -struct qm_eqc { - __le16 head; - __le16 tail; - __le32 base_l; - __le32 base_h; - __le32 dw3; - __le32 rsvd[2]; - __le32 dw6; -}; - -struct qm_aeqc { - __le16 head; - __le16 tail; - __le32 base_l; - __le32 base_h; - __le32 dw3; - __le32 rsvd[2]; - __le32 dw6; -}; - struct qm_mailbox { __le16 w0; __le16 queue_num; @@ -468,25 +378,6 @@ struct hisi_qm_hw_ops { int (*set_msi)(struct hisi_qm *qm, bool set); }; -struct qm_dfx_item { - const char *name; - u32 offset; -}; - -static struct qm_dfx_item qm_dfx_files[] = { - {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)}, - {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)}, - {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)}, - {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)}, - {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)}, -}; - -static const char * const qm_debug_file_name[] = { - [CURRENT_QM] = "current_qm", - [CURRENT_Q] = "current_q", - [CLEAR_ENABLE] = "clear_enable", -}; - struct hisi_qm_hw_error { u32 int_msk; const char *msg; @@ -511,23 +402,6 @@ static const struct hisi_qm_hw_error qm_hw_error[] = { { /* sentinel */ } }; -/* define the QM's dfx regs region and region length */ -static struct dfx_diff_registers qm_diff_regs[] = { - { - .reg_offset = QM_DFX_BASE, - .reg_len = QM_DFX_BASE_LEN, - }, { - .reg_offset = QM_DFX_STATE1, - .reg_len = QM_DFX_STATE1_LEN, - }, { - .reg_offset = QM_DFX_STATE2, - .reg_len = QM_DFX_STATE2_LEN, - }, { - .reg_offset = QM_DFX_COMMON, - .reg_len = QM_DFX_COMMON_LEN, - }, -}; - static const char * const qm_db_timeout[] = { "sq", "cq", "eq", "aeq", }; @@ -536,10 +410,6 @@ static const char * const qm_fifo_overflow[] = { "cq", "eq", "aeq", }; -static const char * const qm_s[] = { - "init", "start", "close", "stop", -}; - static const char * const qp_s[] = { "none", "init", "start", "stop", "close", }; @@ -909,8 +779,8 @@ static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits, u32 depth; depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver); - *high_bits = depth & QM_XQ_DEPTH_MASK; - *low_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK; + *low_bits = depth & QM_XQ_DEPTH_MASK; + *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK; } static u32 qm_get_irq_num(struct hisi_qm *qm) @@ -1328,996 +1198,155 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, case SHAPER_VFT: if (factor) { tmp = factor->cir_b | - (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) | - (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) | - (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) | - (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT); - } - break; - } - } - - writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); - writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); -} - -static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, - u32 fun_num, u32 base, u32 number) -{ - struct qm_shaper_factor *factor = NULL; - unsigned int val; - int ret; - - if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) - factor = &qm->factor[fun_num]; - - ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, - val & BIT(0), POLL_PERIOD, - POLL_TIMEOUT); - if (ret) - return ret; - - 
writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); - writel(type, qm->io_base + QM_VFT_CFG_TYPE); - if (type == SHAPER_VFT) - fun_num |= base << QM_SHAPER_VFT_OFFSET; - - writel(fun_num, qm->io_base + QM_VFT_CFG); - - qm_vft_data_cfg(qm, type, base, number, factor); - - writel(0x0, qm->io_base + QM_VFT_CFG_RDY); - writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); - - return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, - val & BIT(0), POLL_PERIOD, - POLL_TIMEOUT); -} - -static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num) -{ - u32 qos = qm->factor[fun_num].func_qos; - int ret, i; - - ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]); - if (ret) { - dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n"); - return ret; - } - writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG); - for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { - /* The base number of queue reuse for different alg type */ - ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1); - if (ret) - return ret; - } - - return 0; -} - -/* The config should be conducted after qm_dev_mem_reset() */ -static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, - u32 number) -{ - int ret, i; - - for (i = SQC_VFT; i <= CQC_VFT; i++) { - ret = qm_set_vft_common(qm, i, fun_num, base, number); - if (ret) - return ret; - } - - /* init default shaper qos val */ - if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { - ret = qm_shaper_init_vft(qm, fun_num); - if (ret) - goto back_sqc_cqc; - } - - return 0; -back_sqc_cqc: - for (i = SQC_VFT; i <= CQC_VFT; i++) - qm_set_vft_common(qm, i, fun_num, 0, 0); - - return ret; -} - -static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) -{ - u64 sqc_vft; - int ret; - - ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); - if (ret) - return ret; - - sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | - ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); - *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); - *number = (QM_SQC_VFT_NUM_MASK_v2 & - (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; - - return 0; -} - -static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num) -{ - u32 remain_q_num, vfq_num; - u32 num_vfs = qm->vfs_num; - - vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs; - if (vfq_num >= qm->max_qp_num) - return qm->max_qp_num; - - remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs; - if (vfq_num + remain_q_num <= qm->max_qp_num) - return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num; - - /* - * if vfq_num + remain_q_num > max_qp_num, the last VFs, - * each with one more queue. - */ - return fun_num + remain_q_num > num_vfs ? 
vfq_num + 1 : vfq_num; -} - -static struct hisi_qm *file_to_qm(struct debugfs_file *file) -{ - struct qm_debug *debug = file->debug; - - return container_of(debug, struct hisi_qm, debug); -} - -static u32 current_q_read(struct hisi_qm *qm) -{ - return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT; -} - -static int current_q_write(struct hisi_qm *qm, u32 val) -{ - u32 tmp; - - if (val >= qm->debug.curr_qm_qp_num) - return -EINVAL; - - tmp = val << QM_DFX_QN_SHIFT | - (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK); - writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); - - tmp = val << QM_DFX_QN_SHIFT | - (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK); - writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); - - return 0; -} - -static u32 clear_enable_read(struct hisi_qm *qm) -{ - return readl(qm->io_base + QM_DFX_CNT_CLR_CE); -} - -/* rd_clr_ctrl 1 enable read clear, otherwise 0 disable it */ -static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl) -{ - if (rd_clr_ctrl > 1) - return -EINVAL; - - writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE); - - return 0; -} - -static u32 current_qm_read(struct hisi_qm *qm) -{ - return readl(qm->io_base + QM_DFX_MB_CNT_VF); -} - -static int current_qm_write(struct hisi_qm *qm, u32 val) -{ - u32 tmp; - - if (val > qm->vfs_num) - return -EINVAL; - - /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ - if (!val) - qm->debug.curr_qm_qp_num = qm->qp_num; - else - qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val); - - writel(val, qm->io_base + QM_DFX_MB_CNT_VF); - writel(val, qm->io_base + QM_DFX_DB_CNT_VF); - - tmp = val | - (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); - writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); - - tmp = val | - (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); - writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); - - return 0; -} - -static ssize_t qm_debug_read(struct file *filp, char __user *buf, - size_t count, loff_t *pos) -{ - struct debugfs_file *file = filp->private_data; - enum qm_debug_file index = file->index; - struct hisi_qm *qm = file_to_qm(file); - char tbuf[QM_DBG_TMP_BUF_LEN]; - u32 val; - int ret; - - ret = hisi_qm_get_dfx_access(qm); - if (ret) - return ret; - - mutex_lock(&file->lock); - switch (index) { - case CURRENT_QM: - val = current_qm_read(qm); - break; - case CURRENT_Q: - val = current_q_read(qm); - break; - case CLEAR_ENABLE: - val = clear_enable_read(qm); - break; - default: - goto err_input; - } - mutex_unlock(&file->lock); - - hisi_qm_put_dfx_access(qm); - ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val); - return simple_read_from_buffer(buf, count, pos, tbuf, ret); - -err_input: - mutex_unlock(&file->lock); - hisi_qm_put_dfx_access(qm); - return -EINVAL; -} - -static ssize_t qm_debug_write(struct file *filp, const char __user *buf, - size_t count, loff_t *pos) -{ - struct debugfs_file *file = filp->private_data; - enum qm_debug_file index = file->index; - struct hisi_qm *qm = file_to_qm(file); - unsigned long val; - char tbuf[QM_DBG_TMP_BUF_LEN]; - int len, ret; - - if (*pos != 0) - return 0; - - if (count >= QM_DBG_TMP_BUF_LEN) - return -ENOSPC; - - len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf, - count); - if (len < 0) - return len; - - tbuf[len] = '\0'; - if (kstrtoul(tbuf, 0, &val)) - return -EFAULT; - - ret = hisi_qm_get_dfx_access(qm); - if (ret) - return ret; - - mutex_lock(&file->lock); - switch (index) { - case CURRENT_QM: - ret = current_qm_write(qm, val); 
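/* current_qm_write() bounds-checks the selected function index against vfs_num, caches that function's queue count in debug.curr_qm_qp_num, and mirrors the selection into both the MB/DB and the SQE/CQE counter registers. */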
- break; - case CURRENT_Q: - ret = current_q_write(qm, val); - break; - case CLEAR_ENABLE: - ret = clear_enable_write(qm, val); - break; - default: - ret = -EINVAL; - } - mutex_unlock(&file->lock); - - hisi_qm_put_dfx_access(qm); - - if (ret) - return ret; - - return count; -} - -static const struct file_operations qm_debug_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = qm_debug_read, - .write = qm_debug_write, -}; - -#define CNT_CYC_REGS_NUM 10 -static const struct debugfs_reg32 qm_dfx_regs[] = { - /* XXX_CNT are reading clear register */ - {"QM_ECC_1BIT_CNT ", 0x104000ull}, - {"QM_ECC_MBIT_CNT ", 0x104008ull}, - {"QM_DFX_MB_CNT ", 0x104018ull}, - {"QM_DFX_DB_CNT ", 0x104028ull}, - {"QM_DFX_SQE_CNT ", 0x104038ull}, - {"QM_DFX_CQE_CNT ", 0x104048ull}, - {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull}, - {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull}, - {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull}, - {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull}, - {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull}, - {"QM_ECC_1BIT_INF ", 0x104004ull}, - {"QM_ECC_MBIT_INF ", 0x10400cull}, - {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull}, - {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull}, - {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull}, - {"QM_DFX_FF_ST0 ", 0x1040c8ull}, - {"QM_DFX_FF_ST1 ", 0x1040ccull}, - {"QM_DFX_FF_ST2 ", 0x1040d0ull}, - {"QM_DFX_FF_ST3 ", 0x1040d4ull}, - {"QM_DFX_FF_ST4 ", 0x1040d8ull}, - {"QM_DFX_FF_ST5 ", 0x1040dcull}, - {"QM_DFX_FF_ST6 ", 0x1040e0ull}, - {"QM_IN_IDLE_ST ", 0x1040e4ull}, -}; - -static const struct debugfs_reg32 qm_vf_dfx_regs[] = { - {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull}, -}; - -/** - * hisi_qm_regs_dump() - Dump registers's value. - * @s: debugfs file handle. - * @regset: accelerator registers information. - * - * Dump accelerator registers. - */ -void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset) -{ - struct pci_dev *pdev = to_pci_dev(regset->dev); - struct hisi_qm *qm = pci_get_drvdata(pdev); - const struct debugfs_reg32 *regs = regset->regs; - int regs_len = regset->nregs; - int i, ret; - u32 val; - - ret = hisi_qm_get_dfx_access(qm); - if (ret) - return; - - for (i = 0; i < regs_len; i++) { - val = readl(regset->base + regs[i].offset); - seq_printf(s, "%s= 0x%08x\n", regs[i].name, val); - } - - hisi_qm_put_dfx_access(qm); -} -EXPORT_SYMBOL_GPL(hisi_qm_regs_dump); - -static int qm_regs_show(struct seq_file *s, void *unused) -{ - struct hisi_qm *qm = s->private; - struct debugfs_regset32 regset; - - if (qm->fun_type == QM_HW_PF) { - regset.regs = qm_dfx_regs; - regset.nregs = ARRAY_SIZE(qm_dfx_regs); - } else { - regset.regs = qm_vf_dfx_regs; - regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs); - } - - regset.base = qm->io_base; - regset.dev = &qm->pdev->dev; - - hisi_qm_regs_dump(s, &regset); - - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(qm_regs); - -static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm, - const struct dfx_diff_registers *cregs, int reg_len) -{ - struct dfx_diff_registers *diff_regs; - u32 j, base_offset; - int i; - - diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL); - if (!diff_regs) - return ERR_PTR(-ENOMEM); - - for (i = 0; i < reg_len; i++) { - if (!cregs[i].reg_len) - continue; - - diff_regs[i].reg_offset = cregs[i].reg_offset; - diff_regs[i].reg_len = cregs[i].reg_len; - diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len, - GFP_KERNEL); - if (!diff_regs[i].regs) - goto alloc_error; - - for (j = 0; j < diff_regs[i].reg_len; j++) { - base_offset = diff_regs[i].reg_offset + - j * QM_DFX_REGS_LEN; - diff_regs[i].regs[j] = readl(qm->io_base +
base_offset); - } - } - - return diff_regs; - -alloc_error: - while (i > 0) { - i--; - kfree(diff_regs[i].regs); - } - kfree(diff_regs); - return ERR_PTR(-ENOMEM); -} - -static void dfx_regs_uninit(struct hisi_qm *qm, - struct dfx_diff_registers *dregs, int reg_len) -{ - int i; - - /* Setting the pointer is NULL to prevent double free */ - for (i = 0; i < reg_len; i++) { - kfree(dregs[i].regs); - dregs[i].regs = NULL; - } - kfree(dregs); - dregs = NULL; -} - -/** - * hisi_qm_diff_regs_init() - Allocate memory for registers. - * @qm: device qm handle. - * @dregs: diff registers handle. - * @reg_len: diff registers region length. - */ -int hisi_qm_diff_regs_init(struct hisi_qm *qm, - struct dfx_diff_registers *dregs, int reg_len) -{ - if (!qm || !dregs || reg_len <= 0) - return -EINVAL; - - if (qm->fun_type != QM_HW_PF) - return 0; - - qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, - ARRAY_SIZE(qm_diff_regs)); - if (IS_ERR(qm->debug.qm_diff_regs)) - return PTR_ERR(qm->debug.qm_diff_regs); - - qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len); - if (IS_ERR(qm->debug.acc_diff_regs)) { - dfx_regs_uninit(qm, qm->debug.qm_diff_regs, - ARRAY_SIZE(qm_diff_regs)); - return PTR_ERR(qm->debug.acc_diff_regs); - } - - return 0; -} -EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_init); - -/** - * hisi_qm_diff_regs_uninit() - Free memory for registers. - * @qm: device qm handle. - * @reg_len: diff registers region length. - */ -void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len) -{ - if (!qm || reg_len <= 0 || qm->fun_type != QM_HW_PF) - return; - - dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len); - dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs)); -} -EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_uninit); - -/** - * hisi_qm_acc_diff_regs_dump() - Dump registers's value. - * @qm: device qm handle. - * @s: Debugfs file handle. - * @dregs: diff registers handle. - * @regs_len: diff registers region length. 
- */ -void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s, - struct dfx_diff_registers *dregs, int regs_len) -{ - u32 j, val, base_offset; - int i, ret; - - if (!qm || !s || !dregs || regs_len <= 0) - return; - - ret = hisi_qm_get_dfx_access(qm); - if (ret) - return; - - down_read(&qm->qps_lock); - for (i = 0; i < regs_len; i++) { - if (!dregs[i].reg_len) - continue; - - for (j = 0; j < dregs[i].reg_len; j++) { - base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN; - val = readl(qm->io_base + base_offset); - if (val != dregs[i].regs[j]) - seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n", - base_offset, dregs[i].regs[j], val); - } - } - up_read(&qm->qps_lock); - - hisi_qm_put_dfx_access(qm); -} -EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump); - -static int qm_diff_regs_show(struct seq_file *s, void *unused) -{ - struct hisi_qm *qm = s->private; - - hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs, - ARRAY_SIZE(qm_diff_regs)); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(qm_diff_regs); - -static ssize_t qm_cmd_read(struct file *filp, char __user *buffer, - size_t count, loff_t *pos) -{ - char buf[QM_DBG_READ_LEN]; - int len; - - len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", - "Please echo help to cmd to get help information"); - - return simple_read_from_buffer(buffer, count, pos, buf, len); -} - -static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, - dma_addr_t *dma_addr) -{ - struct device *dev = &qm->pdev->dev; - void *ctx_addr; - - ctx_addr = kzalloc(ctx_size, GFP_KERNEL); - if (!ctx_addr) - return ERR_PTR(-ENOMEM); - - *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, *dma_addr)) { - dev_err(dev, "DMA mapping error!\n"); - kfree(ctx_addr); - return ERR_PTR(-ENOMEM); - } - - return ctx_addr; -} - -static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, - const void *ctx_addr, dma_addr_t *dma_addr) -{ - struct device *dev = &qm->pdev->dev; - - dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE); - kfree(ctx_addr); -} - -static void dump_show(struct hisi_qm *qm, void *info, - unsigned int info_size, char *info_name) -{ - struct device *dev = &qm->pdev->dev; - u8 *info_curr = info; - u32 i; -#define BYTE_PER_DW 4 - - dev_info(dev, "%s DUMP\n", info_name); - for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) { - pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW, - *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr)); - } -} - -static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) -{ - return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); -} - -static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) -{ - return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); -} - -static int qm_sqc_dump(struct hisi_qm *qm, const char *s) -{ - struct device *dev = &qm->pdev->dev; - struct qm_sqc *sqc, *sqc_curr; - dma_addr_t sqc_dma; - u32 qp_id; - int ret; - - if (!s) - return -EINVAL; - - ret = kstrtou32(s, 0, &qp_id); - if (ret || qp_id >= qm->qp_num) { - dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1); - return -EINVAL; - } - - sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma); - if (IS_ERR(sqc)) - return PTR_ERR(sqc); - - ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id); - if (ret) { - down_read(&qm->qps_lock); - if (qm->sqc) { - sqc_curr = qm->sqc + qp_id; - - dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC"); - } - up_read(&qm->qps_lock); - - goto free_ctx; - } - - dump_show(qm, sqc, sizeof(*sqc), "SQC"); - -free_ctx: 
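/* Whether the mailbox dump succeeded or fell back to the soft SQC copy, the DMA-mapped context buffer from qm_ctx_alloc() is always unmapped and freed here. */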
- qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma); - return 0; -} - -static int qm_cqc_dump(struct hisi_qm *qm, const char *s) -{ - struct device *dev = &qm->pdev->dev; - struct qm_cqc *cqc, *cqc_curr; - dma_addr_t cqc_dma; - u32 qp_id; - int ret; - - if (!s) - return -EINVAL; - - ret = kstrtou32(s, 0, &qp_id); - if (ret || qp_id >= qm->qp_num) { - dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1); - return -EINVAL; - } - - cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma); - if (IS_ERR(cqc)) - return PTR_ERR(cqc); - - ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id); - if (ret) { - down_read(&qm->qps_lock); - if (qm->cqc) { - cqc_curr = qm->cqc + qp_id; - - dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC"); - } - up_read(&qm->qps_lock); - - goto free_ctx; - } - - dump_show(qm, cqc, sizeof(*cqc), "CQC"); - -free_ctx: - qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma); - return 0; -} - -static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size, - int cmd, char *name) -{ - struct device *dev = &qm->pdev->dev; - dma_addr_t xeqc_dma; - void *xeqc; - int ret; - - if (strsep(&s, " ")) { - dev_err(dev, "Please do not input extra characters!\n"); - return -EINVAL; - } - - xeqc = qm_ctx_alloc(qm, size, &xeqc_dma); - if (IS_ERR(xeqc)) - return PTR_ERR(xeqc); - - ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1); - if (ret) - goto err_free_ctx; - - dump_show(qm, xeqc, size, name); - -err_free_ctx: - qm_ctx_free(qm, size, xeqc, &xeqc_dma); - return ret; -} - -static int q_dump_param_parse(struct hisi_qm *qm, char *s, - u32 *e_id, u32 *q_id, u16 q_depth) -{ - struct device *dev = &qm->pdev->dev; - unsigned int qp_num = qm->qp_num; - char *presult; - int ret; - - presult = strsep(&s, " "); - if (!presult) { - dev_err(dev, "Please input qp number!\n"); - return -EINVAL; - } - - ret = kstrtou32(presult, 0, q_id); - if (ret || *q_id >= qp_num) { - dev_err(dev, "Please input qp num (0-%u)", qp_num - 1); - return -EINVAL; - } - - presult = strsep(&s, " "); - if (!presult) { - dev_err(dev, "Please input sqe number!\n"); - return -EINVAL; - } - - ret = kstrtou32(presult, 0, e_id); - if (ret || *e_id >= q_depth) { - dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1); - return -EINVAL; - } - - if (strsep(&s, " ")) { - dev_err(dev, "Please do not input extra characters!\n"); - return -EINVAL; - } - - return 0; -} - -static int qm_sq_dump(struct hisi_qm *qm, char *s) -{ - u16 sq_depth = qm->qp_array->cq_depth; - void *sqe, *sqe_curr; - struct hisi_qp *qp; - u32 qp_id, sqe_id; - int ret; - - ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth); - if (ret) - return ret; - - sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL); - if (!sqe) - return -ENOMEM; - - qp = &qm->qp_array[qp_id]; - memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth); - sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size); - memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, - qm->debug.sqe_mask_len); - - dump_show(qm, sqe_curr, qm->sqe_size, "SQE"); - - kfree(sqe); - - return 0; -} - -static int qm_cq_dump(struct hisi_qm *qm, char *s) -{ - struct qm_cqe *cqe_curr; - struct hisi_qp *qp; - u32 qp_id, cqe_id; - int ret; - - ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth); - if (ret) - return ret; - - qp = &qm->qp_array[qp_id]; - cqe_curr = qp->cqe + cqe_id; - dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE"); + (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) | + (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) | + (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) | + (factor->cbs_s << 
QM_SHAPER_FACTOR_CBS_S_SHIFT); + } + break; + } + } - return 0; + writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); + writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); } -static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s, - size_t size, char *name) +static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, + u32 fun_num, u32 base, u32 number) { - struct device *dev = &qm->pdev->dev; - void *xeqe; - u32 xeqe_id; + struct qm_shaper_factor *factor = NULL; + unsigned int val; int ret; - if (!s) - return -EINVAL; + if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) + factor = &qm->factor[fun_num]; - ret = kstrtou32(s, 0, &xeqe_id); + ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, + val & BIT(0), POLL_PERIOD, + POLL_TIMEOUT); if (ret) - return -EINVAL; + return ret; - if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) { - dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1); - return -EINVAL; - } else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) { - dev_err(dev, "Please input aeqe num (0-%u)", qm->eq_depth - 1); - return -EINVAL; - } + writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); + writel(type, qm->io_base + QM_VFT_CFG_TYPE); + if (type == SHAPER_VFT) + fun_num |= base << QM_SHAPER_VFT_OFFSET; - down_read(&qm->qps_lock); + writel(fun_num, qm->io_base + QM_VFT_CFG); - if (qm->eqe && !strcmp(name, "EQE")) { - xeqe = qm->eqe + xeqe_id; - } else if (qm->aeqe && !strcmp(name, "AEQE")) { - xeqe = qm->aeqe + xeqe_id; - } else { - ret = -EINVAL; - goto err_unlock; - } + qm_vft_data_cfg(qm, type, base, number, factor); - dump_show(qm, xeqe, size, name); + writel(0x0, qm->io_base + QM_VFT_CFG_RDY); + writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); -err_unlock: - up_read(&qm->qps_lock); - return ret; + return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, + val & BIT(0), POLL_PERIOD, + POLL_TIMEOUT); } -static int qm_dbg_help(struct hisi_qm *qm, char *s) +static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num) { - struct device *dev = &qm->pdev->dev; + u32 qos = qm->factor[fun_num].func_qos; + int ret, i; - if (strsep(&s, " ")) { - dev_err(dev, "Please do not input extra characters!\n"); - return -EINVAL; + ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]); + if (ret) { + dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n"); + return ret; + } + writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG); + for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { + /* The base number of queue reuse for different alg type */ + ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1); + if (ret) + return ret; } - - dev_info(dev, "available commands:\n"); - dev_info(dev, "sqc \n"); - dev_info(dev, "cqc \n"); - dev_info(dev, "eqc\n"); - dev_info(dev, "aeqc\n"); - dev_info(dev, "sq \n"); - dev_info(dev, "cq \n"); - dev_info(dev, "eq \n"); - dev_info(dev, "aeq \n"); return 0; } -static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf) +/* The config should be conducted after qm_dev_mem_reset() */ +static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, + u32 number) { - struct device *dev = &qm->pdev->dev; - char *presult, *s, *s_tmp; - int ret; - - s = kstrdup(cmd_buf, GFP_KERNEL); - if (!s) - return -ENOMEM; + int ret, i; - s_tmp = s; - presult = strsep(&s, " "); - if (!presult) { - ret = -EINVAL; - goto err_buffer_free; - } - - if (!strcmp(presult, "sqc")) - ret = qm_sqc_dump(qm, s); - else if (!strcmp(presult, "cqc")) - ret = qm_cqc_dump(qm, s); - 
else if (!strcmp(presult, "eqc")) - ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc), - QM_MB_CMD_EQC, "EQC"); - else if (!strcmp(presult, "aeqc")) - ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc), - QM_MB_CMD_AEQC, "AEQC"); - else if (!strcmp(presult, "sq")) - ret = qm_sq_dump(qm, s); - else if (!strcmp(presult, "cq")) - ret = qm_cq_dump(qm, s); - else if (!strcmp(presult, "eq")) - ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE"); - else if (!strcmp(presult, "aeq")) - ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE"); - else if (!strcmp(presult, "help")) - ret = qm_dbg_help(qm, s); - else - ret = -EINVAL; + for (i = SQC_VFT; i <= CQC_VFT; i++) { + ret = qm_set_vft_common(qm, i, fun_num, base, number); + if (ret) + return ret; + } - if (ret) - dev_info(dev, "Please echo help\n"); + /* init default shaper qos val */ + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { + ret = qm_shaper_init_vft(qm, fun_num); + if (ret) + goto back_sqc_cqc; + } -err_buffer_free: - kfree(s_tmp); + return 0; +back_sqc_cqc: + for (i = SQC_VFT; i <= CQC_VFT; i++) + qm_set_vft_common(qm, i, fun_num, 0, 0); return ret; } -static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer, - size_t count, loff_t *pos) +static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) { - struct hisi_qm *qm = filp->private_data; - char *cmd_buf, *cmd_buf_tmp; + u64 sqc_vft; int ret; - if (*pos) - return 0; - - ret = hisi_qm_get_dfx_access(qm); + ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); if (ret) return ret; - /* Judge if the instance is being reset. */ - if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) { - ret = 0; - goto put_dfx_access; - } + sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | + ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); + *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); + *number = (QM_SQC_VFT_NUM_MASK_v2 & + (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; - if (count > QM_DBG_WRITE_LEN) { - ret = -ENOSPC; - goto put_dfx_access; - } + return 0; +} - cmd_buf = memdup_user_nul(buffer, count); - if (IS_ERR(cmd_buf)) { - ret = PTR_ERR(cmd_buf); - goto put_dfx_access; - } +void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, + dma_addr_t *dma_addr) +{ + struct device *dev = &qm->pdev->dev; + void *ctx_addr; - cmd_buf_tmp = strchr(cmd_buf, '\n'); - if (cmd_buf_tmp) { - *cmd_buf_tmp = '\0'; - count = cmd_buf_tmp - cmd_buf + 1; - } + ctx_addr = kzalloc(ctx_size, GFP_KERNEL); + if (!ctx_addr) + return ERR_PTR(-ENOMEM); - ret = qm_cmd_write_dump(qm, cmd_buf); - if (ret) { - kfree(cmd_buf); - goto put_dfx_access; + *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, *dma_addr)) { + dev_err(dev, "DMA mapping error!\n"); + kfree(ctx_addr); + return ERR_PTR(-ENOMEM); } - kfree(cmd_buf); + return ctx_addr; +} - ret = count; +void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, + const void *ctx_addr, dma_addr_t *dma_addr) +{ + struct device *dev = &qm->pdev->dev; -put_dfx_access: - hisi_qm_put_dfx_access(qm); - return ret; + dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE); + kfree(ctx_addr); } -static const struct file_operations qm_cmd_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = qm_cmd_read, - .write = qm_cmd_write, -}; - -static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, - enum qm_debug_file index) +static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) { - struct debugfs_file 
*file = qm->debug.files + index; - - debugfs_create_file(qm_debug_file_name[index], 0600, dir, file, - &qm_debug_fops); + return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); +} - file->index = index; - mutex_init(&file->lock); - file->debug = &qm->debug; +static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) +{ + return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); } static void qm_hw_error_init_v1(struct hisi_qm *qm) @@ -3101,7 +2130,7 @@ static int qm_drain_qp(struct hisi_qp *qp) return ret; } - addr = qm_ctx_alloc(qm, size, &dma_addr); + addr = hisi_qm_ctx_alloc(qm, size, &dma_addr); if (IS_ERR(addr)) { dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n"); return -ENOMEM; @@ -3136,7 +2165,7 @@ static int qm_drain_qp(struct hisi_qp *qp) usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); } - qm_ctx_free(qm, size, addr, &dma_addr); + hisi_qm_ctx_free(qm, size, addr, &dma_addr); return ret; } @@ -3721,17 +2750,6 @@ static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) writel(state, qm->io_base + QM_VF_STATE); } -static void qm_last_regs_uninit(struct hisi_qm *qm) -{ - struct qm_debug *debug = &qm->debug; - - if (qm->fun_type == QM_HW_VF || !debug->qm_last_words) - return; - - kfree(debug->qm_last_words); - debug->qm_last_words = NULL; -} - static void hisi_qm_unint_work(struct hisi_qm *qm) { destroy_workqueue(qm->wq); @@ -3762,8 +2780,6 @@ static void hisi_qm_memory_uninit(struct hisi_qm *qm) */ void hisi_qm_uninit(struct hisi_qm *qm) { - qm_last_regs_uninit(qm); - qm_cmd_uninit(qm); hisi_qm_unint_work(qm); down_write(&qm->qps_lock); @@ -4132,45 +3148,6 @@ err_unlock: } EXPORT_SYMBOL_GPL(hisi_qm_stop); -static ssize_t qm_status_read(struct file *filp, char __user *buffer, - size_t count, loff_t *pos) -{ - struct hisi_qm *qm = filp->private_data; - char buf[QM_DBG_READ_LEN]; - int val, len; - - val = atomic_read(&qm->status.flags); - len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]); - - return simple_read_from_buffer(buffer, count, pos, buf, len); -} - -static const struct file_operations qm_status_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = qm_status_read, -}; - -static int qm_debugfs_atomic64_set(void *data, u64 val) -{ - if (val) - return -EINVAL; - - atomic64_set((atomic64_t *)data, 0); - - return 0; -} - -static int qm_debugfs_atomic64_get(void *data, u64 *val) -{ - *val = atomic64_read((atomic64_t *)data); - - return 0; -} - -DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get, - qm_debugfs_atomic64_set, "%llu\n"); - static void qm_hw_error_init(struct hisi_qm *qm) { if (!qm->ops->hw_error_init) { @@ -4277,16 +3254,14 @@ static int hisi_qm_sort_devices(int node, struct list_head *head, struct hisi_qm *qm; struct list_head *n; struct device *dev; - int dev_node = 0; + int dev_node; list_for_each_entry(qm, &qm_list->list, list) { dev = &qm->pdev->dev; - if (IS_ENABLED(CONFIG_NUMA)) { - dev_node = dev_to_node(dev); - if (dev_node < 0) - dev_node = 0; - } + dev_node = dev_to_node(dev); + if (dev_node < 0) + dev_node = 0; res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) @@ -4592,49 +3567,36 @@ err_put_dfx_access: return ret; } -static ssize_t qm_qos_value_init(const char *buf, unsigned long *val) -{ - int buflen = strlen(buf); - int ret, i; - - for (i = 0; i < buflen; i++) { - if (!isdigit(buf[i])) - return -EINVAL; - } - - ret = sscanf(buf, "%lu", val); - if (ret != QM_QOS_VAL_NUM) - return -EINVAL; - - return 0; -} - static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, unsigned 
long *val, unsigned int *fun_index) { + struct bus_type *bus_type = qm->pdev->dev.bus; char tbuf_bdf[QM_DBG_READ_LEN] = {0}; - char val_buf[QM_QOS_VAL_MAX_LEN] = {0}; - u32 tmp1, device, function; - int ret, bus; + char val_buf[QM_DBG_READ_LEN] = {0}; + struct pci_dev *pdev; + struct device *dev; + int ret; ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf); if (ret != QM_QOS_PARAM_NUM) return -EINVAL; - ret = qm_qos_value_init(val_buf, val); + ret = kstrtoul(val_buf, 10, val); if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) { pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); return -EINVAL; } - ret = sscanf(tbuf_bdf, "%u:%x:%u.%u", &tmp1, &bus, &device, &function); - if (ret != QM_QOS_BDF_PARAM_NUM) { - pci_err(qm->pdev, "input pci bdf value is error!\n"); - return -EINVAL; + dev = bus_find_device_by_name(bus_type, NULL, tbuf_bdf); + if (!dev) { + pci_err(qm->pdev, "input pci bdf number is error!\n"); + return -ENODEV; } - *fun_index = PCI_DEVFN(device, function); + pdev = container_of(dev, struct pci_dev, dev); + + *fun_index = pdev->devfn; return 0; } @@ -4648,9 +3610,6 @@ static ssize_t qm_algqos_write(struct file *filp, const char __user *buf, unsigned long val; int len, ret; - if (qm->fun_type == QM_HW_VF) - return -EINVAL; - if (*pos != 0) return 0; @@ -4709,7 +3668,7 @@ static const struct file_operations qm_algqos_fops = { * * Create function qos debugfs files, VF ping PF to get function qos. */ -static void hisi_qm_set_algqos_init(struct hisi_qm *qm) +void hisi_qm_set_algqos_init(struct hisi_qm *qm) { if (qm->fun_type == QM_HW_PF) debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, @@ -4719,88 +3678,6 @@ static void hisi_qm_set_algqos_init(struct hisi_qm *qm) qm, &qm_algqos_fops); } -/** - * hisi_qm_debug_init() - Initialize qm related debugfs files. - * @qm: The qm for which we want to add debugfs files. - * - * Create qm related debugfs files. - */ -void hisi_qm_debug_init(struct hisi_qm *qm) -{ - struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs; - struct qm_dfx *dfx = &qm->debug.dfx; - struct dentry *qm_d; - void *data; - int i; - - qm_d = debugfs_create_dir("qm", qm->debug.debug_root); - qm->debug.qm_d = qm_d; - - /* only show this in PF */ - if (qm->fun_type == QM_HW_PF) { - qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM); - for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++) - qm_create_debugfs_file(qm, qm->debug.qm_d, i); - } - - if (qm_regs) - debugfs_create_file("diff_regs", 0444, qm->debug.qm_d, - qm, &qm_diff_regs_fops); - - debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); - - debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops); - - debugfs_create_file("status", 0444, qm->debug.qm_d, qm, - &qm_status_fops); - for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) { - data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset); - debugfs_create_file(qm_dfx_files[i].name, - 0644, - qm_d, - data, - &qm_atomic64_ops); - } - - if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) - hisi_qm_set_algqos_init(qm); -} -EXPORT_SYMBOL_GPL(hisi_qm_debug_init); - -/** - * hisi_qm_debug_regs_clear() - clear qm debug related registers. - * @qm: The qm for which we want to clear its debug registers. 
- */ -void hisi_qm_debug_regs_clear(struct hisi_qm *qm) -{ - const struct debugfs_reg32 *regs; - int i; - - /* clear current_qm */ - writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); - writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); - - /* clear current_q */ - writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); - writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); - - /* - * these registers are reading and clearing, so clear them after - * reading them. - */ - writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE); - - regs = qm_dfx_regs; - for (i = 0; i < CNT_CYC_REGS_NUM; i++) { - readl(qm->io_base + regs->offset); - regs++; - } - - /* clear clear_enable */ - writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE); -} -EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear); - static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func) { int i; @@ -5439,24 +4316,6 @@ static int qm_controller_reset_done(struct hisi_qm *qm) return 0; } -static void qm_show_last_dfx_regs(struct hisi_qm *qm) -{ - struct qm_debug *debug = &qm->debug; - struct pci_dev *pdev = qm->pdev; - u32 val; - int i; - - if (qm->fun_type == QM_HW_VF || !debug->qm_last_words) - return; - - for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) { - val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset); - if (debug->qm_last_words[i] != val) - pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n", - qm_dfx_regs[i].name, debug->qm_last_words[i], val); - } -} - static int qm_controller_reset(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5472,7 +4331,7 @@ static int qm_controller_reset(struct hisi_qm *qm) return ret; } - qm_show_last_dfx_regs(qm); + hisi_qm_show_last_dfx_regs(qm); if (qm->err_ini->show_last_dfx_regs) qm->err_ini->show_last_dfx_regs(qm); @@ -5725,6 +4584,7 @@ static void qm_pf_reset_vf_done(struct hisi_qm *qm) cmd = QM_VF_START_FAIL; } + qm_cmd_init(qm); ret = qm_ping_pf(qm, cmd); if (ret) dev_warn(&pdev->dev, "PF responds timeout in reset done!\n"); @@ -5786,7 +4646,6 @@ static void qm_pf_reset_vf_process(struct hisi_qm *qm, goto err_get_status; qm_pf_reset_vf_done(qm); - qm_cmd_init(qm); dev_info(dev, "device reset done.\n"); @@ -6359,26 +5218,6 @@ err_destroy_idr: return ret; } -static void qm_last_regs_init(struct hisi_qm *qm) -{ - int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs); - struct qm_debug *debug = &qm->debug; - int i; - - if (qm->fun_type == QM_HW_VF) - return; - - debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int), - GFP_KERNEL); - if (!debug->qm_last_words) - return; - - for (i = 0; i < dfx_regs_num; i++) { - debug->qm_last_words[i] = readl_relaxed(qm->io_base + - qm_dfx_regs[i].offset); - } -} - /** * hisi_qm_init() - Initialize configures about qm. * @qm: The qm needing init. 
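The qm_last_regs_init()/qm_show_last_dfx_regs() pair removed above implements a snapshot-and-compare scheme: the DFX register values are cached once at init and diffed against the live values during controller reset, so only registers that changed are logged. A minimal sketch of that idea, with reg_desc, snapshot_regs and show_changed_regs as hypothetical names standing in for the driver's own qm_dfx_regs machinery:

#include <linux/io.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/types.h>

struct reg_desc {
	const char *name;
	u32 offset;
};

/* Cache the current value of every register in the table (hypothetical
 * helper; returns NULL on allocation failure, as the removed code does). */
static u32 *snapshot_regs(void __iomem *base, const struct reg_desc *regs, int n)
{
	u32 *last = kcalloc(n, sizeof(*last), GFP_KERNEL);
	int i;

	if (!last)
		return NULL;

	for (i = 0; i < n; i++)
		last[i] = readl_relaxed(base + regs[i].offset);

	return last;
}

/* On reset, report only the registers whose value moved since the snapshot. */
static void show_changed_regs(void __iomem *base, const struct reg_desc *regs,
			      const u32 *last, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		u32 val = readl_relaxed(base + regs[i].offset);

		if (val != last[i])
			pr_info("%s = 0x%08x => 0x%08x\n",
				regs[i].name, last[i], val);
	}
}

The hisi_qm_show_last_dfx_regs() declaration in qm_common.h and the call kept in qm_controller_reset() suggest the implementation moved to a companion file rather than being deleted outright.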
@@ -6427,8 +5266,6 @@ int hisi_qm_init(struct hisi_qm *qm) qm_cmd_init(qm); atomic_set(&qm->status.flags, QM_INIT); - qm_last_regs_init(qm); - return 0; err_free_qm_memory: @@ -6631,8 +5468,14 @@ int hisi_qm_resume(struct device *dev) } ret = hisi_qm_start(qm); - if (ret) - pci_err(pdev, "failed to start qm(%d)\n", ret); + if (ret) { + if (qm_check_dev_error(qm)) { + pci_info(pdev, "failed to start qm due to device error, device will be reset!\n"); + return 0; + } + + pci_err(pdev, "failed to start qm(%d)!\n", ret); + } return ret; } diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h new file mode 100644 index 0000000000000000000000000000000000000000..1406a422d4551714b6e9453616b1e0e68e7b8449 --- /dev/null +++ b/drivers/crypto/hisilicon/qm_common.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2022 HiSilicon Limited. */ +#ifndef QM_COMMON_H +#define QM_COMMON_H + +#define QM_DBG_READ_LEN 256 +#define QM_RESETTING 2 + +struct qm_cqe { + __le32 rsvd0; + __le16 cmd_id; + __le16 rsvd1; + __le16 sq_head; + __le16 sq_num; + __le16 rsvd2; + __le16 w7; +}; + +struct qm_eqe { + __le32 dw0; +}; + +struct qm_aeqe { + __le32 dw0; +}; + +struct qm_sqc { + __le16 head; + __le16 tail; + __le32 base_l; + __le32 base_h; + __le32 dw3; + __le16 w8; + __le16 rsvd0; + __le16 pasid; + __le16 w11; + __le16 cq_num; + __le16 w13; + __le32 rsvd1; +}; + +struct qm_cqc { + __le16 head; + __le16 tail; + __le32 base_l; + __le32 base_h; + __le32 dw3; + __le16 w8; + __le16 rsvd0; + __le16 pasid; + __le16 w11; + __le32 dw6; + __le32 rsvd1; +}; + +struct qm_eqc { + __le16 head; + __le16 tail; + __le32 base_l; + __le32 base_h; + __le32 dw3; + __le32 rsvd[2]; + __le32 dw6; +}; + +struct qm_aeqc { + __le16 head; + __le16 tail; + __le32 base_l; + __le32 base_h; + __le32 dw3; + __le32 rsvd[2]; + __le32 dw6; +}; + +static const char * const qm_s[] = { + "init", "start", "close", "stop", +}; + +void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, + dma_addr_t *dma_addr); +void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, + const void *ctx_addr, dma_addr_t *dma_addr); +void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm); +void hisi_qm_set_algqos_init(struct hisi_qm *qm); + +#endif diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 84ae8ddd1a131b658594ff70d3c2043ce78ac8cb..f5bfc9755a4a3eaec843589277584afcb6be8a4f 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -283,7 +283,6 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) spin_lock_bh(&qp_ctx->req_lock); ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); - if (ctx->fake_req_limit <= atomic_read(&qp_ctx->qp->qp_status.used) && !ret) { list_add_tail(&req->backlog_head, &qp_ctx->backlog); @@ -2009,7 +2008,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) return sec_aead_ctx_init(tfm, "sha512"); } -static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx, +static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq) { u32 cryptlen = sreq->c_req.sk_req->cryptlen; @@ -2071,7 +2070,7 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) } return 0; } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) { - return sec_skcipher_cryptlen_ckeck(ctx, sreq); + return sec_skcipher_cryptlen_check(ctx, sreq); } dev_err(dev, "skcipher algorithm error!\n"); diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c 
b/drivers/crypto/hisilicon/sec2/sec_main.c index 3705412bac5f19a1cf11cbbffa3a8b2efa647257..93572c0d4faa33b8f6065ff94eee6e23038fd29c 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -55,7 +55,7 @@ #define SEC_CONTROL_REG 0x301200 #define SEC_DYNAMIC_GATE_REG 0x30121c #define SEC_CORE_AUTO_GATE 0x30212c -#define SEC_DYNAMIC_GATE_EN 0x7bff +#define SEC_DYNAMIC_GATE_EN 0x7fff #define SEC_CORE_AUTO_GATE_EN GENMASK(3, 0) #define SEC_CLK_GATE_ENABLE BIT(3) #define SEC_CLK_GATE_DISABLE (~BIT(3)) @@ -427,7 +427,6 @@ static void sec_set_endian(struct hisi_qm *qm) if (!IS_ENABLED(CONFIG_64BIT)) reg |= BIT(1); - if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) reg |= BIT(0); @@ -899,8 +898,7 @@ static int sec_debugfs_init(struct hisi_qm *qm) qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN; - ret = hisi_qm_diff_regs_init(qm, sec_diff_regs, - ARRAY_SIZE(sec_diff_regs)); + ret = hisi_qm_regs_debugfs_init(qm, sec_diff_regs, ARRAY_SIZE(sec_diff_regs)); if (ret) { dev_warn(dev, "Failed to init SEC diff regs!\n"); goto debugfs_remove; @@ -915,7 +913,7 @@ static int sec_debugfs_init(struct hisi_qm *qm) return 0; failed_to_create: - hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs)); + hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs)); debugfs_remove: debugfs_remove_recursive(sec_debugfs_root); return ret; @@ -923,7 +921,7 @@ debugfs_remove: static void sec_debugfs_exit(struct hisi_qm *qm) { - hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs)); + hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs)); debugfs_remove_recursive(qm->debug.debug_root); } diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index c863435e8c75aadc5c57c2d98900b251b6f99fe1..1549bec3aea59334a8202206ad595aea5166764c 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -849,8 +849,7 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm) qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN; qm->debug.debug_root = dev_d; - ret = hisi_qm_diff_regs_init(qm, hzip_diff_regs, - ARRAY_SIZE(hzip_diff_regs)); + ret = hisi_qm_regs_debugfs_init(qm, hzip_diff_regs, ARRAY_SIZE(hzip_diff_regs)); if (ret) { dev_warn(dev, "Failed to init ZIP diff regs!\n"); goto debugfs_remove; @@ -869,7 +868,7 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm) return 0; failed_to_create: - hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs)); + hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs)); debugfs_remove: debugfs_remove_recursive(hzip_debugfs_root); return ret; @@ -895,7 +894,7 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm) static void hisi_zip_debugfs_exit(struct hisi_qm *qm) { - hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs)); + hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs)); debugfs_remove_recursive(qm->debug.debug_root); diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index d8e82d69745d8c95af1101c652a524e173edf540..9629e98bd68b7096af551610e63ca4fa2f139022 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -358,12 +358,16 @@ static int img_hash_dma_init(struct img_hash_dev *hdev) static void img_hash_dma_task(unsigned long d) { struct img_hash_dev *hdev = (struct img_hash_dev *)d; - struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); + struct img_hash_request_ctx *ctx; u8 *addr; size_t nbytes, bleft, wsend, len, 
tbc; struct scatterlist tsg; - if (!hdev->req || !ctx->sg) + if (!hdev->req) + return; + + ctx = ahash_request_ctx(hdev->req); + if (!ctx->sg) return; addr = sg_virt(ctx->sg); diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index ad0d8c4a71ac1b5801672071c60173998e6bdd37..ae6110376e21c690a2e0f065915585ab3a7a66c1 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -316,14 +316,20 @@ static void eip197_init_firmware(struct safexcel_crypto_priv *priv) static int eip197_write_firmware(struct safexcel_crypto_priv *priv, const struct firmware *fw) { - const __be32 *data = (const __be32 *)fw->data; + u32 val; int i; /* Write the firmware */ - for (i = 0; i < fw->size / sizeof(u32); i++) - writel(be32_to_cpu(data[i]), + for (i = 0; i < fw->size / sizeof(u32); i++) { + if (priv->data->fw_little_endian) + val = le32_to_cpu(((const __le32 *)fw->data)[i]); + else + val = be32_to_cpu(((const __be32 *)fw->data)[i]); + + writel(val, priv->base + EIP197_CLASSIFICATION_RAMS + - i * sizeof(__be32)); + i * sizeof(val)); + } /* Exclude final 2 NOPs from size */ return i - EIP197_FW_TERMINAL_NOPS; @@ -410,11 +416,13 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) int i, j, ret = 0, pe; int ipuesz, ifppsz, minifw = 0; - if (priv->version == EIP197D_MRVL) + if (priv->data->version == EIP197D_MRVL) dir = "eip197d"; - else if (priv->version == EIP197B_MRVL || - priv->version == EIP197_DEVBRD) + else if (priv->data->version == EIP197B_MRVL || + priv->data->version == EIP197_DEVBRD) dir = "eip197b"; + else if (priv->data->version == EIP197C_MXL) + dir = "eip197c"; else return -ENODEV; @@ -423,7 +431,7 @@ retry_fw: snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]); ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev); if (ret) { - if (minifw || priv->version != EIP197B_MRVL) + if (minifw || priv->data->version != EIP197B_MRVL) goto release_fw; /* Fallback to the old firmware location for the @@ -1597,7 +1605,7 @@ static int safexcel_probe_generic(void *pdev, safexcel_configure(priv); - if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) { + if (IS_ENABLED(CONFIG_PCI) && priv->data->version == EIP197_DEVBRD) { /* * Request MSI vectors for global + 1 per ring - * or just 1 for older dev images @@ -1731,7 +1739,7 @@ static int safexcel_probe(struct platform_device *pdev) return -ENOMEM; priv->dev = dev; - priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev); + priv->data = (struct safexcel_priv_data *)of_device_get_match_data(dev); platform_set_drvdata(pdev, priv); @@ -1806,27 +1814,52 @@ static int safexcel_remove(struct platform_device *pdev) return 0; } +static const struct safexcel_priv_data eip97ies_mrvl_data = { + .version = EIP97IES_MRVL, +}; + +static const struct safexcel_priv_data eip197b_mrvl_data = { + .version = EIP197B_MRVL, +}; + +static const struct safexcel_priv_data eip197d_mrvl_data = { + .version = EIP197D_MRVL, +}; + +static const struct safexcel_priv_data eip197_devbrd_data = { + .version = EIP197_DEVBRD, +}; + +static const struct safexcel_priv_data eip197c_mxl_data = { + .version = EIP197C_MXL, + .fw_little_endian = true, +}; + static const struct of_device_id safexcel_of_match_table[] = { { .compatible = "inside-secure,safexcel-eip97ies", - .data = (void *)EIP97IES_MRVL, + .data = &eip97ies_mrvl_data, }, { .compatible = "inside-secure,safexcel-eip197b", - .data = (void *)EIP197B_MRVL, + .data = &eip197b_mrvl_data, }, { .compatible = 
"inside-secure,safexcel-eip197d", - .data = (void *)EIP197D_MRVL, + .data = &eip197d_mrvl_data, + }, + { + .compatible = "inside-secure,safexcel-eip197c-mxl", + .data = &eip197c_mxl_data, }, /* For backward compatibility and intended for generic use */ { .compatible = "inside-secure,safexcel-eip97", - .data = (void *)EIP97IES_MRVL, + .data = &eip97ies_mrvl_data, }, { .compatible = "inside-secure,safexcel-eip197", - .data = (void *)EIP197B_MRVL, + .data = &eip197b_mrvl_data, }, {}, }; @@ -1862,7 +1895,7 @@ static int safexcel_pci_probe(struct pci_dev *pdev, return -ENOMEM; priv->dev = dev; - priv->version = (enum safexcel_eip_version)ent->driver_data; + priv->data = (struct safexcel_priv_data *)ent->driver_data; pci_set_drvdata(pdev, priv); @@ -1881,7 +1914,7 @@ static int safexcel_pci_probe(struct pci_dev *pdev, } priv->base = pcim_iomap_table(pdev)[0]; - if (priv->version == EIP197_DEVBRD) { + if (priv->data->version == EIP197_DEVBRD) { dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n"); rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel"); @@ -1949,7 +1982,7 @@ static const struct pci_device_id safexcel_pci_ids[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038, 0x16ae, 0xc522), - .driver_data = EIP197_DEVBRD, + .driver_data = (kernel_ulong_t)&eip197_devbrd_data, }, {}, }; diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 797ff91512e0d03f0b9a79b124a85c9d6fd11945..6c2fc662f64ffa727f4d2da1a4c74945d7932a54 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -730,7 +730,13 @@ enum safexcel_eip_version { EIP97IES_MRVL, EIP197B_MRVL, EIP197D_MRVL, - EIP197_DEVBRD + EIP197_DEVBRD, + EIP197C_MXL, +}; + +struct safexcel_priv_data { + enum safexcel_eip_version version; + bool fw_little_endian; }; /* Priority we use for advertising our algorithms */ @@ -815,7 +821,7 @@ struct safexcel_crypto_priv { struct clk *reg_clk; struct safexcel_config config; - enum safexcel_eip_version version; + struct safexcel_priv_data *data; struct safexcel_register_offsets offsets; struct safexcel_hwconfig hwconfig; u32 flags; diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 103fc551d2af902262ef9fef45a5c95e87b92a21..ca46328472d4bc0041ba7d03f3cf10caa6f33bba 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -231,7 +231,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, struct safexcel_result_desc *rdesc; struct ahash_request *areq = ahash_request_cast(async); struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); - struct safexcel_ahash_req *sreq = ahash_request_ctx(areq); + struct safexcel_ahash_req *sreq = ahash_request_ctx_dma(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash); u64 cache_len; @@ -312,7 +312,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, int *commands, int *results) { struct ahash_request *areq = ahash_request_cast(async); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); struct safexcel_crypto_priv *priv = ctx->base.priv; struct safexcel_command_desc *cdesc, *first_cdesc = NULL; @@ -569,7 +569,7 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, bool *should_complete, int *ret) { struct 
ahash_request *areq = ahash_request_cast(async); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); int err; BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv); @@ -608,7 +608,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, int *commands, int *results) { struct ahash_request *areq = ahash_request_cast(async); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); int ret; if (req->needs_inv) @@ -624,7 +624,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->base.priv; EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE); - struct safexcel_ahash_req *rctx = ahash_request_ctx(req); + struct safexcel_ahash_req *rctx = ahash_request_ctx_dma(req); struct safexcel_inv_result result = {}; int ring = ctx->base.ring; @@ -663,7 +663,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) */ static int safexcel_ahash_cache(struct ahash_request *areq) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); u64 cache_len; /* cache_len: everything accepted by the driver but not sent yet, @@ -689,7 +689,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq) static int safexcel_ahash_enqueue(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); struct safexcel_crypto_priv *priv = ctx->base.priv; int ret, ring; @@ -741,7 +741,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) static int safexcel_ahash_update(struct ahash_request *areq) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); int ret; /* If the request is 0 length, do nothing */ @@ -766,7 +766,7 @@ static int safexcel_ahash_update(struct ahash_request *areq) static int safexcel_ahash_final(struct ahash_request *areq) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); req->finish = true; @@ -870,7 +870,7 @@ static int safexcel_ahash_final(struct ahash_request *areq) static int safexcel_ahash_finup(struct ahash_request *areq) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); req->finish = true; @@ -880,7 +880,7 @@ static int safexcel_ahash_finup(struct ahash_request *areq) static int safexcel_ahash_export(struct ahash_request *areq, void *out) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); struct safexcel_ahash_export_state *export = out; export->len = req->len; @@ -896,7 +896,7 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out) static int safexcel_ahash_import(struct ahash_request *areq, const void *in) { - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); const struct safexcel_ahash_export_state *export = in; int ret; @@ -927,15 +927,15 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm) 
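The ahash_request_ctx_dma() conversions running through this file pair two helpers: crypto_ahash_set_reqsize_dma() reserves a per-request context padded and aligned for DMA, and ahash_request_ctx_dma() applies the matching alignment offset when fetching it, so mixing the _dma and plain variants would read the context at the wrong offset. A hedged sketch of the pairing, with my_ahash_req/my_cra_init/my_hash_init as made-up names rather than safexcel code:

#include <crypto/internal/hash.h>
#include <linux/string.h>

/* Illustrative per-request context; not part of the safexcel driver. */
struct my_ahash_req {
	bool finish;
	u64 len;
};

static int my_cra_init(struct crypto_tfm *tfm)
{
	/* Reserve a DMA-aligned per-request context of this size. */
	crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
				     sizeof(struct my_ahash_req));
	return 0;
}

static int my_hash_init(struct ahash_request *areq)
{
	/* Must use the _dma accessor because the reqsize was set with
	 * the _dma variant above. */
	struct my_ahash_req *req = ahash_request_ctx_dma(areq);

	memset(req, 0, sizeof(*req));
	return 0;
}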
ctx->base.handle_result = safexcel_handle_result; ctx->fb_do_setkey = false; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct safexcel_ahash_req)); + crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm), + sizeof(struct safexcel_ahash_req)); return 0; } static int safexcel_sha1_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -1012,7 +1012,7 @@ struct safexcel_alg_template safexcel_alg_sha1 = { static int safexcel_hmac_sha1_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -1124,7 +1124,7 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq, if (ret) return ret; - req = ahash_request_ctx(areq); + req = ahash_request_ctx_dma(areq); req->hmac = true; req->last_req = true; @@ -1264,7 +1264,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha1 = { static int safexcel_sha256_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -1321,7 +1321,7 @@ struct safexcel_alg_template safexcel_alg_sha256 = { static int safexcel_sha224_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -1385,7 +1385,7 @@ static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key, static int safexcel_hmac_sha224_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -1457,7 +1457,7 @@ static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key, static int safexcel_hmac_sha256_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -1522,7 +1522,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = { static int safexcel_sha512_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -1579,7 +1579,7 @@ struct safexcel_alg_template safexcel_alg_sha512 = { static int safexcel_sha384_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -1643,7 +1643,7 @@ static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key, static int 
safexcel_hmac_sha512_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -1715,7 +1715,7 @@ static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key, static int safexcel_hmac_sha384_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -1780,7 +1780,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha384 = { static int safexcel_md5_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -1837,7 +1837,7 @@ struct safexcel_alg_template safexcel_alg_md5 = { static int safexcel_hmac_md5_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -1920,7 +1920,7 @@ static int safexcel_crc32_cra_init(struct crypto_tfm *tfm) static int safexcel_crc32_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -1992,7 +1992,7 @@ struct safexcel_alg_template safexcel_alg_crc32 = { static int safexcel_cbcmac_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -2252,7 +2252,7 @@ struct safexcel_alg_template safexcel_alg_cmac = { static int safexcel_sm3_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -2316,7 +2316,7 @@ static int safexcel_hmac_sm3_setkey(struct crypto_ahash *tfm, const u8 *key, static int safexcel_hmac_sm3_init(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -2382,7 +2382,7 @@ static int safexcel_sha3_224_init(struct ahash_request *areq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -2400,7 +2400,7 @@ static int safexcel_sha3_fbcheck(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *subreq = ahash_request_ctx(req); + struct ahash_request 
*subreq = ahash_request_ctx_dma(req); int ret = 0; if (ctx->do_fallback) { @@ -2437,7 +2437,7 @@ static int safexcel_sha3_update(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *subreq = ahash_request_ctx(req); + struct ahash_request *subreq = ahash_request_ctx_dma(req); ctx->do_fallback = true; return safexcel_sha3_fbcheck(req) ?: crypto_ahash_update(subreq); @@ -2447,7 +2447,7 @@ static int safexcel_sha3_final(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *subreq = ahash_request_ctx(req); + struct ahash_request *subreq = ahash_request_ctx_dma(req); ctx->do_fallback = true; return safexcel_sha3_fbcheck(req) ?: crypto_ahash_final(subreq); @@ -2457,7 +2457,7 @@ static int safexcel_sha3_finup(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *subreq = ahash_request_ctx(req); + struct ahash_request *subreq = ahash_request_ctx_dma(req); ctx->do_fallback |= !req->nbytes; if (ctx->do_fallback) @@ -2472,7 +2472,7 @@ static int safexcel_sha3_digest_fallback(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *subreq = ahash_request_ctx(req); + struct ahash_request *subreq = ahash_request_ctx_dma(req); ctx->do_fallback = true; ctx->fb_init_done = false; @@ -2492,7 +2492,7 @@ static int safexcel_sha3_export(struct ahash_request *req, void *out) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *subreq = ahash_request_ctx(req); + struct ahash_request *subreq = ahash_request_ctx_dma(req); ctx->do_fallback = true; return safexcel_sha3_fbcheck(req) ?: crypto_ahash_export(subreq, out); @@ -2502,7 +2502,7 @@ static int safexcel_sha3_import(struct ahash_request *req, const void *in) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *subreq = ahash_request_ctx(req); + struct ahash_request *subreq = ahash_request_ctx_dma(req); ctx->do_fallback = true; return safexcel_sha3_fbcheck(req) ?: crypto_ahash_import(subreq, in); @@ -2526,9 +2526,10 @@ static int safexcel_sha3_cra_init(struct crypto_tfm *tfm) /* Update statesize from fallback algorithm! 
*/ crypto_hash_alg_common(ahash)->statesize = crypto_ahash_statesize(ctx->fback); - crypto_ahash_set_reqsize(ahash, max(sizeof(struct safexcel_ahash_req), - sizeof(struct ahash_request) + - crypto_ahash_reqsize(ctx->fback))); + crypto_ahash_set_reqsize_dma( + ahash, max(sizeof(struct safexcel_ahash_req), + sizeof(struct ahash_request) + + crypto_ahash_reqsize(ctx->fback))); return 0; } @@ -2575,7 +2576,7 @@ static int safexcel_sha3_256_init(struct ahash_request *areq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -2633,7 +2634,7 @@ static int safexcel_sha3_384_init(struct ahash_request *areq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -2691,7 +2692,7 @@ static int safexcel_sha3_512_init(struct ahash_request *areq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -2841,7 +2842,7 @@ static int safexcel_hmac_sha3_224_init(struct ahash_request *areq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -2912,7 +2913,7 @@ static int safexcel_hmac_sha3_256_init(struct ahash_request *areq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -2983,7 +2984,7 @@ static int safexcel_hmac_sha3_384_init(struct ahash_request *areq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); @@ -3054,7 +3055,7 @@ static int safexcel_hmac_sha3_512_init(struct ahash_request *areq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm); - struct safexcel_ahash_req *req = ahash_request_ctx(areq); + struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq); memset(req, 0, sizeof(*req)); diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index d39a386b31ac7752ffb17b89efac49ea76063523..984b3cc0237ca5124816dfc98587f60bd4603989 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -420,7 +420,7 @@ static void one_packet(dma_addr_t phys) break; case CTL_FLAG_GEN_REVAES: ctx = crypto_tfm_ctx(crypt->data.tfm); - *(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR); + *(__be32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR); if (atomic_dec_and_test(&ctx->configuring)) complete(&ctx->completion); break; @@ -720,7 +720,7 @@ static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target, crypt->init_len = 
init_len;
 	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
 
-	buf->next = 0;
+	buf->next = NULL;
 	buf->buf_len = HMAC_PAD_BLOCKLEN;
 	buf->pkt_len = 0;
 	buf->phys_addr = pad_phys;
@@ -751,7 +751,7 @@ static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize
 #ifndef __ARMEB__
 	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
 #endif
-	*(u32 *)cinfo = cpu_to_be32(cfgword);
+	*(__be32 *)cinfo = cpu_to_be32(cfgword);
 	cinfo += sizeof(cfgword);
 
 	/* write ICV to cryptinfo */
@@ -788,7 +788,7 @@ static int gen_rev_aes_key(struct crypto_tfm *tfm)
 	if (!crypt)
 		return -EAGAIN;
 
-	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
+	*(__be32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
 
 	crypt->data.tfm = tfm;
 	crypt->crypt_offs = 0;
@@ -846,7 +846,7 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
 		return err;
 	}
 	/* write cfg word to cryptinfo */
-	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
+	*(__be32 *)cinfo = cpu_to_be32(cipher_cfg);
 	cinfo += sizeof(cipher_cfg);
 
 	/* write cipher key to cryptinfo */
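The ixp4xx hunks above are endianness-annotation fixes: the NPE context bytes hold big-endian words, so the stores now go through a __be32 pointer, matching the cpu_to_be32() value and silencing sparse's __bitwise warnings without changing generated code. A minimal sketch of the pattern; "npe_buf" and "MY_CIPH_FLAG" are invented names:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	#define MY_CIPH_FLAG 0x00000040

	static void set_ciph_flag(u8 *npe_buf)
	{
		/* Device memory holds big-endian words: annotate the access. */
		*(__be32 *)npe_buf |= cpu_to_be32(MY_CIPH_FLAG);
	}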
diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
index 0379dbf32a4c46f8b6189cd14cab8358841c504d..d4bcbed1f5466da6728ae0132f78d4d1e400598e 100644
--- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
@@ -226,7 +226,7 @@ static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
  */
 static int kmb_ocs_dma_prepare(struct ahash_request *req)
 {
-	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
 	struct device *dev = rctx->hcu_dev->dev;
 	unsigned int remainder = 0;
 	unsigned int total;
@@ -356,7 +356,7 @@ cleanup:
 
 static void kmb_ocs_hcu_secure_cleanup(struct ahash_request *req)
 {
-	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
 
 	/* Clear buffer of any data. */
 	memzero_explicit(rctx->buffer, sizeof(rctx->buffer));
@@ -374,7 +374,7 @@ static int kmb_ocs_hcu_handle_queue(struct ahash_request *req)
 
 static int prepare_ipad(struct ahash_request *req)
 {
-	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
 	int i;
@@ -414,7 +414,7 @@ static int kmb_ocs_hcu_do_one_request(struct crypto_engine *engine, void *areq)
 						  base);
 	struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
 	struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
 	int rc;
 	int i;
@@ -561,7 +561,7 @@ error:
 static int kmb_ocs_hcu_init(struct ahash_request *req)
 {
 	struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
-	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
 
@@ -614,7 +614,7 @@ static int kmb_ocs_hcu_init(struct ahash_request *req)
 
 static int kmb_ocs_hcu_update(struct ahash_request *req)
 {
-	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
 	int rc;
 
 	if (!req->nbytes)
@@ -650,7 +650,7 @@ static int kmb_ocs_hcu_update(struct ahash_request *req)
 
 /* Common logic for kmb_ocs_hcu_final() and kmb_ocs_hcu_finup(). */
 static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
 {
-	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
 	int rc;
@@ -687,7 +687,7 @@ static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
 
 static int kmb_ocs_hcu_final(struct ahash_request *req)
 {
-	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
 
 	rctx->sg_data_total = 0;
 	rctx->sg_data_offset = 0;
@@ -698,7 +698,7 @@ static int kmb_ocs_hcu_final(struct ahash_request *req)
 
 static int kmb_ocs_hcu_finup(struct ahash_request *req)
 {
-	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
 
 	rctx->sg_data_total = req->nbytes;
 	rctx->sg_data_offset = 0;
@@ -726,7 +726,7 @@ static int kmb_ocs_hcu_digest(struct ahash_request *req)
 
 static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
 {
-	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
 
 	/* Intermediate data is always stored and applied per request. */
 	memcpy(out, rctx, sizeof(*rctx));
@@ -736,7 +736,7 @@ static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
 
 static int kmb_ocs_hcu_import(struct ahash_request *req, const void *in)
 {
-	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
 
 	/* Intermediate data is always stored and applied per request. */
 	memcpy(rctx, in, sizeof(*rctx));
@@ -822,8 +822,8 @@ err_free_ahash:
 /* Set request size and initialize tfm context. */
 static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
 {
-	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				 sizeof(struct ocs_hcu_rctx));
+	crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
+				     sizeof(struct ocs_hcu_rctx));
 
 	/* Init context to 0. */
 	memzero_explicit(ctx, sizeof(*ctx));
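Both the safexcel and OCS HCU conversions apply the same crypto-API pattern for DMA-safe request contexts: the driver declares its per-request context with crypto_ahash_set_reqsize_dma() and retrieves it with ahash_request_ctx_dma(), letting the core pad and align the context for non-coherent DMA instead of returning a possibly misaligned pointer. A minimal sketch under invented names (my_rctx, my_hash_init, my_cra_init):

	struct my_rctx {
		dma_addr_t state_dma;	/* request state the hardware DMAs */
		u8 state[64];
	};

	static int my_hash_init(struct ahash_request *areq)
	{
		/* DMA-aligned view of the request context */
		struct my_rctx *rctx = ahash_request_ctx_dma(areq);

		memset(rctx, 0, sizeof(*rctx));
		return 0;
	}

	static int my_cra_init(struct crypto_tfm *tfm)
	{
		/* Must pair with ahash_request_ctx_dma() above */
		crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
					     sizeof(struct my_rctx));
		return 0;
	}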
diff --git a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
index 205eacac4a348bc995ed99468af1d8d2b19c2107..f8aedafdfdc5f06be69f617cc4dfc1510bb75aa1 100644
--- a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
+++ b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
@@ -534,7 +534,7 @@ union otx_cptx_vqx_misc_ena_w1s {
  * Word0
  * reserved_20_63:44 [63:20] Reserved.
  * dbell_cnt:20 [19:0](R/W/H) Number of instruction queue 64-bit words to add
- *	to the CPT instruction doorbell count. Readback value is the the
+ *	to the CPT instruction doorbell count. Readback value is the
  *	current number of pending doorbell requests. If counter overflows
  *	CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. To reset the count back to
  *	zero, write one to clear CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF],
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index df9c2b8747e6dd0c15c1ceaad085c93a4fe31daf..c4250e5fcf8f7bad435bbc6ffbd7138eebb39750 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -345,8 +345,7 @@ static void release_tar_archive(struct tar_arch_info_t *tar_arch)
 		kfree(curr);
 	}
 
-	if (tar_arch->fw)
-		release_firmware(tar_arch->fw);
+	release_firmware(tar_arch->fw);
 	kfree(tar_arch);
 }
 
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 01c48ddc4eeb42d625abaac0604bb3ed8469a97c..80ba77c793a7cb22e1c1da8769d32e11b491b527 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -103,7 +103,7 @@ static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
 
 	req = container_of(cpt_req->areq, struct aead_request, base);
 	tfm = crypto_aead_reqtfm(req);
-	rctx = aead_request_ctx(req);
+	rctx = aead_request_ctx_dma(req);
 	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
 		   rctx->fctx.hmac.s.hmac_recv,
 		   crypto_aead_authsize(tfm)) != 0)
@@ -155,7 +155,7 @@ static void output_iv_copyback(struct crypto_async_request *areq)
 	ctx = crypto_skcipher_ctx(stfm);
 	if (ctx->cipher_type == OTX_CPT_AES_CBC ||
 	    ctx->cipher_type == OTX_CPT_DES3_CBC) {
-		rctx = skcipher_request_ctx(sreq);
+		rctx = skcipher_request_ctx_dma(sreq);
 		req_info = &rctx->cpt_req;
 		ivsize = crypto_skcipher_ivsize(stfm);
 		start = sreq->cryptlen - ivsize;
@@ -233,7 +233,7 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
 				 u32 *argcnt)
 {
 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
 	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
 	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
 	struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -303,7 +303,7 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
 static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
 				    u32 enc_iv_len)
 {
-	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
 	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
 	u32 argcnt = 0;
 	int ret;
@@ -321,7 +321,7 @@ static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
 static inline void create_output_list(struct skcipher_request *req,
 				      u32 enc_iv_len)
 {
-	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
 	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
 	u32 argcnt = 0;
 
@@ -340,7 +340,7 @@ static inline void create_output_list(struct skcipher_request *req,
 static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
 {
 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
 	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
 	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
 	struct pci_dev *pdev;
@@ -501,15 +501,16 @@ static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
 	 * allocated since the cryptd daemon uses
 	 * this memory for request_ctx information
	 */
-
crypto_skcipher_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx) + - sizeof(struct skcipher_request)); + crypto_skcipher_set_reqsize_dma( + tfm, sizeof(struct otx_cpt_req_ctx) + + sizeof(struct skcipher_request)); return 0; } static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type) { - struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm); ctx->cipher_type = cipher_type; ctx->mac_type = mac_type; @@ -551,7 +552,7 @@ static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type) } } - crypto_aead_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx)); + crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx)); return 0; } @@ -603,7 +604,7 @@ static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm) static void otx_cpt_aead_exit(struct crypto_aead *tfm) { - struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm); kfree(ctx->ipad); kfree(ctx->opad); @@ -619,7 +620,7 @@ static void otx_cpt_aead_exit(struct crypto_aead *tfm) static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm, unsigned int authsize) { - struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm); switch (ctx->mac_type) { case OTX_CPT_SHA1: @@ -739,7 +740,7 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) static int aead_hmac_init(struct crypto_aead *cipher) { - struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher); + struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); int state_size = crypto_shash_statesize(ctx->hashalg); int ds = crypto_shash_digestsize(ctx->hashalg); int bs = crypto_shash_blocksize(ctx->hashalg); @@ -837,7 +838,7 @@ static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher, const unsigned char *key, unsigned int keylen) { - struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher); + struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); struct crypto_authenc_key_param *param; int enckeylen = 0, authkeylen = 0; struct rtattr *rta = (void *)key; @@ -896,7 +897,7 @@ static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher, const unsigned char *key, unsigned int keylen) { - struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher); + struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); struct crypto_authenc_key_param *param; struct rtattr *rta = (void *)key; int enckeylen = 0; @@ -932,7 +933,7 @@ static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher, const unsigned char *key, unsigned int keylen) { - struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher); + struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); /* * For aes gcm we expect to get encryption key (16, 24, 32 bytes) @@ -965,9 +966,9 @@ static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher, static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc, u32 *argcnt) { - struct otx_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm); struct otx_cpt_req_info *req_info = &rctx->cpt_req; struct otx_cpt_fc_ctx *fctx = &rctx->fctx; int mac_len = crypto_aead_authsize(tfm); @@ -1050,9 +1051,9 @@ static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc, static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt, u32 
enc) { - struct otx_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm); struct otx_cpt_req_info *req_info = &rctx->cpt_req; req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER; @@ -1076,7 +1077,7 @@ static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt, static inline u32 create_aead_input_list(struct aead_request *req, u32 enc) { - struct otx_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct otx_cpt_req_info *req_info = &rctx->cpt_req; u32 inputlen = req->cryptlen + req->assoclen; u32 status, argcnt = 0; @@ -1093,7 +1094,7 @@ static inline u32 create_aead_input_list(struct aead_request *req, u32 enc) static inline u32 create_aead_output_list(struct aead_request *req, u32 enc, u32 mac_len) { - struct otx_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct otx_cpt_req_info *req_info = &rctx->cpt_req; u32 argcnt = 0, outputlen = 0; @@ -1111,7 +1112,7 @@ static inline u32 create_aead_output_list(struct aead_request *req, u32 enc, static inline u32 create_aead_null_input_list(struct aead_request *req, u32 enc, u32 mac_len) { - struct otx_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct otx_cpt_req_info *req_info = &rctx->cpt_req; u32 inputlen, argcnt = 0; @@ -1130,7 +1131,7 @@ static inline u32 create_aead_null_input_list(struct aead_request *req, static inline u32 create_aead_null_output_list(struct aead_request *req, u32 enc, u32 mac_len) { - struct otx_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct otx_cpt_req_info *req_info = &rctx->cpt_req; struct scatterlist *dst; u8 *ptr = NULL; @@ -1217,7 +1218,7 @@ error: static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc) { - struct otx_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct otx_cpt_req_info *req_info = &rctx->cpt_req; struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct pci_dev *pdev; @@ -1409,7 +1410,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_driver_name = "cpt_hmac_sha1_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, - .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), + .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING, .cra_priority = 4001, .cra_alignmask = 0, .cra_module = THIS_MODULE, @@ -1428,7 +1429,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_driver_name = "cpt_hmac_sha256_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, - .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), + .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING, .cra_priority = 4001, .cra_alignmask = 0, .cra_module = THIS_MODULE, @@ -1447,7 +1448,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_driver_name = "cpt_hmac_sha384_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, - .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), + .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING, .cra_priority = 4001, .cra_alignmask = 0, .cra_module = THIS_MODULE, @@ -1466,7 +1467,7 @@ static 
struct aead_alg otx_cpt_aeads[] = { {
 		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
 		.cra_priority = 4001,
 		.cra_alignmask = 0,
 		.cra_module = THIS_MODULE,
@@ -1485,7 +1486,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
 		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
 		.cra_blocksize = 1,
 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
 		.cra_priority = 4001,
 		.cra_alignmask = 0,
 		.cra_module = THIS_MODULE,
@@ -1504,7 +1505,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
 		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
 		.cra_blocksize = 1,
 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
 		.cra_priority = 4001,
 		.cra_alignmask = 0,
 		.cra_module = THIS_MODULE,
@@ -1523,7 +1524,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
 		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
 		.cra_blocksize = 1,
 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
 		.cra_priority = 4001,
 		.cra_alignmask = 0,
 		.cra_module = THIS_MODULE,
@@ -1542,7 +1543,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
 		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
 		.cra_blocksize = 1,
 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
 		.cra_priority = 4001,
 		.cra_alignmask = 0,
 		.cra_module = THIS_MODULE,
@@ -1561,7 +1562,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
 		.cra_driver_name = "cpt_rfc4106_gcm_aes",
 		.cra_blocksize = 1,
 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
 		.cra_priority = 4001,
 		.cra_alignmask = 0,
 		.cra_module = THIS_MODULE,
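The .cra_ctxsize changes pair with the crypto_aead_ctx_dma() conversions in this file: when the transform context is handed to hardware, the driver reserves CRYPTO_DMA_PADDING extra bytes so the DMA-aligned view returned by crypto_aead_ctx_dma() still fits inside the allocation. A sketch with invented names (my_aead_ctx, my_setkey); only CRYPTO_DMA_PADDING and crypto_aead_ctx_dma() are the real API:

	struct my_aead_ctx {
		u8 key[32];		/* consumed by the accelerator via DMA */
	};

	static int my_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
	{
		/* DMA-aligned view of the tfm context */
		struct my_aead_ctx *ctx = crypto_aead_ctx_dma(aead);

		if (keylen > sizeof(ctx->key))
			return -EINVAL;
		memcpy(ctx->key, key, keylen);
		return 0;
	}

	/* In the alg definition, the padding makes room for the alignment:
	 *	.cra_ctxsize = sizeof(struct my_aead_ctx) + CRYPTO_DMA_PADDING,
	 */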
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 67530e90bbfed3f8aa8fa346b6a9f81e7661fe83..30b423605c9c1214f6d7baae6de8eb723194c06f 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -87,7 +87,7 @@ static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
 
 	req = container_of(cpt_req->areq, struct aead_request, base);
 	tfm = crypto_aead_reqtfm(req);
-	rctx = aead_request_ctx(req);
+	rctx = aead_request_ctx_dma(req);
 	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
 		   rctx->fctx.hmac.s.hmac_recv,
 		   crypto_aead_authsize(tfm)) != 0)
@@ -137,7 +137,7 @@ static void output_iv_copyback(struct crypto_async_request *areq)
 	ctx = crypto_skcipher_ctx(stfm);
 	if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
 	    ctx->cipher_type == OTX2_CPT_DES3_CBC) {
-		rctx = skcipher_request_ctx(sreq);
+		rctx = skcipher_request_ctx_dma(sreq);
 		req_info = &rctx->cpt_req;
 		ivsize = crypto_skcipher_ivsize(stfm);
 		start = sreq->cryptlen - ivsize;
@@ -219,7 +219,7 @@ static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
 				 u32 *argcnt)
 {
 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
 	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
@@ -288,7 +288,7 @@ static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
 static inline int create_input_list(struct skcipher_request *req, u32 enc,
 				    u32 enc_iv_len)
 {
-	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
 	u32 argcnt = 0;
 	int ret;
@@ -306,7 +306,7 @@ static inline int create_input_list(struct skcipher_request *req, u32 enc,
 static inline void create_output_list(struct skcipher_request *req,
 				      u32 enc_iv_len)
 {
-	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
 	u32 argcnt = 0;
 
@@ -325,7 +325,7 @@ static inline void create_output_list(struct skcipher_request *req,
 static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
 {
 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
 	int ret;
 
@@ -348,7 +348,7 @@ static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
 static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
 {
 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
 	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
@@ -537,8 +537,9 @@ static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
 	 * allocated since the cryptd daemon uses
 	 * this memory for request_ctx information
 	 */
-	crypto_skcipher_set_reqsize(stfm, sizeof(struct otx2_cpt_req_ctx) +
-				    sizeof(struct skcipher_request));
+	crypto_skcipher_set_reqsize_dma(
+		stfm, sizeof(struct otx2_cpt_req_ctx) +
+		      sizeof(struct skcipher_request));
 
 	return cpt_skcipher_fallback_init(ctx, alg);
 }
@@ -572,7 +573,7 @@ static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
 
 static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
 {
-	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(atfm);
+	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(atfm);
 	struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
 	struct crypto_alg *alg = tfm->__crt_alg;
 
@@ -629,7 +630,7 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
 		ctx->enc_align_len = 1;
 		break;
 	}
-	crypto_aead_set_reqsize(atfm, sizeof(struct otx2_cpt_req_ctx));
+	crypto_aead_set_reqsize_dma(atfm, sizeof(struct otx2_cpt_req_ctx));
 
 	return cpt_aead_fallback_init(ctx, alg);
 }
@@ -681,7 +682,7 @@ static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
 
 static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
 {
-	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
 
 	kfree(ctx->ipad);
 	kfree(ctx->opad);
@@ -698,7 +699,7 @@ static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
 static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
 					  unsigned int authsize)
 {
-	struct otx2_cpt_aead_ctx *ctx 
= crypto_aead_ctx(tfm); + struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm); if (crypto_rfc4106_check_authsize(authsize)) return -EINVAL; @@ -722,7 +723,7 @@ static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm, static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm, unsigned int authsize) { - struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm); ctx->is_trunc_hmac = true; tfm->authsize = authsize; @@ -794,7 +795,7 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) static int aead_hmac_init(struct crypto_aead *cipher) { - struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher); + struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); int state_size = crypto_shash_statesize(ctx->hashalg); int ds = crypto_shash_digestsize(ctx->hashalg); int bs = crypto_shash_blocksize(ctx->hashalg); @@ -892,7 +893,7 @@ static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher, const unsigned char *key, unsigned int keylen) { - struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher); + struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); struct crypto_authenc_key_param *param; int enckeylen = 0, authkeylen = 0; struct rtattr *rta = (void *)key; @@ -944,7 +945,7 @@ static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher, const unsigned char *key, unsigned int keylen) { - struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher); + struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); struct crypto_authenc_key_param *param; struct rtattr *rta = (void *)key; int enckeylen = 0; @@ -979,7 +980,7 @@ static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher, const unsigned char *key, unsigned int keylen) { - struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher); + struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); /* * For aes gcm we expect to get encryption key (16, 24, 32 bytes) @@ -1012,9 +1013,9 @@ static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher, static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc, u32 *argcnt) { - struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm); struct otx2_cpt_req_info *req_info = &rctx->cpt_req; struct otx2_cpt_fc_ctx *fctx = &rctx->fctx; int mac_len = crypto_aead_authsize(tfm); @@ -1103,9 +1104,9 @@ static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc, static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt, u32 enc) { - struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm); struct otx2_cpt_req_info *req_info = &rctx->cpt_req; req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG; @@ -1127,7 +1128,7 @@ static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt, static inline int create_aead_input_list(struct aead_request *req, u32 enc) { - struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct otx2_cpt_req_info *req_info = &rctx->cpt_req; u32 inputlen = req->cryptlen + req->assoclen; u32 status, argcnt = 0; @@ -1144,7 
+1145,7 @@ static inline int create_aead_input_list(struct aead_request *req, u32 enc) static inline void create_aead_output_list(struct aead_request *req, u32 enc, u32 mac_len) { - struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct otx2_cpt_req_info *req_info = &rctx->cpt_req; u32 argcnt = 0, outputlen = 0; @@ -1160,7 +1161,7 @@ static inline void create_aead_output_list(struct aead_request *req, u32 enc, static inline void create_aead_null_input_list(struct aead_request *req, u32 enc, u32 mac_len) { - struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct otx2_cpt_req_info *req_info = &rctx->cpt_req; u32 inputlen, argcnt = 0; @@ -1177,7 +1178,7 @@ static inline void create_aead_null_input_list(struct aead_request *req, static inline int create_aead_null_output_list(struct aead_request *req, u32 enc, u32 mac_len) { - struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct otx2_cpt_req_info *req_info = &rctx->cpt_req; struct scatterlist *dst; u8 *ptr = NULL; @@ -1257,9 +1258,9 @@ error_free: static int aead_do_fallback(struct aead_request *req, bool is_enc) { - struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(aead); + struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(aead); int ret; if (ctx->fbk_cipher) { @@ -1281,10 +1282,10 @@ static int aead_do_fallback(struct aead_request *req, bool is_enc) static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc) { - struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req); + struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req); struct otx2_cpt_req_info *req_info = &rctx->cpt_req; struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm); struct pci_dev *pdev; int status, cpu_num; @@ -1458,7 +1459,7 @@ static struct aead_alg otx2_cpt_aeads[] = { { .cra_driver_name = "cpt_hmac_sha1_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, - .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx), + .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING, .cra_priority = 4001, .cra_alignmask = 0, .cra_module = THIS_MODULE, @@ -1477,7 +1478,7 @@ static struct aead_alg otx2_cpt_aeads[] = { { .cra_driver_name = "cpt_hmac_sha256_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, - .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx), + .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING, .cra_priority = 4001, .cra_alignmask = 0, .cra_module = THIS_MODULE, @@ -1496,7 +1497,7 @@ static struct aead_alg otx2_cpt_aeads[] = { { .cra_driver_name = "cpt_hmac_sha384_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, - .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx), + .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING, .cra_priority = 4001, .cra_alignmask = 0, .cra_module = THIS_MODULE, @@ -1515,7 +1516,7 @@ static struct aead_alg otx2_cpt_aeads[] = { { .cra_driver_name = "cpt_hmac_sha512_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, - 
.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx), + .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING, .cra_priority = 4001, .cra_alignmask = 0, .cra_module = THIS_MODULE, @@ -1534,7 +1535,7 @@ static struct aead_alg otx2_cpt_aeads[] = { { .cra_driver_name = "cpt_hmac_sha1_ecb_null", .cra_blocksize = 1, .cra_flags = CRYPTO_ALG_ASYNC, - .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx), + .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING, .cra_priority = 4001, .cra_alignmask = 0, .cra_module = THIS_MODULE, @@ -1553,7 +1554,7 @@ static struct aead_alg otx2_cpt_aeads[] = { { .cra_driver_name = "cpt_hmac_sha256_ecb_null", .cra_blocksize = 1, .cra_flags = CRYPTO_ALG_ASYNC, - .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx), + .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING, .cra_priority = 4001, .cra_alignmask = 0, .cra_module = THIS_MODULE, @@ -1572,7 +1573,7 @@ static struct aead_alg otx2_cpt_aeads[] = { { .cra_driver_name = "cpt_hmac_sha384_ecb_null", .cra_blocksize = 1, .cra_flags = CRYPTO_ALG_ASYNC, - .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx), + .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING, .cra_priority = 4001, .cra_alignmask = 0, .cra_module = THIS_MODULE, @@ -1591,7 +1592,7 @@ static struct aead_alg otx2_cpt_aeads[] = { { .cra_driver_name = "cpt_hmac_sha512_ecb_null", .cra_blocksize = 1, .cra_flags = CRYPTO_ALG_ASYNC, - .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx), + .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING, .cra_priority = 4001, .cra_alignmask = 0, .cra_module = THIS_MODULE, @@ -1610,7 +1611,7 @@ static struct aead_alg otx2_cpt_aeads[] = { { .cra_driver_name = "cpt_rfc4106_gcm_aes", .cra_blocksize = 1, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, - .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx), + .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING, .cra_priority = 4001, .cra_alignmask = 0, .cra_module = THIS_MODULE, diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 31e24df18877f52a07b6d55c36a2b1d78da1cd2d..20d0dcd50344b3335448adb83c0ef32c36e5596c 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1229,6 +1229,7 @@ struct n2_hash_tmpl { const u8 *hash_init; u8 hw_op_hashsz; u8 digest_size; + u8 statesize; u8 block_size; u8 auth_type; u8 hmac_type; @@ -1260,6 +1261,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_HMAC_MD5, .hw_op_hashsz = MD5_DIGEST_SIZE, .digest_size = MD5_DIGEST_SIZE, + .statesize = sizeof(struct md5_state), .block_size = MD5_HMAC_BLOCK_SIZE }, { .name = "sha1", .hash_zero = sha1_zero_message_hash, @@ -1268,6 +1270,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_HMAC_SHA1, .hw_op_hashsz = SHA1_DIGEST_SIZE, .digest_size = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), .block_size = SHA1_BLOCK_SIZE }, { .name = "sha256", .hash_zero = sha256_zero_message_hash, @@ -1276,6 +1279,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_HMAC_SHA256, .hw_op_hashsz = SHA256_DIGEST_SIZE, .digest_size = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), .block_size = SHA256_BLOCK_SIZE }, { .name = "sha224", .hash_zero = sha224_zero_message_hash, @@ -1284,6 +1288,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_RESERVED, .hw_op_hashsz = SHA256_DIGEST_SIZE, .digest_size = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), .block_size = 
SHA224_BLOCK_SIZE },
 };
 
 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
@@ -1424,6 +1429,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
 
 	halg = &ahash->halg;
 	halg->digestsize = tmpl->digest_size;
+	halg->statesize = tmpl->statesize;
 
 	base = &halg->base;
 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
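The n2 change matters because an ahash that implements export()/import() must advertise how many bytes of state it exports; with halg->statesize left at zero the crypto core has no defined export size and rejects or mishandles the algorithm. Each template therefore carries the size of the matching generic state struct. A reduced sketch of the template idea, using the real sizes from <crypto/sha1.h> but an invented template struct:

	#include <crypto/sha1.h>

	struct my_hash_tmpl {
		u8 digest_size;	/* bytes in the final digest */
		u8 statesize;	/* bytes written by export() */
	};

	static const struct my_hash_tmpl my_sha1_tmpl = {
		.digest_size = SHA1_DIGEST_SIZE,
		.statesize   = sizeof(struct sha1_state),
	};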
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index b66f19ac600f2a1df6c49e12624c564ce8ba9e57..7590bfb24d79bf427d9ed6a5b54874179dfa9e56 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -3,10 +3,10 @@
 #ifndef __NX_842_H__
 #define __NX_842_H__
 
+#include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 655a7f5a406a17d80ebb3103acdd0650a111fdec..cbeda59c6b1911e7cd17e7eb180770b32936fc1f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2114,7 +2114,7 @@ static int omap_sham_probe(struct platform_device *pdev)
 
 	pm_runtime_enable(dev);
 
-	err = pm_runtime_get_sync(dev);
+	err = pm_runtime_resume_and_get(dev);
 	if (err < 0) {
 		dev_err(dev, "failed to get sync: %d\n", err);
 		goto err_pm;
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index fda5f699ff57514362f2ced23f2afdde3a8d7474..834a705180c0254efebd28726713f835d147d93b 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -4,6 +4,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -357,10 +358,11 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
 	hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
 	hw_data->enable_pm = adf_gen4_enable_pm;
 	hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
-	hw_data->dev_config = adf_crypto_dev_config;
+	hw_data->dev_config = adf_gen4_dev_config;
 
 	adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
 	adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+	adf_gen4_init_dc_ops(&hw_data->dc_ops);
 }
 
 void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
index 9d49248931f6a93a32542e027d6e5bbffc1f1586..e98428ba78e29b0956f0a891b8e1983c037111e2 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -70,6 +70,6 @@ enum icp_qat_4xxx_slice_mask {
 void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data);
 void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
 
-int adf_crypto_dev_config(struct adf_accel_dev *accel_dev);
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
 
 #endif
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
index 2f212561acc47b8c95e52c076ae7cc92980e06c0..b3a4c7b238644f6e451f114ce0eea41c964cb731 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c
@@ -9,6 +9,7 @@
 #include
 
 #include "adf_4xxx_hw_data.h"
+#include "qat_compression.h"
 #include "qat_crypto.h"
 #include "adf_transport_access_macros.h"
 
@@ -19,6 +20,16 @@ static const struct pci_device_id adf_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
 
+enum configs {
+	DEV_CFG_CY = 0,
+	DEV_CFG_DC,
+};
+
+static const char * const services_operations[] = {
+	ADF_CFG_CY,
+	ADF_CFG_DC,
+};
+
 static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
 {
 	if (accel_dev->hw_device) {
@@ -53,7 +64,7 @@ static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
 	return 0;
 }
 
-int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
 {
 	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
 	int banks = GET_MAX_BANKS(accel_dev);
@@ -68,14 +79,6 @@ int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
 	else
 		instances = 0;
 
-	ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
-	if (ret)
-		goto err;
-
-	ret = adf_cfg_section_add(accel_dev, "Accelerator0");
-	if (ret)
-		goto err;
-
 	for (i = 0; i < instances; i++) {
 		val = i;
 		bank = i * 2;
@@ -155,10 +158,128 @@ int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
 	if (ret)
 		goto err;
 
-	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+	val = 0;
+	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+					  &val, ADF_DEC);
+	if (ret)
+		goto err;
+
 	return 0;
 err:
-	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
+	dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
+	return ret;
+}
+
+static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	int banks = GET_MAX_BANKS(accel_dev);
+	int cpus = num_online_cpus();
+	unsigned long val;
+	int instances;
+	int ret;
+	int i;
+
+	if (adf_hw_dev_has_compression(accel_dev))
+		instances = min(cpus, banks);
+	else
+		instances = 0;
+
+	for (i = 0; i < instances; i++) {
+		val = i;
+		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						  key, &val, ADF_DEC);
+		if (ret)
+			goto err;
+
+		val = 512;
+		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						  key, &val, ADF_DEC);
+		if (ret)
+			goto err;
+
+		val = 0;
+		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						  key, &val, ADF_DEC);
+		if (ret)
+			goto err;
+
+		val = 1;
+		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						  key, &val, ADF_DEC);
+		if (ret)
+			goto err;
+
+		val = ADF_COALESCING_DEF_TIME;
+		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+		ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+						  key, &val, ADF_DEC);
+		if (ret)
+			goto err;
+	}
+
+	val = i;
+	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+					  &val, ADF_DEC);
+	if (ret)
+		goto err;
+
+	val = 0;
+	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+					  &val, ADF_DEC);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
+	return ret;
+}
+
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
+{
+	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+	int ret;
+
+	ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+	if (ret)
+		goto err;
+
+	ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+	if (ret)
+		goto err;
+
+	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+				      ADF_SERVICES_ENABLED, services);
+	if (ret)
+		goto err;
+
+	ret = sysfs_match_string(services_operations, services);
+	if (ret < 0)
+		goto err;
+
+	switch (ret) {
+	case DEV_CFG_CY:
+		ret = adf_crypto_dev_config(accel_dev);
+		break;
+	case DEV_CFG_DC:
+		ret = adf_comp_dev_config(accel_dev);
+		break;
+	}
+
+	if (ret)
+		goto err;
+
+	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+	return ret;
+
+err:
+	dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
 	return ret;
 }
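adf_gen4_dev_config() above turns one configuration string into a choice of configurator: it reads the ServicesEnabled key and matches it against the services_operations[] table with sysfs_match_string() (from <linux/string.h>), which returns the matching index or -EINVAL. A standalone sketch of just the matching step; the "cy"/"dc" table values are an assumption based on the ADF_CFG_CY/ADF_CFG_DC strings in adf_cfg_strings.h:

	static const char * const services[] = { "cy", "dc" };

	static int pick_config(const char *configured)
	{
		/* index into services[] on success, -EINVAL if unknown */
		return sysfs_match_string(services, configured);
	}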
@@ -261,6 +382,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
 	if (!hw_data->accel_capabilities_mask) {
 		dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
+		ret = -EINVAL;
 		goto out_err;
 	}
 
@@ -293,7 +415,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret)
 		goto out_err_disable_aer;
 
-	ret = adf_crypto_dev_config(accel_dev);
+	ret = hw_data->dev_config(accel_dev);
 	if (ret)
 		goto out_err_disable_aer;
 
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 50d5afa26a9bee75155345bacb39b9899015bcb2..c55c51a07677d907f6f47710eccb2b8ef24e338b 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -2,6 +2,8 @@
 /* Copyright(c) 2014 - 2021 Intel Corporation */
 #include
 #include
+#include
+#include
 #include
 #include
 #include "adf_c3xxx_hw_data.h"
@@ -124,9 +126,11 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
 	hw_data->reset_device = adf_reset_flr;
 	hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
 	hw_data->disable_iov = adf_disable_sriov;
+	hw_data->dev_config = adf_gen2_dev_config;
 
 	adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
 	adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+	adf_gen2_init_dc_ops(&hw_data->dc_ops);
 }
 
 void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index 2aef0bb791dfa3f39867662c55210bd47c96ec6e..1f4fbf4562b221b7a846293199fb08608fb2911f 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -201,7 +201,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_err_disable_aer;
 	}
 
-	ret = qat_crypto_dev_config(accel_dev);
+	ret = hw_data->dev_config(accel_dev);
 	if (ret)
 		goto out_err_disable_aer;
 
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index a9fbe57b32ae35631a205604d3796604388a9ace..84d9486e04de6bc9438d7384af1e67d07af34232 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -2,6 +2,8 @@
 /* Copyright(c) 2015 - 2021 Intel Corporation */
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -86,9 +88,11 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
 	hw_data->get_sku = get_sku;
 	hw_data->enable_ints = adf_vf_void_noop;
 	hw_data->dev_class->instances++;
+	hw_data->dev_config = adf_gen2_dev_config;
 	adf_devmgr_update_class_index(hw_data);
 	adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
 	adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+	adf_gen2_init_dc_ops(&hw_data->dc_ops);
 }
 
 void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index fa18d8009f533e3bd561baee7c60bc65490a4dbc..cf4ef83e186f835a7edf85ce461869eba43f28b9 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -177,8 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret)
 		goto out_err_dev_shutdown;
 
-	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
 	ret = adf_dev_start(accel_dev);
 	if (ret)
 		goto out_err_dev_stop;
diff --git 
a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c index c00386fe6587a684f56dea91da0c1260e3e7a134..b7aa19d2fa80426e01bac03684cd6ec560b61ea3 100644 --- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c @@ -2,6 +2,8 @@ /* Copyright(c) 2014 - 2021 Intel Corporation */ #include #include +#include +#include #include #include #include "adf_c62x_hw_data.h" @@ -126,9 +128,11 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data) hw_data->reset_device = adf_reset_flr; hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer; hw_data->disable_iov = adf_disable_sriov; + hw_data->dev_config = adf_gen2_dev_config; adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); + adf_gen2_init_dc_ops(&hw_data->dc_ops); } void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data) diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c index 56163083f16163c878f02cf1859982492426a325..4ccaf298250c3c6286195381ba8d144c140d66e8 100644 --- a/drivers/crypto/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/qat/qat_c62x/adf_drv.c @@ -201,7 +201,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err_disable_aer; } - ret = qat_crypto_dev_config(accel_dev); + ret = hw_data->dev_config(accel_dev); if (ret) goto out_err_disable_aer; diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c index 0282038fca54893c93387007a0d24f83a63de912..751d7aa57fc7f04cd95c7ac42792f0286b76c4c5 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c +++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c @@ -2,6 +2,8 @@ /* Copyright(c) 2015 - 2021 Intel Corporation */ #include #include +#include +#include #include #include #include @@ -86,9 +88,11 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data) hw_data->get_sku = get_sku; hw_data->enable_ints = adf_vf_void_noop; hw_data->dev_class->instances++; + hw_data->dev_config = adf_gen2_dev_config; adf_devmgr_update_class_index(hw_data); adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops); adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); + adf_gen2_init_dc_ops(&hw_data->dc_ops); } void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data) diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c index 686ec752d0e9e8c316904837ff4c265668065f2b..0e642c94b92936bce3a0e92656c3b9865a3617e9 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c @@ -177,8 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_err_dev_shutdown; - set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); - ret = adf_dev_start(accel_dev); if (ret) goto out_err_dev_stop; diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile index 80919cfcc29dafceac7cfd65ca7247ca5327e2a6..1fb8d50f509f82c24361a1d93e6148b8244e2c94 100644 --- a/drivers/crypto/qat/qat_common/Makefile +++ b/drivers/crypto/qat/qat_common/Makefile @@ -12,14 +12,20 @@ intel_qat-objs := adf_cfg.o \ adf_hw_arbiter.o \ adf_sysfs.o \ adf_gen2_hw_data.o \ + adf_gen2_config.o \ adf_gen4_hw_data.o \ adf_gen4_pm.o \ + adf_gen2_dc.o \ + adf_gen4_dc.o \ qat_crypto.o \ + qat_compression.o \ + qat_comp_algs.o \ qat_algs.o \ qat_asym_algs.o \ qat_algs_send.o \ qat_uclo.o \ - qat_hal.o + qat_hal.o \ + qat_bl.o 
 intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
 intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 0a55a4f34dcfd803bbc2953dc14524c2509792ae..284f5aad3ee0bc687e6f4549517a7ace6d59c5aa 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -163,6 +163,10 @@ struct adf_pfvf_ops {
 			      u32 pfvf_offset, u8 compat_ver);
 };
 
+struct adf_dc_ops {
+	void (*build_deflate_ctx)(void *ctx);
+};
+
 struct adf_hw_device_data {
 	struct adf_hw_device_class *dev_class;
 	u32 (*get_accel_mask)(struct adf_hw_device_data *self);
@@ -202,6 +206,7 @@ struct adf_hw_device_data {
 	int (*dev_config)(struct adf_accel_dev *accel_dev);
 	struct adf_pfvf_ops pfvf_ops;
 	struct adf_hw_csr_ops csr_ops;
+	struct adf_dc_ops dc_ops;
 	const char *fw_name;
 	const char *fw_mmp_name;
 	u32 fuses;
@@ -247,6 +252,7 @@ struct adf_hw_device_data {
 #define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
 #define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
 #define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
+#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
 #define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
 
 struct adf_admin_comms;
@@ -266,13 +272,21 @@ struct adf_accel_vf_info {
 	u8 vf_compat_ver;
 };
 
+struct adf_dc_data {
+	u8 *ovf_buff;
+	size_t ovf_buff_sz;
+	dma_addr_t ovf_buff_p;
+};
+
 struct adf_accel_dev {
 	struct adf_etr_data *transport;
 	struct adf_hw_device_data *hw_device;
 	struct adf_cfg_device_data *cfg;
 	struct adf_fw_loader_data *fw_loader;
 	struct adf_admin_comms *admin;
+	struct adf_dc_data *dc_data;
 	struct list_head crypto_list;
+	struct list_head compression_list;
 	unsigned long status;
 	atomic_t ref_count;
 	struct dentry *debugfs_dir;
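The new adf_dc_ops hook follows the existing pfvf_ops/csr_ops convention: each device generation fills in its own build_deflate_ctx() (via the adf_gen2_init_dc_ops()/adf_gen4_init_dc_ops() calls in the hw_data init paths above), and common code reaches it through GET_DC_OPS(). A guess at the call shape, purely illustrative; the real caller lives in the compression service code:

	static inline void my_build_deflate_ctx(struct adf_accel_dev *accel_dev,
						void *ctx)
	{
		GET_DC_OPS(accel_dev)->build_deflate_ctx(ctx);
	}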
qat_compression_register(void); +int qat_compression_unregister(void); +int qat_comp_algs_register(void); +void qat_comp_algs_unregister(void); +void qat_comp_alg_callback(void *resp); + int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); void adf_isr_resource_free(struct adf_accel_dev *accel_dev); int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index 82b69e1f725ba278d76fdd414cb7b67e7d450073..9190532b27eb5bf917e5057b0f20a8a2641b5bf0 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -438,8 +438,13 @@ static int __init adf_register_ctl_device_driver(void) if (qat_crypto_register()) goto err_crypto_register; + if (qat_compression_register()) + goto err_compression_register; + return 0; +err_compression_register: + qat_crypto_unregister(); err_crypto_register: adf_exit_vf_wq(); err_vf_wq: @@ -463,6 +468,7 @@ static void __exit adf_unregister_ctl_device_driver(void) adf_exit_vf_wq(); adf_exit_pf_wq(); qat_crypto_unregister(); + qat_compression_unregister(); adf_clean_vf_map(false); mutex_destroy(&adf_ctl_lock); } diff --git a/drivers/crypto/qat/qat_common/adf_gen2_config.c b/drivers/crypto/qat/qat_common/adf_gen2_config.c new file mode 100644 index 0000000000000000000000000000000000000000..eeb30da7587a504ed85c075efe506983affbc4bd --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_gen2_config.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2022 Intel Corporation */ +#include "adf_accel_devices.h" +#include "adf_cfg.h" +#include "adf_cfg_strings.h" +#include "adf_gen2_config.h" +#include "adf_common_drv.h" +#include "qat_crypto.h" +#include "qat_compression.h" +#include "adf_transport_access_macros.h" + +static int adf_gen2_crypto_dev_config(struct adf_accel_dev *accel_dev) +{ + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + int banks = GET_MAX_BANKS(accel_dev); + int cpus = num_online_cpus(); + unsigned long val; + int instances; + int ret; + int i; + + if (adf_hw_dev_has_crypto(accel_dev)) + instances = min(cpus, banks); + else + instances = 0; + + for (i = 0; i < instances; i++) { + val = i; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, + i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); + val = 128; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 2; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 8; + 
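	/*
	 * Ring numbering used by this generated configuration: a GEN2 ring
	 * bank exposes 16 rings and the driver pairs each request (tx) ring
	 * with a response (rx) ring eight positions above it. With asym
	 * requests on ring 0 and sym requests on ring 2, the matching
	 * responses just below land on rings 8 and 10.
	 */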
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 10; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = ADF_COALESCING_DEF_TIME; + snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); + ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", + key, &val, ADF_DEC); + if (ret) + goto err; + } + + val = i; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); + if (ret) + goto err; + + return ret; + +err: + dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n"); + return ret; +} + +static int adf_gen2_comp_dev_config(struct adf_accel_dev *accel_dev) +{ + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + int banks = GET_MAX_BANKS(accel_dev); + int cpus = num_online_cpus(); + unsigned long val; + int instances; + int ret; + int i; + + if (adf_hw_dev_has_compression(accel_dev)) + instances = min(cpus, banks); + else + instances = 0; + + for (i = 0; i < instances; i++) { + val = i; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 6; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 14; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + } + + val = i; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + return ret; + + return ret; + +err: + dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n"); + return ret; +} + +/** + * adf_gen2_dev_config() - create dev config required to create instances + * + * @accel_dev: Pointer to acceleration device. + * + * Function creates device configuration required to create instances + * + * Return: 0 on success, error code otherwise. 
+ */
+int adf_gen2_dev_config(struct adf_accel_dev *accel_dev)
+{
+	int ret;
+
+	ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+	if (ret)
+		goto err;
+
+	ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+	if (ret)
+		goto err;
+
+	ret = adf_gen2_crypto_dev_config(accel_dev);
+	if (ret)
+		goto err;
+
+	ret = adf_gen2_comp_dev_config(accel_dev);
+	if (ret)
+		goto err;
+
+	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+	return ret;
+
+err:
+	dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
+	return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_dev_config);
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_config.h b/drivers/crypto/qat/qat_common/adf_gen2_config.h
new file mode 100644
index 0000000000000000000000000000000000000000..4bf9da2de68aad1f964a70e8a027e72731da40df
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen2_config.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN2_CONFIG_H_
+#define ADF_GEN2_CONFIG_H_
+
+#include "adf_accel_devices.h"
+
+int adf_gen2_dev_config(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/qat/qat_common/adf_gen2_dc.c
new file mode 100644
index 0000000000000000000000000000000000000000..47261b1c1da69fbbbd73946780a059d0c5a5095a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen2_dc.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_gen2_dc.h"
+#include "icp_qat_fw_comp.h"
+
+static void qat_comp_build_deflate_ctx(void *ctx)
+{
+	struct icp_qat_fw_comp_req *req_tmpl = (struct icp_qat_fw_comp_req *)ctx;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+	struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+	struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+	struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl;
+
+	memset(req_tmpl, 0, sizeof(*req_tmpl));
+	header->hdr_flags =
+		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+	header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+	header->comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
+					    QAT_COMN_PTR_TYPE_SGL);
+	header->serv_specif_flags =
+		ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
+					    ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+					    ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+					    ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+					    ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+	cd_pars->u.sl.comp_slice_cfg_word[0] =
+		ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
+						    ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+						    ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+						    ICP_QAT_HW_COMPRESSION_DEPTH_1,
+						    ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+	req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
+	req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
+	req_pars->req_par_flags =
+		ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
+						      ICP_QAT_FW_COMP_EOP,
+						      ICP_QAT_FW_COMP_BFINAL,
+						      ICP_QAT_FW_COMP_CNV,
+						      ICP_QAT_FW_COMP_CNV_RECOVERY,
+						      ICP_QAT_FW_COMP_NO_CNV_DFX,
+						      ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
+						      ICP_QAT_FW_COMP_NO_XXHASH_ACC,
+						      ICP_QAT_FW_COMP_CNV_ERROR_NONE,
+						      ICP_QAT_FW_COMP_NO_APPEND_CRC,
+						      ICP_QAT_FW_COMP_NO_DROP_DATA);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
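	/*
	 * The two slice-chaining fields here link the content descriptor
	 * stages: the current stage is the compression slice and its next
	 * stage is the DRAM writer, so compressed output goes straight to
	 * memory with no further processing stage in between.
	 */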
ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP); + + /* Fill second half of the template for decompression */ + memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl)); + req_tmpl++; + header = &req_tmpl->comn_hdr; + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; + cd_pars = &req_tmpl->cd_pars; + cd_pars->u.sl.comp_slice_cfg_word[0] = + ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS, + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED, + ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE, + ICP_QAT_HW_COMPRESSION_DEPTH_1, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_0); +} + +void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops) +{ + dc_ops->build_deflate_ctx = qat_comp_build_deflate_ctx; +} +EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops); diff --git a/drivers/crypto/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/qat/qat_common/adf_gen2_dc.h new file mode 100644 index 0000000000000000000000000000000000000000..6eae023354d768d30b913572e9c83eaa13983583 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_gen2_dc.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2022 Intel Corporation */ +#ifndef ADF_GEN2_DC_H +#define ADF_GEN2_DC_H + +#include "adf_accel_devices.h" + +void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops); + +#endif /* ADF_GEN2_DC_H */ diff --git a/drivers/crypto/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/qat/qat_common/adf_gen4_dc.c new file mode 100644 index 0000000000000000000000000000000000000000..5859238e37de5e8242f3c1475fed75d34aaff1e1 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_gen4_dc.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2022 Intel Corporation */ +#include "adf_accel_devices.h" +#include "icp_qat_fw_comp.h" +#include "icp_qat_hw_20_comp.h" +#include "adf_gen4_dc.h" + +static void qat_comp_build_deflate(void *ctx) +{ + struct icp_qat_fw_comp_req *req_tmpl = + (struct icp_qat_fw_comp_req *)ctx; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars; + struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0}; + struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0}; + struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0}; + u32 upper_val; + u32 lower_val; + + memset(req_tmpl, 0, sizeof(*req_tmpl)); + header->hdr_flags = + ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); + header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP; + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC; + header->comn_req_flags = + ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, + QAT_COMN_PTR_TYPE_SGL); + header->serv_specif_flags = + ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION, + ICP_QAT_FW_COMP_AUTO_SELECT_BEST, + ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST, + ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST, + ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF); + hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL; + hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77; + hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED; + hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1; + hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW; + hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED; + hw_comp_upper_csr.nice = 
ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL; + hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL; + + upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr); + lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr); + + cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val; + cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val; + + req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER; + req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC; + req_pars->req_par_flags = + ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP, + ICP_QAT_FW_COMP_EOP, + ICP_QAT_FW_COMP_BFINAL, + ICP_QAT_FW_COMP_CNV, + ICP_QAT_FW_COMP_CNV_RECOVERY, + ICP_QAT_FW_COMP_NO_CNV_DFX, + ICP_QAT_FW_COMP_CRC_MODE_LEGACY, + ICP_QAT_FW_COMP_NO_XXHASH_ACC, + ICP_QAT_FW_COMP_CNV_ERROR_NONE, + ICP_QAT_FW_COMP_NO_APPEND_CRC, + ICP_QAT_FW_COMP_NO_DROP_DATA); + + /* Fill second half of the template for decompression */ + memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl)); + req_tmpl++; + header = &req_tmpl->comn_hdr; + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; + cd_pars = &req_tmpl->cd_pars; + + hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE; + lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr); + + cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val; + cd_pars->u.sl.comp_slice_cfg_word[1] = 0; +} + +void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops) +{ + dc_ops->build_deflate_ctx = qat_comp_build_deflate; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops); diff --git a/drivers/crypto/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/qat/qat_common/adf_gen4_dc.h new file mode 100644 index 0000000000000000000000000000000000000000..0b1a6774412eb12cb6636d125495de0b7862baf3 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_gen4_dc.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2022 Intel Corporation */ +#ifndef ADF_GEN4_DC_H +#define ADF_GEN4_DC_H + +#include "adf_accel_devices.h" + +void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops); + +#endif /* ADF_GEN4_DC_H */ diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index 33a9a46d69494516409c529d8c5d5ab04cf6d9c7..cef7bb8ec007330cde32bf666eefb861519b2330 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -209,6 +209,14 @@ int adf_dev_start(struct adf_accel_dev *accel_dev) clear_bit(ADF_STATUS_STARTED, &accel_dev->status); return -EFAULT; } + + if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) { + dev_err(&GET_DEV(accel_dev), + "Failed to register compression algs\n"); + set_bit(ADF_STATUS_STARTING, &accel_dev->status); + clear_bit(ADF_STATUS_STARTED, &accel_dev->status); + return -EFAULT; + } return 0; } EXPORT_SYMBOL_GPL(adf_dev_start); @@ -242,6 +250,9 @@ void adf_dev_stop(struct adf_accel_dev *accel_dev) qat_asym_algs_unregister(); } + if (!list_empty(&accel_dev->compression_list)) + qat_comp_algs_unregister(); + list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (!test_bit(accel_dev->accel_id, service->start_status)) diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c index b2db1d70d71fbd8462887a6324226eb884503c72..d85a90cc387b4f6337599c979a0a069a8335b020 100644 --- a/drivers/crypto/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/qat/qat_common/adf_sriov.c @@ -170,6 +170,10 @@ int 
adf_sriov_configure(struct pci_dev *pdev, int numvfs) if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, (void *)&val, ADF_DEC)) return -EFAULT; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + return ret; set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h index 6dc09d270082f9136874e03fb032d94d26ad029d..c141160421e19a51fed9c60d71572b60f304844e 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h @@ -116,6 +116,10 @@ struct icp_qat_fw_comn_resp { #define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7 #define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1 #define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F +#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6 +#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1 +#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5 +#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1 #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \ icp_qat_fw_comn_req_hdr_t.service_type @@ -132,6 +136,26 @@ struct icp_qat_fw_comn_resp { #define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \ ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags) +#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_CNVNR_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_SET(hdr_t, val) \ + QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ + ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_CNVNR_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_CNV_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_SET(hdr_t, val) \ + QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ + ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_CNV_FLAG_MASK) + #define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \ ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/qat/qat_common/icp_qat_fw_comp.h new file mode 100644 index 0000000000000000000000000000000000000000..a03d43fef2b370e0583f5a2fee8ff5b707836bcb --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_comp.h @@ -0,0 +1,404 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2022 Intel Corporation */ +#ifndef _ICP_QAT_FW_COMP_H_ +#define _ICP_QAT_FW_COMP_H_ +#include "icp_qat_fw.h" + +enum icp_qat_fw_comp_cmd_id { + ICP_QAT_FW_COMP_CMD_STATIC = 0, + ICP_QAT_FW_COMP_CMD_DYNAMIC = 1, + ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2, + ICP_QAT_FW_COMP_CMD_DELIMITER +}; + +enum icp_qat_fw_comp_20_cmd_id { + ICP_QAT_FW_COMP_20_CMD_LZ4_COMPRESS = 3, + ICP_QAT_FW_COMP_20_CMD_LZ4_DECOMPRESS = 4, + ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS = 5, + ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS = 6, + ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS = 7, + ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS = 8, + ICP_QAT_FW_COMP_20_CMD_RESERVED_9 = 9, + ICP_QAT_FW_COMP_23_CMD_ZSTD_COMPRESS = 10, + ICP_QAT_FW_COMP_23_CMD_ZSTD_DECOMPRESS = 11, + ICP_QAT_FW_COMP_20_CMD_DELIMITER +}; + +#define ICP_QAT_FW_COMP_STATELESS_SESSION 0 +#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1 +#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0 +#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1 +#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0 +#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1 +#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0 +#define 
ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1 +#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1 +#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0 +#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2 +#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1 +#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3 +#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1 +#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4 +#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1 +#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5 +#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1 +#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7 +#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1 + +#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \ + ret_uncomp, secure_ram) \ + ((((sesstype) & ICP_QAT_FW_COMP_SESSION_TYPE_MASK) << \ + ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \ + (((autoselect) & ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) << \ + ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \ + (((enhanced_asb) & ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) << \ + ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \ + (((ret_uncomp) & ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) << \ + ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \ + (((secure_ram) & ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) << \ + ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS)) + +#define ICP_QAT_FW_COMP_SESSION_TYPE_GET(flags) \ + QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \ + ICP_QAT_FW_COMP_SESSION_TYPE_MASK) + +#define ICP_QAT_FW_COMP_SESSION_TYPE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \ + ICP_QAT_FW_COMP_SESSION_TYPE_MASK) + +#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_GET(flags) \ + QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS, \ + ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) + +#define ICP_QAT_FW_COMP_EN_ASB_GET(flags) \ + QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS, \ + ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) + +#define ICP_QAT_FW_COMP_RET_UNCOMP_GET(flags) \ + QAT_FIELD_GET(flags, \ + ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS, \ + ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) + +#define ICP_QAT_FW_COMP_SECURE_RAM_USE_GET(flags) \ + QAT_FIELD_GET(flags, \ + ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS, \ + ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) + +struct icp_qat_fw_comp_req_hdr_cd_pars { + union { + struct { + __u64 content_desc_addr; + __u16 content_desc_resrvd1; + __u8 content_desc_params_sz; + __u8 content_desc_hdr_resrvd2; + __u32 content_desc_resrvd3; + } s; + struct { + __u32 comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2]; + __u32 content_desc_resrvd4; + } sl; + } u; +}; + +struct icp_qat_fw_comp_req_params { + __u32 comp_len; + __u32 out_buffer_sz; + union { + struct { + __u32 initial_crc32; + __u32 initial_adler; + } legacy; + __u64 crc_data_addr; + } crc; + __u32 req_par_flags; + __u32 rsrvd; +}; + +#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr, \ + cnvdfx, crc, xxhash_acc, \ + cnv_error_type, append_crc, \ + drop_data) \ + ((((sop) & ICP_QAT_FW_COMP_SOP_MASK) << \ + ICP_QAT_FW_COMP_SOP_BITPOS) | \ + (((eop) & ICP_QAT_FW_COMP_EOP_MASK) << \ + ICP_QAT_FW_COMP_EOP_BITPOS) | \ + (((bfinal) & ICP_QAT_FW_COMP_BFINAL_MASK) \ + << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \ + (((cnv) & ICP_QAT_FW_COMP_CNV_MASK) << \ + ICP_QAT_FW_COMP_CNV_BITPOS) | 
\ + (((cnvnr) & ICP_QAT_FW_COMP_CNVNR_MASK) \ + << ICP_QAT_FW_COMP_CNVNR_BITPOS) | \ + (((cnvdfx) & ICP_QAT_FW_COMP_CNV_DFX_MASK) \ + << ICP_QAT_FW_COMP_CNV_DFX_BITPOS) | \ + (((crc) & ICP_QAT_FW_COMP_CRC_MODE_MASK) \ + << ICP_QAT_FW_COMP_CRC_MODE_BITPOS) | \ + (((xxhash_acc) & ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK) \ + << ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS) | \ + (((cnv_error_type) & ICP_QAT_FW_COMP_CNV_ERROR_MASK) \ + << ICP_QAT_FW_COMP_CNV_ERROR_BITPOS) | \ + (((append_crc) & ICP_QAT_FW_COMP_APPEND_CRC_MASK) \ + << ICP_QAT_FW_COMP_APPEND_CRC_BITPOS) | \ + (((drop_data) & ICP_QAT_FW_COMP_DROP_DATA_MASK) \ + << ICP_QAT_FW_COMP_DROP_DATA_BITPOS)) + +#define ICP_QAT_FW_COMP_NOT_SOP 0 +#define ICP_QAT_FW_COMP_SOP 1 +#define ICP_QAT_FW_COMP_NOT_EOP 0 +#define ICP_QAT_FW_COMP_EOP 1 +#define ICP_QAT_FW_COMP_NOT_BFINAL 0 +#define ICP_QAT_FW_COMP_BFINAL 1 +#define ICP_QAT_FW_COMP_NO_CNV 0 +#define ICP_QAT_FW_COMP_CNV 1 +#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0 +#define ICP_QAT_FW_COMP_CNV_RECOVERY 1 +#define ICP_QAT_FW_COMP_NO_CNV_DFX 0 +#define ICP_QAT_FW_COMP_CNV_DFX 1 +#define ICP_QAT_FW_COMP_CRC_MODE_LEGACY 0 +#define ICP_QAT_FW_COMP_CRC_MODE_E2E 1 +#define ICP_QAT_FW_COMP_NO_XXHASH_ACC 0 +#define ICP_QAT_FW_COMP_XXHASH_ACC 1 +#define ICP_QAT_FW_COMP_APPEND_CRC 1 +#define ICP_QAT_FW_COMP_NO_APPEND_CRC 0 +#define ICP_QAT_FW_COMP_DROP_DATA 1 +#define ICP_QAT_FW_COMP_NO_DROP_DATA 0 +#define ICP_QAT_FW_COMP_SOP_BITPOS 0 +#define ICP_QAT_FW_COMP_SOP_MASK 0x1 +#define ICP_QAT_FW_COMP_EOP_BITPOS 1 +#define ICP_QAT_FW_COMP_EOP_MASK 0x1 +#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6 +#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1 +#define ICP_QAT_FW_COMP_CNV_BITPOS 16 +#define ICP_QAT_FW_COMP_CNV_MASK 0x1 +#define ICP_QAT_FW_COMP_CNVNR_BITPOS 17 +#define ICP_QAT_FW_COMP_CNVNR_MASK 0x1 +#define ICP_QAT_FW_COMP_CNV_DFX_BITPOS 18 +#define ICP_QAT_FW_COMP_CNV_DFX_MASK 0x1 +#define ICP_QAT_FW_COMP_CRC_MODE_BITPOS 19 +#define ICP_QAT_FW_COMP_CRC_MODE_MASK 0x1 +#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS 20 +#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK 0x1 +#define ICP_QAT_FW_COMP_CNV_ERROR_BITPOS 21 +#define ICP_QAT_FW_COMP_CNV_ERROR_MASK 0b111 +#define ICP_QAT_FW_COMP_CNV_ERROR_NONE 0b000 +#define ICP_QAT_FW_COMP_CNV_ERROR_CHECKSUM 0b001 +#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR_OBC_DIFF 0b010 +#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR 0b011 +#define ICP_QAT_FW_COMP_CNV_ERROR_XLT 0b100 +#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR_IBC_DIFF 0b101 +#define ICP_QAT_FW_COMP_APPEND_CRC_BITPOS 24 +#define ICP_QAT_FW_COMP_APPEND_CRC_MASK 0x1 +#define ICP_QAT_FW_COMP_DROP_DATA_BITPOS 25 +#define ICP_QAT_FW_COMP_DROP_DATA_MASK 0x1 + +#define ICP_QAT_FW_COMP_SOP_GET(flags) \ + QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SOP_BITPOS, \ + ICP_QAT_FW_COMP_SOP_MASK) + +#define ICP_QAT_FW_COMP_SOP_SET(flags, val) \ + QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_SOP_BITPOS, \ + ICP_QAT_FW_COMP_SOP_MASK) + +#define ICP_QAT_FW_COMP_EOP_GET(flags) \ + QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_EOP_BITPOS, \ + ICP_QAT_FW_COMP_EOP_MASK) + +#define ICP_QAT_FW_COMP_EOP_SET(flags, val) \ + QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_EOP_BITPOS, \ + ICP_QAT_FW_COMP_EOP_MASK) + +#define ICP_QAT_FW_COMP_BFINAL_GET(flags) \ + QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_BFINAL_BITPOS, \ + ICP_QAT_FW_COMP_BFINAL_MASK) + +#define ICP_QAT_FW_COMP_BFINAL_SET(flags, val) \ + QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_BFINAL_BITPOS, \ + ICP_QAT_FW_COMP_BFINAL_MASK) + +#define ICP_QAT_FW_COMP_CNV_GET(flags) \ + QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_BITPOS, \ + 
ICP_QAT_FW_COMP_CNV_MASK) + +#define ICP_QAT_FW_COMP_CNVNR_GET(flags) \ + QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNVNR_BITPOS, \ + ICP_QAT_FW_COMP_CNVNR_MASK) + +#define ICP_QAT_FW_COMP_CNV_DFX_GET(flags) \ + QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_DFX_BITPOS, \ + ICP_QAT_FW_COMP_CNV_DFX_MASK) + +#define ICP_QAT_FW_COMP_CNV_DFX_SET(flags, val) \ + QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_CNV_DFX_BITPOS, \ + ICP_QAT_FW_COMP_CNV_DFX_MASK) + +#define ICP_QAT_FW_COMP_CRC_MODE_GET(flags) \ + QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CRC_MODE_BITPOS, \ + ICP_QAT_FW_COMP_CRC_MODE_MASK) + +#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_GET(flags) \ + QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \ + ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK) + +#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \ + ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK) + +#define ICP_QAT_FW_COMP_CNV_ERROR_TYPE_GET(flags) \ + QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_ERROR_BITPOS, \ + ICP_QAT_FW_COMP_CNV_ERROR_MASK) + +#define ICP_QAT_FW_COMP_CNV_ERROR_TYPE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_CNV_ERROR_BITPOS, \ + ICP_QAT_FW_COMP_CNV_ERROR_MASK) + +struct icp_qat_fw_xlt_req_params { + __u64 inter_buff_ptr; +}; + +struct icp_qat_fw_comp_cd_hdr { + __u16 ram_bank_flags; + __u8 comp_cfg_offset; + __u8 next_curr_id; + __u32 resrvd; + __u64 comp_state_addr; + __u64 ram_banks_addr; +}; + +#define COMP_CPR_INITIAL_CRC 0 +#define COMP_CPR_INITIAL_ADLER 1 + +struct icp_qat_fw_xlt_cd_hdr { + __u16 resrvd1; + __u8 resrvd2; + __u8 next_curr_id; + __u32 resrvd3; +}; + +struct icp_qat_fw_comp_req { + struct icp_qat_fw_comn_req_hdr comn_hdr; + struct icp_qat_fw_comp_req_hdr_cd_pars cd_pars; + struct icp_qat_fw_comn_req_mid comn_mid; + struct icp_qat_fw_comp_req_params comp_pars; + union { + struct icp_qat_fw_xlt_req_params xlt_pars; + __u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2]; + } u1; + __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2]; + struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl; + union { + struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl; + __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2]; + } u2; +}; + +struct icp_qat_fw_resp_comp_pars { + __u32 input_byte_counter; + __u32 output_byte_counter; + union { + struct { + __u32 curr_crc32; + __u32 curr_adler_32; + } legacy; + __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_2]; + } crc; +}; + +struct icp_qat_fw_comp_state { + __u32 rd8_counter; + __u32 status_flags; + __u32 in_counter; + __u32 out_counter; + __u64 intermediate_state; + __u32 lobc; + __u32 replaybc; + __u64 pcrc64_poly; + __u32 crc32; + __u32 adler_xxhash32; + __u64 pcrc64_xorout; + __u32 out_buf_size; + __u32 in_buf_size; + __u64 in_pcrc64; + __u64 out_pcrc64; + __u32 lobs; + __u32 libc; + __u64 reserved; + __u32 xxhash_state[4]; + __u32 cleartext[4]; +}; + +struct icp_qat_fw_comp_resp { + struct icp_qat_fw_comn_resp_hdr comn_resp; + __u64 opaque_data; + struct icp_qat_fw_resp_comp_pars comp_resp_pars; +}; + +#define QAT_FW_COMP_BANK_FLAG_MASK 0x1 +#define QAT_FW_COMP_BANK_I_BITPOS 8 +#define QAT_FW_COMP_BANK_H_BITPOS 7 +#define QAT_FW_COMP_BANK_G_BITPOS 6 +#define QAT_FW_COMP_BANK_F_BITPOS 5 +#define QAT_FW_COMP_BANK_E_BITPOS 4 +#define QAT_FW_COMP_BANK_D_BITPOS 3 +#define QAT_FW_COMP_BANK_C_BITPOS 2 +#define QAT_FW_COMP_BANK_B_BITPOS 1 +#define QAT_FW_COMP_BANK_A_BITPOS 0 + +enum icp_qat_fw_comp_bank_enabled { + ICP_QAT_FW_COMP_BANK_DISABLED = 0, + ICP_QAT_FW_COMP_BANK_ENABLED = 1, + ICP_QAT_FW_COMP_BANK_DELIMITER = 2 +}; + +#define 
ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(bank_i_enable, bank_h_enable, \ + bank_g_enable, bank_f_enable, \ + bank_e_enable, bank_d_enable, \ + bank_c_enable, bank_b_enable, \ + bank_a_enable) \ + ((((bank_i_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \ + QAT_FW_COMP_BANK_I_BITPOS) | \ + (((bank_h_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \ + QAT_FW_COMP_BANK_H_BITPOS) | \ + (((bank_g_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \ + QAT_FW_COMP_BANK_G_BITPOS) | \ + (((bank_f_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \ + QAT_FW_COMP_BANK_F_BITPOS) | \ + (((bank_e_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \ + QAT_FW_COMP_BANK_E_BITPOS) | \ + (((bank_d_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \ + QAT_FW_COMP_BANK_D_BITPOS) | \ + (((bank_c_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \ + QAT_FW_COMP_BANK_C_BITPOS) | \ + (((bank_b_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \ + QAT_FW_COMP_BANK_B_BITPOS) | \ + (((bank_a_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \ + QAT_FW_COMP_BANK_A_BITPOS)) + +struct icp_qat_fw_comp_crc_data_struct { + __u32 crc32; + union { + __u32 adler; + __u32 xxhash; + } adler_xxhash_u; + __u32 cpr_in_crc_lo; + __u32 cpr_in_crc_hi; + __u32 cpr_out_crc_lo; + __u32 cpr_out_crc_hi; + __u32 xlt_in_crc_lo; + __u32 xlt_in_crc_hi; + __u32 xlt_out_crc_lo; + __u32 xlt_out_crc_hi; + __u32 prog_crc_poly_lo; + __u32 prog_crc_poly_hi; + __u32 xor_out_lo; + __u32 xor_out_hi; + __u32 append_crc_lo; + __u32 append_crc_hi; +}; + +struct xxhash_acc_state_buff { + __u32 in_counter; + __u32 out_counter; + __u32 xxhash_state[4]; + __u32 clear_txt[4]; +}; + +#endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h index 433304cad2edf22a0a6f8c954e798e949b964f2b..4042739bb6fa9d38412c76d889623706c32ed712 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h @@ -307,4 +307,70 @@ struct icp_qat_hw_cipher_algo_blk { struct icp_qat_hw_ucs_cipher_aes256_f8 ucs_aes; }; } __aligned(64); + +enum icp_qat_hw_compression_direction { + ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0, + ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1, + ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2 +}; + +enum icp_qat_hw_compression_delayed_match { + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0, + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1, + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2 +}; + +enum icp_qat_hw_compression_algo { + ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0, + ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1, + ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2 +}; + +enum icp_qat_hw_compression_depth { + ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0, + ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1, + ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2, + ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3, + ICP_QAT_HW_COMPRESSION_DEPTH_128 = 4, + ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 5 +}; + +enum icp_qat_hw_compression_file_type { + ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5 +}; + +struct icp_qat_hw_compression_config { + __u32 lower_val; + __u32 upper_val; +}; + +#define QAT_COMPRESSION_DIR_BITPOS 4 +#define QAT_COMPRESSION_DIR_MASK 0x7 +#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16 +#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1 +#define QAT_COMPRESSION_ALGO_BITPOS 31 +#define QAT_COMPRESSION_ALGO_MASK 0x1 +#define QAT_COMPRESSION_DEPTH_BITPOS 28 +#define QAT_COMPRESSION_DEPTH_MASK 0x7 +#define 
QAT_COMPRESSION_FILE_TYPE_BITPOS 24 +#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF + +#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(dir, delayed, \ + algo, depth, filetype) \ + ((((dir) & QAT_COMPRESSION_DIR_MASK) << \ + QAT_COMPRESSION_DIR_BITPOS) | \ + (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) << \ + QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \ + (((algo) & QAT_COMPRESSION_ALGO_MASK) << \ + QAT_COMPRESSION_ALGO_BITPOS) | \ + (((depth) & QAT_COMPRESSION_DEPTH_MASK) << \ + QAT_COMPRESSION_DEPTH_BITPOS) | \ + (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) << \ + QAT_COMPRESSION_FILE_TYPE_BITPOS)) + #endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h b/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h new file mode 100644 index 0000000000000000000000000000000000000000..7ea8962272f2f43ca17583cc03438530c406f4f2 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2022 Intel Corporation */ +#ifndef _ICP_QAT_HW_20_COMP_H_ +#define _ICP_QAT_HW_20_COMP_H_ + +#include "icp_qat_hw_20_comp_defs.h" +#include "icp_qat_fw.h" + +struct icp_qat_hw_comp_20_config_csr_lower { + enum icp_qat_hw_comp_20_extended_delay_match_mode edmm; + enum icp_qat_hw_comp_20_hw_comp_format algo; + enum icp_qat_hw_comp_20_search_depth sd; + enum icp_qat_hw_comp_20_hbs_control hbs; + enum icp_qat_hw_comp_20_abd abd; + enum icp_qat_hw_comp_20_lllbd_ctrl lllbd; + enum icp_qat_hw_comp_20_min_match_control mmctrl; + enum icp_qat_hw_comp_20_skip_hash_collision hash_col; + enum icp_qat_hw_comp_20_skip_hash_update hash_update; + enum icp_qat_hw_comp_20_byte_skip skip_ctrl; +}; + +static inline __u32 +ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_20_config_csr_lower csr) +{ + u32 val32 = 0; + + QAT_FIELD_SET(val32, csr.algo, + ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK); + QAT_FIELD_SET(val32, csr.sd, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK); + QAT_FIELD_SET(val32, csr.edmm, + ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK); + QAT_FIELD_SET(val32, csr.hbs, + ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK); + QAT_FIELD_SET(val32, csr.lllbd, + ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK); + QAT_FIELD_SET(val32, csr.mmctrl, + ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK); + QAT_FIELD_SET(val32, csr.hash_col, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK); + QAT_FIELD_SET(val32, csr.hash_update, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK); + QAT_FIELD_SET(val32, csr.skip_ctrl, + ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK); + QAT_FIELD_SET(val32, csr.abd, ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK); + + return __builtin_bswap32(val32); +} + +struct icp_qat_hw_comp_20_config_csr_upper { + enum icp_qat_hw_comp_20_scb_control scb_ctrl; + enum icp_qat_hw_comp_20_rmb_control rmb_ctrl; + enum icp_qat_hw_comp_20_som_control som_ctrl; + enum icp_qat_hw_comp_20_skip_hash_rd_control skip_hash_ctrl; + enum 
icp_qat_hw_comp_20_scb_unload_control scb_unload_ctrl; + enum icp_qat_hw_comp_20_disable_token_fusion_control disable_token_fusion_ctrl; + enum icp_qat_hw_comp_20_lbms lbms; + enum icp_qat_hw_comp_20_scb_mode_reset_mask scb_mode_reset; + __u16 lazy; + __u16 nice; +}; + +static inline __u32 +ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_20_config_csr_upper csr) +{ + u32 val32 = 0; + + QAT_FIELD_SET(val32, csr.scb_ctrl, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK); + QAT_FIELD_SET(val32, csr.rmb_ctrl, + ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK); + QAT_FIELD_SET(val32, csr.som_ctrl, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK); + QAT_FIELD_SET(val32, csr.skip_hash_ctrl, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK); + QAT_FIELD_SET(val32, csr.scb_unload_ctrl, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK); + QAT_FIELD_SET(val32, csr.disable_token_fusion_ctrl, + ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK); + QAT_FIELD_SET(val32, csr.lbms, + ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK); + QAT_FIELD_SET(val32, csr.scb_mode_reset, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK); + QAT_FIELD_SET(val32, csr.lazy, + ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK); + QAT_FIELD_SET(val32, csr.nice, + ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS, + ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK); + + return __builtin_bswap32(val32); +} + +struct icp_qat_hw_decomp_20_config_csr_lower { + enum icp_qat_hw_decomp_20_hbs_control hbs; + enum icp_qat_hw_decomp_20_lbms lbms; + enum icp_qat_hw_decomp_20_hw_comp_format algo; + enum icp_qat_hw_decomp_20_min_match_control mmctrl; + enum icp_qat_hw_decomp_20_lz4_block_checksum_present lbc; +}; + +static inline __u32 +ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_20_config_csr_lower csr) +{ + u32 val32 = 0; + + QAT_FIELD_SET(val32, csr.hbs, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK); + QAT_FIELD_SET(val32, csr.lbms, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK); + QAT_FIELD_SET(val32, csr.algo, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK); + QAT_FIELD_SET(val32, csr.mmctrl, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK); + QAT_FIELD_SET(val32, csr.lbc, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK); + + return __builtin_bswap32(val32); +} + +struct icp_qat_hw_decomp_20_config_csr_upper { + enum icp_qat_hw_decomp_20_speculative_decoder_control sdc; + enum icp_qat_hw_decomp_20_mini_cam_control mcc; +}; + +static inline __u32 +ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_20_config_csr_upper csr) +{ + u32 val32 = 0; + + QAT_FIELD_SET(val32, csr.sdc, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS, + 
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK); + QAT_FIELD_SET(val32, csr.mcc, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS, + ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK); + + return __builtin_bswap32(val32); +} + +#endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h b/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..208d4554283bfc8cffa50657a3f1f551f5db6007 --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h @@ -0,0 +1,300 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2022 Intel Corporation */ +#ifndef _ICP_QAT_HW_20_COMP_DEFS_H +#define _ICP_QAT_HW_20_COMP_DEFS_H + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS 31 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK 0x1 + +enum icp_qat_hw_comp_20_scb_control { + ICP_QAT_HW_COMP_20_SCB_CONTROL_ENABLE = 0x0, + ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE = 0x1, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS 30 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK 0x1 + +enum icp_qat_hw_comp_20_rmb_control { + ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL = 0x0, + ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_FC_ONLY = 0x1, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS 28 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK 0x3 + +enum icp_qat_hw_comp_20_som_control { + ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE = 0x0, + ICP_QAT_HW_COMP_20_SOM_CONTROL_REPLAY_MODE = 0x1, + ICP_QAT_HW_COMP_20_SOM_CONTROL_INPUT_CRC = 0x2, + ICP_QAT_HW_COMP_20_SOM_CONTROL_RESERVED_MODE = 0x3, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK 0x1 + +enum icp_qat_hw_comp_20_skip_hash_rd_control { + ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0, + ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS 26 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK 0x1 + +enum icp_qat_hw_comp_20_scb_unload_control { + ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD = 0x0, + ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_NO_UNLOAD = 0x1, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS 21 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK 0x1 + +enum icp_qat_hw_comp_20_disable_token_fusion_control { + ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE = 0x0, + ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_DISABLE = 0x1, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS 19 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK 0x3 + +enum icp_qat_hw_comp_20_lbms { + ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB = 0x0, + 
ICP_QAT_HW_COMP_20_LBMS_LBMS_256KB = 0x1, + ICP_QAT_HW_COMP_20_LBMS_LBMS_1MB = 0x2, + ICP_QAT_HW_COMP_20_LBMS_LBMS_4MB = 0x3, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK 0x1 + +enum icp_qat_hw_comp_20_scb_mode_reset_mask { + ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS = 0x0, + ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS_AND_HISTORY = 0x1, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS 9 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK 0x1ff +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL 258 + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS 0 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK 0x1ff +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL 259 + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7 + +enum icp_qat_hw_comp_20_hbs_control { + ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0, + ICP_QAT_HW_COMP_23_HBS_CONTROL_HBS_IS_64KB = 0x1, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS 13 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK 0x1 + +enum icp_qat_hw_comp_20_abd { + ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED = 0x0, + ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS 12 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK 0x1 + +enum icp_qat_hw_comp_20_lllbd_ctrl { + ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED = 0x0, + ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK 0xf + +enum icp_qat_hw_comp_20_search_depth { + ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1 = 0x1, + ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_6 = 0x3, + ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9 = 0x4, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1 + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS 5 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK 0x7 + +enum icp_qat_hw_comp_20_hw_comp_format { + ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77 = 0x0, + ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE = 0x1, + ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4 = 0x2, + ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4S = 0x3, + ICP_QAT_HW_COMP_23_HW_COMP_FORMAT_ZSTD = 0x4, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1 + +enum icp_qat_hw_comp_20_min_match_control { + ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0, + ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B + +#define 
ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK 0x1 + +enum icp_qat_hw_comp_20_skip_hash_collision { + ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW = 0x0, + ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK 0x1 + +enum icp_qat_hw_comp_20_skip_hash_update { + ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW = 0x0, + ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS 1 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK 0x1 + +enum icp_qat_hw_comp_20_byte_skip { + ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN = 0x0, + ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL = 0x1, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS 0 +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK 0x1 + +enum icp_qat_hw_comp_20_extended_delay_match_mode { + ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED = 0x0, + ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL \ + ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS 31 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK 0x1 + +enum icp_qat_hw_decomp_20_speculative_decoder_control { + ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE = 0x0, + ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_DISABLE = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS 30 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK 0x1 + +enum icp_qat_hw_decomp_20_mini_cam_control { + ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE = 0x0, + ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_DISABLE = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7 + +enum icp_qat_hw_decomp_20_hbs_control { + ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0, +}; + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS 8 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK 0x3 + +enum icp_qat_hw_decomp_20_lbms { + ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB = 0x0, + ICP_QAT_HW_DECOMP_20_LBMS_LBMS_256KB = 0x1, + ICP_QAT_HW_DECOMP_20_LBMS_LBMS_1MB = 0x2, + ICP_QAT_HW_DECOMP_20_LBMS_LBMS_4MB = 0x3, +}; + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS 5 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK 0x7 + +enum icp_qat_hw_decomp_20_hw_comp_format { + 
ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE = 0x1, + ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4 = 0x2, + ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4S = 0x3, + ICP_QAT_HW_DECOMP_23_HW_DECOMP_FORMAT_ZSTD = 0x4, +}; + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1 + +enum icp_qat_hw_decomp_20_min_match_control { + ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0, + ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS 3 +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK 0x1 + +enum icp_qat_hw_decomp_20_lz4_block_checksum_present { + ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT = 0x0, + ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_PRESENT = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT + +#endif diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index cad9c58caab13504ab29e66fa2e0bd5eaefc94f9..b4b9f0aa59b9860124af6d415befbcf0f9f8bb9b 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -23,6 +23,7 @@ #include "icp_qat_hw.h" #include "icp_qat_fw.h" #include "icp_qat_fw_la.h" +#include "qat_bl.h" #define QAT_AES_HW_CONFIG_ENC(alg, mode) \ ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \ @@ -663,189 +664,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key, return qat_alg_aead_newkey(tfm, key, keylen); } -static void qat_alg_free_bufl(struct qat_crypto_instance *inst, - struct qat_crypto_request *qat_req) -{ - struct device *dev = &GET_DEV(inst->accel_dev); - struct qat_alg_buf_list *bl = qat_req->buf.bl; - struct qat_alg_buf_list *blout = qat_req->buf.blout; - dma_addr_t blp = qat_req->buf.blp; - dma_addr_t blpout = qat_req->buf.bloutp; - size_t sz = qat_req->buf.sz; - size_t sz_out = qat_req->buf.sz_out; - int bl_dma_dir; - int i; - - bl_dma_dir = blp != blpout ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL; - - for (i = 0; i < bl->num_bufs; i++) - dma_unmap_single(dev, bl->bufers[i].addr, - bl->bufers[i].len, bl_dma_dir); - - dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); - - if (!qat_req->buf.sgl_src_valid) - kfree(bl); - - if (blp != blpout) { - /* If out of place operation dma unmap only data */ - int bufless = blout->num_bufs - blout->num_mapped_bufs; - - for (i = bufless; i < blout->num_bufs; i++) { - dma_unmap_single(dev, blout->bufers[i].addr, - blout->bufers[i].len, - DMA_FROM_DEVICE); - } - dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE); - - if (!qat_req->buf.sgl_dst_valid) - kfree(blout); - } -} - -static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, - struct scatterlist *sgl, - struct scatterlist *sglout, - struct qat_crypto_request *qat_req, - gfp_t flags) -{ - struct device *dev = &GET_DEV(inst->accel_dev); - int i, sg_nctr = 0; - int n = sg_nents(sgl); - struct qat_alg_buf_list *bufl; - struct qat_alg_buf_list *buflout = NULL; - dma_addr_t blp = DMA_MAPPING_ERROR; - dma_addr_t bloutp = DMA_MAPPING_ERROR; - struct scatterlist *sg; - size_t sz_out, sz = struct_size(bufl, bufers, n); - int node = dev_to_node(&GET_DEV(inst->accel_dev)); - int bufl_dma_dir; - - if (unlikely(!n)) - return -EINVAL; - - qat_req->buf.sgl_src_valid = false; - qat_req->buf.sgl_dst_valid = false; - - if (n > QAT_MAX_BUFF_DESC) { - bufl = kzalloc_node(sz, flags, node); - if (unlikely(!bufl)) - return -ENOMEM; - } else { - bufl = &qat_req->buf.sgl_src.sgl_hdr; - memset(bufl, 0, sizeof(struct qat_alg_buf_list)); - qat_req->buf.sgl_src_valid = true; - } - - bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; - - for_each_sg(sgl, sg, n, i) - bufl->bufers[i].addr = DMA_MAPPING_ERROR; - - for_each_sg(sgl, sg, n, i) { - int y = sg_nctr; - - if (!sg->length) - continue; - - bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), - sg->length, - bufl_dma_dir); - bufl->bufers[y].len = sg->length; - if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) - goto err_in; - sg_nctr++; - } - bufl->num_bufs = sg_nctr; - blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, blp))) - goto err_in; - qat_req->buf.bl = bufl; - qat_req->buf.blp = blp; - qat_req->buf.sz = sz; - /* Handle out of place operation */ - if (sgl != sglout) { - struct qat_alg_buf *bufers; - - n = sg_nents(sglout); - sz_out = struct_size(buflout, bufers, n); - sg_nctr = 0; - - if (n > QAT_MAX_BUFF_DESC) { - buflout = kzalloc_node(sz_out, flags, node); - if (unlikely(!buflout)) - goto err_in; - } else { - buflout = &qat_req->buf.sgl_dst.sgl_hdr; - memset(buflout, 0, sizeof(struct qat_alg_buf_list)); - qat_req->buf.sgl_dst_valid = true; - } - - bufers = buflout->bufers; - for_each_sg(sglout, sg, n, i) - bufers[i].addr = DMA_MAPPING_ERROR; - - for_each_sg(sglout, sg, n, i) { - int y = sg_nctr; - - if (!sg->length) - continue; - - bufers[y].addr = dma_map_single(dev, sg_virt(sg), - sg->length, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(dev, bufers[y].addr))) - goto err_out; - bufers[y].len = sg->length; - sg_nctr++; - } - buflout->num_bufs = sg_nctr; - buflout->num_mapped_bufs = sg_nctr; - bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, bloutp))) - goto err_out; - qat_req->buf.blout = buflout; - qat_req->buf.bloutp = bloutp; - qat_req->buf.sz_out = sz_out; - } else { - /* Otherwise set the src and dst to the same address */ - qat_req->buf.bloutp = qat_req->buf.blp; - qat_req->buf.sz_out = 0; - 
} - return 0; - -err_out: - if (!dma_mapping_error(dev, bloutp)) - dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); - - n = sg_nents(sglout); - for (i = 0; i < n; i++) - if (!dma_mapping_error(dev, buflout->bufers[i].addr)) - dma_unmap_single(dev, buflout->bufers[i].addr, - buflout->bufers[i].len, - DMA_FROM_DEVICE); - - if (!qat_req->buf.sgl_dst_valid) - kfree(buflout); - -err_in: - if (!dma_mapping_error(dev, blp)) - dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); - - n = sg_nents(sgl); - for (i = 0; i < n; i++) - if (!dma_mapping_error(dev, bufl->bufers[i].addr)) - dma_unmap_single(dev, bufl->bufers[i].addr, - bufl->bufers[i].len, - bufl_dma_dir); - - if (!qat_req->buf.sgl_src_valid) - kfree(bufl); - - dev_err(dev, "Failed to map buf for dma\n"); - return -ENOMEM; -} - static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp, struct qat_crypto_request *qat_req) { @@ -855,7 +673,7 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp, u8 stat_filed = qat_resp->comn_resp.comn_status; int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); - qat_alg_free_bufl(inst, qat_req); + qat_bl_free_bufl(inst->accel_dev, &qat_req->buf); if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) res = -EBADMSG; areq->base.complete(&areq->base, res); @@ -925,7 +743,7 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp, u8 stat_filed = qat_resp->comn_resp.comn_status; int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); - qat_alg_free_bufl(inst, qat_req); + qat_bl_free_bufl(inst->accel_dev, &qat_req->buf); if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) res = -EINVAL; @@ -981,7 +799,8 @@ static int qat_alg_aead_dec(struct aead_request *areq) if (cipher_len % AES_BLOCK_SIZE != 0) return -EINVAL; - ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f); + ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst, + &qat_req->buf, NULL, f); if (unlikely(ret)) return ret; @@ -1003,7 +822,7 @@ static int qat_alg_aead_dec(struct aead_request *areq) ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base); if (ret == -ENOSPC) - qat_alg_free_bufl(ctx->inst, qat_req); + qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf); return ret; } @@ -1024,7 +843,8 @@ static int qat_alg_aead_enc(struct aead_request *areq) if (areq->cryptlen % AES_BLOCK_SIZE != 0) return -EINVAL; - ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f); + ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst, + &qat_req->buf, NULL, f); if (unlikely(ret)) return ret; @@ -1048,7 +868,7 @@ static int qat_alg_aead_enc(struct aead_request *areq) ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base); if (ret == -ENOSPC) - qat_alg_free_bufl(ctx->inst, qat_req); + qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf); return ret; } @@ -1209,7 +1029,8 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) if (req->cryptlen == 0) return 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f); + ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst, + &qat_req->buf, NULL, f); if (unlikely(ret)) return ret; @@ -1230,7 +1051,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base); if (ret == -ENOSPC) - qat_alg_free_bufl(ctx->inst, qat_req); + qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf); return ret; } @@ -1275,7 +1096,8 @@ static int 
qat_alg_skcipher_decrypt(struct skcipher_request *req) if (req->cryptlen == 0) return 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f); + ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst, + &qat_req->buf, NULL, f); if (unlikely(ret)) return ret; @@ -1297,7 +1119,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base); if (ret == -ENOSPC) - qat_alg_free_bufl(ctx->inst, qat_req); + qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf); return ret; } diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.h b/drivers/crypto/qat/qat_common/qat_algs_send.h index 5ce9f4f69d8ff8339e9b64c215a19d57da4a13cd..0baca16e1eff001df731e5fb0583e79d34c695d5 100644 --- a/drivers/crypto/qat/qat_common/qat_algs_send.h +++ b/drivers/crypto/qat/qat_common/qat_algs_send.h @@ -3,7 +3,21 @@ #ifndef QAT_ALGS_SEND_H #define QAT_ALGS_SEND_H -#include "qat_crypto.h" +#include +#include "adf_transport_internal.h" + +struct qat_instance_backlog { + struct list_head list; + spinlock_t lock; /* protects backlog list */ +}; + +struct qat_alg_req { + u32 *fw_req; + struct adf_etr_ring_data *tx_ring; + struct crypto_async_request *base; + struct list_head list; + struct qat_instance_backlog *backlog; +}; int qat_alg_send_message(struct qat_alg_req *req); void qat_alg_send_backlog(struct qat_instance_backlog *backlog); diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 94a26702aeae113762adb19dbe2e7909af4d393e..935a7e012946e30b3b703e7d427449909b53ec1e 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -494,6 +494,8 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm) if (!inst) return -EINVAL; + kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64); + ctx->p_size = 0; ctx->g2 = false; ctx->inst = inst; @@ -1230,6 +1232,8 @@ static int qat_rsa_init_tfm(struct crypto_akcipher *tfm) if (!inst) return -EINVAL; + akcipher_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64); + ctx->key_sz = 0; ctx->inst = inst; return 0; @@ -1252,7 +1256,6 @@ static struct akcipher_alg rsa = { .max_size = qat_rsa_max_size, .init = qat_rsa_init_tfm, .exit = qat_rsa_exit_tfm, - .reqsize = sizeof(struct qat_asym_request) + 64, .base = { .cra_name = "rsa", .cra_driver_name = "qat-rsa", @@ -1269,7 +1272,6 @@ static struct kpp_alg dh = { .max_size = qat_dh_max_size, .init = qat_dh_init_tfm, .exit = qat_dh_exit_tfm, - .reqsize = sizeof(struct qat_asym_request) + 64, .base = { .cra_name = "dh", .cra_driver_name = "qat-dh", diff --git a/drivers/crypto/qat/qat_common/qat_bl.c b/drivers/crypto/qat/qat_common/qat_bl.c new file mode 100644 index 0000000000000000000000000000000000000000..2e89ff08041bfaf135e8f04bddfbd5e84d22f5bf --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_bl.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2014 - 2022 Intel Corporation */ +#include +#include +#include +#include +#include +#include +#include "adf_accel_devices.h" +#include "qat_bl.h" +#include "qat_crypto.h" + +void qat_bl_free_bufl(struct adf_accel_dev *accel_dev, + struct qat_request_buffs *buf) +{ + struct device *dev = &GET_DEV(accel_dev); + struct qat_alg_buf_list *bl = buf->bl; + struct qat_alg_buf_list *blout = buf->blout; + dma_addr_t blp = buf->blp; + dma_addr_t blpout = buf->bloutp; + size_t sz = buf->sz; + size_t sz_out = buf->sz_out; + int bl_dma_dir; + int i; + + bl_dma_dir = blp 
!= blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + + for (i = 0; i < bl->num_bufs; i++) + dma_unmap_single(dev, bl->bufers[i].addr, + bl->bufers[i].len, bl_dma_dir); + + dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); + + if (!buf->sgl_src_valid) + kfree(bl); + + if (blp != blpout) { + for (i = 0; i < blout->num_mapped_bufs; i++) { + dma_unmap_single(dev, blout->bufers[i].addr, + blout->bufers[i].len, + DMA_FROM_DEVICE); + } + dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE); + + if (!buf->sgl_dst_valid) + kfree(blout); + } +} + +static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, + struct scatterlist *sgl, + struct scatterlist *sglout, + struct qat_request_buffs *buf, + dma_addr_t extra_dst_buff, + size_t sz_extra_dst_buff, + gfp_t flags) +{ + struct device *dev = &GET_DEV(accel_dev); + int i, sg_nctr = 0; + int n = sg_nents(sgl); + struct qat_alg_buf_list *bufl; + struct qat_alg_buf_list *buflout = NULL; + dma_addr_t blp = DMA_MAPPING_ERROR; + dma_addr_t bloutp = DMA_MAPPING_ERROR; + struct scatterlist *sg; + size_t sz_out, sz = struct_size(bufl, bufers, n); + int node = dev_to_node(&GET_DEV(accel_dev)); + int bufl_dma_dir; + + if (unlikely(!n)) + return -EINVAL; + + buf->sgl_src_valid = false; + buf->sgl_dst_valid = false; + + if (n > QAT_MAX_BUFF_DESC) { + bufl = kzalloc_node(sz, flags, node); + if (unlikely(!bufl)) + return -ENOMEM; + } else { + bufl = &buf->sgl_src.sgl_hdr; + memset(bufl, 0, sizeof(struct qat_alg_buf_list)); + buf->sgl_src_valid = true; + } + + bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + + for (i = 0; i < n; i++) + bufl->bufers[i].addr = DMA_MAPPING_ERROR; + + for_each_sg(sgl, sg, n, i) { + int y = sg_nctr; + + if (!sg->length) + continue; + + bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), + sg->length, + bufl_dma_dir); + bufl->bufers[y].len = sg->length; + if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) + goto err_in; + sg_nctr++; + } + bufl->num_bufs = sg_nctr; + blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, blp))) + goto err_in; + buf->bl = bufl; + buf->blp = blp; + buf->sz = sz; + /* Handle out of place operation */ + if (sgl != sglout) { + struct qat_alg_buf *bufers; + int extra_buff = extra_dst_buff ? 
1 : 0; + int n_sglout = sg_nents(sglout); + + n = n_sglout + extra_buff; + sz_out = struct_size(buflout, bufers, n); + sg_nctr = 0; + + if (n > QAT_MAX_BUFF_DESC) { + buflout = kzalloc_node(sz_out, flags, node); + if (unlikely(!buflout)) + goto err_in; + } else { + buflout = &buf->sgl_dst.sgl_hdr; + memset(buflout, 0, sizeof(struct qat_alg_buf_list)); + buf->sgl_dst_valid = true; + } + + bufers = buflout->bufers; + for (i = 0; i < n; i++) + bufers[i].addr = DMA_MAPPING_ERROR; + + for_each_sg(sglout, sg, n_sglout, i) { + int y = sg_nctr; + + if (!sg->length) + continue; + + bufers[y].addr = dma_map_single(dev, sg_virt(sg), + sg->length, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, bufers[y].addr))) + goto err_out; + bufers[y].len = sg->length; + sg_nctr++; + } + if (extra_buff) { + bufers[sg_nctr].addr = extra_dst_buff; + bufers[sg_nctr].len = sz_extra_dst_buff; + } + + buflout->num_bufs = sg_nctr; + buflout->num_bufs += extra_buff; + buflout->num_mapped_bufs = sg_nctr; + bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, bloutp))) + goto err_out; + buf->blout = buflout; + buf->bloutp = bloutp; + buf->sz_out = sz_out; + } else { + /* Otherwise set the src and dst to the same address */ + buf->bloutp = buf->blp; + buf->sz_out = 0; + } + return 0; + +err_out: + if (!dma_mapping_error(dev, bloutp)) + dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); + + n = sg_nents(sglout); + for (i = 0; i < n; i++) { + if (buflout->bufers[i].addr == extra_dst_buff) + break; + if (!dma_mapping_error(dev, buflout->bufers[i].addr)) + dma_unmap_single(dev, buflout->bufers[i].addr, + buflout->bufers[i].len, + DMA_FROM_DEVICE); + } + + if (!buf->sgl_dst_valid) + kfree(buflout); + +err_in: + if (!dma_mapping_error(dev, blp)) + dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); + + n = sg_nents(sgl); + for (i = 0; i < n; i++) + if (!dma_mapping_error(dev, bufl->bufers[i].addr)) + dma_unmap_single(dev, bufl->bufers[i].addr, + bufl->bufers[i].len, + bufl_dma_dir); + + if (!buf->sgl_src_valid) + kfree(bufl); + + dev_err(dev, "Failed to map buf for dma\n"); + return -ENOMEM; +} + +int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, + struct scatterlist *sgl, + struct scatterlist *sglout, + struct qat_request_buffs *buf, + struct qat_sgl_to_bufl_params *params, + gfp_t flags) +{ + dma_addr_t extra_dst_buff = 0; + size_t sz_extra_dst_buff = 0; + + if (params) { + extra_dst_buff = params->extra_dst_buff; + sz_extra_dst_buff = params->sz_extra_dst_buff; + } + + return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf, + extra_dst_buff, sz_extra_dst_buff, + flags); +} + +static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev, + struct qat_alg_buf_list *bl) +{ + struct device *dev = &GET_DEV(accel_dev); + int n = bl->num_bufs; + int i; + + for (i = 0; i < n; i++) + if (!dma_mapping_error(dev, bl->bufers[i].addr)) + dma_unmap_single(dev, bl->bufers[i].addr, + bl->bufers[i].len, DMA_FROM_DEVICE); +} + +static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev, + struct scatterlist *sgl, + struct qat_alg_buf_list **bl) +{ + struct device *dev = &GET_DEV(accel_dev); + struct qat_alg_buf_list *bufl; + int node = dev_to_node(dev); + struct scatterlist *sg; + int n, i, sg_nctr; + size_t sz; + + n = sg_nents(sgl); + sz = struct_size(bufl, bufers, n); + bufl = kzalloc_node(sz, GFP_KERNEL, node); + if (unlikely(!bufl)) + return -ENOMEM; + + for (i = 0; i < n; i++) + bufl->bufers[i].addr = DMA_MAPPING_ERROR; + + sg_nctr = 0; + for_each_sg(sgl, sg, n, i) { + int y = 
sg_nctr;
+
+		if (!sg->length)
+			continue;
+
+		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+						      sg->length,
+						      DMA_FROM_DEVICE);
+		bufl->bufers[y].len = sg->length;
+		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+			goto err_map;
+		sg_nctr++;
+	}
+	bufl->num_bufs = sg_nctr;
+	bufl->num_mapped_bufs = sg_nctr;
+
+	*bl = bufl;
+
+	return 0;
+
+err_map:
+	for (i = 0; i < n; i++)
+		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
+			dma_unmap_single(dev, bufl->bufers[i].addr,
+					 bufl->bufers[i].len,
+					 DMA_FROM_DEVICE);
+	kfree(bufl);
+	*bl = NULL;
+
+	return -ENOMEM;
+}
+
+static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
+				  struct scatterlist *sgl,
+				  struct qat_alg_buf_list *bl,
+				  bool free_bl)
+{
+	if (bl) {
+		qat_bl_sgl_unmap(accel_dev, bl);
+
+		if (free_bl)
+			kfree(bl);
+	}
+	if (sgl)
+		sgl_free(sgl);
+}
+
+static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
+				struct scatterlist **sgl,
+				struct qat_alg_buf_list **bl,
+				unsigned int dlen,
+				gfp_t gfp)
+{
+	struct scatterlist *dst;
+	int ret;
+
+	dst = sgl_alloc(dlen, gfp, NULL);
+	if (!dst) {
+		dev_err(&GET_DEV(accel_dev), "sg_alloc failed\n");
+		return -ENOMEM;
+	}
+
+	ret = qat_bl_sgl_map(accel_dev, dst, bl);
+	if (ret)
+		goto err;
+
+	*sgl = dst;
+
+	return 0;
+
+err:
+	sgl_free(dst);
+	*sgl = NULL;
+	return ret;
+}
+
+int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
+			       struct scatterlist **sg,
+			       unsigned int dlen,
+			       struct qat_request_buffs *qat_bufs,
+			       gfp_t gfp)
+{
+	struct device *dev = &GET_DEV(accel_dev);
+	dma_addr_t new_blp = DMA_MAPPING_ERROR;
+	struct qat_alg_buf_list *new_bl;
+	struct scatterlist *new_sg;
+	size_t new_bl_size;
+	int ret;
+
+	ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
+	if (ret)
+		return ret;
+
+	new_bl_size = struct_size(new_bl, bufers, new_bl->num_bufs);
+
+	/* Map new firmware SGL descriptor */
+	new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, new_blp)))
+		goto err;
+
+	/* Unmap old firmware SGL descriptor */
+	dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);
+
+	/* Free and unmap old scatterlist */
+	qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
+			      !qat_bufs->sgl_dst_valid);
+
+	qat_bufs->sgl_dst_valid = false;
+	qat_bufs->blout = new_bl;
+	qat_bufs->bloutp = new_blp;
+	qat_bufs->sz_out = new_bl_size;
+
+	*sg = new_sg;
+
+	return 0;
+err:
+	qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);
+
+	if (!dma_mapping_error(dev, new_blp))
+		dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);
+
+	return -ENOMEM;
+}
diff --git a/drivers/crypto/qat/qat_common/qat_bl.h b/drivers/crypto/qat/qat_common/qat_bl.h
new file mode 100644
index 0000000000000000000000000000000000000000..8ca5e52ee9e21698d54e190be2bc8070e915bac4
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_bl.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2014 - 2022 Intel Corporation */
+#ifndef QAT_BL_H
+#define QAT_BL_H
+#include
+#include
+#include
+
+#define QAT_MAX_BUFF_DESC 4
+
+struct qat_alg_buf {
+	u32 len;
+	u32 resrvd;
+	u64 addr;
+} __packed;
+
+struct qat_alg_buf_list {
+	u64 resrvd;
+	u32 num_bufs;
+	u32 num_mapped_bufs;
+	struct qat_alg_buf bufers[];
+} __packed;
+
+struct qat_alg_fixed_buf_list {
+	struct qat_alg_buf_list sgl_hdr;
+	struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
+} __packed __aligned(64);
+
+struct qat_request_buffs {
+	struct qat_alg_buf_list *bl;
+	dma_addr_t blp;
+	struct qat_alg_buf_list
*blout; + dma_addr_t bloutp; + size_t sz; + size_t sz_out; + bool sgl_src_valid; + bool sgl_dst_valid; + struct qat_alg_fixed_buf_list sgl_src; + struct qat_alg_fixed_buf_list sgl_dst; +}; + +struct qat_sgl_to_bufl_params { + dma_addr_t extra_dst_buff; + size_t sz_extra_dst_buff; +}; + +void qat_bl_free_bufl(struct adf_accel_dev *accel_dev, + struct qat_request_buffs *buf); +int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, + struct scatterlist *sgl, + struct scatterlist *sglout, + struct qat_request_buffs *buf, + struct qat_sgl_to_bufl_params *params, + gfp_t flags); + +static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req) +{ + return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; +} + +int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev, + struct scatterlist **newd, + unsigned int dlen, + struct qat_request_buffs *qat_bufs, + gfp_t gfp); + +#endif diff --git a/drivers/crypto/qat/qat_common/qat_comp_algs.c b/drivers/crypto/qat/qat_common/qat_comp_algs.c new file mode 100644 index 0000000000000000000000000000000000000000..1480d36a8d2bb152b134ebe5f72fc6fdb537bbf9 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_comp_algs.c @@ -0,0 +1,344 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2022 Intel Corporation */ +#include +#include +#include +#include +#include +#include +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "qat_bl.h" +#include "qat_comp_req.h" +#include "qat_compression.h" +#include "qat_algs_send.h" + +static DEFINE_MUTEX(algs_lock); +static unsigned int active_devs; + +enum direction { + DECOMPRESSION = 0, + COMPRESSION = 1, +}; + +struct qat_compression_ctx { + u8 comp_ctx[QAT_COMP_CTX_SIZE]; + struct qat_compression_instance *inst; +}; + +struct qat_dst { + bool is_null; + int resubmitted; +}; + +struct qat_compression_req { + u8 req[QAT_COMP_REQ_SIZE]; + struct qat_compression_ctx *qat_compression_ctx; + struct acomp_req *acompress_req; + struct qat_request_buffs buf; + enum direction dir; + int actual_dlen; + struct qat_alg_req alg_req; + struct work_struct resubmit; + struct qat_dst dst; +}; + +static int qat_alg_send_dc_message(struct qat_compression_req *qat_req, + struct qat_compression_instance *inst, + struct crypto_async_request *base) +{ + struct qat_alg_req *alg_req = &qat_req->alg_req; + + alg_req->fw_req = (u32 *)&qat_req->req; + alg_req->tx_ring = inst->dc_tx; + alg_req->base = base; + alg_req->backlog = &inst->backlog; + + return qat_alg_send_message(alg_req); +} + +static void qat_comp_resubmit(struct work_struct *work) +{ + struct qat_compression_req *qat_req = + container_of(work, struct qat_compression_req, resubmit); + struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx; + struct adf_accel_dev *accel_dev = ctx->inst->accel_dev; + struct qat_request_buffs *qat_bufs = &qat_req->buf; + struct qat_compression_instance *inst = ctx->inst; + struct acomp_req *areq = qat_req->acompress_req; + struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq); + unsigned int dlen = CRYPTO_ACOMP_DST_MAX; + u8 *req = qat_req->req; + dma_addr_t dfbuf; + int ret; + + areq->dlen = dlen; + + dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n", + crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)), + qat_req->dir == COMPRESSION ? 
"comp" : "decomp", dlen); + + ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs, + qat_algs_alloc_flags(&areq->base)); + if (ret) + goto err; + + qat_req->dst.resubmitted = true; + + dfbuf = qat_req->buf.bloutp; + qat_comp_override_dst(req, dfbuf, dlen); + + ret = qat_alg_send_dc_message(qat_req, inst, &areq->base); + if (ret != -ENOSPC) + return; + +err: + qat_bl_free_bufl(accel_dev, qat_bufs); + areq->base.complete(&areq->base, ret); +} + +static void qat_comp_generic_callback(struct qat_compression_req *qat_req, + void *resp) +{ + struct acomp_req *areq = qat_req->acompress_req; + struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx; + struct adf_accel_dev *accel_dev = ctx->inst->accel_dev; + struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq); + struct qat_compression_instance *inst = ctx->inst; + int consumed, produced; + s8 cmp_err, xlt_err; + int res = -EBADMSG; + int status; + u8 cnv; + + status = qat_comp_get_cmp_status(resp); + status |= qat_comp_get_xlt_status(resp); + cmp_err = qat_comp_get_cmp_err(resp); + xlt_err = qat_comp_get_xlt_err(resp); + + consumed = qat_comp_get_consumed_ctr(resp); + produced = qat_comp_get_produced_ctr(resp); + + dev_dbg(&GET_DEV(accel_dev), + "[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d", + crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)), + qat_req->dir == COMPRESSION ? "comp " : "decomp", + status ? "ERR" : "OK ", + areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err); + + areq->dlen = 0; + + if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) { + if (cmp_err == ERR_CODE_OVERFLOW_ERROR) { + if (qat_req->dst.resubmitted) { + dev_dbg(&GET_DEV(accel_dev), + "Output does not fit destination buffer\n"); + res = -EOVERFLOW; + goto end; + } + + INIT_WORK(&qat_req->resubmit, qat_comp_resubmit); + adf_misc_wq_queue_work(&qat_req->resubmit); + return; + } + } + + if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) + goto end; + + if (qat_req->dir == COMPRESSION) { + cnv = qat_comp_get_cmp_cnv_flag(resp); + if (unlikely(!cnv)) { + dev_err(&GET_DEV(accel_dev), + "Verified compression not supported\n"); + goto end; + } + + if (unlikely(produced > qat_req->actual_dlen)) { + memset(inst->dc_data->ovf_buff, 0, + inst->dc_data->ovf_buff_sz); + dev_dbg(&GET_DEV(accel_dev), + "Actual buffer overflow: produced=%d, dlen=%d\n", + produced, qat_req->actual_dlen); + goto end; + } + } + + res = 0; + areq->dlen = produced; + +end: + qat_bl_free_bufl(accel_dev, &qat_req->buf); + areq->base.complete(&areq->base, res); +} + +void qat_comp_alg_callback(void *resp) +{ + struct qat_compression_req *qat_req = + (void *)(__force long)qat_comp_get_opaque(resp); + struct qat_instance_backlog *backlog = qat_req->alg_req.backlog; + + qat_comp_generic_callback(qat_req, resp); + + qat_alg_send_backlog(backlog); +} + +static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm) +{ + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); + struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); + struct qat_compression_instance *inst; + int node; + + if (tfm->node == NUMA_NO_NODE) + node = numa_node_id(); + else + node = tfm->node; + + memset(ctx, 0, sizeof(*ctx)); + inst = qat_compression_get_instance_node(node); + if (!inst) + return -EINVAL; + ctx->inst = inst; + + ctx->inst->build_deflate_ctx(ctx->comp_ctx); + + return 0; +} + +static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm) +{ + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); + struct qat_compression_ctx 
*ctx = crypto_tfm_ctx(tfm); + + qat_compression_put_instance(ctx->inst); + memset(ctx, 0, sizeof(*ctx)); +} + +static int qat_comp_alg_compress_decompress(struct acomp_req *areq, + enum direction dir) +{ + struct qat_compression_req *qat_req = acomp_request_ctx(areq); + struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq); + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); + struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); + struct qat_compression_instance *inst = ctx->inst; + struct qat_sgl_to_bufl_params *p_params = NULL; + gfp_t f = qat_algs_alloc_flags(&areq->base); + struct qat_sgl_to_bufl_params params; + unsigned int slen = areq->slen; + unsigned int dlen = areq->dlen; + dma_addr_t sfbuf, dfbuf; + u8 *req = qat_req->req; + size_t ovf_buff_sz; + int ret; + + if (!areq->src || !slen) + return -EINVAL; + + if (areq->dst && !dlen) + return -EINVAL; + + qat_req->dst.is_null = false; + + /* Handle acomp requests that require the allocation of a destination + * buffer. The size of the destination buffer is double the source + * buffer (rounded up to the size of a page) to fit the decompressed + * output or an expansion on the data for compression. + */ + if (!areq->dst) { + qat_req->dst.is_null = true; + + dlen = round_up(2 * slen, PAGE_SIZE); + areq->dst = sgl_alloc(dlen, f, NULL); + if (!areq->dst) + return -ENOMEM; + + areq->dlen = dlen; + qat_req->dst.resubmitted = false; + } + + if (dir == COMPRESSION) { + params.extra_dst_buff = inst->dc_data->ovf_buff_p; + ovf_buff_sz = inst->dc_data->ovf_buff_sz; + params.sz_extra_dst_buff = ovf_buff_sz; + p_params = ¶ms; + } + + ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst, + &qat_req->buf, p_params, f); + if (unlikely(ret)) + return ret; + + sfbuf = qat_req->buf.blp; + dfbuf = qat_req->buf.bloutp; + qat_req->qat_compression_ctx = ctx; + qat_req->acompress_req = areq; + qat_req->dir = dir; + + if (dir == COMPRESSION) { + qat_req->actual_dlen = dlen; + dlen += ovf_buff_sz; + qat_comp_create_compression_req(ctx->comp_ctx, req, + (u64)(__force long)sfbuf, slen, + (u64)(__force long)dfbuf, dlen, + (u64)(__force long)qat_req); + } else { + qat_comp_create_decompression_req(ctx->comp_ctx, req, + (u64)(__force long)sfbuf, slen, + (u64)(__force long)dfbuf, dlen, + (u64)(__force long)qat_req); + } + + ret = qat_alg_send_dc_message(qat_req, inst, &areq->base); + if (ret == -ENOSPC) + qat_bl_free_bufl(inst->accel_dev, &qat_req->buf); + + return ret; +} + +static int qat_comp_alg_compress(struct acomp_req *req) +{ + return qat_comp_alg_compress_decompress(req, COMPRESSION); +} + +static int qat_comp_alg_decompress(struct acomp_req *req) +{ + return qat_comp_alg_compress_decompress(req, DECOMPRESSION); +} + +static struct acomp_alg qat_acomp[] = { { + .base = { + .cra_name = "deflate", + .cra_driver_name = "qat_deflate", + .cra_priority = 4001, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_ctxsize = sizeof(struct qat_compression_ctx), + .cra_module = THIS_MODULE, + }, + .init = qat_comp_alg_init_tfm, + .exit = qat_comp_alg_exit_tfm, + .compress = qat_comp_alg_compress, + .decompress = qat_comp_alg_decompress, + .dst_free = sgl_free, + .reqsize = sizeof(struct qat_compression_req), +} }; + +int qat_comp_algs_register(void) +{ + int ret = 0; + + mutex_lock(&algs_lock); + if (++active_devs == 1) + ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp)); + mutex_unlock(&algs_lock); + return ret; +} + +void qat_comp_algs_unregister(void) +{ + mutex_lock(&algs_lock); + if (--active_devs == 0) + 
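/*
 * Illustrative only: once qat_comp_algs_register() above has run, the
 * "deflate" acomp can be exercised through the generic kernel crypto API.
 * Minimal synchronous sketch; src/dst are assumed to be linear kernel
 * buffers and error handling is trimmed.  On success the compressed
 * length is returned in req->dlen.
 */
#include <crypto/acompress.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_deflate(void *src, unsigned int slen,
			   void *dst, unsigned int dlen)
{
	struct scatterlist sg_src, sg_dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg_src, src, slen);
	sg_init_one(&sg_dst, dst, dlen);
	acomp_request_set_params(req, &sg_src, &sg_dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* Block until the asynchronous request completes */
	ret = crypto_wait_req(crypto_acomp_compress(req), &wait);

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return ret;
}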
crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp)); + mutex_unlock(&algs_lock); +} diff --git a/drivers/crypto/qat/qat_common/qat_comp_req.h b/drivers/crypto/qat/qat_common/qat_comp_req.h new file mode 100644 index 0000000000000000000000000000000000000000..404e32c5e77838df5dc94b22d33fb3b2e3ef0462 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_comp_req.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2022 Intel Corporation */ +#ifndef _QAT_COMP_REQ_H_ +#define _QAT_COMP_REQ_H_ + +#include "icp_qat_fw_comp.h" + +#define QAT_COMP_REQ_SIZE (sizeof(struct icp_qat_fw_comp_req)) +#define QAT_COMP_CTX_SIZE (QAT_COMP_REQ_SIZE * 2) + +static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen, + u64 dst, u32 dlen, u64 opaque) +{ + struct icp_qat_fw_comp_req *fw_tmpl = ctx; + struct icp_qat_fw_comp_req *fw_req = req; + struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars; + + memcpy(fw_req, fw_tmpl, sizeof(*fw_req)); + fw_req->comn_mid.src_data_addr = src; + fw_req->comn_mid.src_length = slen; + fw_req->comn_mid.dest_data_addr = dst; + fw_req->comn_mid.dst_length = dlen; + fw_req->comn_mid.opaque_data = opaque; + req_pars->comp_len = slen; + req_pars->out_buffer_sz = dlen; +} + +static inline void qat_comp_override_dst(void *req, u64 dst, u32 dlen) +{ + struct icp_qat_fw_comp_req *fw_req = req; + struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars; + + fw_req->comn_mid.dest_data_addr = dst; + fw_req->comn_mid.dst_length = dlen; + req_pars->out_buffer_sz = dlen; +} + +static inline void qat_comp_create_compression_req(void *ctx, void *req, + u64 src, u32 slen, + u64 dst, u32 dlen, + u64 opaque) +{ + qat_comp_create_req(ctx, req, src, slen, dst, dlen, opaque); +} + +static inline void qat_comp_create_decompression_req(void *ctx, void *req, + u64 src, u32 slen, + u64 dst, u32 dlen, + u64 opaque) +{ + struct icp_qat_fw_comp_req *fw_tmpl = ctx; + + fw_tmpl++; + qat_comp_create_req(fw_tmpl, req, src, slen, dst, dlen, opaque); +} + +static inline u32 qat_comp_get_consumed_ctr(void *resp) +{ + struct icp_qat_fw_comp_resp *qat_resp = resp; + + return qat_resp->comp_resp_pars.input_byte_counter; +} + +static inline u32 qat_comp_get_produced_ctr(void *resp) +{ + struct icp_qat_fw_comp_resp *qat_resp = resp; + + return qat_resp->comp_resp_pars.output_byte_counter; +} + +static inline u32 qat_comp_get_produced_adler32(void *resp) +{ + struct icp_qat_fw_comp_resp *qat_resp = resp; + + return qat_resp->comp_resp_pars.crc.legacy.curr_adler_32; +} + +static inline u64 qat_comp_get_opaque(void *resp) +{ + struct icp_qat_fw_comp_resp *qat_resp = resp; + + return qat_resp->opaque_data; +} + +static inline s8 qat_comp_get_cmp_err(void *resp) +{ + struct icp_qat_fw_comp_resp *qat_resp = resp; + + return qat_resp->comn_resp.comn_error.cmp_err_code; +} + +static inline s8 qat_comp_get_xlt_err(void *resp) +{ + struct icp_qat_fw_comp_resp *qat_resp = resp; + + return qat_resp->comn_resp.comn_error.xlat_err_code; +} + +static inline s8 qat_comp_get_cmp_status(void *resp) +{ + struct icp_qat_fw_comp_resp *qat_resp = resp; + u8 stat_filed = qat_resp->comn_resp.comn_status; + + return ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(stat_filed); +} + +static inline s8 qat_comp_get_xlt_status(void *resp) +{ + struct icp_qat_fw_comp_resp *qat_resp = resp; + u8 stat_filed = qat_resp->comn_resp.comn_status; + + return ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(stat_filed); +} + +static inline u8 qat_comp_get_cmp_cnv_flag(void *resp) +{ + struct icp_qat_fw_comp_resp 
*qat_resp = resp; + u8 flags = qat_resp->comn_resp.hdr_flags; + + return ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(flags); +} + +#endif diff --git a/drivers/crypto/qat/qat_common/qat_compression.c b/drivers/crypto/qat/qat_common/qat_compression.c new file mode 100644 index 0000000000000000000000000000000000000000..9fd10f4242f8468460b1bc31f5336362a72b395b --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_compression.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2022 Intel Corporation */ +#include +#include +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_transport.h" +#include "adf_transport_access_macros.h" +#include "adf_cfg.h" +#include "adf_cfg_strings.h" +#include "qat_compression.h" +#include "icp_qat_fw.h" + +#define SEC ADF_KERNEL_SEC + +static struct service_hndl qat_compression; + +void qat_compression_put_instance(struct qat_compression_instance *inst) +{ + atomic_dec(&inst->refctr); + adf_dev_put(inst->accel_dev); +} + +static int qat_compression_free_instances(struct adf_accel_dev *accel_dev) +{ + struct qat_compression_instance *inst; + struct list_head *list_ptr, *tmp; + int i; + + list_for_each_safe(list_ptr, tmp, &accel_dev->compression_list) { + inst = list_entry(list_ptr, + struct qat_compression_instance, list); + + for (i = 0; i < atomic_read(&inst->refctr); i++) + qat_compression_put_instance(inst); + + if (inst->dc_tx) + adf_remove_ring(inst->dc_tx); + + if (inst->dc_rx) + adf_remove_ring(inst->dc_rx); + + list_del(list_ptr); + kfree(inst); + } + return 0; +} + +struct qat_compression_instance *qat_compression_get_instance_node(int node) +{ + struct qat_compression_instance *inst = NULL; + struct adf_accel_dev *accel_dev = NULL; + unsigned long best = ~0; + struct list_head *itr; + + list_for_each(itr, adf_devmgr_get_head()) { + struct adf_accel_dev *tmp_dev; + unsigned long ctr; + int tmp_dev_node; + + tmp_dev = list_entry(itr, struct adf_accel_dev, list); + tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev)); + + if ((node == tmp_dev_node || tmp_dev_node < 0) && + adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) { + ctr = atomic_read(&tmp_dev->ref_count); + if (best > ctr) { + accel_dev = tmp_dev; + best = ctr; + } + } + } + + if (!accel_dev) { + pr_info("QAT: Could not find a device on node %d\n", node); + /* Get any started device */ + list_for_each(itr, adf_devmgr_get_head()) { + struct adf_accel_dev *tmp_dev; + + tmp_dev = list_entry(itr, struct adf_accel_dev, list); + if (adf_dev_started(tmp_dev) && + !list_empty(&tmp_dev->compression_list)) { + accel_dev = tmp_dev; + break; + } + } + } + + if (!accel_dev) + return NULL; + + best = ~0; + list_for_each(itr, &accel_dev->compression_list) { + struct qat_compression_instance *tmp_inst; + unsigned long ctr; + + tmp_inst = list_entry(itr, struct qat_compression_instance, list); + ctr = atomic_read(&tmp_inst->refctr); + if (best > ctr) { + inst = tmp_inst; + best = ctr; + } + } + if (inst) { + if (adf_dev_get(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n"); + return NULL; + } + atomic_inc(&inst->refctr); + } + return inst; +} + +static int qat_compression_create_instances(struct adf_accel_dev *accel_dev) +{ + struct qat_compression_instance *inst; + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + unsigned long num_inst, num_msg_dc; + unsigned long bank; + int msg_size; + int ret; + int i; + + INIT_LIST_HEAD(&accel_dev->compression_list); + strscpy(key, ADF_NUM_DC, sizeof(key)); + 
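/*
 * Sketch of the template layout the qat_comp_req.h helpers above assume:
 * the context is two pre-built firmware request templates laid out back to
 * back, hence QAT_COMP_CTX_SIZE == 2 * QAT_COMP_REQ_SIZE.  Compression
 * copies from template[0]; qat_comp_create_decompression_req() advances
 * past it (fw_tmpl++) to template[1] before copying.
 */
#include "qat_comp_req.h"

static void example_build_fw_req(void *comp_ctx, void *fw_req,
				 u64 src, u32 slen, u64 dst, u32 dlen,
				 u64 opaque, bool compress)
{
	if (compress)
		/* template[0]: compression */
		qat_comp_create_compression_req(comp_ctx, fw_req, src, slen,
						dst, dlen, opaque);
	else
		/* template[1]: decompression */
		qat_comp_create_decompression_req(comp_ctx, fw_req, src, slen,
						  dst, dlen, opaque);
}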
ret = adf_cfg_get_param_value(accel_dev, SEC, key, val); + if (ret) + return ret; + + ret = kstrtoul(val, 10, &num_inst); + if (ret) + return ret; + + for (i = 0; i < num_inst; i++) { + inst = kzalloc_node(sizeof(*inst), GFP_KERNEL, + dev_to_node(&GET_DEV(accel_dev))); + if (!inst) { + ret = -ENOMEM; + goto err; + } + + list_add_tail(&inst->list, &accel_dev->compression_list); + inst->id = i; + atomic_set(&inst->refctr, 0); + inst->accel_dev = accel_dev; + inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx; + + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i); + ret = adf_cfg_get_param_value(accel_dev, SEC, key, val); + if (ret) + return ret; + + ret = kstrtoul(val, 10, &bank); + if (ret) + return ret; + + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i); + ret = adf_cfg_get_param_value(accel_dev, SEC, key, val); + if (ret) + return ret; + + ret = kstrtoul(val, 10, &num_msg_dc); + if (ret) + return ret; + + msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i); + ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc, + msg_size, key, NULL, 0, &inst->dc_tx); + if (ret) + return ret; + + msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i); + ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc, + msg_size, key, qat_comp_alg_callback, 0, + &inst->dc_rx); + if (ret) + return ret; + + inst->dc_data = accel_dev->dc_data; + INIT_LIST_HEAD(&inst->backlog.list); + spin_lock_init(&inst->backlog.lock); + } + return 0; +err: + qat_compression_free_instances(accel_dev); + return ret; +} + +static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev) +{ + struct device *dev = &GET_DEV(accel_dev); + dma_addr_t obuff_p = DMA_MAPPING_ERROR; + size_t ovf_buff_sz = QAT_COMP_MAX_SKID; + struct adf_dc_data *dc_data = NULL; + u8 *obuff = NULL; + + dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL); + if (!dc_data) + goto err; + + obuff = kzalloc_node(ovf_buff_sz, GFP_KERNEL, dev_to_node(dev)); + if (!obuff) + goto err; + + obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, obuff_p))) + goto err; + + dc_data->ovf_buff = obuff; + dc_data->ovf_buff_p = obuff_p; + dc_data->ovf_buff_sz = ovf_buff_sz; + + accel_dev->dc_data = dc_data; + + return 0; + +err: + accel_dev->dc_data = NULL; + kfree(obuff); + devm_kfree(dev, dc_data); + return -ENOMEM; +} + +static void qat_free_dc_data(struct adf_accel_dev *accel_dev) +{ + struct adf_dc_data *dc_data = accel_dev->dc_data; + struct device *dev = &GET_DEV(accel_dev); + + if (!dc_data) + return; + + dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz, + DMA_FROM_DEVICE); + memset(dc_data->ovf_buff, 0, dc_data->ovf_buff_sz); + kfree(dc_data->ovf_buff); + devm_kfree(dev, dc_data); + accel_dev->dc_data = NULL; +} + +static int qat_compression_init(struct adf_accel_dev *accel_dev) +{ + int ret; + + ret = qat_compression_alloc_dc_data(accel_dev); + if (ret) + return ret; + + ret = qat_compression_create_instances(accel_dev); + if (ret) + qat_free_dc_data(accel_dev); + + return ret; +} + +static int qat_compression_shutdown(struct adf_accel_dev *accel_dev) +{ + qat_free_dc_data(accel_dev); + return qat_compression_free_instances(accel_dev); +} + +static int qat_compression_event_handler(struct adf_accel_dev *accel_dev, + enum adf_event event) +{ + int ret; + + switch (event) { + case ADF_EVENT_INIT: + ret = qat_compression_init(accel_dev); + break; + case 
ADF_EVENT_SHUTDOWN: + ret = qat_compression_shutdown(accel_dev); + break; + case ADF_EVENT_RESTARTING: + case ADF_EVENT_RESTARTED: + case ADF_EVENT_START: + case ADF_EVENT_STOP: + default: + ret = 0; + } + return ret; +} + +int qat_compression_register(void) +{ + memset(&qat_compression, 0, sizeof(qat_compression)); + qat_compression.event_hld = qat_compression_event_handler; + qat_compression.name = "qat_compression"; + return adf_service_register(&qat_compression); +} + +int qat_compression_unregister(void) +{ + return adf_service_unregister(&qat_compression); +} diff --git a/drivers/crypto/qat/qat_common/qat_compression.h b/drivers/crypto/qat/qat_common/qat_compression.h new file mode 100644 index 0000000000000000000000000000000000000000..aebac2302dcf23a9af5e1eafdb55c50c8959e8e8 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_compression.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2022 Intel Corporation */ +#ifndef _QAT_COMPRESSION_H_ +#define _QAT_COMPRESSION_H_ + +#include +#include +#include "adf_accel_devices.h" +#include "qat_algs_send.h" + +#define QAT_COMP_MAX_SKID 4096 + +struct qat_compression_instance { + struct adf_etr_ring_data *dc_tx; + struct adf_etr_ring_data *dc_rx; + struct adf_accel_dev *accel_dev; + struct list_head list; + unsigned long state; + int id; + atomic_t refctr; + struct qat_instance_backlog backlog; + struct adf_dc_data *dc_data; + void (*build_deflate_ctx)(void *ctx); +}; + +static inline bool adf_hw_dev_has_compression(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + u32 mask = ~hw_device->accel_capabilities_mask; + + if (mask & ADF_ACCEL_CAPABILITIES_COMPRESSION) + return false; + + return true; +} + +#endif diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index 9341d892533a7ebc57a23a1a0461ec126890dc65..e31199eade5b6f97587e59d99f0b2debd664b9fd 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -5,7 +5,6 @@ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_transport.h" -#include "adf_transport_access_macros.h" #include "adf_cfg.h" #include "adf_cfg_strings.h" #include "adf_gen2_hw_data.h" @@ -126,126 +125,9 @@ int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev) return -EFAULT; } - return qat_crypto_dev_config(accel_dev); + return GET_HW_DATA(accel_dev)->dev_config(accel_dev); } -/** - * qat_crypto_dev_config() - create dev config required to create crypto inst. - * - * @accel_dev: Pointer to acceleration device. - * - * Function creates device configuration required to create crypto instances - * - * Return: 0 on success, error code otherwise. 
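/*
 * Illustrative use of adf_hw_dev_has_compression() above, with a
 * hypothetical caller: registration of the compression algorithms is only
 * worthwhile when the accelerator reports the capability bit in
 * accel_capabilities_mask.  qat_comp_algs_register() is assumed to be
 * visible to the caller.
 */
#include "qat_compression.h"

static int example_setup_compression(struct adf_accel_dev *accel_dev)
{
	if (!adf_hw_dev_has_compression(accel_dev))
		return 0;	/* no dc engine: skip algorithm registration */

	return qat_comp_algs_register();
}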
- */ -int qat_crypto_dev_config(struct adf_accel_dev *accel_dev) -{ - char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; - int banks = GET_MAX_BANKS(accel_dev); - int cpus = num_online_cpus(); - unsigned long val; - int instances; - int ret; - int i; - - if (adf_hw_dev_has_crypto(accel_dev)) - instances = min(cpus, banks); - else - instances = 0; - - ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); - if (ret) - goto err; - - ret = adf_cfg_section_add(accel_dev, "Accelerator0"); - if (ret) - goto err; - - for (i = 0; i < instances; i++) { - val = i; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, - i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); - val = 128; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 512; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 2; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 8; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 10; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = ADF_COALESCING_DEF_TIME; - snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); - ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", - key, &val, ADF_DEC); - if (ret) - goto err; - } - - val = i; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC); - if (ret) - goto err; - - set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); - return 0; -err: - dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n"); - return ret; -} -EXPORT_SYMBOL_GPL(qat_crypto_dev_config); - static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) { unsigned long num_inst, num_msg_sym, num_msg_asym; diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h index df3c738ce323a99440c3b65fc759df17e63cad03..6a0e961bb9dc774583e58539d9bf8476d6b647c6 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.h +++ b/drivers/crypto/qat/qat_common/qat_crypto.h @@ -8,19 +8,8 @@ #include #include "adf_accel_devices.h" #include "icp_qat_fw_la.h" - -struct qat_instance_backlog { - struct list_head list; - spinlock_t lock; /* protects backlog list */ -}; - -struct qat_alg_req { - u32 *fw_req; - struct adf_etr_ring_data *tx_ring; - struct crypto_async_request *base; - struct list_head list; - struct qat_instance_backlog *backlog; -}; 
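/*
 * Sketch, not part of the patch: with qat_alg_req and qat_instance_backlog
 * relocated from this header to qat_algs_send.h, both the crypto and the
 * new compression services fill the same descriptor before calling
 * qat_alg_send_message().  This mirrors qat_alg_send_dc_message() earlier
 * in the patch; the ring and backlog pointers come from the instance.
 */
#include <linux/crypto.h>
#include "qat_algs_send.h"

static int example_send(u32 *fw_req, struct adf_etr_ring_data *tx_ring,
			struct qat_instance_backlog *backlog,
			struct crypto_async_request *base,
			struct qat_alg_req *req)
{
	req->fw_req = fw_req;
	req->tx_ring = tx_ring;
	req->base = base;
	req->backlog = backlog;

	/* -EINPROGRESS/-EBUSY when queued, -ENOSPC if the ring is full */
	return qat_alg_send_message(req);
}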
+#include "qat_algs_send.h" +#include "qat_bl.h" struct qat_crypto_instance { struct adf_etr_ring_data *sym_tx; @@ -35,39 +24,6 @@ struct qat_crypto_instance { struct qat_instance_backlog backlog; }; -#define QAT_MAX_BUFF_DESC 4 - -struct qat_alg_buf { - u32 len; - u32 resrvd; - u64 addr; -} __packed; - -struct qat_alg_buf_list { - u64 resrvd; - u32 num_bufs; - u32 num_mapped_bufs; - struct qat_alg_buf bufers[]; -} __packed; - -struct qat_alg_fixed_buf_list { - struct qat_alg_buf_list sgl_hdr; - struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC]; -} __packed __aligned(64); - -struct qat_crypto_request_buffs { - struct qat_alg_buf_list *bl; - dma_addr_t blp; - struct qat_alg_buf_list *blout; - dma_addr_t bloutp; - size_t sz; - size_t sz_out; - bool sgl_src_valid; - bool sgl_dst_valid; - struct qat_alg_fixed_buf_list sgl_src; - struct qat_alg_fixed_buf_list sgl_dst; -}; - struct qat_crypto_request; struct qat_crypto_request { @@ -80,7 +36,7 @@ struct qat_crypto_request { struct aead_request *aead_req; struct skcipher_request *skcipher_req; }; - struct qat_crypto_request_buffs buf; + struct qat_request_buffs buf; void (*cb)(struct icp_qat_fw_la_resp *resp, struct qat_crypto_request *req); union { @@ -109,9 +65,4 @@ static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev) return true; } -static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req) -{ - return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; -} - #endif diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index cb3bdd3618fb0e72701d7dc87272462a8a75a587..bc80bb475118d36dc8565bf4ca135d6d94e54ba6 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -2,6 +2,8 @@ /* Copyright(c) 2014 - 2021 Intel Corporation */ #include #include +#include +#include #include #include #include "adf_dh895xcc_hw_data.h" @@ -234,12 +236,14 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) hw_data->enable_ints = adf_gen2_enable_ints; hw_data->reset_device = adf_reset_sbr; hw_data->disable_iov = adf_disable_sriov; + hw_data->dev_config = adf_gen2_dev_config; adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops); hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts; hw_data->pfvf_ops.disable_all_vf2pf_interrupts = disable_all_vf2pf_interrupts; hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts; adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); + adf_gen2_init_dc_ops(&hw_data->dc_ops); } void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index acca56752aa02dec61fcc901ace74edecdf63c32..ebeb17b67fcd58d89f7ab569ab4491f23faf8e99 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -201,7 +201,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err_disable_aer; } - ret = qat_crypto_dev_config(accel_dev); + ret = hw_data->dev_config(accel_dev); if (ret) goto out_err_disable_aer; diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c index 31c14d7e1c115d00414388df60ab6a8605ba1e09..70e56cc16ecebb761f50961b64d47cbef0b8241e 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c +++ 
b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c @@ -2,6 +2,8 @@ /* Copyright(c) 2015 - 2021 Intel Corporation */ #include #include +#include +#include #include #include #include @@ -86,9 +88,11 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) hw_data->get_sku = get_sku; hw_data->enable_ints = adf_vf_void_noop; hw_data->dev_class->instances++; + hw_data->dev_config = adf_gen2_dev_config; adf_devmgr_update_class_index(hw_data); adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops); adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); + adf_gen2_init_dc_ops(&hw_data->dc_ops); } void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c index 18756b2e1c91276a18147f3dd320e47f86f48636..c1485e702b3eb5ab2e00cb8a55a6b36498e38369 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c @@ -177,8 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_err_dev_shutdown; - set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); - ret = adf_dev_start(accel_dev); if (ret) goto out_err_dev_stop; diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c index 6eb4d2e356297c1182f88382ff0757aee26fbf90..7d811728f047823c3254c4234ce77ae71719b168 100644 --- a/drivers/crypto/qce/aead.c +++ b/drivers/crypto/qce/aead.c @@ -24,7 +24,7 @@ static void qce_aead_done(void *data) { struct crypto_async_request *async_req = data; struct aead_request *req = aead_request_cast(async_req); - struct qce_aead_reqctx *rctx = aead_request_ctx(req); + struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req); struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm); struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req)); struct qce_device *qce = tmpl->qce; @@ -92,7 +92,7 @@ static void qce_aead_done(void *data) static struct scatterlist * qce_aead_prepare_result_buf(struct sg_table *tbl, struct aead_request *req) { - struct qce_aead_reqctx *rctx = aead_request_ctx(req); + struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req); struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req)); struct qce_device *qce = tmpl->qce; @@ -103,7 +103,7 @@ qce_aead_prepare_result_buf(struct sg_table *tbl, struct aead_request *req) static struct scatterlist * qce_aead_prepare_ccm_result_buf(struct sg_table *tbl, struct aead_request *req) { - struct qce_aead_reqctx *rctx = aead_request_ctx(req); + struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req); sg_init_one(&rctx->result_sg, rctx->ccmresult_buf, QCE_BAM_BURST_SIZE); return qce_sgtable_add(tbl, &rctx->result_sg, QCE_BAM_BURST_SIZE); @@ -112,7 +112,7 @@ qce_aead_prepare_ccm_result_buf(struct sg_table *tbl, struct aead_request *req) static struct scatterlist * qce_aead_prepare_dst_buf(struct aead_request *req) { - struct qce_aead_reqctx *rctx = aead_request_ctx(req); + struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req); struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req)); struct qce_device *qce = tmpl->qce; struct scatterlist *sg, *msg_sg, __sg[2]; @@ -186,7 +186,7 @@ qce_aead_ccm_prepare_buf_assoclen(struct aead_request *req) { struct scatterlist *sg, *msg_sg, __sg[2]; struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct qce_aead_reqctx *rctx = aead_request_ctx(req); + struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req); struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm); unsigned int assoclen = 
rctx->assoclen; unsigned int adata_header_len, cryptlen, totallen; @@ -300,7 +300,7 @@ err_free: static int qce_aead_prepare_buf(struct aead_request *req) { - struct qce_aead_reqctx *rctx = aead_request_ctx(req); + struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req); struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req)); struct qce_device *qce = tmpl->qce; struct scatterlist *sg; @@ -328,7 +328,7 @@ static int qce_aead_prepare_buf(struct aead_request *req) static int qce_aead_ccm_prepare_buf(struct aead_request *req) { - struct qce_aead_reqctx *rctx = aead_request_ctx(req); + struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm); struct scatterlist *sg; @@ -408,7 +408,7 @@ static int qce_aead_async_req_handle(struct crypto_async_request *async_req) { struct aead_request *req = aead_request_cast(async_req); - struct qce_aead_reqctx *rctx = aead_request_ctx(req); + struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm); struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req)); @@ -502,7 +502,7 @@ error_free: static int qce_aead_crypt(struct aead_request *req, int encrypt) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct qce_aead_reqctx *rctx = aead_request_ctx(req); + struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req); struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm); struct qce_alg_template *tmpl = to_aead_tmpl(tfm); unsigned int blocksize = crypto_aead_blocksize(tfm); @@ -675,8 +675,8 @@ static int qce_aead_init(struct crypto_aead *tfm) if (IS_ERR(ctx->fallback)) return PTR_ERR(ctx->fallback); - crypto_aead_set_reqsize(tfm, sizeof(struct qce_aead_reqctx) + - crypto_aead_reqsize(ctx->fallback)); + crypto_aead_set_reqsize_dma(tfm, sizeof(struct qce_aead_reqctx) + + crypto_aead_reqsize(ctx->fallback)); return 0; } diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c index 7c612ba5068f7b31a2740618665a97e69c41bb9c..04253a8d33409a2a51db527435d09ae85a7880af 100644 --- a/drivers/crypto/qce/common.c +++ b/drivers/crypto/qce/common.c @@ -3,6 +3,7 @@ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. 
*/ +#include #include #include #include @@ -147,7 +148,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req) { struct ahash_request *req = ahash_request_cast(async_req); struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm); - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); struct qce_device *qce = tmpl->qce; unsigned int digestsize = crypto_ahash_digestsize(ahash); @@ -419,7 +420,7 @@ static unsigned int qce_be32_to_cpu_array(u32 *dst, const u8 *src, unsigned int static int qce_setup_regs_aead(struct crypto_async_request *async_req) { struct aead_request *req = aead_request_cast(async_req); - struct qce_aead_reqctx *rctx = aead_request_ctx(req); + struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req); struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm); struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req)); struct qce_device *qce = tmpl->qce; diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 37bafd7aeb79df4fb650e5c95e2604798a5cf653..fc72af8aa9a725be7ba59cad71be4123b8dfe97f 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -38,7 +38,7 @@ static void qce_ahash_done(void *data) struct crypto_async_request *async_req = data; struct ahash_request *req = ahash_request_cast(async_req); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); struct qce_device *qce = tmpl->qce; struct qce_result_dump *result = qce->dma.result_buf; @@ -75,7 +75,7 @@ static void qce_ahash_done(void *data) static int qce_ahash_async_req_handle(struct crypto_async_request *async_req) { struct ahash_request *req = ahash_request_cast(async_req); - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm); struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); struct qce_device *qce = tmpl->qce; @@ -132,7 +132,7 @@ error_unmap_src: static int qce_ahash_init(struct ahash_request *req) { - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); const u32 *std_iv = tmpl->std_iv; @@ -147,7 +147,7 @@ static int qce_ahash_init(struct ahash_request *req) static int qce_ahash_export(struct ahash_request *req, void *out) { - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); struct qce_sha_saved_state *export_state = out; memcpy(export_state->pending_buf, rctx->buf, rctx->buflen); @@ -164,7 +164,7 @@ static int qce_ahash_export(struct ahash_request *req, void *out) static int qce_ahash_import(struct ahash_request *req, const void *in) { - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); const struct qce_sha_saved_state *import_state = in; memset(rctx, 0, sizeof(*rctx)); @@ -183,7 +183,7 @@ static int qce_ahash_import(struct ahash_request *req, const void *in) static int qce_ahash_update(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_sha_reqctx *rctx = 
ahash_request_ctx_dma(req); struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); struct qce_device *qce = tmpl->qce; struct scatterlist *sg_last, *sg; @@ -275,7 +275,7 @@ static int qce_ahash_update(struct ahash_request *req) static int qce_ahash_final(struct ahash_request *req) { - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); struct qce_device *qce = tmpl->qce; @@ -302,7 +302,7 @@ static int qce_ahash_final(struct ahash_request *req) static int qce_ahash_digest(struct ahash_request *req) { - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); struct qce_device *qce = tmpl->qce; int ret; @@ -395,7 +395,7 @@ static int qce_ahash_cra_init(struct crypto_tfm *tfm) struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm); - crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx)); + crypto_ahash_set_reqsize_dma(ahash, sizeof(struct qce_sha_reqctx)); memset(ctx, 0, sizeof(*ctx)); return 0; } diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index 35d73061d1569b292de14d6a206d832f800e8007..9f6ba770a90a1d8967d3f1469762cf47342ac7f4 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -14,235 +14,162 @@ #include #include #include +#include #include #include #include -static int rk_crypto_enable_clk(struct rk_crypto_info *dev) -{ - int err; - - err = clk_prepare_enable(dev->sclk); - if (err) { - dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n", - __func__, __LINE__); - goto err_return; - } - err = clk_prepare_enable(dev->aclk); - if (err) { - dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n", - __func__, __LINE__); - goto err_aclk; - } - err = clk_prepare_enable(dev->hclk); - if (err) { - dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n", - __func__, __LINE__); - goto err_hclk; - } - err = clk_prepare_enable(dev->dmaclk); - if (err) { - dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n", - __func__, __LINE__); - goto err_dmaclk; - } - return err; -err_dmaclk: - clk_disable_unprepare(dev->hclk); -err_hclk: - clk_disable_unprepare(dev->aclk); -err_aclk: - clk_disable_unprepare(dev->sclk); -err_return: - return err; -} +static struct rockchip_ip rocklist = { + .dev_list = LIST_HEAD_INIT(rocklist.dev_list), + .lock = __SPIN_LOCK_UNLOCKED(rocklist.lock), +}; -static void rk_crypto_disable_clk(struct rk_crypto_info *dev) +struct rk_crypto_info *get_rk_crypto(void) { - clk_disable_unprepare(dev->dmaclk); - clk_disable_unprepare(dev->hclk); - clk_disable_unprepare(dev->aclk); - clk_disable_unprepare(dev->sclk); + struct rk_crypto_info *first; + + spin_lock(&rocklist.lock); + first = list_first_entry_or_null(&rocklist.dev_list, + struct rk_crypto_info, list); + list_rotate_left(&rocklist.dev_list); + spin_unlock(&rocklist.lock); + return first; } -static int check_alignment(struct scatterlist *sg_src, - struct scatterlist *sg_dst, - int align_mask) -{ - int in, out, align; +static const struct rk_variant rk3288_variant = { + .num_clks = 4, + .rkclks = { + { "sclk", 150000000}, + } +}; - in = IS_ALIGNED((uint32_t)sg_src->offset, 4) && - IS_ALIGNED((uint32_t)sg_src->length, align_mask); - if (!sg_dst) - return in; - out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) && - 
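/*
 * Illustrative only, assuming the driver's internal header: get_rk_crypto()
 * above rotates rocklist.dev_list under the lock, so successive callers are
 * spread round-robin across all registered Rockchip crypto instances.
 */
static void example_round_robin(void)
{
	struct rk_crypto_info *a = get_rk_crypto();	/* head of the list */
	struct rk_crypto_info *b = get_rk_crypto();	/* list rotated: next device */

	/* a == b only when a single instance is registered */
	WARN_ON(!a || !b);
}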
IS_ALIGNED((uint32_t)sg_dst->length, align_mask); - align = in && out; +static const struct rk_variant rk3328_variant = { + .num_clks = 3, +}; - return (align && (sg_src->length == sg_dst->length)); -} +static const struct rk_variant rk3399_variant = { + .num_clks = 3, +}; -static int rk_load_data(struct rk_crypto_info *dev, - struct scatterlist *sg_src, - struct scatterlist *sg_dst) +static int rk_crypto_get_clks(struct rk_crypto_info *dev) { - unsigned int count; - - dev->aligned = dev->aligned ? - check_alignment(sg_src, sg_dst, dev->align_size) : - dev->aligned; - if (dev->aligned) { - count = min(dev->left_bytes, sg_src->length); - dev->left_bytes -= count; - - if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) { - dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n", - __func__, __LINE__); - return -EINVAL; - } - dev->addr_in = sg_dma_address(sg_src); - - if (sg_dst) { - if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) { - dev_err(dev->dev, - "[%s:%d] dma_map_sg(dst) error\n", - __func__, __LINE__); - dma_unmap_sg(dev->dev, sg_src, 1, - DMA_TO_DEVICE); - return -EINVAL; - } - dev->addr_out = sg_dma_address(sg_dst); - } - } else { - count = (dev->left_bytes > PAGE_SIZE) ? - PAGE_SIZE : dev->left_bytes; - - if (!sg_pcopy_to_buffer(dev->first, dev->src_nents, - dev->addr_vir, count, - dev->total - dev->left_bytes)) { - dev_err(dev->dev, "[%s:%d] pcopy err\n", - __func__, __LINE__); - return -EINVAL; - } - dev->left_bytes -= count; - sg_init_one(&dev->sg_tmp, dev->addr_vir, count); - if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) { - dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n", - __func__, __LINE__); - return -ENOMEM; - } - dev->addr_in = sg_dma_address(&dev->sg_tmp); - - if (sg_dst) { - if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, - DMA_FROM_DEVICE)) { - dev_err(dev->dev, - "[%s:%d] dma_map_sg(sg_tmp) error\n", - __func__, __LINE__); - dma_unmap_sg(dev->dev, &dev->sg_tmp, 1, - DMA_TO_DEVICE); - return -ENOMEM; + int i, j, err; + unsigned long cr; + + dev->num_clks = devm_clk_bulk_get_all(dev->dev, &dev->clks); + if (dev->num_clks < dev->variant->num_clks) { + dev_err(dev->dev, "Missing clocks, got %d instead of %d\n", + dev->num_clks, dev->variant->num_clks); + return -EINVAL; + } + + for (i = 0; i < dev->num_clks; i++) { + cr = clk_get_rate(dev->clks[i].clk); + for (j = 0; j < ARRAY_SIZE(dev->variant->rkclks); j++) { + if (dev->variant->rkclks[j].max == 0) + continue; + if (strcmp(dev->variant->rkclks[j].name, dev->clks[i].id)) + continue; + if (cr > dev->variant->rkclks[j].max) { + err = clk_set_rate(dev->clks[i].clk, + dev->variant->rkclks[j].max); + if (err) + dev_err(dev->dev, "Fail downclocking %s from %lu to %lu\n", + dev->variant->rkclks[j].name, cr, + dev->variant->rkclks[j].max); + else + dev_info(dev->dev, "Downclocking %s from %lu to %lu\n", + dev->variant->rkclks[j].name, cr, + dev->variant->rkclks[j].max); } - dev->addr_out = sg_dma_address(&dev->sg_tmp); } } - dev->count = count; return 0; } -static void rk_unload_data(struct rk_crypto_info *dev) +static int rk_crypto_enable_clk(struct rk_crypto_info *dev) { - struct scatterlist *sg_in, *sg_out; + int err; - sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp; - dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE); + err = clk_bulk_prepare_enable(dev->num_clks, dev->clks); + if (err) + dev_err(dev->dev, "Could not enable clock clks\n"); - if (dev->sg_dst) { - sg_out = dev->aligned ? 
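/*
 * The four devm_clk_get()/clk_prepare_enable() pairs and their goto
 * ladder collapse into the clk_bulk API. The core of the new scheme,
 * sketched with error handling trimmed:
 *
 *   struct clk_bulk_data *clks;
 *   int nclks, err;
 *
 *   nclks = devm_clk_bulk_get_all(dev, &clks);    // every clock in the DT node
 *   if (nclks < expected_for_variant)
 *           return -EINVAL;                       // a clock is missing
 *
 *   err = clk_bulk_prepare_enable(nclks, clks);   // enables all, or none
 *   ...
 *   clk_bulk_disable_unprepare(nclks, clks);
 *
 * clk_bulk_prepare_enable() unwinds internally on failure, which is why
 * the err_aclk/err_hclk/err_dmaclk labels can go. The per-variant rkclks
 * table additionally caps named clocks (sclk at 150 MHz on rk3288) via
 * clk_set_rate() before use.
 */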
dev->sg_dst : &dev->sg_tmp; - dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE); - } + return err; } -static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) +static void rk_crypto_disable_clk(struct rk_crypto_info *dev) { - struct rk_crypto_info *dev = platform_get_drvdata(dev_id); - u32 interrupt_status; + clk_bulk_disable_unprepare(dev->num_clks, dev->clks); +} - spin_lock(&dev->lock); - interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS); - CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status); +/* + * Power management strategy: The device is suspended until a request + * is handled. For avoiding suspend/resume yoyo, the autosuspend is set to 2s. + */ +static int rk_crypto_pm_suspend(struct device *dev) +{ + struct rk_crypto_info *rkdev = dev_get_drvdata(dev); - if (interrupt_status & 0x0a) { - dev_warn(dev->dev, "DMA Error\n"); - dev->err = -EFAULT; - } - tasklet_schedule(&dev->done_task); + rk_crypto_disable_clk(rkdev); + reset_control_assert(rkdev->rst); - spin_unlock(&dev->lock); - return IRQ_HANDLED; + return 0; } -static int rk_crypto_enqueue(struct rk_crypto_info *dev, - struct crypto_async_request *async_req) +static int rk_crypto_pm_resume(struct device *dev) { - unsigned long flags; + struct rk_crypto_info *rkdev = dev_get_drvdata(dev); int ret; - spin_lock_irqsave(&dev->lock, flags); - ret = crypto_enqueue_request(&dev->queue, async_req); - if (dev->busy) { - spin_unlock_irqrestore(&dev->lock, flags); + ret = rk_crypto_enable_clk(rkdev); + if (ret) return ret; - } - dev->busy = true; - spin_unlock_irqrestore(&dev->lock, flags); - tasklet_schedule(&dev->queue_task); - return ret; -} + reset_control_deassert(rkdev->rst); + return 0; -static void rk_crypto_queue_task_cb(unsigned long data) -{ - struct rk_crypto_info *dev = (struct rk_crypto_info *)data; - struct crypto_async_request *async_req, *backlog; - unsigned long flags; - int err = 0; +} - dev->err = 0; - spin_lock_irqsave(&dev->lock, flags); - backlog = crypto_get_backlog(&dev->queue); - async_req = crypto_dequeue_request(&dev->queue); +static const struct dev_pm_ops rk_crypto_pm_ops = { + SET_RUNTIME_PM_OPS(rk_crypto_pm_suspend, rk_crypto_pm_resume, NULL) +}; - if (!async_req) { - dev->busy = false; - spin_unlock_irqrestore(&dev->lock, flags); - return; - } - spin_unlock_irqrestore(&dev->lock, flags); +static int rk_crypto_pm_init(struct rk_crypto_info *rkdev) +{ + int err; - if (backlog) { - backlog->complete(backlog, -EINPROGRESS); - backlog = NULL; - } + pm_runtime_use_autosuspend(rkdev->dev); + pm_runtime_set_autosuspend_delay(rkdev->dev, 2000); - dev->async_req = async_req; - err = dev->start(dev); + err = pm_runtime_set_suspended(rkdev->dev); if (err) - dev->complete(dev->async_req, err); + return err; + pm_runtime_enable(rkdev->dev); + return err; } -static void rk_crypto_done_task_cb(unsigned long data) +static void rk_crypto_pm_exit(struct rk_crypto_info *rkdev) { - struct rk_crypto_info *dev = (struct rk_crypto_info *)data; + pm_runtime_disable(rkdev->dev); +} - if (dev->err) { - dev->complete(dev->async_req, dev->err); - return; +static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) +{ + struct rk_crypto_info *dev = platform_get_drvdata(dev_id); + u32 interrupt_status; + + interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS); + CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status); + + dev->status = 1; + if (interrupt_status & 0x0a) { + dev_warn(dev->dev, "DMA Error\n"); + dev->status = 0; } + complete(&dev->complete); - dev->err = dev->update(dev); - if (dev->err) - 
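/*
 * Clock gating moves from the per-tfm enable_clk/disable_clk hooks to
 * runtime PM with a 2 s autosuspend, per the strategy comment above. The
 * consumer side of that contract, as the request handlers later in this
 * patch use it:
 *
 *   err = pm_runtime_resume_and_get(dev);  // power up, take a reference
 *   if (err)
 *           return err;
 *   ... program the engine, wait for completion ...
 *   pm_runtime_put_autosuspend(dev);       // drop ref; suspend after delay
 *
 * The usual convention also calls pm_runtime_mark_last_busy(dev) right
 * before put_autosuspend(); the handlers shown here lean on the fixed
 * 2000 ms autosuspend delay alone.
 */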
dev->complete(dev->async_req, dev->err); + return IRQ_HANDLED; } static struct rk_crypto_tmp *rk_cipher_algs[] = { @@ -257,6 +184,62 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = { &rk_ahash_md5, }; +#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG +static int rk_crypto_debugfs_show(struct seq_file *seq, void *v) +{ + struct rk_crypto_info *dd; + unsigned int i; + + spin_lock(&rocklist.lock); + list_for_each_entry(dd, &rocklist.dev_list, list) { + seq_printf(seq, "%s %s requests: %lu\n", + dev_driver_string(dd->dev), dev_name(dd->dev), + dd->nreq); + } + spin_unlock(&rocklist.lock); + + for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { + if (!rk_cipher_algs[i]->dev) + continue; + switch (rk_cipher_algs[i]->type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", + rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name, + rk_cipher_algs[i]->alg.skcipher.base.cra_name, + rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb); + seq_printf(seq, "\tfallback due to length: %lu\n", + rk_cipher_algs[i]->stat_fb_len); + seq_printf(seq, "\tfallback due to alignment: %lu\n", + rk_cipher_algs[i]->stat_fb_align); + seq_printf(seq, "\tfallback due to SGs: %lu\n", + rk_cipher_algs[i]->stat_fb_sgdiff); + break; + case CRYPTO_ALG_TYPE_AHASH: + seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", + rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name, + rk_cipher_algs[i]->alg.hash.halg.base.cra_name, + rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb); + break; + } + } + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs); +#endif + +static void register_debugfs(struct rk_crypto_info *crypto_info) +{ +#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG + /* Ignore error of debugfs */ + rocklist.dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL); + rocklist.dbgfs_stats = debugfs_create_file("stats", 0444, + rocklist.dbgfs_dir, + &rocklist, + &rk_crypto_debugfs_fops); +#endif +} + static int rk_crypto_register(struct rk_crypto_info *crypto_info) { unsigned int i, k; @@ -264,12 +247,22 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info) for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { rk_cipher_algs[i]->dev = crypto_info; - if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER) - err = crypto_register_skcipher( - &rk_cipher_algs[i]->alg.skcipher); - else - err = crypto_register_ahash( - &rk_cipher_algs[i]->alg.hash); + switch (rk_cipher_algs[i]->type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + dev_info(crypto_info->dev, "Register %s as %s\n", + rk_cipher_algs[i]->alg.skcipher.base.cra_name, + rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name); + err = crypto_register_skcipher(&rk_cipher_algs[i]->alg.skcipher); + break; + case CRYPTO_ALG_TYPE_AHASH: + dev_info(crypto_info->dev, "Register %s as %s\n", + rk_cipher_algs[i]->alg.hash.halg.base.cra_name, + rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name); + err = crypto_register_ahash(&rk_cipher_algs[i]->alg.hash); + break; + default: + dev_err(crypto_info->dev, "unknown algorithm\n"); + } if (err) goto err_cipher_algs; } @@ -277,7 +270,7 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info) err_cipher_algs: for (k = 0; k < i; k++) { - if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER) + if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER) crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher); else crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash); @@ -290,22 +283,23 @@ static void rk_crypto_unregister(void) unsigned int i; for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { - if 
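/*
 * With the tasklets and the private crypto_queue gone, the IRQ handler
 * only latches a status flag and fires a completion that the engine
 * worker sleeps on. The handshake, reduced to its essentials (start_dma()
 * stands in for the CRYPTO_WRITE() sequences):
 *
 *   // submit side: crypto-engine worker, sleepable context
 *   reinit_completion(&dev->complete);
 *   dev->status = 0;
 *   start_dma(dev);
 *   wait_for_completion_interruptible_timeout(&dev->complete,
 *                                             msecs_to_jiffies(2000));
 *   if (!dev->status)
 *           return -EFAULT;          // DMA error, or nothing completed
 *
 *   // IRQ side
 *   dev->status = 1;                 // knocked back to 0 on DMA error
 *   complete(&dev->complete);
 */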
(rk_cipher_algs[i]->type == ALG_TYPE_CIPHER) + if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER) crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher); else crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash); } } -static void rk_crypto_action(void *data) -{ - struct rk_crypto_info *crypto_info = data; - - reset_control_assert(crypto_info->rst); -} - static const struct of_device_id crypto_of_id_table[] = { - { .compatible = "rockchip,rk3288-crypto" }, + { .compatible = "rockchip,rk3288-crypto", + .data = &rk3288_variant, + }, + { .compatible = "rockchip,rk3328-crypto", + .data = &rk3328_variant, + }, + { .compatible = "rockchip,rk3399-crypto", + .data = &rk3399_variant, + }, {} }; MODULE_DEVICE_TABLE(of, crypto_of_id_table); @@ -313,7 +307,7 @@ MODULE_DEVICE_TABLE(of, crypto_of_id_table); static int rk_crypto_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct rk_crypto_info *crypto_info; + struct rk_crypto_info *crypto_info, *first; int err = 0; crypto_info = devm_kzalloc(&pdev->dev, @@ -323,7 +317,16 @@ static int rk_crypto_probe(struct platform_device *pdev) goto err_crypto; } - crypto_info->rst = devm_reset_control_get(dev, "crypto-rst"); + crypto_info->dev = &pdev->dev; + platform_set_drvdata(pdev, crypto_info); + + crypto_info->variant = of_device_get_match_data(&pdev->dev); + if (!crypto_info->variant) { + dev_err(&pdev->dev, "Missing variant\n"); + return -EINVAL; + } + + crypto_info->rst = devm_reset_control_array_get_exclusive(dev); if (IS_ERR(crypto_info->rst)) { err = PTR_ERR(crypto_info->rst); goto err_crypto; @@ -333,46 +336,18 @@ static int rk_crypto_probe(struct platform_device *pdev) usleep_range(10, 20); reset_control_deassert(crypto_info->rst); - err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info); - if (err) - goto err_crypto; - - spin_lock_init(&crypto_info->lock); - crypto_info->reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(crypto_info->reg)) { err = PTR_ERR(crypto_info->reg); goto err_crypto; } - crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk"); - if (IS_ERR(crypto_info->aclk)) { - err = PTR_ERR(crypto_info->aclk); - goto err_crypto; - } - - crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk"); - if (IS_ERR(crypto_info->hclk)) { - err = PTR_ERR(crypto_info->hclk); - goto err_crypto; - } - - crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk"); - if (IS_ERR(crypto_info->sclk)) { - err = PTR_ERR(crypto_info->sclk); - goto err_crypto; - } - - crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk"); - if (IS_ERR(crypto_info->dmaclk)) { - err = PTR_ERR(crypto_info->dmaclk); + err = rk_crypto_get_clks(crypto_info); + if (err) goto err_crypto; - } crypto_info->irq = platform_get_irq(pdev, 0); if (crypto_info->irq < 0) { - dev_warn(crypto_info->dev, - "control Interrupt is not available.\n"); err = crypto_info->irq; goto err_crypto; } @@ -382,49 +357,64 @@ static int rk_crypto_probe(struct platform_device *pdev) "rk-crypto", pdev); if (err) { - dev_err(crypto_info->dev, "irq request failed.\n"); + dev_err(&pdev->dev, "irq request failed.\n"); goto err_crypto; } - crypto_info->dev = &pdev->dev; - platform_set_drvdata(pdev, crypto_info); - - tasklet_init(&crypto_info->queue_task, - rk_crypto_queue_task_cb, (unsigned long)crypto_info); - tasklet_init(&crypto_info->done_task, - rk_crypto_done_task_cb, (unsigned long)crypto_info); - crypto_init_queue(&crypto_info->queue, 50); + crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true); + crypto_engine_start(crypto_info->engine); + 
init_completion(&crypto_info->complete); - crypto_info->enable_clk = rk_crypto_enable_clk; - crypto_info->disable_clk = rk_crypto_disable_clk; - crypto_info->load_data = rk_load_data; - crypto_info->unload_data = rk_unload_data; - crypto_info->enqueue = rk_crypto_enqueue; - crypto_info->busy = false; + err = rk_crypto_pm_init(crypto_info); + if (err) + goto err_pm; + + spin_lock(&rocklist.lock); + first = list_first_entry_or_null(&rocklist.dev_list, + struct rk_crypto_info, list); + list_add_tail(&crypto_info->list, &rocklist.dev_list); + spin_unlock(&rocklist.lock); + + if (!first) { + err = rk_crypto_register(crypto_info); + if (err) { + dev_err(dev, "Fail to register crypto algorithms"); + goto err_register_alg; + } - err = rk_crypto_register(crypto_info); - if (err) { - dev_err(dev, "err in register alg"); - goto err_register_alg; + register_debugfs(crypto_info); } - dev_info(dev, "Crypto Accelerator successfully registered\n"); return 0; err_register_alg: - tasklet_kill(&crypto_info->queue_task); - tasklet_kill(&crypto_info->done_task); + rk_crypto_pm_exit(crypto_info); +err_pm: + crypto_engine_exit(crypto_info->engine); err_crypto: + dev_err(dev, "Crypto Accelerator not successfully registered\n"); return err; } static int rk_crypto_remove(struct platform_device *pdev) { struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); - - rk_crypto_unregister(); - tasklet_kill(&crypto_tmp->done_task); - tasklet_kill(&crypto_tmp->queue_task); + struct rk_crypto_info *first; + + spin_lock_bh(&rocklist.lock); + list_del(&crypto_tmp->list); + first = list_first_entry_or_null(&rocklist.dev_list, + struct rk_crypto_info, list); + spin_unlock_bh(&rocklist.lock); + + if (!first) { +#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG + debugfs_remove_recursive(rocklist.dbgfs_dir); +#endif + rk_crypto_unregister(); + } + rk_crypto_pm_exit(crypto_tmp); + crypto_engine_exit(crypto_tmp->engine); return 0; } @@ -433,6 +423,7 @@ static struct platform_driver crypto_driver = { .remove = rk_crypto_remove, .driver = { .name = "rk3288-crypto", + .pm = &rk_crypto_pm_ops, .of_match_table = crypto_of_id_table, }, }; diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h index 97278c2574ff93a93c2c7f01467a6a1895f14264..b2695258cade969b8898adb0f8cdd1f335263935 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.h +++ b/drivers/crypto/rockchip/rk3288_crypto.h @@ -5,9 +5,13 @@ #include #include #include +#include #include +#include #include +#include #include +#include #include #include @@ -184,85 +188,91 @@ #define CRYPTO_WRITE(dev, offset, val) \ writel_relaxed((val), ((dev)->reg + (offset))) +#define RK_MAX_CLKS 4 + +/* + * struct rockchip_ip - struct for managing a list of RK crypto instance + * @dev_list: Used for doing a list of rk_crypto_info + * @lock: Control access to dev_list + * @dbgfs_dir: Debugfs dentry for statistic directory + * @dbgfs_stats: Debugfs dentry for statistic counters + */ +struct rockchip_ip { + struct list_head dev_list; + spinlock_t lock; /* Control access to dev_list */ + struct dentry *dbgfs_dir; + struct dentry *dbgfs_stats; +}; + +struct rk_clks { + const char *name; + unsigned long max; +}; + +struct rk_variant { + int num_clks; + struct rk_clks rkclks[RK_MAX_CLKS]; +}; + struct rk_crypto_info { + struct list_head list; struct device *dev; - struct clk *aclk; - struct clk *hclk; - struct clk *sclk; - struct clk *dmaclk; + struct clk_bulk_data *clks; + int num_clks; struct reset_control *rst; void __iomem *reg; int irq; - struct crypto_queue queue; - 
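/*
 * Several of the changes above only make sense for boards with more than
 * one crypto node (the rk3399 family appears to be the expected case):
 * instances live on the global rocklist, only the first probed one
 * registers the algorithms, and every request is dispatched through the
 * rotate-on-lookup helper shown earlier:
 *
 *   spin_lock(&rocklist.lock);
 *   first = list_first_entry_or_null(&rocklist.dev_list,
 *                                    struct rk_crypto_info, list);
 *   list_rotate_left(&rocklist.dev_list);  // next caller gets next device
 *   spin_unlock(&rocklist.lock);
 *
 * so back-to-back requests round-robin across whatever instances probed.
 */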
struct tasklet_struct queue_task; - struct tasklet_struct done_task; - struct crypto_async_request *async_req; - int err; - /* device lock */ - spinlock_t lock; - - /* the public variable */ - struct scatterlist *sg_src; - struct scatterlist *sg_dst; - struct scatterlist sg_tmp; - struct scatterlist *first; - unsigned int left_bytes; - void *addr_vir; - int aligned; - int align_size; - size_t src_nents; - size_t dst_nents; - unsigned int total; - unsigned int count; - dma_addr_t addr_in; - dma_addr_t addr_out; - bool busy; - int (*start)(struct rk_crypto_info *dev); - int (*update)(struct rk_crypto_info *dev); - void (*complete)(struct crypto_async_request *base, int err); - int (*enable_clk)(struct rk_crypto_info *dev); - void (*disable_clk)(struct rk_crypto_info *dev); - int (*load_data)(struct rk_crypto_info *dev, - struct scatterlist *sg_src, - struct scatterlist *sg_dst); - void (*unload_data)(struct rk_crypto_info *dev); - int (*enqueue)(struct rk_crypto_info *dev, - struct crypto_async_request *async_req); + const struct rk_variant *variant; + unsigned long nreq; + struct crypto_engine *engine; + struct completion complete; + int status; }; /* the private variable of hash */ struct rk_ahash_ctx { - struct rk_crypto_info *dev; + struct crypto_engine_ctx enginectx; /* for fallback */ struct crypto_ahash *fallback_tfm; }; -/* the privete variable of hash for fallback */ +/* the private variable of hash for fallback */ struct rk_ahash_rctx { + struct rk_crypto_info *dev; struct ahash_request fallback_req; u32 mode; + int nrsg; }; /* the private variable of cipher */ struct rk_cipher_ctx { - struct rk_crypto_info *dev; + struct crypto_engine_ctx enginectx; unsigned int keylen; - u32 mode; + u8 key[AES_MAX_KEY_SIZE]; u8 iv[AES_BLOCK_SIZE]; + struct crypto_skcipher *fallback_tfm; }; -enum alg_type { - ALG_TYPE_HASH, - ALG_TYPE_CIPHER, +struct rk_cipher_rctx { + struct rk_crypto_info *dev; + u8 backup_iv[AES_BLOCK_SIZE]; + u32 mode; + struct skcipher_request fallback_req; // keep at the end }; struct rk_crypto_tmp { - struct rk_crypto_info *dev; + u32 type; + struct rk_crypto_info *dev; union { struct skcipher_alg skcipher; struct ahash_alg hash; } alg; - enum alg_type type; + unsigned long stat_req; + unsigned long stat_fb; + unsigned long stat_fb_len; + unsigned long stat_fb_sglen; + unsigned long stat_fb_align; + unsigned long stat_fb_sgdiff; }; extern struct rk_crypto_tmp rk_ecb_aes_alg; @@ -276,4 +286,5 @@ extern struct rk_crypto_tmp rk_ahash_sha1; extern struct rk_crypto_tmp rk_ahash_sha256; extern struct rk_crypto_tmp rk_ahash_md5; +struct rk_crypto_info *get_rk_crypto(void); #endif diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index ed03058497bc2863e1c6333fb42f025efd85291b..a78ff3dcd0b1f366081948c34705d8b72f8a99ff 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -9,6 +9,8 @@ * Some ideas are from marvell/cesa.c and s5p-sss.c driver. */ #include +#include +#include #include "rk3288_crypto.h" /* @@ -16,6 +18,44 @@ * so we put the fixed hash out when met zero message. 
*/ +static bool rk_ahash_need_fallback(struct ahash_request *req) +{ + struct scatterlist *sg; + + sg = req->src; + while (sg) { + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { + return true; + } + if (sg->length % 4) { + return true; + } + sg = sg_next(sg); + } + return false; +} + +static int rk_ahash_digest_fb(struct ahash_request *areq) +{ + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash); + + algt->stat_fb++; + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = areq->nbytes; + rctx->fallback_req.src = areq->src; + rctx->fallback_req.result = areq->result; + + return crypto_ahash_digest(&rctx->fallback_req); +} + static int zero_message_process(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -38,15 +78,9 @@ static int zero_message_process(struct ahash_request *req) return 0; } -static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err) +static void rk_ahash_reg_init(struct ahash_request *req, + struct rk_crypto_info *dev) { - if (base->complete) - base->complete(base, err); -} - -static void rk_ahash_reg_init(struct rk_crypto_info *dev) -{ - struct ahash_request *req = ahash_request_cast(dev->async_req); struct rk_ahash_rctx *rctx = ahash_request_ctx(req); int reg_status; @@ -74,7 +108,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev) RK_CRYPTO_BYTESWAP_BRFIFO | RK_CRYPTO_BYTESWAP_BTFIFO); - CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total); + CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes); } static int rk_ahash_init(struct ahash_request *req) @@ -164,51 +198,80 @@ static int rk_ahash_export(struct ahash_request *req, void *out) static int rk_ahash_digest(struct ahash_request *req) { - struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); - struct rk_crypto_info *dev = tctx->dev; + struct rk_ahash_rctx *rctx = ahash_request_ctx(req); + struct rk_crypto_info *dev; + struct crypto_engine *engine; + + if (rk_ahash_need_fallback(req)) + return rk_ahash_digest_fb(req); if (!req->nbytes) return zero_message_process(req); - else - return dev->enqueue(dev, &req->base); + + dev = get_rk_crypto(); + + rctx->dev = dev; + engine = dev->engine; + + return crypto_transfer_hash_request_to_engine(engine, req); } -static void crypto_ahash_dma_start(struct rk_crypto_info *dev) +static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg) { - CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in); - CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4); + CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg)); + CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4); CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START | (RK_CRYPTO_HASH_START << 16)); } -static int rk_ahash_set_data_start(struct rk_crypto_info *dev) +static int rk_hash_prepare(struct crypto_engine *engine, void *breq) { - int err; + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct rk_crypto_info *rkc = rctx->dev; + int ret; - err = dev->load_data(dev, dev->sg_src, NULL); - if (!err) - crypto_ahash_dma_start(dev); - return err; + ret = 
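/*
 * The hash engine can only consume word-aligned scatterlist entries whose
 * lengths are multiples of 4, so anything else is punted to the software
 * fallback before the request ever reaches the engine. The test above
 * boils down to:
 *
 *   for (sg = req->src; sg; sg = sg_next(sg))
 *           if (!IS_ALIGNED(sg->offset, sizeof(u32)) || sg->length % 4)
 *                   return use_fallback(req);   // rk_ahash_digest_fb()
 *
 * where the fallback tfm was allocated with CRYPTO_ALG_NEED_FALLBACK at
 * cra_init time and simply replays the digest in software.
 */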
dma_map_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); + if (ret <= 0) + return -EINVAL; + + rctx->nrsg = ret; + + return 0; } -static int rk_ahash_start(struct rk_crypto_info *dev) +static int rk_hash_unprepare(struct crypto_engine *engine, void *breq) { - struct ahash_request *req = ahash_request_cast(dev->async_req); - struct crypto_ahash *tfm; - struct rk_ahash_rctx *rctx; - - dev->total = req->nbytes; - dev->left_bytes = req->nbytes; - dev->aligned = 0; - dev->align_size = 4; - dev->sg_dst = NULL; - dev->sg_src = req->src; - dev->first = req->src; - dev->src_nents = sg_nents(req->src); - rctx = ahash_request_ctx(req); + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct rk_crypto_info *rkc = rctx->dev; + + dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE); + return 0; +} + +static int rk_hash_run(struct crypto_engine *engine, void *breq) +{ + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash); + struct scatterlist *sg = areq->src; + struct rk_crypto_info *rkc = rctx->dev; + int err = 0; + int i; + u32 v; + + err = pm_runtime_resume_and_get(rkc->dev); + if (err) + return err; + rctx->mode = 0; - tfm = crypto_ahash_reqtfm(req); + algt->stat_req++; + rkc->nreq++; + switch (crypto_ahash_digestsize(tfm)) { case SHA1_DIGEST_SIZE: rctx->mode = RK_CRYPTO_HASH_SHA1; @@ -220,100 +283,88 @@ static int rk_ahash_start(struct rk_crypto_info *dev) rctx->mode = RK_CRYPTO_HASH_MD5; break; default: - return -EINVAL; + err = -EINVAL; + goto theend; } - rk_ahash_reg_init(dev); - return rk_ahash_set_data_start(dev); -} - -static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) -{ - int err = 0; - struct ahash_request *req = ahash_request_cast(dev->async_req); - struct crypto_ahash *tfm; - - dev->unload_data(dev); - if (dev->left_bytes) { - if (dev->aligned) { - if (sg_is_last(dev->sg_src)) { - dev_warn(dev->dev, "[%s:%d], Lack of data\n", - __func__, __LINE__); - err = -ENOMEM; - goto out_rx; - } - dev->sg_src = sg_next(dev->sg_src); + rk_ahash_reg_init(areq, rkc); + + while (sg) { + reinit_completion(&rkc->complete); + rkc->status = 0; + crypto_ahash_dma_start(rkc, sg); + wait_for_completion_interruptible_timeout(&rkc->complete, + msecs_to_jiffies(2000)); + if (!rkc->status) { + dev_err(rkc->dev, "DMA timeout\n"); + err = -EFAULT; + goto theend; } - err = rk_ahash_set_data_start(dev); - } else { - /* - * it will take some time to process date after last dma - * transmission. - * - * waiting time is relative with the last date len, - * so cannot set a fixed time here. - * 10us makes system not call here frequently wasting - * efficiency, and make it response quickly when dma - * complete. - */ - while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS)) - udelay(10); - - tfm = crypto_ahash_reqtfm(req); - memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0, - crypto_ahash_digestsize(tfm)); - dev->complete(dev->async_req, 0); - tasklet_schedule(&dev->queue_task); + sg = sg_next(sg); } -out_rx: - return err; + /* + * it will take some time to process date after last dma + * transmission. + * + * waiting time is relative with the last date len, + * so cannot set a fixed time here. 
+ * 10us makes system not call here frequently wasting + * efficiency, and make it response quickly when dma + * complete. + */ + readl_poll_timeout(rkc->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000); + + for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) { + v = readl(rkc->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4); + put_unaligned_le32(v, areq->result + i * 4); + } + +theend: + pm_runtime_put_autosuspend(rkc->dev); + + local_bh_disable(); + crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); + + return 0; } static int rk_cra_hash_init(struct crypto_tfm *tfm) { struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); - struct rk_crypto_tmp *algt; - struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); - const char *alg_name = crypto_tfm_alg_name(tfm); - - algt = container_of(alg, struct rk_crypto_tmp, alg.hash); - - tctx->dev = algt->dev; - tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL); - if (!tctx->dev->addr_vir) { - dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n"); - return -ENOMEM; - } - tctx->dev->start = rk_ahash_start; - tctx->dev->update = rk_ahash_crypto_rx; - tctx->dev->complete = rk_ahash_crypto_complete; + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash); /* for fallback */ tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0, - CRYPTO_ALG_NEED_FALLBACK); + CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(tctx->fallback_tfm)) { - dev_err(tctx->dev->dev, "Could not load fallback driver.\n"); + dev_err(algt->dev->dev, "Could not load fallback driver.\n"); return PTR_ERR(tctx->fallback_tfm); } + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct rk_ahash_rctx) + crypto_ahash_reqsize(tctx->fallback_tfm)); - return tctx->dev->enable_clk(tctx->dev); + tctx->enginectx.op.do_one_request = rk_hash_run; + tctx->enginectx.op.prepare_request = rk_hash_prepare; + tctx->enginectx.op.unprepare_request = rk_hash_unprepare; + + return 0; } static void rk_cra_hash_exit(struct crypto_tfm *tfm) { struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); - free_page((unsigned long)tctx->dev->addr_vir); - return tctx->dev->disable_clk(tctx->dev); + crypto_free_ahash(tctx->fallback_tfm); } struct rk_crypto_tmp rk_ahash_sha1 = { - .type = ALG_TYPE_HASH, + .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .init = rk_ahash_init, .update = rk_ahash_update, @@ -337,13 +388,13 @@ struct rk_crypto_tmp rk_ahash_sha1 = { .cra_init = rk_cra_hash_init, .cra_exit = rk_cra_hash_exit, .cra_module = THIS_MODULE, - } - } + } + } } }; struct rk_crypto_tmp rk_ahash_sha256 = { - .type = ALG_TYPE_HASH, + .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .init = rk_ahash_init, .update = rk_ahash_update, @@ -367,13 +418,13 @@ struct rk_crypto_tmp rk_ahash_sha256 = { .cra_init = rk_cra_hash_init, .cra_exit = rk_cra_hash_exit, .cra_module = THIS_MODULE, - } - } + } + } } }; struct rk_crypto_tmp rk_ahash_md5 = { - .type = ALG_TYPE_HASH, + .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .init = rk_ahash_init, .update = rk_ahash_update, @@ -397,7 +448,7 @@ struct rk_crypto_tmp rk_ahash_md5 = { .cra_init = rk_cra_hash_init, .cra_exit = rk_cra_hash_exit, .cra_module = THIS_MODULE, - } } + } } }; diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c index 5bbf0d2722e11cffef500ad42a27cb7d5e65980a..59069457582bf6d4a5194e13a8cc19caf9782034 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c @@ -9,23 
+9,94 @@ * Some ideas are from marvell-cesa.c and s5p-sss.c driver. */ #include +#include #include "rk3288_crypto.h" #define RK_CRYPTO_DEC BIT(0) -static void rk_crypto_complete(struct crypto_async_request *base, int err) +static int rk_cipher_need_fallback(struct skcipher_request *req) { - if (base->complete) - base->complete(base, err); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + struct scatterlist *sgs, *sgd; + unsigned int stodo, dtodo, len; + unsigned int bs = crypto_skcipher_blocksize(tfm); + + if (!req->cryptlen) + return true; + + len = req->cryptlen; + sgs = req->src; + sgd = req->dst; + while (sgs && sgd) { + if (!IS_ALIGNED(sgs->offset, sizeof(u32))) { + algt->stat_fb_align++; + return true; + } + if (!IS_ALIGNED(sgd->offset, sizeof(u32))) { + algt->stat_fb_align++; + return true; + } + stodo = min(len, sgs->length); + if (stodo % bs) { + algt->stat_fb_len++; + return true; + } + dtodo = min(len, sgd->length); + if (dtodo % bs) { + algt->stat_fb_len++; + return true; + } + if (stodo != dtodo) { + algt->stat_fb_sgdiff++; + return true; + } + len -= stodo; + sgs = sg_next(sgs); + sgd = sg_next(sgd); + } + return false; } -static int rk_handle_req(struct rk_crypto_info *dev, - struct skcipher_request *req) +static int rk_cipher_fallback(struct skcipher_request *areq) { - if (!IS_ALIGNED(req->cryptlen, dev->align_size)) - return -EINVAL; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + int err; + + algt->stat_fb++; + + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, + areq->cryptlen, areq->iv); + if (rctx->mode & RK_CRYPTO_DEC) + err = crypto_skcipher_decrypt(&rctx->fallback_req); else - return dev->enqueue(dev, &req->base); + err = crypto_skcipher_encrypt(&rctx->fallback_req); + return err; +} + +static int rk_cipher_handle_req(struct skcipher_request *req) +{ + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); + struct rk_crypto_info *rkc; + struct crypto_engine *engine; + + if (rk_cipher_need_fallback(req)) + return rk_cipher_fallback(req); + + rkc = get_rk_crypto(); + + engine = rkc->engine; + rctx->dev = rkc; + + return crypto_transfer_skcipher_request_to_engine(engine, req); } static int rk_aes_setkey(struct crypto_skcipher *cipher, @@ -38,8 +109,9 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher, keylen != AES_KEYSIZE_256) return -EINVAL; ctx->keylen = keylen; - memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen); - return 0; + memcpy(ctx->key, key, keylen); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_des_setkey(struct crypto_skcipher *cipher, @@ -53,8 +125,9 @@ static int rk_des_setkey(struct crypto_skcipher *cipher, return err; ctx->keylen = keylen; - memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); - return 0; + memcpy(ctx->key, key, keylen); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_tdes_setkey(struct crypto_skcipher *cipher, @@ 
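/*
 * The cipher-side fallback test extends the hash one: besides word
 * alignment it needs every src/dst scatterlist pair to be block-aligned
 * and of equal length, because the engine processes exactly one src/dst
 * pair per DMA round. Schematically, with bs the cipher block size:
 *
 *   len = req->cryptlen;
 *   for (sgs = req->src, sgd = req->dst; sgs && sgd;
 *        sgs = sg_next(sgs), sgd = sg_next(sgd)) {
 *           stodo = min(len, sgs->length);
 *           dtodo = min(len, sgd->length);
 *           if (stodo % bs || dtodo % bs || stodo != dtodo)
 *                   return true;                // use the fallback
 *           len -= stodo;
 *   }
 *
 * The stat_fb_align/stat_fb_len/stat_fb_sgdiff counters record which rule
 * fired and surface through the debugfs "stats" file added earlier.
 */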
-68,161 +141,136 @@ static int rk_tdes_setkey(struct crypto_skcipher *cipher, return err; ctx->keylen = keylen; - memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); - return 0; + memcpy(ctx->key, key, keylen); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_aes_ecb_encrypt(struct skcipher_request *req) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct rk_crypto_info *dev = ctx->dev; + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - ctx->mode = RK_CRYPTO_AES_ECB_MODE; - return rk_handle_req(dev, req); + rctx->mode = RK_CRYPTO_AES_ECB_MODE; + return rk_cipher_handle_req(req); } static int rk_aes_ecb_decrypt(struct skcipher_request *req) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct rk_crypto_info *dev = ctx->dev; + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; - return rk_handle_req(dev, req); + rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; + return rk_cipher_handle_req(req); } static int rk_aes_cbc_encrypt(struct skcipher_request *req) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct rk_crypto_info *dev = ctx->dev; + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - ctx->mode = RK_CRYPTO_AES_CBC_MODE; - return rk_handle_req(dev, req); + rctx->mode = RK_CRYPTO_AES_CBC_MODE; + return rk_cipher_handle_req(req); } static int rk_aes_cbc_decrypt(struct skcipher_request *req) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct rk_crypto_info *dev = ctx->dev; + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; - return rk_handle_req(dev, req); + rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; + return rk_cipher_handle_req(req); } static int rk_des_ecb_encrypt(struct skcipher_request *req) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct rk_crypto_info *dev = ctx->dev; + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - ctx->mode = 0; - return rk_handle_req(dev, req); + rctx->mode = 0; + return rk_cipher_handle_req(req); } static int rk_des_ecb_decrypt(struct skcipher_request *req) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct rk_crypto_info *dev = ctx->dev; + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - ctx->mode = RK_CRYPTO_DEC; - return rk_handle_req(dev, req); + rctx->mode = RK_CRYPTO_DEC; + return rk_cipher_handle_req(req); } static int rk_des_cbc_encrypt(struct skcipher_request *req) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct rk_crypto_info *dev = ctx->dev; + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; - return rk_handle_req(dev, req); + rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; + return rk_cipher_handle_req(req); } static int rk_des_cbc_decrypt(struct skcipher_request *req) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct rk_crypto_info *dev = ctx->dev; + struct rk_cipher_rctx *rctx 
= skcipher_request_ctx(req); - ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; - return rk_handle_req(dev, req); + rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; + return rk_cipher_handle_req(req); } static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct rk_crypto_info *dev = ctx->dev; + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - ctx->mode = RK_CRYPTO_TDES_SELECT; - return rk_handle_req(dev, req); + rctx->mode = RK_CRYPTO_TDES_SELECT; + return rk_cipher_handle_req(req); } static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct rk_crypto_info *dev = ctx->dev; + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; - return rk_handle_req(dev, req); + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; + return rk_cipher_handle_req(req); } static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct rk_crypto_info *dev = ctx->dev; + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; - return rk_handle_req(dev, req); + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; + return rk_cipher_handle_req(req); } static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct rk_crypto_info *dev = ctx->dev; + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; - return rk_handle_req(dev, req); + return rk_cipher_handle_req(req); } -static void rk_ablk_hw_init(struct rk_crypto_info *dev) +static void rk_cipher_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req) { - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); - u32 ivsize, block, conf_reg = 0; + u32 block, conf_reg = 0; block = crypto_tfm_alg_blocksize(tfm); - ivsize = crypto_skcipher_ivsize(cipher); if (block == DES_BLOCK_SIZE) { - ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | + rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | RK_CRYPTO_TDES_BYTESWAP_KEY | RK_CRYPTO_TDES_BYTESWAP_IV; - CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode); - memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize); + CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode); + memcpy_toio(dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen); conf_reg = RK_CRYPTO_DESSEL; } else { - ctx->mode |= RK_CRYPTO_AES_FIFO_MODE | + rctx->mode |= RK_CRYPTO_AES_FIFO_MODE | RK_CRYPTO_AES_KEY_CHANGE | RK_CRYPTO_AES_BYTESWAP_KEY | RK_CRYPTO_AES_BYTESWAP_IV; if (ctx->keylen == AES_KEYSIZE_192) - ctx->mode |= RK_CRYPTO_AES_192BIT_key; + rctx->mode |= RK_CRYPTO_AES_192BIT_key; else if (ctx->keylen == AES_KEYSIZE_256) - ctx->mode |= RK_CRYPTO_AES_256BIT_key; - 
CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode); - memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize); + rctx->mode |= RK_CRYPTO_AES_256BIT_key; + CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode); + memcpy_toio(dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen); } conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO | RK_CRYPTO_BYTESWAP_BRFIFO; @@ -231,189 +279,196 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev) RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA); } -static void crypto_dma_start(struct rk_crypto_info *dev) +static void crypto_dma_start(struct rk_crypto_info *dev, + struct scatterlist *sgs, + struct scatterlist *sgd, unsigned int todo) { - CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in); - CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4); - CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out); + CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs)); + CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo); + CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd)); CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START | _SBF(RK_CRYPTO_BLOCK_START, 16)); } -static int rk_set_data_start(struct rk_crypto_info *dev) +static int rk_cipher_run(struct crypto_engine *engine, void *async_req) { - int err; - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 ivsize = crypto_skcipher_ivsize(tfm); - u8 *src_last_blk = page_address(sg_page(dev->sg_src)) + - dev->sg_src->offset + dev->sg_src->length - ivsize; - - /* Store the iv that need to be updated in chain mode. - * And update the IV buffer to contain the next IV for decryption mode. - */ - if (ctx->mode & RK_CRYPTO_DEC) { - memcpy(ctx->iv, src_last_blk, ivsize); - sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv, - ivsize, dev->total - ivsize); - } - - err = dev->load_data(dev, dev->sg_src, dev->sg_dst); - if (!err) - crypto_dma_start(dev); - return err; -} - -static int rk_ablk_start(struct rk_crypto_info *dev) -{ - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - unsigned long flags; + struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); + struct scatterlist *sgs, *sgd; int err = 0; + int ivsize = crypto_skcipher_ivsize(tfm); + int offset; + u8 iv[AES_BLOCK_SIZE]; + u8 biv[AES_BLOCK_SIZE]; + u8 *ivtouse = areq->iv; + unsigned int len = areq->cryptlen; + unsigned int todo; + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + struct rk_crypto_info *rkc = rctx->dev; - dev->left_bytes = req->cryptlen; - dev->total = req->cryptlen; - dev->sg_src = req->src; - dev->first = req->src; - dev->src_nents = sg_nents(req->src); - dev->sg_dst = req->dst; - dev->dst_nents = sg_nents(req->dst); - dev->aligned = 1; - - spin_lock_irqsave(&dev->lock, flags); - rk_ablk_hw_init(dev); - err = rk_set_data_start(dev); - spin_unlock_irqrestore(&dev->lock, flags); - return err; -} + err = pm_runtime_resume_and_get(rkc->dev); + if (err) + return err; -static void rk_iv_copyback(struct rk_crypto_info *dev) -{ - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 ivsize = 
crypto_skcipher_ivsize(tfm); + algt->stat_req++; + rkc->nreq++; - /* Update the IV buffer to contain the next IV for encryption mode. */ - if (!(ctx->mode & RK_CRYPTO_DEC)) { - if (dev->aligned) { - memcpy(req->iv, sg_virt(dev->sg_dst) + - dev->sg_dst->length - ivsize, ivsize); - } else { - memcpy(req->iv, dev->addr_vir + - dev->count - ivsize, ivsize); + ivsize = crypto_skcipher_ivsize(tfm); + if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { + if (rctx->mode & RK_CRYPTO_DEC) { + offset = areq->cryptlen - ivsize; + scatterwalk_map_and_copy(rctx->backup_iv, areq->src, + offset, ivsize, 0); } } -} -static void rk_update_iv(struct rk_crypto_info *dev) -{ - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 ivsize = crypto_skcipher_ivsize(tfm); - u8 *new_iv = NULL; + sgs = areq->src; + sgd = areq->dst; - if (ctx->mode & RK_CRYPTO_DEC) { - new_iv = ctx->iv; - } else { - new_iv = page_address(sg_page(dev->sg_dst)) + - dev->sg_dst->offset + dev->sg_dst->length - ivsize; + while (sgs && sgd && len) { + if (!sgs->length) { + sgs = sg_next(sgs); + sgd = sg_next(sgd); + continue; + } + if (rctx->mode & RK_CRYPTO_DEC) { + /* we backup last block of source to be used as IV at next step */ + offset = sgs->length - ivsize; + scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0); + } + if (sgs == sgd) { + err = dma_map_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL); + if (err <= 0) { + err = -EINVAL; + goto theend_iv; + } + } else { + err = dma_map_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE); + if (err <= 0) { + err = -EINVAL; + goto theend_iv; + } + err = dma_map_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE); + if (err <= 0) { + err = -EINVAL; + goto theend_sgs; + } + } + err = 0; + rk_cipher_hw_init(rkc, areq); + if (ivsize) { + if (ivsize == DES_BLOCK_SIZE) + memcpy_toio(rkc->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize); + else + memcpy_toio(rkc->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize); + } + reinit_completion(&rkc->complete); + rkc->status = 0; + + todo = min(sg_dma_len(sgs), len); + len -= todo; + crypto_dma_start(rkc, sgs, sgd, todo / 4); + wait_for_completion_interruptible_timeout(&rkc->complete, + msecs_to_jiffies(2000)); + if (!rkc->status) { + dev_err(rkc->dev, "DMA timeout\n"); + err = -EFAULT; + goto theend; + } + if (sgs == sgd) { + dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE); + dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE); + } + if (rctx->mode & RK_CRYPTO_DEC) { + memcpy(iv, biv, ivsize); + ivtouse = iv; + } else { + offset = sgd->length - ivsize; + scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0); + ivtouse = iv; + } + sgs = sg_next(sgs); + sgd = sg_next(sgd); } - if (ivsize == DES_BLOCK_SIZE) - memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize); - else if (ivsize == AES_BLOCK_SIZE) - memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize); -} - -/* return: - * true some err was occurred - * fault no err, continue - */ -static int rk_ablk_rx(struct rk_crypto_info *dev) -{ - int err = 0; - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - - dev->unload_data(dev); - if (!dev->aligned) { - if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents, - dev->addr_vir, dev->count, - dev->total - dev->left_bytes - - dev->count)) { - err = -EINVAL; - goto out_rx; + if (areq->iv && ivsize > 0) { + offset = areq->cryptlen - ivsize; + if (rctx->mode & RK_CRYPTO_DEC) { + 
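/*
 * Because the engine is fed one scatterlist pair at a time, CBC chaining
 * is stitched together in software around each DMA round, as the loop
 * above does:
 *
 *   if (rctx->mode & RK_CRYPTO_DEC)
 *           // next IV = last ciphertext block of this *source* chunk,
 *           // saved before the hardware overwrites anything
 *           scatterwalk_map_and_copy(biv, sgs, sgs->length - ivsize,
 *                                    ivsize, 0);
 *   ... map, run DMA, unmap ...
 *   if (rctx->mode & RK_CRYPTO_DEC)
 *           memcpy(iv, biv, ivsize);
 *   else
 *           // next IV = last ciphertext block just written to *dst*
 *           scatterwalk_map_and_copy(iv, sgd, sgd->length - ivsize,
 *                                    ivsize, 0);
 *   ivtouse = iv;        // programmed into the IV registers next round
 *
 * The copy back into areq->iv at the end keeps the skcipher convention
 * that the request IV holds the next chaining value on completion.
 */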
memcpy(areq->iv, rctx->backup_iv, ivsize); + memzero_explicit(rctx->backup_iv, ivsize); + } else { + scatterwalk_map_and_copy(areq->iv, areq->dst, offset, + ivsize, 0); } } - if (dev->left_bytes) { - rk_update_iv(dev); - if (dev->aligned) { - if (sg_is_last(dev->sg_src)) { - dev_err(dev->dev, "[%s:%d] Lack of data\n", - __func__, __LINE__); - err = -ENOMEM; - goto out_rx; - } - dev->sg_src = sg_next(dev->sg_src); - dev->sg_dst = sg_next(dev->sg_dst); - } - err = rk_set_data_start(dev); + +theend: + pm_runtime_put_autosuspend(rkc->dev); + + local_bh_disable(); + crypto_finalize_skcipher_request(engine, areq, err); + local_bh_enable(); + return 0; + +theend_sgs: + if (sgs == sgd) { + dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL); } else { - rk_iv_copyback(dev); - /* here show the calculation is over without any err */ - dev->complete(dev->async_req, 0); - tasklet_schedule(&dev->queue_task); + dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE); + dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE); } -out_rx: +theend_iv: return err; } -static int rk_ablk_init_tfm(struct crypto_skcipher *tfm) +static int rk_cipher_tfm_init(struct crypto_skcipher *tfm) { struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + const char *name = crypto_tfm_alg_name(&tfm->base); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - struct rk_crypto_tmp *algt; + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); - algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback_tfm)) { + dev_err(algt->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", + name, PTR_ERR(ctx->fallback_tfm)); + return PTR_ERR(ctx->fallback_tfm); + } + + tfm->reqsize = sizeof(struct rk_cipher_rctx) + + crypto_skcipher_reqsize(ctx->fallback_tfm); - ctx->dev = algt->dev; - ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1; - ctx->dev->start = rk_ablk_start; - ctx->dev->update = rk_ablk_rx; - ctx->dev->complete = rk_crypto_complete; - ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL); + ctx->enginectx.op.do_one_request = rk_cipher_run; - return ctx->dev->addr_vir ? 
ctx->dev->enable_clk(ctx->dev) : -ENOMEM; + return 0; } -static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm) +static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm) { struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - free_page((unsigned long)ctx->dev->addr_vir); - ctx->dev->disable_clk(ctx->dev); + memzero_explicit(ctx->key, ctx->keylen); + crypto_free_skcipher(ctx->fallback_tfm); } struct rk_crypto_tmp rk_ecb_aes_alg = { - .type = ALG_TYPE_CIPHER, + .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "ecb-aes-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x0f, .base.cra_module = THIS_MODULE, - .init = rk_ablk_init_tfm, - .exit = rk_ablk_exit_tfm, + .init = rk_cipher_tfm_init, + .exit = rk_cipher_tfm_exit, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = rk_aes_setkey, @@ -423,19 +478,19 @@ struct rk_crypto_tmp rk_ecb_aes_alg = { }; struct rk_crypto_tmp rk_cbc_aes_alg = { - .type = ALG_TYPE_CIPHER, + .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cbc-aes-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x0f, .base.cra_module = THIS_MODULE, - .init = rk_ablk_init_tfm, - .exit = rk_ablk_exit_tfm, + .init = rk_cipher_tfm_init, + .exit = rk_cipher_tfm_exit, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, @@ -446,19 +501,19 @@ struct rk_crypto_tmp rk_cbc_aes_alg = { }; struct rk_crypto_tmp rk_ecb_des_alg = { - .type = ALG_TYPE_CIPHER, + .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "ecb(des)", .base.cra_driver_name = "ecb-des-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, .base.cra_module = THIS_MODULE, - .init = rk_ablk_init_tfm, - .exit = rk_ablk_exit_tfm, + .init = rk_cipher_tfm_init, + .exit = rk_cipher_tfm_exit, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .setkey = rk_des_setkey, @@ -468,19 +523,19 @@ struct rk_crypto_tmp rk_ecb_des_alg = { }; struct rk_crypto_tmp rk_cbc_des_alg = { - .type = ALG_TYPE_CIPHER, + .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "cbc(des)", .base.cra_driver_name = "cbc-des-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, .base.cra_module = THIS_MODULE, - .init = rk_ablk_init_tfm, - .exit = rk_ablk_exit_tfm, + .init = rk_cipher_tfm_init, + .exit = rk_cipher_tfm_exit, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, @@ -491,19 +546,19 @@ struct rk_crypto_tmp rk_cbc_des_alg = { }; struct rk_crypto_tmp rk_ecb_des3_ede_alg = { - .type = ALG_TYPE_CIPHER, + .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "ecb-des3-ede-rk", 
.base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, .base.cra_module = THIS_MODULE, - .init = rk_ablk_init_tfm, - .exit = rk_ablk_exit_tfm, + .init = rk_cipher_tfm_init, + .exit = rk_cipher_tfm_exit, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = rk_tdes_setkey, @@ -513,19 +568,19 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = { }; struct rk_crypto_tmp rk_cbc_des3_ede_alg = { - .type = ALG_TYPE_CIPHER, + .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cbc-des3-ede-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, .base.cra_module = THIS_MODULE, - .init = rk_ablk_init_tfm, - .exit = rk_ablk_exit_tfm, + .init = rk_cipher_tfm_init, + .exit = rk_cipher_tfm_exit, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig index 4a4c3284ae1f3b5de3187a2082a858c79582e187..4fc581e9e59569a32230d0e84ba543944fb02507 100644 --- a/drivers/crypto/stm32/Kconfig +++ b/drivers/crypto/stm32/Kconfig @@ -10,7 +10,7 @@ config CRYPTO_DEV_STM32_CRC config CRYPTO_DEV_STM32_HASH tristate "Support for STM32 hash accelerators" - depends on ARCH_STM32 + depends on ARCH_STM32 || ARCH_U8500 depends on HAS_DMA select CRYPTO_HASH select CRYPTO_MD5 @@ -23,7 +23,7 @@ config CRYPTO_DEV_STM32_HASH config CRYPTO_DEV_STM32_CRYP tristate "Support for STM32 cryp accelerators" - depends on ARCH_STM32 + depends on ARCH_STM32 || ARCH_U8500 select CRYPTO_HASH select CRYPTO_ENGINE select CRYPTO_LIB_DES diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 59ef541123ae62d9bf29ffbe20462a079fafc312..4208338e72b6051da268abdbace9790c2186b346 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -2,6 +2,7 @@ /* * Copyright (C) STMicroelectronics SA 2017 * Author: Fabien Dessenne + * Ux500 support taken from snippets in the old Ux500 cryp driver */ #include @@ -62,6 +63,29 @@ #define CRYP_CSGCMCCM0R 0x00000050 #define CRYP_CSGCM0R 0x00000070 +#define UX500_CRYP_CR 0x00000000 +#define UX500_CRYP_SR 0x00000004 +#define UX500_CRYP_DIN 0x00000008 +#define UX500_CRYP_DINSIZE 0x0000000C +#define UX500_CRYP_DOUT 0x00000010 +#define UX500_CRYP_DOUSIZE 0x00000014 +#define UX500_CRYP_DMACR 0x00000018 +#define UX500_CRYP_IMSC 0x0000001C +#define UX500_CRYP_RIS 0x00000020 +#define UX500_CRYP_MIS 0x00000024 +#define UX500_CRYP_K1L 0x00000028 +#define UX500_CRYP_K1R 0x0000002C +#define UX500_CRYP_K2L 0x00000030 +#define UX500_CRYP_K2R 0x00000034 +#define UX500_CRYP_K3L 0x00000038 +#define UX500_CRYP_K3R 0x0000003C +#define UX500_CRYP_K4L 0x00000040 +#define UX500_CRYP_K4R 0x00000044 +#define UX500_CRYP_IV0L 0x00000048 +#define UX500_CRYP_IV0R 0x0000004C +#define UX500_CRYP_IV1L 0x00000050 +#define UX500_CRYP_IV1R 0x00000054 + /* Registers values */ #define CR_DEC_NOT_ENC 0x00000004 #define CR_TDES_ECB 0x00000000 @@ -71,7 +95,8 @@ #define CR_AES_ECB 0x00000020 #define CR_AES_CBC 0x00000028 #define CR_AES_CTR 0x00000030 -#define CR_AES_KP 0x00000038 +#define CR_AES_KP 0x00000038 /* Not on Ux500 */ +#define 
CR_AES_XTS 0x00000038 /* Only on Ux500 */ #define CR_AES_GCM 0x00080000 #define CR_AES_CCM 0x00080008 #define CR_AES_UNKNOWN 0xFFFFFFFF @@ -83,6 +108,8 @@ #define CR_KEY128 0x00000000 #define CR_KEY192 0x00000100 #define CR_KEY256 0x00000200 +#define CR_KEYRDEN 0x00000400 /* Only on Ux500 */ +#define CR_KSE 0x00000800 /* Only on Ux500 */ #define CR_FFLUSH 0x00004000 #define CR_CRYPEN 0x00008000 #define CR_PH_INIT 0x00000000 @@ -107,8 +134,25 @@ #define CRYP_AUTOSUSPEND_DELAY 50 struct stm32_cryp_caps { - bool swap_final; - bool padding_wa; + bool aeads_support; + bool linear_aes_key; + bool kp_mode; + bool iv_protection; + bool swap_final; + bool padding_wa; + u32 cr; + u32 sr; + u32 din; + u32 dout; + u32 imsc; + u32 mis; + u32 k1l; + u32 k1r; + u32 k3r; + u32 iv0l; + u32 iv0r; + u32 iv1l; + u32 iv1r; }; struct stm32_cryp_ctx { @@ -228,20 +272,21 @@ static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp) { u32 status; - return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status, + return readl_relaxed_poll_timeout(cryp->regs + cryp->caps->sr, status, !(status & SR_BUSY), 10, 100000); } static inline void stm32_cryp_enable(struct stm32_cryp *cryp) { - writel_relaxed(readl_relaxed(cryp->regs + CRYP_CR) | CR_CRYPEN, cryp->regs + CRYP_CR); + writel_relaxed(readl_relaxed(cryp->regs + cryp->caps->cr) | CR_CRYPEN, + cryp->regs + cryp->caps->cr); } static inline int stm32_cryp_wait_enable(struct stm32_cryp *cryp) { u32 status; - return readl_relaxed_poll_timeout(cryp->regs + CRYP_CR, status, + return readl_relaxed_poll_timeout(cryp->regs + cryp->caps->cr, status, !(status & CR_CRYPEN), 10, 100000); } @@ -249,10 +294,22 @@ static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp) { u32 status; - return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status, + return readl_relaxed_poll_timeout(cryp->regs + cryp->caps->sr, status, status & SR_OFNE, 10, 100000); } +static inline void stm32_cryp_key_read_enable(struct stm32_cryp *cryp) +{ + writel_relaxed(readl_relaxed(cryp->regs + cryp->caps->cr) | CR_KEYRDEN, + cryp->regs + cryp->caps->cr); +} + +static inline void stm32_cryp_key_read_disable(struct stm32_cryp *cryp) +{ + writel_relaxed(readl_relaxed(cryp->regs + cryp->caps->cr) & ~CR_KEYRDEN, + cryp->regs + cryp->caps->cr); +} + static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp); static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err); @@ -281,12 +338,12 @@ static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, __be32 *iv) if (!iv) return; - stm32_cryp_write(cryp, CRYP_IV0LR, be32_to_cpu(*iv++)); - stm32_cryp_write(cryp, CRYP_IV0RR, be32_to_cpu(*iv++)); + stm32_cryp_write(cryp, cryp->caps->iv0l, be32_to_cpu(*iv++)); + stm32_cryp_write(cryp, cryp->caps->iv0r, be32_to_cpu(*iv++)); if (is_aes(cryp)) { - stm32_cryp_write(cryp, CRYP_IV1LR, be32_to_cpu(*iv++)); - stm32_cryp_write(cryp, CRYP_IV1RR, be32_to_cpu(*iv++)); + stm32_cryp_write(cryp, cryp->caps->iv1l, be32_to_cpu(*iv++)); + stm32_cryp_write(cryp, cryp->caps->iv1r, be32_to_cpu(*iv++)); } } @@ -298,12 +355,102 @@ static void stm32_cryp_get_iv(struct stm32_cryp *cryp) if (!tmp) return; - *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR)); - *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR)); + if (cryp->caps->iv_protection) + stm32_cryp_key_read_enable(cryp); + + *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0l)); + *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0r)); if (is_aes(cryp)) { - *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR)); - *tmp++ = 
cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR)); + *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1l)); + *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1r)); + } + + if (cryp->caps->iv_protection) + stm32_cryp_key_read_disable(cryp); +} + +/** + * ux500_swap_bits_in_byte() - mirror the bits in a byte + * @b: the byte to be mirrored + * + * The bits are swapped the following way: + * Byte b include bits 0-7, nibble 1 (n1) include bits 0-3 and + * nibble 2 (n2) bits 4-7. + * + * Nibble 1 (n1): + * (The "old" (moved) bit is replaced with a zero) + * 1. Move bit 6 and 7, 4 positions to the left. + * 2. Move bit 3 and 5, 2 positions to the left. + * 3. Move bit 1-4, 1 position to the left. + * + * Nibble 2 (n2): + * 1. Move bit 0 and 1, 4 positions to the right. + * 2. Move bit 2 and 4, 2 positions to the right. + * 3. Move bit 3-6, 1 position to the right. + * + * Combine the two nibbles to a complete and swapped byte. + */ +static inline u8 ux500_swap_bits_in_byte(u8 b) +{ +#define R_SHIFT_4_MASK 0xc0 /* Bits 6 and 7, right shift 4 */ +#define R_SHIFT_2_MASK 0x28 /* (After right shift 4) Bits 3 and 5, + right shift 2 */ +#define R_SHIFT_1_MASK 0x1e /* (After right shift 2) Bits 1-4, + right shift 1 */ +#define L_SHIFT_4_MASK 0x03 /* Bits 0 and 1, left shift 4 */ +#define L_SHIFT_2_MASK 0x14 /* (After left shift 4) Bits 2 and 4, + left shift 2 */ +#define L_SHIFT_1_MASK 0x78 /* (After left shift 1) Bits 3-6, + left shift 1 */ + + u8 n1; + u8 n2; + + /* Swap most significant nibble */ + /* Right shift 4, bits 6 and 7 */ + n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4)); + /* Right shift 2, bits 3 and 5 */ + n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2)); + /* Right shift 1, bits 1-4 */ + n1 = (n1 & R_SHIFT_1_MASK) >> 1; + + /* Swap least significant nibble */ + /* Left shift 4, bits 0 and 1 */ + n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4)); + /* Left shift 2, bits 2 and 4 */ + n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2)); + /* Left shift 1, bits 3-6 */ + n2 = (n2 & L_SHIFT_1_MASK) << 1; + + return n1 | n2; +} + +/** + * ux500_swizzle_key() - Shuffle around words and bits in the AES key + * @in: key to swizzle + * @out: swizzled key + * @len: length of key, in bytes + * + * This "key swizzling procedure" is described in the examples in the + * DB8500 design specification. There is no real description of why + * the bits have been arranged like this in the hardware. + */ +static inline void ux500_swizzle_key(const u8 *in, u8 *out, u32 len) +{ + int i = 0; + int bpw = sizeof(u32); + int j; + int index = 0; + + j = len - bpw; + while (j >= 0) { + for (i = 0; i < bpw; i++) { + index = len - j - bpw + i; + out[j + i] = + ux500_swap_bits_in_byte(in[index]); + } + j -= bpw; } } @@ -313,14 +460,33 @@ static void stm32_cryp_hw_write_key(struct stm32_cryp *c) int r_id; if (is_des(c)) { - stm32_cryp_write(c, CRYP_K1LR, be32_to_cpu(c->ctx->key[0])); - stm32_cryp_write(c, CRYP_K1RR, be32_to_cpu(c->ctx->key[1])); - } else { - r_id = CRYP_K3RR; - for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4) - stm32_cryp_write(c, r_id, - be32_to_cpu(c->ctx->key[i - 1])); + stm32_cryp_write(c, c->caps->k1l, be32_to_cpu(c->ctx->key[0])); + stm32_cryp_write(c, c->caps->k1r, be32_to_cpu(c->ctx->key[1])); + return; } + + /* + * On the Ux500 the AES key is considered as a single bit sequence + * of 128, 192 or 256 bits length. 
It is written linearly into the + * registers from K1L and down, and needs to be processed to become + * a proper big-endian bit sequence. + */ + if (is_aes(c) && c->caps->linear_aes_key) { + u32 tmpkey[8]; + + ux500_swizzle_key((u8 *)c->ctx->key, + (u8 *)tmpkey, c->ctx->keylen); + + r_id = c->caps->k1l; + for (i = 0; i < c->ctx->keylen / sizeof(u32); i++, r_id += 4) + stm32_cryp_write(c, r_id, tmpkey[i]); + + return; + } + + r_id = c->caps->k3r; + for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4) + stm32_cryp_write(c, r_id, be32_to_cpu(c->ctx->key[i - 1])); } static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp) @@ -373,7 +539,7 @@ static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg) cryp->gcm_ctr = GCM_CTR_INIT; stm32_cryp_hw_write_iv(cryp, iv); - stm32_cryp_write(cryp, CRYP_CR, cfg | CR_PH_INIT | CR_CRYPEN); + stm32_cryp_write(cryp, cryp->caps->cr, cfg | CR_PH_INIT | CR_CRYPEN); /* Wait for end of processing */ ret = stm32_cryp_wait_enable(cryp); @@ -385,10 +551,10 @@ static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg) /* Prepare next phase */ if (cryp->areq->assoclen) { cfg |= CR_PH_HEADER; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); } else if (stm32_cryp_get_input_text_len(cryp)) { cfg |= CR_PH_PAYLOAD; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); } return 0; @@ -405,20 +571,20 @@ static void stm32_crypt_gcmccm_end_header(struct stm32_cryp *cryp) err = stm32_cryp_wait_busy(cryp); if (err) { dev_err(cryp->dev, "Timeout (gcm/ccm header)\n"); - stm32_cryp_write(cryp, CRYP_IMSCR, 0); + stm32_cryp_write(cryp, cryp->caps->imsc, 0); stm32_cryp_finish_req(cryp, err); return; } if (stm32_cryp_get_input_text_len(cryp)) { /* Phase 3 : payload */ - cfg = stm32_cryp_read(cryp, CRYP_CR); + cfg = stm32_cryp_read(cryp, cryp->caps->cr); cfg &= ~CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); cfg &= ~CR_PH_MASK; cfg |= CR_PH_PAYLOAD | CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); } else { /* * Phase 4 : tag.
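The Ux500 key swizzle above reduces to two operations: the 32-bit words of the key are written in reverse order, and the bits inside every byte are mirrored (the per-byte half is what bitrev8() from <linux/bitrev.h> computes). A minimal standalone sketch of the same transform, with illustrative helper names that are not part of the driver:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirror bit 0 <-> bit 7, bit 1 <-> bit 6, and so on; same result
	 * as the mask-based ux500_swap_bits_in_byte() above. */
	static uint8_t mirror_byte(uint8_t b)
	{
		uint8_t r = 0;
		int i;

		for (i = 0; i < 8; i++)
			r |= ((b >> i) & 1) << (7 - i);
		return r;
	}

	/* Reverse the word order of the key and mirror the bits of each
	 * byte; byte order inside a word is preserved. */
	static void swizzle_key(const uint8_t *in, uint8_t *out, size_t len)
	{
		size_t i, j;

		for (j = 0; j < len; j += 4)
			for (i = 0; i < 4; i++)
				out[j + i] = mirror_byte(in[len - j - 4 + i]);
	}

	int main(void)
	{
		uint8_t key[16] = { 0x01 };	/* remaining bytes are zero */
		uint8_t out[16];

		swizzle_key(key, out, sizeof(key));
		/* Key byte 0 (0x01) lands bit-mirrored in the last word: */
		printf("out[12] = 0x%02x\n", out[12]);	/* prints 0x80 */
		return 0;
	}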
@@ -458,7 +624,7 @@ static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp) scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0); for (i = 0; i < AES_BLOCK_32; i++) - stm32_cryp_write(cryp, CRYP_DIN, block[i]); + stm32_cryp_write(cryp, cryp->caps->din, block[i]); cryp->header_in -= written; @@ -494,7 +660,7 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg) b0[AES_BLOCK_SIZE - 1] = textlen & 0xFF; /* Enable HW */ - stm32_cryp_write(cryp, CRYP_CR, cfg | CR_PH_INIT | CR_CRYPEN); + stm32_cryp_write(cryp, cryp->caps->cr, cfg | CR_PH_INIT | CR_CRYPEN); /* Write B0 */ d = (u32 *)b0; @@ -505,7 +671,7 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg) if (!cryp->caps->padding_wa) xd = be32_to_cpu(bd[i]); - stm32_cryp_write(cryp, CRYP_DIN, xd); + stm32_cryp_write(cryp, cryp->caps->din, xd); } /* Wait for end of processing */ @@ -518,13 +684,13 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg) /* Prepare next phase */ if (cryp->areq->assoclen) { cfg |= CR_PH_HEADER | CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* Write first (special) block (may move to next phase [payload]) */ stm32_cryp_write_ccm_first_header(cryp); } else if (stm32_cryp_get_input_text_len(cryp)) { cfg |= CR_PH_PAYLOAD; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); } return 0; @@ -538,7 +704,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) pm_runtime_get_sync(cryp->dev); /* Disable interrupt */ - stm32_cryp_write(cryp, CRYP_IMSCR, 0); + stm32_cryp_write(cryp, cryp->caps->imsc, 0); /* Set configuration */ cfg = CR_DATA8 | CR_FFLUSH; @@ -566,7 +732,12 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) if (is_decrypt(cryp) && ((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) { /* Configure in key preparation mode */ - stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP); + if (cryp->caps->kp_mode) + stm32_cryp_write(cryp, cryp->caps->cr, + cfg | CR_AES_KP); + else + stm32_cryp_write(cryp, + cryp->caps->cr, cfg | CR_AES_ECB | CR_KSE); /* Set key only after full configuration done */ stm32_cryp_hw_write_key(cryp); @@ -583,14 +754,14 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) cfg |= hw_mode | CR_DEC_NOT_ENC; /* Apply updated config (Decrypt + algo) and flush */ - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); } else { cfg |= hw_mode; if (is_decrypt(cryp)) cfg |= CR_DEC_NOT_ENC; /* Apply config and flush */ - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* Set key only after configuration done */ stm32_cryp_hw_write_key(cryp); @@ -649,7 +820,7 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err) static int stm32_cryp_cpu_start(struct stm32_cryp *cryp) { /* Enable interrupt and let the IRQ handler do everything */ - stm32_cryp_write(cryp, CRYP_IMSCR, IMSCR_IN | IMSCR_OUT); + stm32_cryp_write(cryp, cryp->caps->imsc, IMSCR_IN | IMSCR_OUT); return 0; } @@ -1137,14 +1308,14 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) int ret = 0; /* Update Config */ - cfg = stm32_cryp_read(cryp, CRYP_CR); + cfg = stm32_cryp_read(cryp, cryp->caps->cr); cfg &= ~CR_PH_MASK; cfg |= CR_PH_FINAL; cfg &= ~CR_DEC_NOT_ENC; cfg |= CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); if (is_gcm(cryp)) { /* GCM: write aad and payload size (in bits) */ @@ -1152,8 +1323,8 @@ static int 
stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) if (cryp->caps->swap_final) size_bit = (__force u32)cpu_to_be32(size_bit); - stm32_cryp_write(cryp, CRYP_DIN, 0); - stm32_cryp_write(cryp, CRYP_DIN, size_bit); + stm32_cryp_write(cryp, cryp->caps->din, 0); + stm32_cryp_write(cryp, cryp->caps->din, size_bit); size_bit = is_encrypt(cryp) ? cryp->areq->cryptlen : cryp->areq->cryptlen - cryp->authsize; @@ -1161,8 +1332,8 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) if (cryp->caps->swap_final) size_bit = (__force u32)cpu_to_be32(size_bit); - stm32_cryp_write(cryp, CRYP_DIN, 0); - stm32_cryp_write(cryp, CRYP_DIN, size_bit); + stm32_cryp_write(cryp, cryp->caps->din, 0); + stm32_cryp_write(cryp, cryp->caps->din, size_bit); } else { /* CCM: write CTR0 */ u32 iv32[AES_BLOCK_32]; @@ -1177,7 +1348,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) if (!cryp->caps->padding_wa) xiv = be32_to_cpu(biv[i]); - stm32_cryp_write(cryp, CRYP_DIN, xiv); + stm32_cryp_write(cryp, cryp->caps->din, xiv); } } @@ -1193,7 +1364,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) /* Get and write tag */ for (i = 0; i < AES_BLOCK_32; i++) - out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT); + out_tag[i] = stm32_cryp_read(cryp, cryp->caps->dout); scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1); } else { @@ -1203,7 +1374,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) scatterwalk_copychunks(in_tag, &cryp->in_walk, cryp->authsize, 0); for (i = 0; i < AES_BLOCK_32; i++) - out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT); + out_tag[i] = stm32_cryp_read(cryp, cryp->caps->dout); if (crypto_memneq(in_tag, out_tag, cryp->authsize)) ret = -EBADMSG; @@ -1211,7 +1382,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) /* Disable cryp */ cfg &= ~CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); return ret; } @@ -1227,19 +1398,19 @@ static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp) */ crypto_inc((u8 *)cryp->last_ctr, sizeof(cryp->last_ctr)); - cr = stm32_cryp_read(cryp, CRYP_CR); - stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN); + cr = stm32_cryp_read(cryp, cryp->caps->cr); + stm32_cryp_write(cryp, cryp->caps->cr, cr & ~CR_CRYPEN); stm32_cryp_hw_write_iv(cryp, cryp->last_ctr); - stm32_cryp_write(cryp, CRYP_CR, cr); + stm32_cryp_write(cryp, cryp->caps->cr, cr); } /* The IV registers are BE */ - cryp->last_ctr[0] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR)); - cryp->last_ctr[1] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR)); - cryp->last_ctr[2] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR)); - cryp->last_ctr[3] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR)); + cryp->last_ctr[0] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0l)); + cryp->last_ctr[1] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0r)); + cryp->last_ctr[2] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1l)); + cryp->last_ctr[3] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1r)); } static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp) @@ -1248,7 +1419,7 @@ static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp) u32 block[AES_BLOCK_32]; for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) - block[i] = stm32_cryp_read(cryp, CRYP_DOUT); + block[i] = stm32_cryp_read(cryp, cryp->caps->dout); scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, cryp->payload_out), 1); @@ -1264,7 +1435,7 @@ static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp) 
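The final GCM phase above feeds the hardware the standard GHASH lengths block: the bit length of the associated data and of the ciphertext, each as a 64-bit big-endian value. That is why the driver writes a zero word before each 32-bit size: for lengths below 2^32 bits the high half of each count stays zero. A portable sketch of the same block construction (gcm_lengths_block() is an illustrative name, not a driver function):

	#include <stdint.h>

	/* Build the 16-byte GCM lengths block per NIST SP 800-38D:
	 * 64-bit big-endian bit counts of the AAD and the ciphertext. */
	static void gcm_lengths_block(uint8_t out[16],
				      uint64_t aad_bytes, uint64_t ct_bytes)
	{
		uint64_t bits[2] = { aad_bytes * 8, ct_bytes * 8 };
		int w, i;

		for (w = 0; w < 2; w++)
			for (i = 0; i < 8; i++)
				out[w * 8 + i] = bits[w] >> (56 - 8 * i);
	}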
scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize, cryp->payload_in), 0); for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) - stm32_cryp_write(cryp, CRYP_DIN, block[i]); + stm32_cryp_write(cryp, cryp->caps->din, block[i]); cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in); } @@ -1278,22 +1449,22 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) /* 'Special workaround' procedure described in the datasheet */ /* a) disable ip */ - stm32_cryp_write(cryp, CRYP_IMSCR, 0); - cfg = stm32_cryp_read(cryp, CRYP_CR); + stm32_cryp_write(cryp, cryp->caps->imsc, 0); + cfg = stm32_cryp_read(cryp, cryp->caps->cr); cfg &= ~CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* b) Update IV1R */ - stm32_cryp_write(cryp, CRYP_IV1RR, cryp->gcm_ctr - 2); + stm32_cryp_write(cryp, cryp->caps->iv1r, cryp->gcm_ctr - 2); /* c) change mode to CTR */ cfg &= ~CR_ALGO_MASK; cfg |= CR_AES_CTR; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* a) enable IP */ cfg |= CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* b) pad and write the last block */ stm32_cryp_irq_write_block(cryp); @@ -1310,7 +1481,7 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) * block value */ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) - block[i] = stm32_cryp_read(cryp, CRYP_DOUT); + block[i] = stm32_cryp_read(cryp, cryp->caps->dout); scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, cryp->payload_out), 1); @@ -1320,16 +1491,16 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) /* d) change mode back to AES GCM */ cfg &= ~CR_ALGO_MASK; cfg |= CR_AES_GCM; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* e) change phase to Final */ cfg &= ~CR_PH_MASK; cfg |= CR_PH_FINAL; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* f) write padded data */ for (i = 0; i < AES_BLOCK_32; i++) - stm32_cryp_write(cryp, CRYP_DIN, block[i]); + stm32_cryp_write(cryp, cryp->caps->din, block[i]); /* g) Empty fifo out */ err = stm32_cryp_wait_output(cryp); @@ -1339,7 +1510,7 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) } for (i = 0; i < AES_BLOCK_32; i++) - stm32_cryp_read(cryp, CRYP_DOUT); + stm32_cryp_read(cryp, cryp->caps->dout); /* h) run the he normal Final phase */ stm32_cryp_finish_req(cryp, 0); @@ -1350,13 +1521,13 @@ static void stm32_cryp_irq_set_npblb(struct stm32_cryp *cryp) u32 cfg; /* disable ip, set NPBLB and reneable ip */ - cfg = stm32_cryp_read(cryp, CRYP_CR); + cfg = stm32_cryp_read(cryp, cryp->caps->cr); cfg &= ~CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); cfg |= (cryp->hw_blocksize - cryp->payload_in) << CR_NBPBL_SHIFT; cfg |= CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); } static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) @@ -1370,11 +1541,11 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) /* 'Special workaround' procedure described in the datasheet */ /* a) disable ip */ - stm32_cryp_write(cryp, CRYP_IMSCR, 0); + stm32_cryp_write(cryp, cryp->caps->imsc, 0); - cfg = stm32_cryp_read(cryp, CRYP_CR); + cfg = stm32_cryp_read(cryp, cryp->caps->cr); cfg &= ~CR_CRYPEN; - 
stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* b) get IV1 from CRYP_CSGCMCCM7 */ iv1tmp = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + 7 * 4); @@ -1384,23 +1555,23 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) cstmp1[i] = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + i * 4); /* d) Write IV1R */ - stm32_cryp_write(cryp, CRYP_IV1RR, iv1tmp); + stm32_cryp_write(cryp, cryp->caps->iv1r, iv1tmp); /* e) change mode to CTR */ cfg &= ~CR_ALGO_MASK; cfg |= CR_AES_CTR; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* a) enable IP */ cfg |= CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* b) pad and write the last block */ stm32_cryp_irq_write_block(cryp); /* wait end of process */ err = stm32_cryp_wait_output(cryp); if (err) { - dev_err(cryp->dev, "Timeout (wite ccm padded data)\n"); + dev_err(cryp->dev, "Timeout (write ccm padded data)\n"); return stm32_cryp_finish_req(cryp, err); } @@ -1410,7 +1581,7 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) * block value */ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) - block[i] = stm32_cryp_read(cryp, CRYP_DOUT); + block[i] = stm32_cryp_read(cryp, cryp->caps->dout); scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, cryp->payload_out), 1); @@ -1423,24 +1594,24 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) /* e) change mode back to AES CCM */ cfg &= ~CR_ALGO_MASK; cfg |= CR_AES_CCM; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* f) change phase to header */ cfg &= ~CR_PH_MASK; cfg |= CR_PH_HEADER; - stm32_cryp_write(cryp, CRYP_CR, cfg); + stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* g) XOR and write padded data */ for (i = 0; i < ARRAY_SIZE(block); i++) { block[i] ^= cstmp1[i]; block[i] ^= cstmp2[i]; - stm32_cryp_write(cryp, CRYP_DIN, block[i]); + stm32_cryp_write(cryp, cryp->caps->din, block[i]); } /* h) wait for completion */ err = stm32_cryp_wait_busy(cryp); if (err) - dev_err(cryp->dev, "Timeout (wite ccm padded data)\n"); + dev_err(cryp->dev, "Timeout (write ccm padded data)\n"); /* i) run the he normal Final phase */ stm32_cryp_finish_req(cryp, err); @@ -1497,7 +1668,7 @@ static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp) scatterwalk_copychunks(block, &cryp->in_walk, written, 0); for (i = 0; i < AES_BLOCK_32; i++) - stm32_cryp_write(cryp, CRYP_DIN, block[i]); + stm32_cryp_write(cryp, cryp->caps->din, block[i]); cryp->header_in -= written; @@ -1508,7 +1679,7 @@ static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg) { struct stm32_cryp *cryp = arg; u32 ph; - u32 it_mask = stm32_cryp_read(cryp, CRYP_IMSCR); + u32 it_mask = stm32_cryp_read(cryp, cryp->caps->imsc); if (cryp->irq_status & MISR_OUT) /* Output FIFO IRQ: read data */ @@ -1516,7 +1687,7 @@ static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg) if (cryp->irq_status & MISR_IN) { if (is_gcm(cryp) || is_ccm(cryp)) { - ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK; + ph = stm32_cryp_read(cryp, cryp->caps->cr) & CR_PH_MASK; if (unlikely(ph == CR_PH_HEADER)) /* Write Header */ stm32_cryp_irq_write_gcmccm_header(cryp); @@ -1536,7 +1707,7 @@ static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg) it_mask &= ~IMSCR_IN; if (!cryp->payload_out) it_mask &= ~IMSCR_OUT; - stm32_cryp_write(cryp, CRYP_IMSCR, it_mask); + stm32_cryp_write(cryp, cryp->caps->imsc, it_mask); if 
(!cryp->payload_in && !cryp->header_in && !cryp->payload_out) stm32_cryp_finish_req(cryp, 0); @@ -1548,7 +1719,7 @@ static irqreturn_t stm32_cryp_irq(int irq, void *arg) { struct stm32_cryp *cryp = arg; - cryp->irq_status = stm32_cryp_read(cryp, CRYP_MISR); + cryp->irq_status = stm32_cryp_read(cryp, cryp->caps->mis); return IRQ_WAKE_THREAD; } @@ -1722,17 +1893,74 @@ static struct aead_alg aead_algs[] = { }, }; +static const struct stm32_cryp_caps ux500_data = { + .aeads_support = false, + .linear_aes_key = true, + .kp_mode = false, + .iv_protection = true, + .swap_final = true, + .padding_wa = true, + .cr = UX500_CRYP_CR, + .sr = UX500_CRYP_SR, + .din = UX500_CRYP_DIN, + .dout = UX500_CRYP_DOUT, + .imsc = UX500_CRYP_IMSC, + .mis = UX500_CRYP_MIS, + .k1l = UX500_CRYP_K1L, + .k1r = UX500_CRYP_K1R, + .k3r = UX500_CRYP_K3R, + .iv0l = UX500_CRYP_IV0L, + .iv0r = UX500_CRYP_IV0R, + .iv1l = UX500_CRYP_IV1L, + .iv1r = UX500_CRYP_IV1R, +}; + static const struct stm32_cryp_caps f7_data = { + .aeads_support = true, + .linear_aes_key = false, + .kp_mode = true, + .iv_protection = false, .swap_final = true, .padding_wa = true, + .cr = CRYP_CR, + .sr = CRYP_SR, + .din = CRYP_DIN, + .dout = CRYP_DOUT, + .imsc = CRYP_IMSCR, + .mis = CRYP_MISR, + .k1l = CRYP_K1LR, + .k1r = CRYP_K1RR, + .k3r = CRYP_K3RR, + .iv0l = CRYP_IV0LR, + .iv0r = CRYP_IV0RR, + .iv1l = CRYP_IV1LR, + .iv1r = CRYP_IV1RR, }; static const struct stm32_cryp_caps mp1_data = { + .aeads_support = true, + .linear_aes_key = false, + .kp_mode = true, + .iv_protection = false, .swap_final = false, .padding_wa = false, + .cr = CRYP_CR, + .sr = CRYP_SR, + .din = CRYP_DIN, + .dout = CRYP_DOUT, + .imsc = CRYP_IMSCR, + .mis = CRYP_MISR, + .k1l = CRYP_K1LR, + .k1r = CRYP_K1RR, + .k3r = CRYP_K3RR, + .iv0l = CRYP_IV0LR, + .iv0r = CRYP_IV0RR, + .iv1l = CRYP_IV1LR, + .iv1r = CRYP_IV1RR, }; static const struct of_device_id stm32_dt_ids[] = { + { .compatible = "stericsson,ux500-cryp", .data = &ux500_data}, { .compatible = "st,stm32f756-cryp", .data = &f7_data}, { .compatible = "st,stm32mp1-cryp", .data = &mp1_data}, {}, @@ -1829,9 +2057,11 @@ static int stm32_cryp_probe(struct platform_device *pdev) goto err_algs; } - ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs)); - if (ret) - goto err_aead_algs; + if (cryp->caps->aeads_support) { + ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs)); + if (ret) + goto err_aead_algs; + } dev_info(dev, "Initialized\n"); @@ -1869,7 +2099,8 @@ static int stm32_cryp_remove(struct platform_device *pdev) if (ret < 0) return ret; - crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs)); + if (cryp->caps->aeads_support) + crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs)); crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs)); crypto_engine_exit(cryp->engine); diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index c9ad6c21309018603cdde5cfd903474f7a37c5d5..71db6450b6aa4038f5d81b21c2aac44550c6c19e 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1999,7 +1999,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) /* Buffer up to one whole block */ nents = sg_nents_for_len(areq->src, nbytes); if (nents < 0) { - dev_err(ctx->dev, "Invalid number of src SG.\n"); + dev_err(dev, "Invalid number of src SG.\n"); return nents; } sg_copy_to_buffer(areq->src, nents, @@ -2040,7 +2040,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) offset = nbytes_to_hash - req_ctx->nbuf; nents = 
sg_nents_for_len(areq->src, offset); if (nents < 0) { - dev_err(ctx->dev, "Invalid number of src SG.\n"); + dev_err(dev, "Invalid number of src SG.\n"); return nents; } sg_copy_to_buffer(areq->src, nents, @@ -2054,7 +2054,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) if (to_hash_later) { nents = sg_nents_for_len(areq->src, nbytes); if (nents < 0) { - dev_err(ctx->dev, "Invalid number of src SG.\n"); + dev_err(dev, "Invalid number of src SG.\n"); return nents; } sg_pcopy_to_buffer(areq->src, nents, diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 32825119e8805f24174e92458074c7c32a84153f..1a93ee355929d705e6e4222b53b1d0345bdf46e2 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -65,8 +65,8 @@ struct talitos_edesc { dma_addr_t dma_link_tbl; struct talitos_desc desc; union { - struct talitos_ptr link_tbl[0]; - u8 buf[0]; + DECLARE_FLEX_ARRAY(struct talitos_ptr, link_tbl); + DECLARE_FLEX_ARRAY(u8, buf); }; }; diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig index f56d65c56ccf78579efb822a175f0ebc0276fd98..dcbd7404768f182bd78e485302664eeef0dd538a 100644 --- a/drivers/crypto/ux500/Kconfig +++ b/drivers/crypto/ux500/Kconfig @@ -4,16 +4,6 @@ # Author: Shujuan Chen (shujuan.chen@stericsson.com) # -config CRYPTO_DEV_UX500_CRYP - tristate "UX500 crypto driver for CRYP block" - depends on CRYPTO_DEV_UX500 - select CRYPTO_ALGAPI - select CRYPTO_SKCIPHER - select CRYPTO_LIB_DES - help - This selects the crypto driver for the UX500_CRYP hardware. It supports - AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes. - config CRYPTO_DEV_UX500_HASH tristate "UX500 crypto driver for HASH block" depends on CRYPTO_DEV_UX500 diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile index f014eb01710a609111c7766bead6e865b5556b45..f1aa4edf66f4f496952560e71341779c01d07d52 100644 --- a/drivers/crypto/ux500/Makefile +++ b/drivers/crypto/ux500/Makefile @@ -5,4 +5,3 @@ # obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/ -obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += cryp/ diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile deleted file mode 100644 index 3e67531f484cd75f5f8771f821918c1dc2240155..0000000000000000000000000000000000000000 --- a/drivers/crypto/ux500/cryp/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -#/* -# * Copyright (C) ST-Ericsson SA 2010 -# * Author: shujuan.chen@stericsson.com for ST-Ericsson. -# */ - -ccflags-$(CONFIG_CRYPTO_DEV_UX500_DEBUG) += -DDEBUG - -obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o -ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c deleted file mode 100644 index 759d0d9786fd19019728a212d76f1d70040f8ae8..0000000000000000000000000000000000000000 --- a/drivers/crypto/ux500/cryp/cryp.c +++ /dev/null @@ -1,394 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) ST-Ericsson SA 2010 - * Author: Shujuan Chen for ST-Ericsson. - * Author: Jonas Linde for ST-Ericsson. - * Author: Niklas Hernaeus for ST-Ericsson. - * Author: Joakim Bech for ST-Ericsson. - * Author: Berne Hebark for ST-Ericsson. 
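The talitos.h hunk above replaces the old zero-length arrays with DECLARE_FLEX_ARRAY() because a true flexible array member is not valid directly inside a union; the kernel macro wraps it in an anonymous struct whose empty leading member occupies no space. A reduced approximation of the pattern (close to, but not a copy of, the definition in <linux/stddef.h>; empty structs are a GNU C extension, which is fine for kernel code):

	#include <stdint.h>

	#define DECLARE_FLEX_ARRAY(TYPE, NAME) \
		struct { struct { } __empty_##NAME; TYPE NAME[]; }

	struct edesc {			/* shaped like talitos_edesc */
		uint32_t hdr;
		union {
			DECLARE_FLEX_ARRAY(uint64_t, link_tbl);
			DECLARE_FLEX_ARRAY(uint8_t, buf);
		};
	};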
- */ - -#include -#include -#include - -#include "cryp_p.h" -#include "cryp.h" - -/* - * cryp_wait_until_done - wait until the device logic is not busy - */ -void cryp_wait_until_done(struct cryp_device_data *device_data) -{ - while (cryp_is_logic_busy(device_data)) - cpu_relax(); -} - -/** - * cryp_check - This routine checks Peripheral and PCell Id - * @device_data: Pointer to the device data struct for base address. - */ -int cryp_check(struct cryp_device_data *device_data) -{ - int peripheralid2 = 0; - - if (NULL == device_data) - return -EINVAL; - - peripheralid2 = readl_relaxed(&device_data->base->periphId2); - - if (peripheralid2 != CRYP_PERIPHERAL_ID2_DB8500) - return -EPERM; - - /* Check Peripheral and Pcell Id Register for CRYP */ - if ((CRYP_PERIPHERAL_ID0 == - readl_relaxed(&device_data->base->periphId0)) - && (CRYP_PERIPHERAL_ID1 == - readl_relaxed(&device_data->base->periphId1)) - && (CRYP_PERIPHERAL_ID3 == - readl_relaxed(&device_data->base->periphId3)) - && (CRYP_PCELL_ID0 == - readl_relaxed(&device_data->base->pcellId0)) - && (CRYP_PCELL_ID1 == - readl_relaxed(&device_data->base->pcellId1)) - && (CRYP_PCELL_ID2 == - readl_relaxed(&device_data->base->pcellId2)) - && (CRYP_PCELL_ID3 == - readl_relaxed(&device_data->base->pcellId3))) { - return 0; - } - - return -EPERM; -} - -/** - * cryp_activity - This routine enables/disable the cryptography function. - * @device_data: Pointer to the device data struct for base address. - * @cryp_crypen: Enable/Disable functionality - */ -void cryp_activity(struct cryp_device_data *device_data, - enum cryp_crypen cryp_crypen) -{ - CRYP_PUT_BITS(&device_data->base->cr, - cryp_crypen, - CRYP_CR_CRYPEN_POS, - CRYP_CR_CRYPEN_MASK); -} - -/** - * cryp_flush_inoutfifo - Resets both the input and the output FIFOs - * @device_data: Pointer to the device data struct for base address. - */ -void cryp_flush_inoutfifo(struct cryp_device_data *device_data) -{ - /* - * We always need to disable the hardware before trying to flush the - * FIFO. This is something that isn't written in the design - * specification, but we have been informed by the hardware designers - * that this must be done. - */ - cryp_activity(device_data, CRYP_CRYPEN_DISABLE); - cryp_wait_until_done(device_data); - - CRYP_SET_BITS(&device_data->base->cr, CRYP_CR_FFLUSH_MASK); - /* - * CRYP_SR_INFIFO_READY_MASK is the expected value on the status - * register when starting a new calculation, which means Input FIFO is - * not full and input FIFO is empty. - */ - while (readl_relaxed(&device_data->base->sr) != - CRYP_SR_INFIFO_READY_MASK) - cpu_relax(); -} - -/** - * cryp_set_configuration - This routine set the cr CRYP IP - * @device_data: Pointer to the device data struct for base address. - * @cryp_config: Pointer to the configuration parameter - * @control_register: The control register to be written later on. - */ -int cryp_set_configuration(struct cryp_device_data *device_data, - struct cryp_config *cryp_config, - u32 *control_register) -{ - u32 cr_for_kse; - - if (NULL == device_data || NULL == cryp_config) - return -EINVAL; - - *control_register |= (cryp_config->keysize << CRYP_CR_KEYSIZE_POS); - - /* Prepare key for decryption in AES_ECB and AES_CBC mode. */ - if ((CRYP_ALGORITHM_DECRYPT == cryp_config->algodir) && - ((CRYP_ALGO_AES_ECB == cryp_config->algomode) || - (CRYP_ALGO_AES_CBC == cryp_config->algomode))) { - cr_for_kse = *control_register; - /* - * This seems a bit odd, but it is indeed needed to set this to - * encrypt even though it is a decryption that we are doing. 
It - * also mentioned in the design spec that you need to do this. - * After the keyprepartion for decrypting is done you should set - * algodir back to decryption, which is done outside this if - * statement. - * - * According to design specification we should set mode ECB - * during key preparation even though we might be running CBC - * when enter this function. - * - * Writing to KSE_ENABLED will drop CRYPEN when key preparation - * is done. Therefore we need to set CRYPEN again outside this - * if statement when running decryption. - */ - cr_for_kse |= ((CRYP_ALGORITHM_ENCRYPT << CRYP_CR_ALGODIR_POS) | - (CRYP_ALGO_AES_ECB << CRYP_CR_ALGOMODE_POS) | - (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS) | - (KSE_ENABLED << CRYP_CR_KSE_POS)); - - writel_relaxed(cr_for_kse, &device_data->base->cr); - cryp_wait_until_done(device_data); - } - - *control_register |= - ((cryp_config->algomode << CRYP_CR_ALGOMODE_POS) | - (cryp_config->algodir << CRYP_CR_ALGODIR_POS)); - - return 0; -} - -/** - * cryp_configure_protection - set the protection bits in the CRYP logic. - * @device_data: Pointer to the device data struct for base address. - * @p_protect_config: Pointer to the protection mode and - * secure mode configuration - */ -int cryp_configure_protection(struct cryp_device_data *device_data, - struct cryp_protection_config *p_protect_config) -{ - if (NULL == p_protect_config) - return -EINVAL; - - CRYP_WRITE_BIT(&device_data->base->cr, - (u32) p_protect_config->secure_access, - CRYP_CR_SECURE_MASK); - CRYP_PUT_BITS(&device_data->base->cr, - p_protect_config->privilege_access, - CRYP_CR_PRLG_POS, - CRYP_CR_PRLG_MASK); - - return 0; -} - -/** - * cryp_is_logic_busy - returns the busy status of the CRYP logic - * @device_data: Pointer to the device data struct for base address. - */ -int cryp_is_logic_busy(struct cryp_device_data *device_data) -{ - return CRYP_TEST_BITS(&device_data->base->sr, - CRYP_SR_BUSY_MASK); -} - -/** - * cryp_configure_for_dma - configures the CRYP IP for DMA operation - * @device_data: Pointer to the device data struct for base address. - * @dma_req: Specifies the DMA request type value. - */ -void cryp_configure_for_dma(struct cryp_device_data *device_data, - enum cryp_dma_req_type dma_req) -{ - CRYP_SET_BITS(&device_data->base->dmacr, - (u32) dma_req); -} - -/** - * cryp_configure_key_values - configures the key values for CRYP operations - * @device_data: Pointer to the device data struct for base address. 
- * @key_reg_index: Key value index register - * @key_value: The key value struct - */ -int cryp_configure_key_values(struct cryp_device_data *device_data, - enum cryp_key_reg_index key_reg_index, - struct cryp_key_value key_value) -{ - while (cryp_is_logic_busy(device_data)) - cpu_relax(); - - switch (key_reg_index) { - case CRYP_KEY_REG_1: - writel_relaxed(key_value.key_value_left, - &device_data->base->key_1_l); - writel_relaxed(key_value.key_value_right, - &device_data->base->key_1_r); - break; - case CRYP_KEY_REG_2: - writel_relaxed(key_value.key_value_left, - &device_data->base->key_2_l); - writel_relaxed(key_value.key_value_right, - &device_data->base->key_2_r); - break; - case CRYP_KEY_REG_3: - writel_relaxed(key_value.key_value_left, - &device_data->base->key_3_l); - writel_relaxed(key_value.key_value_right, - &device_data->base->key_3_r); - break; - case CRYP_KEY_REG_4: - writel_relaxed(key_value.key_value_left, - &device_data->base->key_4_l); - writel_relaxed(key_value.key_value_right, - &device_data->base->key_4_r); - break; - default: - return -EINVAL; - } - - return 0; -} - -/** - * cryp_configure_init_vector - configures the initialization vector register - * @device_data: Pointer to the device data struct for base address. - * @init_vector_index: Specifies the index of the init vector. - * @init_vector_value: Specifies the value for the init vector. - */ -int cryp_configure_init_vector(struct cryp_device_data *device_data, - enum cryp_init_vector_index - init_vector_index, - struct cryp_init_vector_value - init_vector_value) -{ - while (cryp_is_logic_busy(device_data)) - cpu_relax(); - - switch (init_vector_index) { - case CRYP_INIT_VECTOR_INDEX_0: - writel_relaxed(init_vector_value.init_value_left, - &device_data->base->init_vect_0_l); - writel_relaxed(init_vector_value.init_value_right, - &device_data->base->init_vect_0_r); - break; - case CRYP_INIT_VECTOR_INDEX_1: - writel_relaxed(init_vector_value.init_value_left, - &device_data->base->init_vect_1_l); - writel_relaxed(init_vector_value.init_value_right, - &device_data->base->init_vect_1_r); - break; - default: - return -EINVAL; - } - - return 0; -} - -/** - * cryp_save_device_context - Store hardware registers and - * other device context parameter - * @device_data: Pointer to the device data struct for base address. - * @ctx: Crypto device context - * @cryp_mode: Mode: Polling, Interrupt or DMA - */ -void cryp_save_device_context(struct cryp_device_data *device_data, - struct cryp_device_context *ctx, - int cryp_mode) -{ - enum cryp_algo_mode algomode; - struct cryp_register __iomem *src_reg = device_data->base; - struct cryp_config *config = - (struct cryp_config *)device_data->current_ctx; - - /* - * Always start by disable the hardware and wait for it to finish the - * ongoing calculations before trying to reprogram it. 
- */ - cryp_activity(device_data, CRYP_CRYPEN_DISABLE); - cryp_wait_until_done(device_data); - - if (cryp_mode == CRYP_MODE_DMA) - cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH); - - if (CRYP_TEST_BITS(&src_reg->sr, CRYP_SR_IFEM_MASK) == 0) - ctx->din = readl_relaxed(&src_reg->din); - - ctx->cr = readl_relaxed(&src_reg->cr) & CRYP_CR_CONTEXT_SAVE_MASK; - - switch (config->keysize) { - case CRYP_KEY_SIZE_256: - ctx->key_4_l = readl_relaxed(&src_reg->key_4_l); - ctx->key_4_r = readl_relaxed(&src_reg->key_4_r); - fallthrough; - - case CRYP_KEY_SIZE_192: - ctx->key_3_l = readl_relaxed(&src_reg->key_3_l); - ctx->key_3_r = readl_relaxed(&src_reg->key_3_r); - fallthrough; - - case CRYP_KEY_SIZE_128: - ctx->key_2_l = readl_relaxed(&src_reg->key_2_l); - ctx->key_2_r = readl_relaxed(&src_reg->key_2_r); - fallthrough; - - default: - ctx->key_1_l = readl_relaxed(&src_reg->key_1_l); - ctx->key_1_r = readl_relaxed(&src_reg->key_1_r); - } - - /* Save IV for CBC mode for both AES and DES. */ - algomode = ((ctx->cr & CRYP_CR_ALGOMODE_MASK) >> CRYP_CR_ALGOMODE_POS); - if (algomode == CRYP_ALGO_TDES_CBC || - algomode == CRYP_ALGO_DES_CBC || - algomode == CRYP_ALGO_AES_CBC) { - ctx->init_vect_0_l = readl_relaxed(&src_reg->init_vect_0_l); - ctx->init_vect_0_r = readl_relaxed(&src_reg->init_vect_0_r); - ctx->init_vect_1_l = readl_relaxed(&src_reg->init_vect_1_l); - ctx->init_vect_1_r = readl_relaxed(&src_reg->init_vect_1_r); - } -} - -/** - * cryp_restore_device_context - Restore hardware registers and - * other device context parameter - * @device_data: Pointer to the device data struct for base address. - * @ctx: Crypto device context - */ -void cryp_restore_device_context(struct cryp_device_data *device_data, - struct cryp_device_context *ctx) -{ - struct cryp_register __iomem *reg = device_data->base; - struct cryp_config *config = - (struct cryp_config *)device_data->current_ctx; - - /* - * Fall through for all items in switch statement. DES is captured in - * the default. - */ - switch (config->keysize) { - case CRYP_KEY_SIZE_256: - writel_relaxed(ctx->key_4_l, ®->key_4_l); - writel_relaxed(ctx->key_4_r, ®->key_4_r); - fallthrough; - - case CRYP_KEY_SIZE_192: - writel_relaxed(ctx->key_3_l, ®->key_3_l); - writel_relaxed(ctx->key_3_r, ®->key_3_r); - fallthrough; - - case CRYP_KEY_SIZE_128: - writel_relaxed(ctx->key_2_l, ®->key_2_l); - writel_relaxed(ctx->key_2_r, ®->key_2_r); - fallthrough; - - default: - writel_relaxed(ctx->key_1_l, ®->key_1_l); - writel_relaxed(ctx->key_1_r, ®->key_1_r); - } - - /* Restore IV for CBC mode for AES and DES. */ - if (config->algomode == CRYP_ALGO_TDES_CBC || - config->algomode == CRYP_ALGO_DES_CBC || - config->algomode == CRYP_ALGO_AES_CBC) { - writel_relaxed(ctx->init_vect_0_l, ®->init_vect_0_l); - writel_relaxed(ctx->init_vect_0_r, ®->init_vect_0_r); - writel_relaxed(ctx->init_vect_1_l, ®->init_vect_1_l); - writel_relaxed(ctx->init_vect_1_r, ®->init_vect_1_r); - } -} diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h deleted file mode 100644 index 59e1557a620a99864c2ab09f880a98ded4cc5751..0000000000000000000000000000000000000000 --- a/drivers/crypto/ux500/cryp/cryp.h +++ /dev/null @@ -1,315 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson SA 2010 - * Author: Shujuan Chen for ST-Ericsson. - * Author: Jonas Linde for ST-Ericsson. - * Author: Joakim Bech for ST-Ericsson. - * Author: Berne Hebark for ST-Ericsson. - * Author: Niklas Hernaeus for ST-Ericsson. 
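The context save above runs a descending switch with fallthrough so only the key registers the current key size occupies are touched: a 256-bit key saves K4 down to K1, a 192-bit key K3 down to K1, a 128-bit key K2 and K1, and DES lands in the default saving K1 alone. The shape of the pattern, reduced to a sketch with hypothetical names:

	#include <stdint.h>

	enum key_size { KEY_128, KEY_192, KEY_256 };

	/* Save only the key words in use, widest case first. */
	static void save_keys(enum key_size ks, uint32_t ctx[4],
			      const uint32_t regs[4])
	{
		switch (ks) {
		case KEY_256:
			ctx[3] = regs[3];	/* K4 */
			/* fallthrough */
		case KEY_192:
			ctx[2] = regs[2];	/* K3 */
			/* fallthrough */
		case KEY_128:
			ctx[1] = regs[1];	/* K2 */
			/* fallthrough */
		default:
			ctx[0] = regs[0];	/* K1 */
		}
	}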
- */ - -#ifndef _CRYP_H_ -#define _CRYP_H_ - -#include -#include -#include -#include - -#define DEV_DBG_NAME "crypX crypX:" - -/* CRYP enable/disable */ -enum cryp_crypen { - CRYP_CRYPEN_DISABLE = 0, - CRYP_CRYPEN_ENABLE = 1 -}; - -/* CRYP Start Computation enable/disable */ -enum cryp_start { - CRYP_START_DISABLE = 0, - CRYP_START_ENABLE = 1 -}; - -/* CRYP Init Signal enable/disable */ -enum cryp_init { - CRYP_INIT_DISABLE = 0, - CRYP_INIT_ENABLE = 1 -}; - -/* Cryp State enable/disable */ -enum cryp_state { - CRYP_STATE_DISABLE = 0, - CRYP_STATE_ENABLE = 1 -}; - -/* Key preparation bit enable */ -enum cryp_key_prep { - KSE_DISABLED = 0, - KSE_ENABLED = 1 -}; - -/* Key size for AES */ -#define CRYP_KEY_SIZE_128 (0) -#define CRYP_KEY_SIZE_192 (1) -#define CRYP_KEY_SIZE_256 (2) - -/* AES modes */ -enum cryp_algo_mode { - CRYP_ALGO_TDES_ECB, - CRYP_ALGO_TDES_CBC, - CRYP_ALGO_DES_ECB, - CRYP_ALGO_DES_CBC, - CRYP_ALGO_AES_ECB, - CRYP_ALGO_AES_CBC, - CRYP_ALGO_AES_CTR, - CRYP_ALGO_AES_XTS -}; - -/* Cryp Encryption or Decryption */ -enum cryp_algorithm_dir { - CRYP_ALGORITHM_ENCRYPT, - CRYP_ALGORITHM_DECRYPT -}; - -/* Hardware access method */ -enum cryp_mode { - CRYP_MODE_POLLING, - CRYP_MODE_INTERRUPT, - CRYP_MODE_DMA -}; - -/** - * struct cryp_config - - * @keysize: Key size for AES - * @algomode: AES modes - * @algodir: Cryp Encryption or Decryption - * - * CRYP configuration structure to be passed to set configuration - */ -struct cryp_config { - int keysize; - enum cryp_algo_mode algomode; - enum cryp_algorithm_dir algodir; -}; - -/** - * struct cryp_protection_config - - * @privilege_access: Privileged cryp state enable/disable - * @secure_access: Secure cryp state enable/disable - * - * Protection configuration structure for setting privilage access - */ -struct cryp_protection_config { - enum cryp_state privilege_access; - enum cryp_state secure_access; -}; - -/* Cryp status */ -enum cryp_status_id { - CRYP_STATUS_BUSY = 0x10, - CRYP_STATUS_OUTPUT_FIFO_FULL = 0x08, - CRYP_STATUS_OUTPUT_FIFO_NOT_EMPTY = 0x04, - CRYP_STATUS_INPUT_FIFO_NOT_FULL = 0x02, - CRYP_STATUS_INPUT_FIFO_EMPTY = 0x01 -}; - -/* Cryp DMA interface */ -#define CRYP_DMA_TX_FIFO 0x08 -#define CRYP_DMA_RX_FIFO 0x10 - -enum cryp_dma_req_type { - CRYP_DMA_DISABLE_BOTH, - CRYP_DMA_ENABLE_IN_DATA, - CRYP_DMA_ENABLE_OUT_DATA, - CRYP_DMA_ENABLE_BOTH_DIRECTIONS -}; - -enum cryp_dma_channel { - CRYP_DMA_RX = 0, - CRYP_DMA_TX -}; - -/* Key registers */ -enum cryp_key_reg_index { - CRYP_KEY_REG_1, - CRYP_KEY_REG_2, - CRYP_KEY_REG_3, - CRYP_KEY_REG_4 -}; - -/* Key register left and right */ -struct cryp_key_value { - u32 key_value_left; - u32 key_value_right; -}; - -/* Cryp Initialization structure */ -enum cryp_init_vector_index { - CRYP_INIT_VECTOR_INDEX_0, - CRYP_INIT_VECTOR_INDEX_1 -}; - -/* struct cryp_init_vector_value - - * @init_value_left - * @init_value_right - * */ -struct cryp_init_vector_value { - u32 init_value_left; - u32 init_value_right; -}; - -/** - * struct cryp_device_context - structure for a cryp context. 
- * @cr: control register - * @dmacr: DMA control register - * @imsc: Interrupt mask set/clear register - * @key_1_l: Key 1l register - * @key_1_r: Key 1r register - * @key_2_l: Key 2l register - * @key_2_r: Key 2r register - * @key_3_l: Key 3l register - * @key_3_r: Key 3r register - * @key_4_l: Key 4l register - * @key_4_r: Key 4r register - * @init_vect_0_l: Initialization vector 0l register - * @init_vect_0_r: Initialization vector 0r register - * @init_vect_1_l: Initialization vector 1l register - * @init_vect_1_r: Initialization vector 0r register - * @din: Data in register - * @dout: Data out register - * - * CRYP power management specifc structure. - */ -struct cryp_device_context { - u32 cr; - u32 dmacr; - u32 imsc; - - u32 key_1_l; - u32 key_1_r; - u32 key_2_l; - u32 key_2_r; - u32 key_3_l; - u32 key_3_r; - u32 key_4_l; - u32 key_4_r; - - u32 init_vect_0_l; - u32 init_vect_0_r; - u32 init_vect_1_l; - u32 init_vect_1_r; - - u32 din; - u32 dout; -}; - -struct cryp_dma { - dma_cap_mask_t mask; - struct completion cryp_dma_complete; - struct dma_chan *chan_cryp2mem; - struct dma_chan *chan_mem2cryp; - struct stedma40_chan_cfg *cfg_cryp2mem; - struct stedma40_chan_cfg *cfg_mem2cryp; - int sg_src_len; - int sg_dst_len; - struct scatterlist *sg_src; - struct scatterlist *sg_dst; - int nents_src; - int nents_dst; -}; - -/** - * struct cryp_device_data - structure for a cryp device. - * @base: Pointer to virtual base address of the cryp device. - * @phybase: Pointer to physical memory location of the cryp device. - * @dev: Pointer to the devices dev structure. - * @clk: Pointer to the device's clock control. - * @irq: IRQ number - * @pwr_regulator: Pointer to the device's power control. - * @power_status: Current status of the power. - * @ctx_lock: Lock for current_ctx. - * @current_ctx: Pointer to the currently allocated context. - * @list_node: For inclusion into a klist. - * @dma: The dma structure holding channel configuration. - * @power_state: TRUE = power state on, FALSE = power state off. - * @power_state_spinlock: Spinlock for power_state. - * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx. 
- */ -struct cryp_device_data { - struct cryp_register __iomem *base; - phys_addr_t phybase; - struct device *dev; - struct clk *clk; - int irq; - struct regulator *pwr_regulator; - int power_status; - spinlock_t ctx_lock; - struct cryp_ctx *current_ctx; - struct klist_node list_node; - struct cryp_dma dma; - bool power_state; - spinlock_t power_state_spinlock; - bool restore_dev_ctx; -}; - -void cryp_wait_until_done(struct cryp_device_data *device_data); - -/* Initialization functions */ - -int cryp_check(struct cryp_device_data *device_data); - -void cryp_activity(struct cryp_device_data *device_data, - enum cryp_crypen cryp_crypen); - -void cryp_flush_inoutfifo(struct cryp_device_data *device_data); - -int cryp_set_configuration(struct cryp_device_data *device_data, - struct cryp_config *cryp_config, - u32 *control_register); - -void cryp_configure_for_dma(struct cryp_device_data *device_data, - enum cryp_dma_req_type dma_req); - -int cryp_configure_key_values(struct cryp_device_data *device_data, - enum cryp_key_reg_index key_reg_index, - struct cryp_key_value key_value); - -int cryp_configure_init_vector(struct cryp_device_data *device_data, - enum cryp_init_vector_index - init_vector_index, - struct cryp_init_vector_value - init_vector_value); - -int cryp_configure_protection(struct cryp_device_data *device_data, - struct cryp_protection_config *p_protect_config); - -/* Power management funtions */ -void cryp_save_device_context(struct cryp_device_data *device_data, - struct cryp_device_context *ctx, - int cryp_mode); - -void cryp_restore_device_context(struct cryp_device_data *device_data, - struct cryp_device_context *ctx); - -/* Data transfer and status bits. */ -int cryp_is_logic_busy(struct cryp_device_data *device_data); - -int cryp_get_status(struct cryp_device_data *device_data); - -/** - * cryp_write_indata - This routine writes 32 bit data into the data input - * register of the cryptography IP. - * @device_data: Pointer to the device data struct for base address. - * @write_data: Data to write. - */ -int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data); - -/** - * cryp_read_outdata - This routine reads the data from the data output - * register of the CRYP logic - * @device_data: Pointer to the device data struct for base address. - * @read_data: Read the data from the output FIFO. - */ -int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data); - -#endif /* _CRYP_H_ */ diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c deleted file mode 100644 index 5a57c9afd8c886931d12ff454d4f700a0a320ab1..0000000000000000000000000000000000000000 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ /dev/null @@ -1,1600 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) ST-Ericsson SA 2010 - * Author: Shujuan Chen for ST-Ericsson. - * Author: Joakim Bech for ST-Ericsson. - * Author: Berne Hebark for ST-Ericsson. - * Author: Niklas Hernaeus for ST-Ericsson. - * Author: Jonas Linde for ST-Ericsson. - * Author: Andreas Westin for ST-Ericsson. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include - -#include "cryp_p.h" -#include "cryp.h" - -#define CRYP_MAX_KEY_SIZE 32 -#define BYTES_PER_WORD 4 - -static int cryp_mode; -static atomic_t session_id; - -static struct stedma40_chan_cfg *mem_to_engine; -static struct stedma40_chan_cfg *engine_to_mem; - -/** - * struct cryp_driver_data - data specific to the driver. - * - * @device_list: A list of registered devices to choose from. - * @device_allocation: A semaphore initialized with number of devices. - */ -struct cryp_driver_data { - struct klist device_list; - struct semaphore device_allocation; -}; - -/** - * struct cryp_ctx - Crypto context - * @config: Crypto mode. - * @key: Key array. - * @keylen: Length of key. - * @iv: Pointer to initialization vector. - * @indata: Pointer to indata. - * @outdata: Pointer to outdata. - * @datalen: Length of indata. - * @outlen: Length of outdata. - * @blocksize: Size of blocks. - * @updated: Updated flag. - * @dev_ctx: Device dependent context. - * @device: Pointer to the device. - * @session_id: Atomic session ID. - */ -struct cryp_ctx { - struct cryp_config config; - u8 key[CRYP_MAX_KEY_SIZE]; - u32 keylen; - u8 *iv; - const u8 *indata; - u8 *outdata; - u32 datalen; - u32 outlen; - u32 blocksize; - u8 updated; - struct cryp_device_context dev_ctx; - struct cryp_device_data *device; - u32 session_id; -}; - -static struct cryp_driver_data driver_data; - -/** - * swap_bits_in_byte - mirror the bits in a byte - * @b: the byte to be mirrored - * - * The bits are swapped the following way: - * Byte b include bits 0-7, nibble 1 (n1) include bits 0-3 and - * nibble 2 (n2) bits 4-7. - * - * Nibble 1 (n1): - * (The "old" (moved) bit is replaced with a zero) - * 1. Move bit 6 and 7, 4 positions to the left. - * 2. Move bit 3 and 5, 2 positions to the left. - * 3. Move bit 1-4, 1 position to the left. - * - * Nibble 2 (n2): - * 1. Move bit 0 and 1, 4 positions to the right. - * 2. Move bit 2 and 4, 2 positions to the right. - * 3. Move bit 3-6, 1 position to the right. - * - * Combine the two nibbles to a complete and swapped byte. 
- */ - -static inline u8 swap_bits_in_byte(u8 b) -{ -#define R_SHIFT_4_MASK 0xc0 /* Bits 6 and 7, right shift 4 */ -#define R_SHIFT_2_MASK 0x28 /* (After right shift 4) Bits 3 and 5, - right shift 2 */ -#define R_SHIFT_1_MASK 0x1e /* (After right shift 2) Bits 1-4, - right shift 1 */ -#define L_SHIFT_4_MASK 0x03 /* Bits 0 and 1, left shift 4 */ -#define L_SHIFT_2_MASK 0x14 /* (After left shift 4) Bits 2 and 4, - left shift 2 */ -#define L_SHIFT_1_MASK 0x78 /* (After left shift 1) Bits 3-6, - left shift 1 */ - - u8 n1; - u8 n2; - - /* Swap most significant nibble */ - /* Right shift 4, bits 6 and 7 */ - n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4)); - /* Right shift 2, bits 3 and 5 */ - n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2)); - /* Right shift 1, bits 1-4 */ - n1 = (n1 & R_SHIFT_1_MASK) >> 1; - - /* Swap least significant nibble */ - /* Left shift 4, bits 0 and 1 */ - n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4)); - /* Left shift 2, bits 2 and 4 */ - n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2)); - /* Left shift 1, bits 3-6 */ - n2 = (n2 & L_SHIFT_1_MASK) << 1; - - return n1 | n2; -} - -static inline void swap_words_in_key_and_bits_in_byte(const u8 *in, - u8 *out, u32 len) -{ - unsigned int i = 0; - int j; - int index = 0; - - j = len - BYTES_PER_WORD; - while (j >= 0) { - for (i = 0; i < BYTES_PER_WORD; i++) { - index = len - j - BYTES_PER_WORD + i; - out[j + i] = - swap_bits_in_byte(in[index]); - } - j -= BYTES_PER_WORD; - } -} - -static void add_session_id(struct cryp_ctx *ctx) -{ - /* - * We never want 0 to be a valid value, since this is the default value - * for the software context. - */ - if (unlikely(atomic_inc_and_test(&session_id))) - atomic_inc(&session_id); - - ctx->session_id = atomic_read(&session_id); -} - -static irqreturn_t cryp_interrupt_handler(int irq, void *param) -{ - struct cryp_ctx *ctx; - int count; - struct cryp_device_data *device_data; - - if (param == NULL) { - BUG_ON(!param); - return IRQ_HANDLED; - } - - /* The device is coming from the one found in hw_crypt_noxts. */ - device_data = (struct cryp_device_data *)param; - - ctx = device_data->current_ctx; - - if (ctx == NULL) { - BUG_ON(!ctx); - return IRQ_HANDLED; - } - - dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen, - cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ? 
- "out" : "in"); - - if (cryp_pending_irq_src(device_data, - CRYP_IRQ_SRC_OUTPUT_FIFO)) { - if (ctx->outlen / ctx->blocksize > 0) { - count = ctx->blocksize / 4; - - readsl(&device_data->base->dout, ctx->outdata, count); - ctx->outdata += count; - ctx->outlen -= count; - - if (ctx->outlen == 0) { - cryp_disable_irq_src(device_data, - CRYP_IRQ_SRC_OUTPUT_FIFO); - } - } - } else if (cryp_pending_irq_src(device_data, - CRYP_IRQ_SRC_INPUT_FIFO)) { - if (ctx->datalen / ctx->blocksize > 0) { - count = ctx->blocksize / 4; - - writesl(&device_data->base->din, ctx->indata, count); - - ctx->indata += count; - ctx->datalen -= count; - - if (ctx->datalen == 0) - cryp_disable_irq_src(device_data, - CRYP_IRQ_SRC_INPUT_FIFO); - - if (ctx->config.algomode == CRYP_ALGO_AES_XTS) { - CRYP_PUT_BITS(&device_data->base->cr, - CRYP_START_ENABLE, - CRYP_CR_START_POS, - CRYP_CR_START_MASK); - - cryp_wait_until_done(device_data); - } - } - } - - return IRQ_HANDLED; -} - -static int mode_is_aes(enum cryp_algo_mode mode) -{ - return CRYP_ALGO_AES_ECB == mode || - CRYP_ALGO_AES_CBC == mode || - CRYP_ALGO_AES_CTR == mode || - CRYP_ALGO_AES_XTS == mode; -} - -static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right, - enum cryp_init_vector_index index) -{ - struct cryp_init_vector_value vector_value; - - dev_dbg(device_data->dev, "[%s]", __func__); - - vector_value.init_value_left = left; - vector_value.init_value_right = right; - - return cryp_configure_init_vector(device_data, - index, - vector_value); -} - -static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx) -{ - int i; - int status = 0; - int num_of_regs = ctx->blocksize / 8; - __be32 *civ = (__be32 *)ctx->iv; - u32 iv[AES_BLOCK_SIZE / 4]; - - dev_dbg(device_data->dev, "[%s]", __func__); - - /* - * Since we loop on num_of_regs we need to have a check in case - * someone provides an incorrect blocksize which would force calling - * cfg_iv with i greater than 2 which is an error. 
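cfg_ivs() above derives num_of_regs = blocksize / 8 because each IV register pair (left/right) holds 64 bits: an AES block (16 bytes) fills IV0 and IV1, a DES block (8 bytes) only IV0, and anything needing more than two pairs is rejected. A small portable sketch of the same split (iv_to_pairs() is an illustrative name; ntohl() stands in for be32_to_cpup()):

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	/* Split a big-endian IV into 64-bit (left, right) register pairs. */
	static int iv_to_pairs(const uint8_t *iv, unsigned int blocksize,
			       uint32_t left[2], uint32_t right[2])
	{
		unsigned int num_of_regs = blocksize / 8, i;
		uint32_t w[4], be;

		if (num_of_regs > 2)
			return -1;	/* same guard as the driver's -EINVAL */

		for (i = 0; i < blocksize / 4; i++) {
			memcpy(&be, iv + 4 * i, 4);
			w[i] = ntohl(be);
		}
		for (i = 0; i < num_of_regs; i++) {
			left[i] = w[2 * i];
			right[i] = w[2 * i + 1];
		}
		return 0;
	}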
- */ - if (num_of_regs > 2) { - dev_err(device_data->dev, "[%s] Incorrect blocksize %d", - __func__, ctx->blocksize); - return -EINVAL; - } - - for (i = 0; i < ctx->blocksize / 4; i++) - iv[i] = be32_to_cpup(civ + i); - - for (i = 0; i < num_of_regs; i++) { - status = cfg_iv(device_data, iv[i*2], iv[i*2+1], - (enum cryp_init_vector_index) i); - if (status != 0) - return status; - } - return status; -} - -static int set_key(struct cryp_device_data *device_data, - u32 left_key, - u32 right_key, - enum cryp_key_reg_index index) -{ - struct cryp_key_value key_value; - int cryp_error; - - dev_dbg(device_data->dev, "[%s]", __func__); - - key_value.key_value_left = left_key; - key_value.key_value_right = right_key; - - cryp_error = cryp_configure_key_values(device_data, - index, - key_value); - if (cryp_error != 0) - dev_err(device_data->dev, "[%s]: " - "cryp_configure_key_values() failed!", __func__); - - return cryp_error; -} - -static int cfg_keys(struct cryp_ctx *ctx) -{ - int i; - int num_of_regs = ctx->keylen / 8; - u32 swapped_key[CRYP_MAX_KEY_SIZE / 4]; - __be32 *ckey = (__be32 *)ctx->key; - int cryp_error = 0; - - dev_dbg(ctx->device->dev, "[%s]", __func__); - - if (mode_is_aes(ctx->config.algomode)) { - swap_words_in_key_and_bits_in_byte((u8 *)ckey, - (u8 *)swapped_key, - ctx->keylen); - } else { - for (i = 0; i < ctx->keylen / 4; i++) - swapped_key[i] = be32_to_cpup(ckey + i); - } - - for (i = 0; i < num_of_regs; i++) { - cryp_error = set_key(ctx->device, - swapped_key[i * 2], - swapped_key[i * 2 + 1], - (enum cryp_key_reg_index) i); - - if (cryp_error != 0) { - dev_err(ctx->device->dev, "[%s]: set_key() failed!", - __func__); - return cryp_error; - } - } - return cryp_error; -} - -static int cryp_setup_context(struct cryp_ctx *ctx, - struct cryp_device_data *device_data) -{ - u32 control_register = CRYP_CR_DEFAULT; - - switch (cryp_mode) { - case CRYP_MODE_INTERRUPT: - writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc); - break; - - case CRYP_MODE_DMA: - writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr); - break; - - default: - break; - } - - if (ctx->updated == 0) { - cryp_flush_inoutfifo(device_data); - if (cfg_keys(ctx) != 0) { - dev_err(ctx->device->dev, "[%s]: cfg_keys failed!", - __func__); - return -EINVAL; - } - - if (ctx->iv && - CRYP_ALGO_AES_ECB != ctx->config.algomode && - CRYP_ALGO_DES_ECB != ctx->config.algomode && - CRYP_ALGO_TDES_ECB != ctx->config.algomode) { - if (cfg_ivs(device_data, ctx) != 0) - return -EPERM; - } - - cryp_set_configuration(device_data, &ctx->config, - &control_register); - add_session_id(ctx); - } else if (ctx->updated == 1 && - ctx->session_id != atomic_read(&session_id)) { - cryp_flush_inoutfifo(device_data); - cryp_restore_device_context(device_data, &ctx->dev_ctx); - - add_session_id(ctx); - control_register = ctx->dev_ctx.cr; - } else - control_register = ctx->dev_ctx.cr; - - writel(control_register | - (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS), - &device_data->base->cr); - - return 0; -} - -static int cryp_get_device_data(struct cryp_ctx *ctx, - struct cryp_device_data **device_data) -{ - int ret; - struct klist_iter device_iterator; - struct klist_node *device_node; - struct cryp_device_data *local_device_data = NULL; - pr_debug(DEV_DBG_NAME " [%s]", __func__); - - /* Wait until a device is available */ - ret = down_interruptible(&driver_data.device_allocation); - if (ret) - return ret; /* Interrupted */ - - /* Select a device */ - klist_iter_init(&driver_data.device_list, &device_iterator); - - device_node = 
klist_next(&device_iterator); - while (device_node) { - local_device_data = container_of(device_node, - struct cryp_device_data, list_node); - spin_lock(&local_device_data->ctx_lock); - /* current_ctx allocates a device, NULL = unallocated */ - if (local_device_data->current_ctx) { - device_node = klist_next(&device_iterator); - } else { - local_device_data->current_ctx = ctx; - ctx->device = local_device_data; - spin_unlock(&local_device_data->ctx_lock); - break; - } - spin_unlock(&local_device_data->ctx_lock); - } - klist_iter_exit(&device_iterator); - - if (!device_node) { - /** - * No free device found. - * Since we allocated a device with down_interruptible, this - * should not be able to happen. - * Number of available devices, which are contained in - * device_allocation, is therefore decremented by not doing - * an up(device_allocation). - */ - return -EBUSY; - } - - *device_data = local_device_data; - - return 0; -} - -static void cryp_dma_setup_channel(struct cryp_device_data *device_data, - struct device *dev) -{ - struct dma_slave_config mem2cryp = { - .direction = DMA_MEM_TO_DEV, - .dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO, - .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, - .dst_maxburst = 4, - }; - struct dma_slave_config cryp2mem = { - .direction = DMA_DEV_TO_MEM, - .src_addr = device_data->phybase + CRYP_DMA_RX_FIFO, - .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, - .src_maxburst = 4, - }; - - dma_cap_zero(device_data->dma.mask); - dma_cap_set(DMA_SLAVE, device_data->dma.mask); - - device_data->dma.cfg_mem2cryp = mem_to_engine; - device_data->dma.chan_mem2cryp = - dma_request_channel(device_data->dma.mask, - stedma40_filter, - device_data->dma.cfg_mem2cryp); - - device_data->dma.cfg_cryp2mem = engine_to_mem; - device_data->dma.chan_cryp2mem = - dma_request_channel(device_data->dma.mask, - stedma40_filter, - device_data->dma.cfg_cryp2mem); - - dmaengine_slave_config(device_data->dma.chan_mem2cryp, &mem2cryp); - dmaengine_slave_config(device_data->dma.chan_cryp2mem, &cryp2mem); - - init_completion(&device_data->dma.cryp_dma_complete); -} - -static void cryp_dma_out_callback(void *data) -{ - struct cryp_ctx *ctx = (struct cryp_ctx *) data; - dev_dbg(ctx->device->dev, "[%s]: ", __func__); - - complete(&ctx->device->dma.cryp_dma_complete); -} - -static int cryp_set_dma_transfer(struct cryp_ctx *ctx, - struct scatterlist *sg, - int len, - enum dma_data_direction direction) -{ - struct dma_async_tx_descriptor *desc; - struct dma_chan *channel = NULL; - dma_cookie_t cookie; - - dev_dbg(ctx->device->dev, "[%s]: ", __func__); - - if (unlikely(!IS_ALIGNED((unsigned long)sg, 4))) { - dev_err(ctx->device->dev, "[%s]: Data in sg list isn't " - "aligned! 
Addr: 0x%08lx", __func__, (unsigned long)sg); - return -EFAULT; - } - - switch (direction) { - case DMA_TO_DEVICE: - channel = ctx->device->dma.chan_mem2cryp; - ctx->device->dma.sg_src = sg; - ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev, - ctx->device->dma.sg_src, - ctx->device->dma.nents_src, - direction); - - if (!ctx->device->dma.sg_src_len) { - dev_dbg(ctx->device->dev, - "[%s]: Could not map the sg list (TO_DEVICE)", - __func__); - return -EFAULT; - } - - dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " - "(TO_DEVICE)", __func__); - - desc = dmaengine_prep_slave_sg(channel, - ctx->device->dma.sg_src, - ctx->device->dma.sg_src_len, - DMA_MEM_TO_DEV, DMA_CTRL_ACK); - break; - - case DMA_FROM_DEVICE: - channel = ctx->device->dma.chan_cryp2mem; - ctx->device->dma.sg_dst = sg; - ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev, - ctx->device->dma.sg_dst, - ctx->device->dma.nents_dst, - direction); - - if (!ctx->device->dma.sg_dst_len) { - dev_dbg(ctx->device->dev, - "[%s]: Could not map the sg list (FROM_DEVICE)", - __func__); - return -EFAULT; - } - - dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " - "(FROM_DEVICE)", __func__); - - desc = dmaengine_prep_slave_sg(channel, - ctx->device->dma.sg_dst, - ctx->device->dma.sg_dst_len, - DMA_DEV_TO_MEM, - DMA_CTRL_ACK | - DMA_PREP_INTERRUPT); - - desc->callback = cryp_dma_out_callback; - desc->callback_param = ctx; - break; - - default: - dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction", - __func__); - return -EFAULT; - } - - cookie = dmaengine_submit(desc); - if (dma_submit_error(cookie)) { - dev_dbg(ctx->device->dev, "[%s]: DMA submission failed\n", - __func__); - return cookie; - } - - dma_async_issue_pending(channel); - - return 0; -} - -static void cryp_dma_done(struct cryp_ctx *ctx) -{ - struct dma_chan *chan; - - dev_dbg(ctx->device->dev, "[%s]: ", __func__); - - chan = ctx->device->dma.chan_mem2cryp; - dmaengine_terminate_all(chan); - dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src, - ctx->device->dma.nents_src, DMA_TO_DEVICE); - - chan = ctx->device->dma.chan_cryp2mem; - dmaengine_terminate_all(chan); - dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst, - ctx->device->dma.nents_dst, DMA_FROM_DEVICE); -} - -static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg, - int len) -{ - int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); - dev_dbg(ctx->device->dev, "[%s]: ", __func__); - - if (error) { - dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() " - "failed", __func__); - return error; - } - - return len; -} - -static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len) -{ - int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE); - if (error) { - dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() " - "failed", __func__); - return error; - } - - return len; -} - -static void cryp_polling_mode(struct cryp_ctx *ctx, - struct cryp_device_data *device_data) -{ - int len = ctx->blocksize / BYTES_PER_WORD; - int remaining_length = ctx->datalen; - u32 *indata = (u32 *)ctx->indata; - u32 *outdata = (u32 *)ctx->outdata; - - while (remaining_length > 0) { - writesl(&device_data->base->din, indata, len); - indata += len; - remaining_length -= (len * BYTES_PER_WORD); - cryp_wait_until_done(device_data); - - readsl(&device_data->base->dout, outdata, len); - outdata += len; - cryp_wait_until_done(device_data); - } -} - -static int cryp_disable_power(struct device *dev, - struct cryp_device_data *device_data, 
- bool save_device_context) -{ - int ret = 0; - - dev_dbg(dev, "[%s]", __func__); - - spin_lock(&device_data->power_state_spinlock); - if (!device_data->power_state) - goto out; - - spin_lock(&device_data->ctx_lock); - if (save_device_context && device_data->current_ctx) { - cryp_save_device_context(device_data, - &device_data->current_ctx->dev_ctx, - cryp_mode); - device_data->restore_dev_ctx = true; - } - spin_unlock(&device_data->ctx_lock); - - clk_disable(device_data->clk); - ret = regulator_disable(device_data->pwr_regulator); - if (ret) - dev_err(dev, "[%s]: " - "regulator_disable() failed!", - __func__); - - device_data->power_state = false; - -out: - spin_unlock(&device_data->power_state_spinlock); - - return ret; -} - -static int cryp_enable_power( - struct device *dev, - struct cryp_device_data *device_data, - bool restore_device_context) -{ - int ret = 0; - - dev_dbg(dev, "[%s]", __func__); - - spin_lock(&device_data->power_state_spinlock); - if (!device_data->power_state) { - ret = regulator_enable(device_data->pwr_regulator); - if (ret) { - dev_err(dev, "[%s]: regulator_enable() failed!", - __func__); - goto out; - } - - ret = clk_enable(device_data->clk); - if (ret) { - dev_err(dev, "[%s]: clk_enable() failed!", - __func__); - regulator_disable(device_data->pwr_regulator); - goto out; - } - device_data->power_state = true; - } - - if (device_data->restore_dev_ctx) { - spin_lock(&device_data->ctx_lock); - if (restore_device_context && device_data->current_ctx) { - device_data->restore_dev_ctx = false; - cryp_restore_device_context(device_data, - &device_data->current_ctx->dev_ctx); - } - spin_unlock(&device_data->ctx_lock); - } -out: - spin_unlock(&device_data->power_state_spinlock); - - return ret; -} - -static int hw_crypt_noxts(struct cryp_ctx *ctx, - struct cryp_device_data *device_data) -{ - int ret = 0; - - const u8 *indata = ctx->indata; - u8 *outdata = ctx->outdata; - u32 datalen = ctx->datalen; - u32 outlen = datalen; - - pr_debug(DEV_DBG_NAME " [%s]", __func__); - - ctx->outlen = ctx->datalen; - - if (unlikely(!IS_ALIGNED((unsigned long)indata, 4))) { - pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: " - "0x%08lx", __func__, (unsigned long)indata); - return -EINVAL; - } - - ret = cryp_setup_context(ctx, device_data); - - if (ret) - goto out; - - if (cryp_mode == CRYP_MODE_INTERRUPT) { - cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO | - CRYP_IRQ_SRC_OUTPUT_FIFO); - - /* - * ctx->outlen is decremented in the cryp_interrupt_handler - * function. We had to add cpu_relax() (barrier) to make sure - * that gcc didn't optimze away this variable. - */ - while (ctx->outlen > 0) - cpu_relax(); - } else if (cryp_mode == CRYP_MODE_POLLING || - cryp_mode == CRYP_MODE_DMA) { - /* - * The reason for having DMA in this if case is that if we are - * running cryp_mode = 2, then we separate DMA routines for - * handling cipher/plaintext > blocksize, except when - * running the normal CRYPTO_ALG_TYPE_CIPHER, then we still use - * the polling mode. Overhead of doing DMA setup eats up the - * benefits using it. 
- */ - cryp_polling_mode(ctx, device_data); - } else { - dev_err(ctx->device->dev, "[%s]: Invalid operation mode!", - __func__); - ret = -EPERM; - goto out; - } - - cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode); - ctx->updated = 1; - -out: - ctx->indata = indata; - ctx->outdata = outdata; - ctx->datalen = datalen; - ctx->outlen = outlen; - - return ret; -} - -static int get_nents(struct scatterlist *sg, int nbytes) -{ - int nents = 0; - - while (nbytes > 0) { - nbytes -= sg->length; - sg = sg_next(sg); - nents++; - } - - return nents; -} - -static int ablk_dma_crypt(struct skcipher_request *areq) -{ - struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq); - struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher); - struct cryp_device_data *device_data; - - int bytes_written = 0; - int bytes_read = 0; - int ret; - - pr_debug(DEV_DBG_NAME " [%s]", __func__); - - ctx->datalen = areq->cryptlen; - ctx->outlen = areq->cryptlen; - - ret = cryp_get_device_data(ctx, &device_data); - if (ret) - return ret; - - ret = cryp_setup_context(ctx, device_data); - if (ret) - goto out; - - /* We have the device now, so store the nents in the dma struct. */ - ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen); - ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen); - - /* Enable DMA in- and output. */ - cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS); - - bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen); - bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written); - - wait_for_completion(&ctx->device->dma.cryp_dma_complete); - cryp_dma_done(ctx); - - cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode); - ctx->updated = 1; - -out: - spin_lock(&device_data->ctx_lock); - device_data->current_ctx = NULL; - ctx->device = NULL; - spin_unlock(&device_data->ctx_lock); - - /* - * The down_interruptible part for this semaphore is called in - * cryp_get_device_data. - */ - up(&driver_data.device_allocation); - - if (unlikely(bytes_written != bytes_read)) - return -EPERM; - - return 0; -} - -static int ablk_crypt(struct skcipher_request *areq) -{ - struct skcipher_walk walk; - struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq); - struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher); - struct cryp_device_data *device_data; - unsigned long src_paddr; - unsigned long dst_paddr; - int ret; - int nbytes; - - pr_debug(DEV_DBG_NAME " [%s]", __func__); - - ret = cryp_get_device_data(ctx, &device_data); - if (ret) - goto out; - - ret = skcipher_walk_async(&walk, areq); - - if (ret) { - pr_err(DEV_DBG_NAME "[%s]: skcipher_walk_async() failed!", - __func__); - goto out; - } - - while ((nbytes = walk.nbytes) > 0) { - ctx->iv = walk.iv; - src_paddr = (page_to_phys(walk.src.phys.page) + walk.src.phys.offset); - ctx->indata = phys_to_virt(src_paddr); - - dst_paddr = (page_to_phys(walk.dst.phys.page) + walk.dst.phys.offset); - ctx->outdata = phys_to_virt(dst_paddr); - - ctx->datalen = nbytes - (nbytes % ctx->blocksize); - - ret = hw_crypt_noxts(ctx, device_data); - if (ret) - goto out; - - nbytes -= ctx->datalen; - ret = skcipher_walk_done(&walk, nbytes); - if (ret) - goto out; - } - -out: - /* Release the device */ - spin_lock(&device_data->ctx_lock); - device_data->current_ctx = NULL; - ctx->device = NULL; - spin_unlock(&device_data->ctx_lock); - - /* - * The down_interruptible part for this semaphore is called in - * cryp_get_device_data. 
- */ - up(&driver_data.device_allocation); - - return ret; -} - -static int aes_skcipher_setkey(struct crypto_skcipher *cipher, - const u8 *key, unsigned int keylen) -{ - struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher); - - pr_debug(DEV_DBG_NAME " [%s]", __func__); - - switch (keylen) { - case AES_KEYSIZE_128: - ctx->config.keysize = CRYP_KEY_SIZE_128; - break; - - case AES_KEYSIZE_192: - ctx->config.keysize = CRYP_KEY_SIZE_192; - break; - - case AES_KEYSIZE_256: - ctx->config.keysize = CRYP_KEY_SIZE_256; - break; - - default: - pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__); - return -EINVAL; - } - - memcpy(ctx->key, key, keylen); - ctx->keylen = keylen; - - ctx->updated = 0; - - return 0; -} - -static int des_skcipher_setkey(struct crypto_skcipher *cipher, - const u8 *key, unsigned int keylen) -{ - struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher); - int err; - - pr_debug(DEV_DBG_NAME " [%s]", __func__); - - err = verify_skcipher_des_key(cipher, key); - if (err) - return err; - - memcpy(ctx->key, key, keylen); - ctx->keylen = keylen; - - ctx->updated = 0; - return 0; -} - -static int des3_skcipher_setkey(struct crypto_skcipher *cipher, - const u8 *key, unsigned int keylen) -{ - struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher); - int err; - - pr_debug(DEV_DBG_NAME " [%s]", __func__); - - err = verify_skcipher_des3_key(cipher, key); - if (err) - return err; - - memcpy(ctx->key, key, keylen); - ctx->keylen = keylen; - - ctx->updated = 0; - return 0; -} - -static int cryp_blk_encrypt(struct skcipher_request *areq) -{ - struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq); - struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher); - - pr_debug(DEV_DBG_NAME " [%s]", __func__); - - ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT; - - /* - * DMA does not work for DES due to a hw bug */ - if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode)) - return ablk_dma_crypt(areq); - - /* For everything except DMA, we run the non DMA version. */ - return ablk_crypt(areq); -} - -static int cryp_blk_decrypt(struct skcipher_request *areq) -{ - struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq); - struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher); - - pr_debug(DEV_DBG_NAME " [%s]", __func__); - - ctx->config.algodir = CRYP_ALGORITHM_DECRYPT; - - /* DMA does not work for DES due to a hw bug */ - if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode)) - return ablk_dma_crypt(areq); - - /* For everything except DMA, we run the non DMA version. 
*/ - return ablk_crypt(areq); -} - -struct cryp_algo_template { - enum cryp_algo_mode algomode; - struct skcipher_alg skcipher; -}; - -static int cryp_init_tfm(struct crypto_skcipher *tfm) -{ - struct cryp_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - struct cryp_algo_template *cryp_alg = container_of(alg, - struct cryp_algo_template, - skcipher); - - ctx->config.algomode = cryp_alg->algomode; - ctx->blocksize = crypto_skcipher_blocksize(tfm); - - return 0; -} - -static struct cryp_algo_template cryp_algs[] = { - { - .algomode = CRYP_ALGO_AES_ECB, - .skcipher = { - .base.cra_name = "ecb(aes)", - .base.cra_driver_name = "ecb-aes-ux500", - .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct cryp_ctx), - .base.cra_alignmask = 3, - .base.cra_module = THIS_MODULE, - - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = aes_skcipher_setkey, - .encrypt = cryp_blk_encrypt, - .decrypt = cryp_blk_decrypt, - .init = cryp_init_tfm, - } - }, - { - .algomode = CRYP_ALGO_AES_CBC, - .skcipher = { - .base.cra_name = "cbc(aes)", - .base.cra_driver_name = "cbc-aes-ux500", - .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct cryp_ctx), - .base.cra_alignmask = 3, - .base.cra_module = THIS_MODULE, - - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = aes_skcipher_setkey, - .encrypt = cryp_blk_encrypt, - .decrypt = cryp_blk_decrypt, - .init = cryp_init_tfm, - .ivsize = AES_BLOCK_SIZE, - } - }, - { - .algomode = CRYP_ALGO_AES_CTR, - .skcipher = { - .base.cra_name = "ctr(aes)", - .base.cra_driver_name = "ctr-aes-ux500", - .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct cryp_ctx), - .base.cra_alignmask = 3, - .base.cra_module = THIS_MODULE, - - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = aes_skcipher_setkey, - .encrypt = cryp_blk_encrypt, - .decrypt = cryp_blk_decrypt, - .init = cryp_init_tfm, - .ivsize = AES_BLOCK_SIZE, - .chunksize = AES_BLOCK_SIZE, - } - }, - { - .algomode = CRYP_ALGO_DES_ECB, - .skcipher = { - .base.cra_name = "ecb(des)", - .base.cra_driver_name = "ecb-des-ux500", - .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct cryp_ctx), - .base.cra_alignmask = 3, - .base.cra_module = THIS_MODULE, - - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .setkey = des_skcipher_setkey, - .encrypt = cryp_blk_encrypt, - .decrypt = cryp_blk_decrypt, - .init = cryp_init_tfm, - } - }, - { - .algomode = CRYP_ALGO_TDES_ECB, - .skcipher = { - .base.cra_name = "ecb(des3_ede)", - .base.cra_driver_name = "ecb-des3_ede-ux500", - .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct cryp_ctx), - .base.cra_alignmask = 3, - .base.cra_module = THIS_MODULE, - - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .setkey = des3_skcipher_setkey, - .encrypt = cryp_blk_encrypt, - .decrypt = cryp_blk_decrypt, - .init = cryp_init_tfm, - } - }, - { - .algomode = CRYP_ALGO_DES_CBC, - .skcipher = { - .base.cra_name = "cbc(des)", - .base.cra_driver_name = "cbc-des-ux500", - .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, - 
.base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct cryp_ctx), - .base.cra_alignmask = 3, - .base.cra_module = THIS_MODULE, - - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .setkey = des_skcipher_setkey, - .encrypt = cryp_blk_encrypt, - .decrypt = cryp_blk_decrypt, - .ivsize = DES_BLOCK_SIZE, - .init = cryp_init_tfm, - } - }, - { - .algomode = CRYP_ALGO_TDES_CBC, - .skcipher = { - .base.cra_name = "cbc(des3_ede)", - .base.cra_driver_name = "cbc-des3_ede-ux500", - .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct cryp_ctx), - .base.cra_alignmask = 3, - .base.cra_module = THIS_MODULE, - - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .setkey = des3_skcipher_setkey, - .encrypt = cryp_blk_encrypt, - .decrypt = cryp_blk_decrypt, - .ivsize = DES3_EDE_BLOCK_SIZE, - .init = cryp_init_tfm, - } - } -}; - -/** - * cryp_algs_register_all - - */ -static int cryp_algs_register_all(void) -{ - int ret; - int i; - int count; - - pr_debug("[%s]", __func__); - - for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) { - ret = crypto_register_skcipher(&cryp_algs[i].skcipher); - if (ret) { - count = i; - pr_err("[%s] alg registration failed", - cryp_algs[i].skcipher.base.cra_driver_name); - goto unreg; - } - } - return 0; -unreg: - for (i = 0; i < count; i++) - crypto_unregister_skcipher(&cryp_algs[i].skcipher); - return ret; -} - -/** - * cryp_algs_unregister_all - - */ -static void cryp_algs_unregister_all(void) -{ - int i; - - pr_debug(DEV_DBG_NAME " [%s]", __func__); - - for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) - crypto_unregister_skcipher(&cryp_algs[i].skcipher); -} - -static int ux500_cryp_probe(struct platform_device *pdev) -{ - int ret; - struct resource *res; - struct cryp_device_data *device_data; - struct cryp_protection_config prot = { - .privilege_access = CRYP_STATE_ENABLE - }; - struct device *dev = &pdev->dev; - - dev_dbg(dev, "[%s]", __func__); - device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL); - if (!device_data) { - ret = -ENOMEM; - goto out; - } - - device_data->dev = dev; - device_data->current_ctx = NULL; - - /* Grab the DMA configuration from platform data. 
*/ - mem_to_engine = &((struct cryp_platform_data *) - dev->platform_data)->mem_to_engine; - engine_to_mem = &((struct cryp_platform_data *) - dev->platform_data)->engine_to_mem; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "[%s]: platform_get_resource() failed", - __func__); - ret = -ENODEV; - goto out; - } - - device_data->phybase = res->start; - device_data->base = devm_ioremap_resource(dev, res); - if (IS_ERR(device_data->base)) { - ret = PTR_ERR(device_data->base); - goto out; - } - - spin_lock_init(&device_data->ctx_lock); - spin_lock_init(&device_data->power_state_spinlock); - - /* Enable power for CRYP hardware block */ - device_data->pwr_regulator = regulator_get(&pdev->dev, "v-ape"); - if (IS_ERR(device_data->pwr_regulator)) { - dev_err(dev, "[%s]: could not get cryp regulator", __func__); - ret = PTR_ERR(device_data->pwr_regulator); - device_data->pwr_regulator = NULL; - goto out; - } - - /* Enable the clk for CRYP hardware block */ - device_data->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(device_data->clk)) { - dev_err(dev, "[%s]: clk_get() failed!", __func__); - ret = PTR_ERR(device_data->clk); - goto out_regulator; - } - - ret = clk_prepare(device_data->clk); - if (ret) { - dev_err(dev, "[%s]: clk_prepare() failed!", __func__); - goto out_regulator; - } - - /* Enable device power (and clock) */ - ret = cryp_enable_power(device_data->dev, device_data, false); - if (ret) { - dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__); - goto out_clk_unprepare; - } - - if (cryp_check(device_data)) { - dev_err(dev, "[%s]: cryp_check() failed!", __func__); - ret = -EINVAL; - goto out_power; - } - - if (cryp_configure_protection(device_data, &prot)) { - dev_err(dev, "[%s]: cryp_configure_protection() failed!", - __func__); - ret = -EINVAL; - goto out_power; - } - - device_data->irq = platform_get_irq(pdev, 0); - if (device_data->irq <= 0) { - ret = device_data->irq ? device_data->irq : -ENXIO; - goto out_power; - } - - ret = devm_request_irq(&pdev->dev, device_data->irq, - cryp_interrupt_handler, 0, "cryp1", device_data); - if (ret) { - dev_err(dev, "[%s]: Unable to request IRQ", __func__); - goto out_power; - } - - if (cryp_mode == CRYP_MODE_DMA) - cryp_dma_setup_channel(device_data, dev); - - platform_set_drvdata(pdev, device_data); - - /* Put the new device into the device list... */ - klist_add_tail(&device_data->list_node, &driver_data.device_list); - - /* ... and signal that a new device is available. */ - up(&driver_data.device_allocation); - - atomic_set(&session_id, 1); - - ret = cryp_algs_register_all(); - if (ret) { - dev_err(dev, "[%s]: cryp_algs_register_all() failed!", - __func__); - goto out_power; - } - - dev_info(dev, "successfully registered\n"); - - return 0; - -out_power: - cryp_disable_power(device_data->dev, device_data, false); - -out_clk_unprepare: - clk_unprepare(device_data->clk); - -out_regulator: - regulator_put(device_data->pwr_regulator); - -out: - return ret; -} - -static int ux500_cryp_remove(struct platform_device *pdev) -{ - struct cryp_device_data *device_data; - - dev_dbg(&pdev->dev, "[%s]", __func__); - device_data = platform_get_drvdata(pdev); - if (!device_data) { - dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", - __func__); - return -ENOMEM; - } - - /* Try to decrease the number of available devices. 
*/ - if (down_trylock(&driver_data.device_allocation)) - return -EBUSY; - - /* Check that the device is free */ - spin_lock(&device_data->ctx_lock); - /* current_ctx allocates a device, NULL = unallocated */ - if (device_data->current_ctx) { - /* The device is busy */ - spin_unlock(&device_data->ctx_lock); - /* Return the device to the pool. */ - up(&driver_data.device_allocation); - return -EBUSY; - } - - spin_unlock(&device_data->ctx_lock); - - /* Remove the device from the list */ - if (klist_node_attached(&device_data->list_node)) - klist_remove(&device_data->list_node); - - /* If this was the last device, remove the services */ - if (list_empty(&driver_data.device_list.k_list)) - cryp_algs_unregister_all(); - - if (cryp_disable_power(&pdev->dev, device_data, false)) - dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed", - __func__); - - clk_unprepare(device_data->clk); - regulator_put(device_data->pwr_regulator); - - return 0; -} - -static void ux500_cryp_shutdown(struct platform_device *pdev) -{ - struct cryp_device_data *device_data; - - dev_dbg(&pdev->dev, "[%s]", __func__); - - device_data = platform_get_drvdata(pdev); - if (!device_data) { - dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", - __func__); - return; - } - - /* Check that the device is free */ - spin_lock(&device_data->ctx_lock); - /* current_ctx allocates a device, NULL = unallocated */ - if (!device_data->current_ctx) { - if (down_trylock(&driver_data.device_allocation)) - dev_dbg(&pdev->dev, "[%s]: Cryp still in use!" - "Shutting down anyway...", __func__); - /** - * (Allocate the device) - * Need to set this to non-null (dummy) value, - * to avoid usage if context switching. - */ - device_data->current_ctx++; - } - spin_unlock(&device_data->ctx_lock); - - /* Remove the device from the list */ - if (klist_node_attached(&device_data->list_node)) - klist_remove(&device_data->list_node); - - /* If this was the last device, remove the services */ - if (list_empty(&driver_data.device_list.k_list)) - cryp_algs_unregister_all(); - - if (cryp_disable_power(&pdev->dev, device_data, false)) - dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed", - __func__); - -} - -#ifdef CONFIG_PM_SLEEP -static int ux500_cryp_suspend(struct device *dev) -{ - int ret; - struct platform_device *pdev = to_platform_device(dev); - struct cryp_device_data *device_data; - struct cryp_ctx *temp_ctx = NULL; - - dev_dbg(dev, "[%s]", __func__); - - /* Handle state? 
*/ - device_data = platform_get_drvdata(pdev); - if (!device_data) { - dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__); - return -ENOMEM; - } - - disable_irq(device_data->irq); - - spin_lock(&device_data->ctx_lock); - if (!device_data->current_ctx) - device_data->current_ctx++; - spin_unlock(&device_data->ctx_lock); - - if (device_data->current_ctx == ++temp_ctx) { - if (down_interruptible(&driver_data.device_allocation)) - dev_dbg(dev, "[%s]: down_interruptible() failed", - __func__); - ret = cryp_disable_power(dev, device_data, false); - - } else - ret = cryp_disable_power(dev, device_data, true); - - if (ret) - dev_err(dev, "[%s]: cryp_disable_power()", __func__); - - return ret; -} - -static int ux500_cryp_resume(struct device *dev) -{ - int ret = 0; - struct platform_device *pdev = to_platform_device(dev); - struct cryp_device_data *device_data; - struct cryp_ctx *temp_ctx = NULL; - - dev_dbg(dev, "[%s]", __func__); - - device_data = platform_get_drvdata(pdev); - if (!device_data) { - dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__); - return -ENOMEM; - } - - spin_lock(&device_data->ctx_lock); - if (device_data->current_ctx == ++temp_ctx) - device_data->current_ctx = NULL; - spin_unlock(&device_data->ctx_lock); - - - if (!device_data->current_ctx) - up(&driver_data.device_allocation); - else - ret = cryp_enable_power(dev, device_data, true); - - if (ret) - dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__); - else - enable_irq(device_data->irq); - - return ret; -} -#endif - -static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume); - -static const struct of_device_id ux500_cryp_match[] = { - { .compatible = "stericsson,ux500-cryp" }, - { }, -}; -MODULE_DEVICE_TABLE(of, ux500_cryp_match); - -static struct platform_driver cryp_driver = { - .probe = ux500_cryp_probe, - .remove = ux500_cryp_remove, - .shutdown = ux500_cryp_shutdown, - .driver = { - .name = "cryp1", - .of_match_table = ux500_cryp_match, - .pm = &ux500_cryp_pm, - } -}; - -static int __init ux500_cryp_mod_init(void) -{ - pr_debug("[%s] is called!", __func__); - klist_init(&driver_data.device_list, NULL, NULL); - /* Initialize the semaphore to 0 devices (locked state) */ - sema_init(&driver_data.device_allocation, 0); - return platform_driver_register(&cryp_driver); -} - -static void __exit ux500_cryp_mod_fini(void) -{ - pr_debug("[%s] is called!", __func__); - platform_driver_unregister(&cryp_driver); -} - -module_init(ux500_cryp_mod_init); -module_exit(ux500_cryp_mod_fini); - -module_param(cryp_mode, int, 0); - -MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine."); -MODULE_ALIAS_CRYPTO("aes-all"); -MODULE_ALIAS_CRYPTO("des-all"); - -MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c deleted file mode 100644 index 6d2f07bec98a7e74beba3c0297de40fa11538094..0000000000000000000000000000000000000000 --- a/drivers/crypto/ux500/cryp/cryp_irq.c +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) ST-Ericsson SA 2010 - * Author: Shujuan Chen for ST-Ericsson. - * Author: Jonas Linde for ST-Ericsson. - * Author: Joakim Bech for ST-Ericsson. - * Author: Berne Hebark for ST-Ericsson. - * Author: Niklas Hernaeus for ST-Ericsson. 
- */ - -#include -#include -#include - -#include "cryp.h" -#include "cryp_p.h" -#include "cryp_irq.h" -#include "cryp_irqp.h" - -void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src) -{ - u32 i; - - dev_dbg(device_data->dev, "[%s]", __func__); - - i = readl_relaxed(&device_data->base->imsc); - i = i | irq_src; - writel_relaxed(i, &device_data->base->imsc); -} - -void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src) -{ - u32 i; - - dev_dbg(device_data->dev, "[%s]", __func__); - - i = readl_relaxed(&device_data->base->imsc); - i = i & ~irq_src; - writel_relaxed(i, &device_data->base->imsc); -} - -bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src) -{ - return (readl_relaxed(&device_data->base->mis) & irq_src) > 0; -} diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h deleted file mode 100644 index da90029ea141240bb5c57d98eb31c28043036c13..0000000000000000000000000000000000000000 --- a/drivers/crypto/ux500/cryp/cryp_irq.h +++ /dev/null @@ -1,31 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson SA 2010 - * Author: Shujuan Chen for ST-Ericsson. - * Author: Jonas Linde for ST-Ericsson. - * Author: Joakim Bech for ST-Ericsson. - * Author: Berne Hebark for ST-Ericsson. - * Author: Niklas Hernaeus for ST-Ericsson. - */ - -#ifndef _CRYP_IRQ_H_ -#define _CRYP_IRQ_H_ - -#include "cryp.h" - -enum cryp_irq_src_id { - CRYP_IRQ_SRC_INPUT_FIFO = 0x1, - CRYP_IRQ_SRC_OUTPUT_FIFO = 0x2, - CRYP_IRQ_SRC_ALL = 0x3 -}; - -/* - * M0 Funtions - */ -void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src); - -void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src); - -bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src); - -#endif /* _CRYP_IRQ_H_ */ diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h deleted file mode 100644 index 4981a3f461e5ed6f8bc0e1c378749ce5a857a956..0000000000000000000000000000000000000000 --- a/drivers/crypto/ux500/cryp/cryp_irqp.h +++ /dev/null @@ -1,125 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson SA 2010 - * Author: Shujuan Chen for ST-Ericsson. - * Author: Jonas Linde for ST-Ericsson. - * Author: Joakim Bech for ST-Ericsson. - * Author: Berne Hebark for ST-Ericsson. - * Author: Niklas Hernaeus for ST-Ericsson. - */ - -#ifndef __CRYP_IRQP_H_ -#define __CRYP_IRQP_H_ - -#include "cryp_irq.h" - -/* - * - * CRYP Registers - Offset mapping - * +-----------------+ - * 00h | CRYP_CR | Configuration register - * +-----------------+ - * 04h | CRYP_SR | Status register - * +-----------------+ - * 08h | CRYP_DIN | Data In register - * +-----------------+ - * 0ch | CRYP_DOUT | Data out register - * +-----------------+ - * 10h | CRYP_DMACR | DMA control register - * +-----------------+ - * 14h | CRYP_IMSC | IMSC - * +-----------------+ - * 18h | CRYP_RIS | Raw interrupt status - * +-----------------+ - * 1ch | CRYP_MIS | Masked interrupt status. 
- * +-----------------+ - * Key registers - * IVR registers - * Peripheral - * Cell IDs - * - * Refer data structure for other register map - */ - -/** - * struct cryp_register - * @cr - Configuration register - * @status - Status register - * @din - Data input register - * @din_size - Data input size register - * @dout - Data output register - * @dout_size - Data output size register - * @dmacr - Dma control register - * @imsc - Interrupt mask set/clear register - * @ris - Raw interrupt status - * @mis - Masked interrupt statu register - * @key_1_l - Key register 1 L - * @key_1_r - Key register 1 R - * @key_2_l - Key register 2 L - * @key_2_r - Key register 2 R - * @key_3_l - Key register 3 L - * @key_3_r - Key register 3 R - * @key_4_l - Key register 4 L - * @key_4_r - Key register 4 R - * @init_vect_0_l - init vector 0 L - * @init_vect_0_r - init vector 0 R - * @init_vect_1_l - init vector 1 L - * @init_vect_1_r - init vector 1 R - * @cryp_unused1 - unused registers - * @itcr - Integration test control register - * @itip - Integration test input register - * @itop - Integration test output register - * @cryp_unused2 - unused registers - * @periphId0 - FE0 CRYP Peripheral Identication Register - * @periphId1 - FE4 - * @periphId2 - FE8 - * @periphId3 - FEC - * @pcellId0 - FF0 CRYP PCell Identication Register - * @pcellId1 - FF4 - * @pcellId2 - FF8 - * @pcellId3 - FFC - */ -struct cryp_register { - u32 cr; /* Configuration register */ - u32 sr; /* Status register */ - u32 din; /* Data input register */ - u32 din_size; /* Data input size register */ - u32 dout; /* Data output register */ - u32 dout_size; /* Data output size register */ - u32 dmacr; /* Dma control register */ - u32 imsc; /* Interrupt mask set/clear register */ - u32 ris; /* Raw interrupt status */ - u32 mis; /* Masked interrupt statu register */ - - u32 key_1_l; /*Key register 1 L */ - u32 key_1_r; /*Key register 1 R */ - u32 key_2_l; /*Key register 2 L */ - u32 key_2_r; /*Key register 2 R */ - u32 key_3_l; /*Key register 3 L */ - u32 key_3_r; /*Key register 3 R */ - u32 key_4_l; /*Key register 4 L */ - u32 key_4_r; /*Key register 4 R */ - - u32 init_vect_0_l; /*init vector 0 L */ - u32 init_vect_0_r; /*init vector 0 R */ - u32 init_vect_1_l; /*init vector 1 L */ - u32 init_vect_1_r; /*init vector 1 R */ - - u32 cryp_unused1[(0x80 - 0x58) / sizeof(u32)]; /* unused registers */ - u32 itcr; /*Integration test control register */ - u32 itip; /*Integration test input register */ - u32 itop; /*Integration test output register */ - u32 cryp_unused2[(0xFE0 - 0x8C) / sizeof(u32)]; /* unused registers */ - - u32 periphId0; /* FE0 CRYP Peripheral Identication Register */ - u32 periphId1; /* FE4 */ - u32 periphId2; /* FE8 */ - u32 periphId3; /* FEC */ - - u32 pcellId0; /* FF0 CRYP PCell Identication Register */ - u32 pcellId1; /* FF4 */ - u32 pcellId2; /* FF8 */ - u32 pcellId3; /* FFC */ -}; - -#endif diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h deleted file mode 100644 index 60b47fe4de35d9b29c3ed2a0e902254f6de1bd38..0000000000000000000000000000000000000000 --- a/drivers/crypto/ux500/cryp/cryp_p.h +++ /dev/null @@ -1,122 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson SA 2010 - * Author: Shujuan Chen for ST-Ericsson. - * Author: Jonas Linde for ST-Ericsson. - * Author: Joakim Bech for ST-Ericsson. - * Author: Berne Hebark for ST-Ericsson. - * Author: Niklas Hernaeus for ST-Ericsson. 
- */ - -#ifndef _CRYP_P_H_ -#define _CRYP_P_H_ - -#include -#include - -#include "cryp.h" -#include "cryp_irqp.h" - -/* - * Generic Macros - */ -#define CRYP_SET_BITS(reg_name, mask) \ - writel_relaxed((readl_relaxed(reg_name) | mask), reg_name) - -#define CRYP_WRITE_BIT(reg_name, val, mask) \ - writel_relaxed(((readl_relaxed(reg_name) & ~(mask)) |\ - ((val) & (mask))), reg_name) - -#define CRYP_TEST_BITS(reg_name, val) \ - (readl_relaxed(reg_name) & (val)) - -#define CRYP_PUT_BITS(reg, val, shift, mask) \ - writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \ - (((u32)val << shift) & (mask))), reg) - -/* - * CRYP specific Macros - */ -#define CRYP_PERIPHERAL_ID0 0xE3 -#define CRYP_PERIPHERAL_ID1 0x05 - -#define CRYP_PERIPHERAL_ID2_DB8500 0x28 -#define CRYP_PERIPHERAL_ID3 0x00 - -#define CRYP_PCELL_ID0 0x0D -#define CRYP_PCELL_ID1 0xF0 -#define CRYP_PCELL_ID2 0x05 -#define CRYP_PCELL_ID3 0xB1 - -/* - * CRYP register default values - */ -#define MAX_DEVICE_SUPPORT 2 - -/* Priv set, keyrden set and datatype 8bits swapped set as default. */ -#define CRYP_CR_DEFAULT 0x0482 -#define CRYP_DMACR_DEFAULT 0x0 -#define CRYP_IMSC_DEFAULT 0x0 -#define CRYP_DIN_DEFAULT 0x0 -#define CRYP_DOUT_DEFAULT 0x0 -#define CRYP_KEY_DEFAULT 0x0 -#define CRYP_INIT_VECT_DEFAULT 0x0 - -/* - * CRYP Control register specific mask - */ -#define CRYP_CR_SECURE_MASK BIT(0) -#define CRYP_CR_PRLG_MASK BIT(1) -#define CRYP_CR_ALGODIR_MASK BIT(2) -#define CRYP_CR_ALGOMODE_MASK (BIT(5) | BIT(4) | BIT(3)) -#define CRYP_CR_DATATYPE_MASK (BIT(7) | BIT(6)) -#define CRYP_CR_KEYSIZE_MASK (BIT(9) | BIT(8)) -#define CRYP_CR_KEYRDEN_MASK BIT(10) -#define CRYP_CR_KSE_MASK BIT(11) -#define CRYP_CR_START_MASK BIT(12) -#define CRYP_CR_INIT_MASK BIT(13) -#define CRYP_CR_FFLUSH_MASK BIT(14) -#define CRYP_CR_CRYPEN_MASK BIT(15) -#define CRYP_CR_CONTEXT_SAVE_MASK (CRYP_CR_SECURE_MASK |\ - CRYP_CR_PRLG_MASK |\ - CRYP_CR_ALGODIR_MASK |\ - CRYP_CR_ALGOMODE_MASK |\ - CRYP_CR_KEYSIZE_MASK |\ - CRYP_CR_KEYRDEN_MASK |\ - CRYP_CR_DATATYPE_MASK) - - -#define CRYP_SR_INFIFO_READY_MASK (BIT(0) | BIT(1)) -#define CRYP_SR_IFEM_MASK BIT(0) -#define CRYP_SR_BUSY_MASK BIT(4) - -/* - * Bit position used while setting bits in register - */ -#define CRYP_CR_PRLG_POS 1 -#define CRYP_CR_ALGODIR_POS 2 -#define CRYP_CR_ALGOMODE_POS 3 -#define CRYP_CR_DATATYPE_POS 6 -#define CRYP_CR_KEYSIZE_POS 8 -#define CRYP_CR_KEYRDEN_POS 10 -#define CRYP_CR_KSE_POS 11 -#define CRYP_CR_START_POS 12 -#define CRYP_CR_INIT_POS 13 -#define CRYP_CR_CRYPEN_POS 15 - -#define CRYP_SR_BUSY_POS 4 - -/* - * CRYP PCRs------PC_NAND control register - * BIT_MASK - */ -#define CRYP_DMA_REQ_MASK (BIT(1) | BIT(0)) -#define CRYP_DMA_REQ_MASK_POS 0 - - -struct cryp_system_context { - /* CRYP Register structure */ - struct cryp_register *p_cryp_reg[MAX_DEVICE_SUPPORT]; -}; - -#endif diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c index 168195672e2e10bed4a18d13991de26ef6536125..b2979be613b8f5cb1f0d0194216b8d1c6ff00d5c 100644 --- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c @@ -479,6 +479,9 @@ static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm) ctx->enginectx.op.prepare_request = NULL; ctx->enginectx.op.unprepare_request = NULL; + akcipher_set_reqsize(tfm, + sizeof(struct virtio_crypto_akcipher_request)); + return 0; } @@ -505,7 +508,6 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = { .max_size = virtio_crypto_rsa_max_size, 
.init = virtio_crypto_rsa_init_tfm, .exit = virtio_crypto_rsa_exit_tfm, - .reqsize = sizeof(struct virtio_crypto_akcipher_request), .base = { .cra_name = "rsa", .cra_driver_name = "virtio-crypto-rsa", @@ -528,7 +530,6 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = { .max_size = virtio_crypto_rsa_max_size, .init = virtio_crypto_rsa_init_tfm, .exit = virtio_crypto_rsa_exit_tfm, - .reqsize = sizeof(struct virtio_crypto_akcipher_request), .base = { .cra_name = "pkcs1pad(rsa,sha1)", .cra_driver_name = "virtio-pkcs1-rsa-with-sha1", diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile index 2560cfea1dec28414c99b6ba001f4594fe439a4b..7257b8c446263f8995124c31c2c7d78d419d88de 100644 --- a/drivers/crypto/vmx/Makefile +++ b/drivers/crypto/vmx/Makefile @@ -2,10 +2,22 @@ obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o +ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) +override flavour := linux-ppc64le +else +ifdef CONFIG_PPC64_ELF_ABI_V2 +override flavour := linux-ppc64-elfv2 +else +override flavour := linux-ppc64 +endif +endif + quiet_cmd_perl = PERL $@ - cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@ + cmd_perl = $(PERL) $< $(flavour) > $@ targets += aesp8-ppc.S ghashp8-ppc.S $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE $(call if_changed,perl) + +OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl index 36db2ef09e5bf52047ee368c665bbcefbd2b0412..b583898c11ae8d785ba2418f5548d89974f7c4de 100644 --- a/drivers/crypto/vmx/ppc-xlate.pl +++ b/drivers/crypto/vmx/ppc-xlate.pl @@ -9,6 +9,8 @@ open STDOUT,">$output" || die "can't open $output: $!"; my %GLOBALS; my $dotinlocallabels=($flavour=~/linux/)?1:0; +my $elfv2abi=(($flavour =~ /linux-ppc64le/) or ($flavour =~ /linux-ppc64-elfv2/))?1:0; +my $dotfunctions=($elfv2abi=~1)?0:1; ################################################################ # directives which need special treatment on different platforms @@ -40,7 +42,7 @@ my $globl = sub { }; my $text = sub { my $ret = ($flavour =~ /aix/) ? ".csect\t.text[PR],7" : ".text"; - $ret = ".abiversion 2\n".$ret if ($flavour =~ /linux.*64le/); + $ret = ".abiversion 2\n".$ret if ($elfv2abi); $ret; }; my $machine = sub { @@ -56,8 +58,8 @@ my $size = sub { if ($flavour =~ /linux/) { shift; my $name = shift; $name =~ s|^[\.\_]||; - my $ret = ".size $name,.-".($flavour=~/64$/?".":"").$name; - $ret .= "\n.size .$name,.-.$name" if ($flavour=~/64$/); + my $ret = ".size $name,.-".($dotfunctions?".":"").$name; + $ret .= "\n.size .$name,.-.$name" if ($dotfunctions); $ret; } else @@ -142,7 +144,7 @@ my $vmr = sub { # Some ABIs specify vrsave, special-purpose register #256, as reserved # for system use. 
-my $no_vrsave = ($flavour =~ /linux-ppc64le/); +my $no_vrsave = ($elfv2abi); my $mtspr = sub { my ($f,$idx,$ra) = @_; if ($idx == 256 && $no_vrsave) { diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c index 0e5a5662d5a40c21d1aa2c9e38eb200af70324e1..0a051d65688002cce5f80b52a9d3c6feaba0a644 100644 --- a/drivers/dio/dio.c +++ b/drivers/dio/dio.c @@ -109,6 +109,12 @@ static char dio_no_name[] = { 0 }; #endif /* CONFIG_DIO_CONSTANTS */ +static void dio_dev_release(struct device *dev) +{ + struct dio_dev *ddev = container_of(dev, typeof(struct dio_dev), dev); + kfree(ddev); +} + int __init dio_find(int deviceid) { /* Called to find a DIO device before the full bus scan has run. @@ -225,6 +231,7 @@ static int __init dio_init(void) dev->bus = &dio_bus; dev->dev.parent = &dio_bus.dev; dev->dev.bus = &dio_bus_type; + dev->dev.release = dio_dev_release; dev->scode = scode; dev->resource.start = pa; dev->resource.end = pa + DIO_SIZE(scode, va); @@ -252,6 +259,7 @@ static int __init dio_init(void) if (error) { pr_err("DIO: Error registering device %s\n", dev->name); + put_device(&dev->dev); continue; } error = dio_create_sysfs_dev_files(dev); diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c index 2bba0babcb62b9f67cc8716c507d6fe816860734..f69d68122b9b1313ebc6f94cfb12d7c707cebe2e 100644 --- a/drivers/dma-buf/dma-buf-sysfs-stats.c +++ b/drivers/dma-buf/dma-buf-sysfs-stats.c @@ -132,7 +132,7 @@ void dma_buf_stats_teardown(struct dma_buf *dmabuf) /* Statistics files do not need to send uevents. */ -static int dmabuf_sysfs_uevent_filter(struct kobject *kobj) +static int dmabuf_sysfs_uevent_filter(const struct kobject *kobj) { return 0; } diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c index 59d158873f4cb451b5a5db4f0691e25d6a56909c..c9e41e8a9e272c9764f3d8b60e4119ac91ad1468 100644 --- a/drivers/dma-buf/dma-heap.c +++ b/drivers/dma-buf/dma-heap.c @@ -301,7 +301,7 @@ err0: return err_ret; } -static char *dma_heap_devnode(struct device *dev, umode_t *mode) +static char *dma_heap_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev)); } diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index e3885c90a3acba1b7cebbf895038893d097aa223..1c76aed8e2626c4b26a4276ef2b90c99d8451cec 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -98,12 +98,17 @@ static void dma_resv_list_set(struct dma_resv_list *list, static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences) { struct dma_resv_list *list; + size_t size; - list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL); + /* Round up to the next kmalloc bucket size. */ + size = kmalloc_size_roundup(struct_size(list, table, max_fences)); + + list = kmalloc(size, GFP_KERNEL); if (!list) return NULL; - list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) / + /* Given the resulting bucket size, recalculated max_fences. */ + list->max_fences = (size - offsetof(typeof(*list), table)) / sizeof(*list->table); return list; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 25e111ab21f83089f0076a6d185fb6931ac64867..b6d48d54f42fcffda66ccb2ac2e5e01a15502d37 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -97,6 +97,7 @@ config AT_HDMAC tristate "Atmel AHB DMA support" depends on ARCH_AT91 select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS help Support the Atmel AHB DMA controller. @@ -357,14 +358,6 @@ config INTEL_IOATDMA If unsure, say N. 
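The dma_resv_list_alloc() change above shows a general pattern for flexible-array allocations: ask kmalloc_size_roundup() how large the returned slab bucket will really be, then size the array to the whole bucket instead of probing it with ksize() after the fact. A minimal sketch of the pattern, with my_list and my_list_alloc as illustrative names rather than kernel API:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/stddef.h>

struct my_list {
	unsigned int max_items;
	u64 table[];
};

static struct my_list *my_list_alloc(unsigned int max_items)
{
	struct my_list *list;
	size_t size;

	/* Round the request up to the slab bucket kmalloc() will use. */
	size = kmalloc_size_roundup(struct_size(list, table, max_items));
	list = kmalloc(size, GFP_KERNEL);
	if (!list)
		return NULL;

	/* Use all the capacity in the bucket, not just the requested count. */
	list->max_items = (size - offsetof(struct my_list, table)) /
			  sizeof(*list->table);
	return list;
}

Recomputing the capacity this way lets later insertions use slack the allocator was going to hand back anyway.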
-config INTEL_IOP_ADMA - tristate "Intel IOP32x ADMA support" - depends on ARCH_IOP32X || COMPILE_TEST - select DMA_ENGINE - select ASYNC_TX_ENABLE_CHANNEL_SWITCH - help - Enable support for the Intel(R) IOP Series RAID engines. - config K3_DMA tristate "Hisilicon K3 DMA support" depends on ARCH_HI3xxx || ARCH_HISI || COMPILE_TEST diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 10f7d424100176932e11dbd5198a3ea3dad93e99..5b55ada052a7ff5c48e6eac2fcb9fc5ca68bab44 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -44,7 +44,6 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o obj-$(CONFIG_INTEL_IDMA64) += idma64.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/ obj-y += idxd/ -obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_K3_DMA) += k3dma.o obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c index a2cc520225d32850db821ff0a8b7633a86343ddb..90f28bda29c8bd41e19b340e779c351634d29283 100644 --- a/drivers/dma/apple-admac.c +++ b/drivers/dma/apple-admac.c @@ -21,6 +21,12 @@ #define NCHANNELS_MAX 64 #define IRQ_NOUTPUTS 4 +/* + * For allocation purposes we split the cache + * memory into blocks of fixed size (given in bytes). + */ +#define SRAM_BLOCK 2048 + #define RING_WRITE_SLOT GENMASK(1, 0) #define RING_READ_SLOT GENMASK(5, 4) #define RING_FULL BIT(9) @@ -36,6 +42,9 @@ #define REG_TX_STOP 0x0004 #define REG_RX_START 0x0008 #define REG_RX_STOP 0x000c +#define REG_IMPRINT 0x0090 +#define REG_TX_SRAM_SIZE 0x0094 +#define REG_RX_SRAM_SIZE 0x0098 #define REG_CHAN_CTL(ch) (0x8000 + (ch) * 0x200) #define REG_CHAN_CTL_RST_RINGS BIT(0) @@ -53,7 +62,9 @@ #define BUS_WIDTH_FRAME_2_WORDS 0x10 #define BUS_WIDTH_FRAME_4_WORDS 0x20 -#define CHAN_BUFSIZE 0x8000 +#define REG_CHAN_SRAM_CARVEOUT(ch) (0x8050 + (ch) * 0x200) +#define CHAN_SRAM_CARVEOUT_SIZE GENMASK(31, 16) +#define CHAN_SRAM_CARVEOUT_BASE GENMASK(15, 0) #define REG_CHAN_FIFOCTL(ch) (0x8054 + (ch) * 0x200) #define CHAN_FIFOCTL_LIMIT GENMASK(31, 16) @@ -76,6 +87,8 @@ struct admac_chan { struct dma_chan chan; struct tasklet_struct tasklet; + u32 carveout; + spinlock_t lock; struct admac_tx *current_tx; int nperiod_acks; @@ -92,12 +105,24 @@ struct admac_chan { struct list_head to_free; }; +struct admac_sram { + u32 size; + /* + * SRAM_CARVEOUT has 16-bit fields, so the SRAM cannot be larger than + * 64K and a 32-bit bitfield over 2K blocks covers it. 
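+	 * Each bit of the allocated bitfield tracks one SRAM_BLOCK-sized
+	 * block: admac_alloc_sram_carveout() hands blocks out first-fit
+	 * and admac_free_sram_carveout() clears the block's bit again.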
+ */ + u32 allocated; +}; + struct admac_data { struct dma_device dma; struct device *dev; __iomem void *base; struct reset_control *rstc; + struct mutex cache_alloc_lock; + struct admac_sram txcache, rxcache; + int irq; int irq_index; int nchannels; @@ -118,6 +143,60 @@ struct admac_tx { struct list_head node; }; +static int admac_alloc_sram_carveout(struct admac_data *ad, + enum dma_transfer_direction dir, + u32 *out) +{ + struct admac_sram *sram; + int i, ret = 0, nblocks; + + if (dir == DMA_MEM_TO_DEV) + sram = &ad->txcache; + else + sram = &ad->rxcache; + + mutex_lock(&ad->cache_alloc_lock); + + nblocks = sram->size / SRAM_BLOCK; + for (i = 0; i < nblocks; i++) + if (!(sram->allocated & BIT(i))) + break; + + if (i < nblocks) { + *out = FIELD_PREP(CHAN_SRAM_CARVEOUT_BASE, i * SRAM_BLOCK) | + FIELD_PREP(CHAN_SRAM_CARVEOUT_SIZE, SRAM_BLOCK); + sram->allocated |= BIT(i); + } else { + ret = -EBUSY; + } + + mutex_unlock(&ad->cache_alloc_lock); + + return ret; +} + +static void admac_free_sram_carveout(struct admac_data *ad, + enum dma_transfer_direction dir, + u32 carveout) +{ + struct admac_sram *sram; + u32 base = FIELD_GET(CHAN_SRAM_CARVEOUT_BASE, carveout); + int i; + + if (dir == DMA_MEM_TO_DEV) + sram = &ad->txcache; + else + sram = &ad->rxcache; + + if (WARN_ON(base >= sram->size)) + return; + + mutex_lock(&ad->cache_alloc_lock); + i = base / SRAM_BLOCK; + sram->allocated &= ~BIT(i); + mutex_unlock(&ad->cache_alloc_lock); +} + static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val) { void __iomem *addr = ad->base + reg; @@ -466,15 +545,28 @@ static void admac_synchronize(struct dma_chan *chan) static int admac_alloc_chan_resources(struct dma_chan *chan) { struct admac_chan *adchan = to_admac_chan(chan); + struct admac_data *ad = adchan->host; + int ret; dma_cookie_init(&adchan->chan); + ret = admac_alloc_sram_carveout(ad, admac_chan_direction(adchan->no), + &adchan->carveout); + if (ret < 0) + return ret; + + writel_relaxed(adchan->carveout, + ad->base + REG_CHAN_SRAM_CARVEOUT(adchan->no)); return 0; } static void admac_free_chan_resources(struct dma_chan *chan) { + struct admac_chan *adchan = to_admac_chan(chan); + admac_terminate_all(chan); admac_synchronize(chan); + admac_free_sram_carveout(adchan->host, admac_chan_direction(adchan->no), + adchan->carveout); } static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec, @@ -712,6 +804,7 @@ static int admac_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ad); ad->dev = &pdev->dev; ad->nchannels = nchannels; + mutex_init(&ad->cache_alloc_lock); /* * The controller has 4 IRQ outputs. Try them all until @@ -801,6 +894,13 @@ static int admac_probe(struct platform_device *pdev) goto free_irq; } + ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE); + ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE); + + dev_info(&pdev->dev, "Audio DMA Controller\n"); + dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n", + readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size); + return 0; free_irq: diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 858bd64f13135f1370a59d2290570189de89415c..8858470246e1552e5e278a2b254f8ae874df894b 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -3,6 +3,7 @@ * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems) * * Copyright (C) 2008 Atmel Corporation + * Copyright (C) 2022 Microchip Technology, Inc. 
and its subsidiaries * * This supports the Atmel AHB DMA Controller found in several Atmel SoCs. * The only Atmel DMA Controller that is not covered by this driver is the one @@ -10,20 +11,22 @@ */ #include +#include #include #include -#include #include +#include #include #include -#include -#include #include +#include #include #include +#include +#include -#include "at_hdmac_regs.h" #include "dmaengine.h" +#include "virt-dma.h" /* * Glossary @@ -34,9 +37,449 @@ * atc_ / atchan : ATmel DMA Channel entity related */ -#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) -#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ - |ATC_DIF(AT_DMA_MEM_IF)) +#define AT_DMA_MAX_NR_CHANNELS 8 + +/* Global Configuration Register */ +#define AT_DMA_GCFG 0x00 +#define AT_DMA_IF_BIGEND(i) BIT((i)) /* AHB-Lite Interface i in Big-endian mode */ +#define AT_DMA_ARB_CFG BIT(4) /* Arbiter mode. */ + +/* Controller Enable Register */ +#define AT_DMA_EN 0x04 +#define AT_DMA_ENABLE BIT(0) + +/* Software Single Request Register */ +#define AT_DMA_SREQ 0x08 +#define AT_DMA_SSREQ(x) BIT((x) << 1) /* Request a source single transfer on channel x */ +#define AT_DMA_DSREQ(x) BIT(1 + ((x) << 1)) /* Request a destination single transfer on channel x */ + +/* Software Chunk Transfer Request Register */ +#define AT_DMA_CREQ 0x0c +#define AT_DMA_SCREQ(x) BIT((x) << 1) /* Request a source chunk transfer on channel x */ +#define AT_DMA_DCREQ(x) BIT(1 + ((x) << 1)) /* Request a destination chunk transfer on channel x */ + +/* Software Last Transfer Flag Register */ +#define AT_DMA_LAST 0x10 +#define AT_DMA_SLAST(x) BIT((x) << 1) /* This src rq is last tx of buffer on channel x */ +#define AT_DMA_DLAST(x) BIT(1 + ((x) << 1)) /* This dst rq is last tx of buffer on channel x */ + +/* Request Synchronization Register */ +#define AT_DMA_SYNC 0x14 +#define AT_DMA_SYR(h) BIT((h)) /* Synchronize handshake line h */ + +/* Error, Chained Buffer transfer completed and Buffer transfer completed Interrupt registers */ +#define AT_DMA_EBCIER 0x18 /* Enable register */ +#define AT_DMA_EBCIDR 0x1c /* Disable register */ +#define AT_DMA_EBCIMR 0x20 /* Mask Register */ +#define AT_DMA_EBCISR 0x24 /* Status Register */ +#define AT_DMA_CBTC_OFFSET 8 +#define AT_DMA_ERR_OFFSET 16 +#define AT_DMA_BTC(x) BIT((x)) +#define AT_DMA_CBTC(x) BIT(AT_DMA_CBTC_OFFSET + (x)) +#define AT_DMA_ERR(x) BIT(AT_DMA_ERR_OFFSET + (x)) + +/* Channel Handler Enable Register */ +#define AT_DMA_CHER 0x28 +#define AT_DMA_ENA(x) BIT((x)) +#define AT_DMA_SUSP(x) BIT(8 + (x)) +#define AT_DMA_KEEP(x) BIT(24 + (x)) + +/* Channel Handler Disable Register */ +#define AT_DMA_CHDR 0x2c +#define AT_DMA_DIS(x) BIT(x) +#define AT_DMA_RES(x) BIT(8 + (x)) + +/* Channel Handler Status Register */ +#define AT_DMA_CHSR 0x30 +#define AT_DMA_EMPT(x) BIT(16 + (x)) +#define AT_DMA_STAL(x) BIT(24 + (x)) + +/* Channel registers base address */ +#define AT_DMA_CH_REGS_BASE 0x3c +#define ch_regs(x) (AT_DMA_CH_REGS_BASE + (x) * 0x28) /* Channel x base addr */ + +/* Hardware register offset for each channel */ +#define ATC_SADDR_OFFSET 0x00 /* Source Address Register */ +#define ATC_DADDR_OFFSET 0x04 /* Destination Address Register */ +#define ATC_DSCR_OFFSET 0x08 /* Descriptor Address Register */ +#define ATC_CTRLA_OFFSET 0x0c /* Control A Register */ +#define ATC_CTRLB_OFFSET 0x10 /* Control B Register */ +#define ATC_CFG_OFFSET 0x14 /* Configuration Register */ +#define ATC_SPIP_OFFSET 0x18 /* Src PIP Configuration Register */ +#define ATC_DPIP_OFFSET 0x1c /* Dst PIP Configuration Register */ + + +/* 
Bitfield definitions */
+
+/* Bitfields in DSCR */
+#define ATC_DSCR_IF		GENMASK(1, 0)	/* Descriptor fetched via AHB-Lite Interface */
+
+/* Bitfields in CTRLA */
+#define ATC_BTSIZE_MAX		GENMASK(15, 0)	/* Maximum Buffer Transfer Size */
+#define ATC_BTSIZE		GENMASK(15, 0)	/* Buffer Transfer Size */
+#define ATC_SCSIZE		GENMASK(18, 16)	/* Source Chunk Transfer Size */
+#define ATC_DCSIZE		GENMASK(22, 20)	/* Destination Chunk Transfer Size */
+#define ATC_SRC_WIDTH		GENMASK(25, 24)	/* Source Single Transfer Size */
+#define ATC_DST_WIDTH		GENMASK(29, 28)	/* Destination Single Transfer Size */
+#define ATC_DONE		BIT(31)	/* Tx Done (only written back in descriptor) */
+
+/* Bitfields in CTRLB */
+#define ATC_SIF			GENMASK(1, 0)	/* Src tx done via AHB-Lite Interface i */
+#define ATC_DIF			GENMASK(5, 4)	/* Dst tx done via AHB-Lite Interface i */
+#define AT_DMA_MEM_IF		0x0	/* interface 0 as memory interface */
+#define AT_DMA_PER_IF		0x1	/* interface 1 as peripheral interface */
+#define ATC_SRC_PIP		BIT(8)	/* Source Picture-in-Picture enabled */
+#define ATC_DST_PIP		BIT(12)	/* Destination Picture-in-Picture enabled */
+#define ATC_SRC_DSCR_DIS	BIT(16)	/* Src Descriptor fetch disable */
+#define ATC_DST_DSCR_DIS	BIT(20)	/* Dst Descriptor fetch disable */
+#define ATC_FC			GENMASK(22, 21)	/* Choose Flow Controller */
+#define ATC_FC_MEM2MEM		0x0	/* Mem-to-Mem (DMA) */
+#define ATC_FC_MEM2PER		0x1	/* Mem-to-Periph (DMA) */
+#define ATC_FC_PER2MEM		0x2	/* Periph-to-Mem (DMA) */
+#define ATC_FC_PER2PER		0x3	/* Periph-to-Periph (DMA) */
+#define ATC_FC_PER2MEM_PER	0x4	/* Periph-to-Mem (Peripheral) */
+#define ATC_FC_MEM2PER_PER	0x5	/* Mem-to-Periph (Peripheral) */
+#define ATC_FC_PER2PER_SRCPER	0x6	/* Periph-to-Periph (Src Peripheral) */
+#define ATC_FC_PER2PER_DSTPER	0x7	/* Periph-to-Periph (Dst Peripheral) */
+#define ATC_SRC_ADDR_MODE	GENMASK(25, 24)
+#define ATC_SRC_ADDR_MODE_INCR	0x0	/* Incrementing Mode */
+#define ATC_SRC_ADDR_MODE_DECR	0x1	/* Decrementing Mode */
+#define ATC_SRC_ADDR_MODE_FIXED	0x2	/* Fixed Mode */
+#define ATC_DST_ADDR_MODE	GENMASK(29, 28)
+#define ATC_DST_ADDR_MODE_INCR	0x0	/* Incrementing Mode */
+#define ATC_DST_ADDR_MODE_DECR	0x1	/* Decrementing Mode */
+#define ATC_DST_ADDR_MODE_FIXED	0x2	/* Fixed Mode */
+#define ATC_IEN			BIT(30)	/* BTC interrupt enable (active low) */
+#define ATC_AUTO		BIT(31)	/* Auto multiple buffer tx enable */
+
+/* Bitfields in CFG */
+#define ATC_PER_MSB(h)	((0x30U & (h)) >> 4)	/* Extract most significant bits of a handshaking identifier */
+
+#define ATC_SRC_PER		GENMASK(3, 0)	/* Channel src rq associated with periph handshaking ifc h */
+#define ATC_DST_PER		GENMASK(7, 4)	/* Channel dst rq associated with periph handshaking ifc h */
+#define ATC_SRC_REP		BIT(8)	/* Source Replay Mode */
+#define ATC_SRC_H2SEL		BIT(9)	/* Source Handshaking Mode */
+#define ATC_SRC_PER_MSB		GENMASK(11, 10)	/* Channel src rq (most significant bits) */
+#define ATC_DST_REP		BIT(12)	/* Destination Replay Mode */
+#define ATC_DST_H2SEL		BIT(13)	/* Destination Handshaking Mode */
+#define ATC_DST_PER_MSB		GENMASK(15, 14)	/* Channel dst rq (most significant bits) */
+#define ATC_SOD			BIT(16)	/* Stop On Done */
+#define ATC_LOCK_IF		BIT(20)	/* Interface Lock */
+#define ATC_LOCK_B		BIT(21)	/* AHB Bus Lock */
+#define ATC_LOCK_IF_L		BIT(22)	/* Master Interface Arbiter Lock */
+#define ATC_AHB_PROT		GENMASK(26, 24)	/* AHB Protection */
+#define ATC_FIFOCFG		GENMASK(29, 28)	/* FIFO Request Configuration */
+#define ATC_FIFOCFG_LARGESTBURST	0x0
+#define ATC_FIFOCFG_HALFFIFO		0x1
+#define ATC_FIFOCFG_ENOUGHSPACE		0x2
+
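[Editor's note: all of the accessors above are plain GENMASK() field definitions, meant to be packed and unpacked with FIELD_PREP()/FIELD_GET() instead of the old hand-rolled shift macros. Below is a minimal userspace sketch of that idiom; the GENMASK/FIELD_* stand-ins are simplified re-implementations for the example only, not the kernel's <linux/bitfield.h>, and the printed values are purely illustrative.]

#include <stdio.h>

/* Simplified userspace stand-ins for the kernel's <linux/bits.h> and
 * <linux/bitfield.h> helpers, good enough to show how the GENMASK()-based
 * definitions above are meant to be used. */
#define GENMASK(h, l)		((~0u >> (31 - (h))) & ~((1u << (l)) - 1u))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

/* A few of the CTRLB fields defined above. */
#define ATC_FC			GENMASK(22, 21)
#define ATC_FC_MEM2PER		0x1
#define ATC_SRC_ADDR_MODE	GENMASK(25, 24)
#define ATC_SRC_ADDR_MODE_INCR	0x0
#define ATC_DST_ADDR_MODE	GENMASK(29, 28)
#define ATC_DST_ADDR_MODE_FIXED	0x2

int main(void)
{
	/* Compose a mem-to-peripheral CTRLB value field by field, then
	 * pull one field back out with FIELD_GET(). */
	unsigned int ctrlb = FIELD_PREP(ATC_FC, ATC_FC_MEM2PER) |
			     FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) |
			     FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_FIXED);

	printf("ctrlb = 0x%08x, fc = 0x%x\n", ctrlb, FIELD_GET(ATC_FC, ctrlb));
	return 0;
}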
+/* Bitfields in SPIP */
+#define ATC_SPIP_HOLE		GENMASK(15, 0)
+#define ATC_SPIP_BOUNDARY	GENMASK(25, 16)
+
+/* Bitfields in DPIP */
+#define ATC_DPIP_HOLE		GENMASK(15, 0)
+#define ATC_DPIP_BOUNDARY	GENMASK(25, 16)
+
+#define ATC_SRC_PER_ID(id)	(FIELD_PREP(ATC_SRC_PER_MSB, (id)) |	\
+				 FIELD_PREP(ATC_SRC_PER, (id)))
+#define ATC_DST_PER_ID(id)	(FIELD_PREP(ATC_DST_PER_MSB, (id)) |	\
+				 FIELD_PREP(ATC_DST_PER, (id)))
+
+
+
+/*-- descriptors -----------------------------------------------------*/
+
+/* LLI == Linked List Item; aka DMA buffer descriptor */
+struct at_lli {
+	/* values that are not changed by hardware */
+	u32 saddr;
+	u32 daddr;
+	/* value that may get written back: */
+	u32 ctrla;
+	/* more values that are not changed by hardware */
+	u32 ctrlb;
+	u32 dscr;	/* chain to next lli */
+};
+
+/**
+ * struct atdma_sg - atdma scatter gather entry
+ * @len: length of the current Linked List Item.
+ * @lli: linked list item that is passed to the DMA controller
+ * @lli_phys: physical address of the LLI.
+ */
+struct atdma_sg {
+	unsigned int len;
+	struct at_lli *lli;
+	dma_addr_t lli_phys;
+};
+
+/**
+ * struct at_desc - software descriptor
+ * @vd: pointer to the virtual dma descriptor.
+ * @atchan: pointer to the atmel dma channel.
+ * @total_len: total transaction byte count
+ * @sglen: number of sg entries.
+ * @boundary: boundary for interleaved transfers, in transfer-width units.
+ * @dst_hole: destination hole (PIP) for interleaved transfers.
+ * @src_hole: source hole (PIP) for interleaved transfers.
+ * @memset_buffer: true when @sg wraps a temporary memset buffer.
+ * @memset_paddr: physical address of the memset temporary buffer.
+ * @memset_vaddr: virtual address of the memset temporary buffer.
+ * @sg: array of sgs.
+ */
+struct at_desc {
+	struct virt_dma_desc vd;
+	struct at_dma_chan *atchan;
+	size_t total_len;
+	unsigned int sglen;
+	/* Interleaved data */
+	size_t boundary;
+	size_t dst_hole;
+	size_t src_hole;
+
+	/* Memset temporary buffer */
+	bool memset_buffer;
+	dma_addr_t memset_paddr;
+	int *memset_vaddr;
+	struct atdma_sg sg[];
+};
+
+/*-- Channels --------------------------------------------------------*/
+
+/**
+ * enum atc_status - information bits stored in channel status flag
+ *
+ * Manipulated with atomic operations.
+ */
+enum atc_status {
+	ATC_IS_PAUSED = 1,
+	ATC_IS_CYCLIC = 24,
+};
+
+/**
+ * struct at_dma_chan - internal representation of an Atmel HDMAC channel
+ * @vc: virtual dma channel entry.
+ * @atdma: pointer to the driver data.
+ * @ch_regs: memory mapped register base
+ * @mask: channel index in a mask
+ * @per_if: peripheral interface
+ * @mem_if: memory interface
+ * @status: transmit status information from irq/prep* functions
+ *	to tasklet (use atomic operations)
+ * @save_cfg: configuration register that is saved on suspend/resume cycle
+ * @save_dscr: for cyclic operations, preserve next descriptor address in
+ *	the cyclic list on suspend/resume cycle
+ * @dma_sconfig: configuration for slave transfers, passed via
+ *	.device_config
+ * @cyclic: channel is used for cyclic transfers
+ * @desc: pointer to the atmel dma descriptor.
+ */
+struct at_dma_chan {
+	struct virt_dma_chan vc;
+	struct at_dma *atdma;
+	void __iomem *ch_regs;
+	u8 mask;
+	u8 per_if;
+	u8 mem_if;
+	unsigned long status;
+	u32 save_cfg;
+	u32 save_dscr;
+	struct dma_slave_config dma_sconfig;
+	bool cyclic;
+	struct at_desc *desc;
+};
+
+#define	channel_readl(atchan, name) \
+	__raw_readl((atchan)->ch_regs + ATC_##name##_OFFSET)
+
+#define	channel_writel(atchan, name, val) \
+	__raw_writel((val), (atchan)->ch_regs + ATC_##name##_OFFSET)
+
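[Editor's note: struct at_lli above mirrors the hardware descriptor; the dscr member of each LLI holds the bus address of the next one, and a zeroed dscr terminates the chain (set_lli_eol() further below additionally disables descriptor fetch and unmasks the transfer-complete interrupt). A minimal userspace sketch of that chaining, assuming fake bus addresses and an lli_chain() helper modeled on the driver's atdma_lli_chain():]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace mirror of the hardware linked-list item (at_lli). */
struct lli {
	uint32_t saddr;
	uint32_t daddr;
	uint32_t ctrla;
	uint32_t ctrlb;
	uint32_t dscr;		/* bus address of next lli, 0 terminates */
};

/* Model of atdma_lli_chain(): point entry i-1 at entry i. */
static void lli_chain(struct lli **sg, const uint32_t *phys, unsigned int i)
{
	if (i)
		sg[i - 1]->dscr = phys[i];
}

int main(void)
{
	struct lli *sg[3];
	uint32_t phys[3];
	unsigned int i;

	for (i = 0; i < 3; i++) {
		sg[i] = calloc(1, sizeof(*sg[i]));
		phys[i] = 0x80000000u + 0x20u * i;	/* fake bus addresses */
		lli_chain(sg, phys, i);
	}
	sg[2]->dscr = 0;	/* end of link, as set_lli_eol() does */

	for (i = 0; i < 3; i++)
		printf("lli %u @0x%08x -> next 0x%08x\n", i,
		       (unsigned)phys[i], (unsigned)sg[i]->dscr);
	for (i = 0; i < 3; i++)
		free(sg[i]);
	return 0;
}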
+/*
+ * Fix sconfig's burst size according to at_hdmac. We need to convert them as:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4, 64 -> 5, 128 -> 6, 256 -> 7.
+ *
+ * This can be done by finding the most significant bit set.
+ */
+static inline void convert_burst(u32 *maxburst)
+{
+	if (*maxburst > 1)
+		*maxburst = fls(*maxburst) - 2;
+	else
+		*maxburst = 0;
+}
+
+/*
+ * Fix sconfig's bus width according to at_hdmac.
+ * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2.
+ */
+static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
+{
+	switch (addr_width) {
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		return 1;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		return 2;
+	default:
+		/* For 1 byte width or fallback */
+		return 0;
+	}
+}
+
+/*-- Controller ------------------------------------------------------*/
+
+/**
+ * struct at_dma - internal representation of an Atmel HDMA Controller
+ * @dma_device: dmaengine dma_device object members
+ * @regs: memory mapped register base
+ * @clk: dma controller clock
+ * @save_imr: interrupt mask register that is saved on suspend/resume cycle
+ * @all_chan_mask: all channels available in a mask
+ * @lli_pool: hw lli table
+ * @memset_pool: pool of temporary buffers used for memset operations
+ * @chan: channels table to store at_dma_chan structures
+ */
+struct at_dma {
+	struct dma_device dma_device;
+	void __iomem *regs;
+	struct clk *clk;
+	u32 save_imr;
+
+	u8 all_chan_mask;
+
+	struct dma_pool *lli_pool;
+	struct dma_pool *memset_pool;
+	/* AT THE END channels table */
+	struct at_dma_chan chan[];
+};
+
+#define dma_readl(atdma, name) \
+	__raw_readl((atdma)->regs + AT_DMA_##name)
+#define dma_writel(atdma, name, val) \
+	__raw_writel((val), (atdma)->regs + AT_DMA_##name)
+
+static inline struct at_desc *to_atdma_desc(struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct at_desc, vd.tx);
+}
+
+static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct at_dma_chan, vc.chan);
+}
+
+static inline struct at_dma *to_at_dma(struct dma_device *ddev)
+{
+	return container_of(ddev, struct at_dma, dma_device);
+}
+
+
+/*-- Helper functions ------------------------------------------------*/
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+#if defined(VERBOSE_DEBUG)
+static void vdbg_dump_regs(struct at_dma_chan *atchan)
+{
+	struct at_dma *atdma = to_at_dma(atchan->vc.chan.device);
+
+	dev_err(chan2dev(&atchan->vc.chan),
+		"  channel %d : imr = 0x%x, chsr = 0x%x\n",
+		atchan->vc.chan.chan_id,
+		dma_readl(atdma, EBCIMR),
+		dma_readl(atdma, CHSR));
+
+	dev_err(chan2dev(&atchan->vc.chan),
+		"  channel: s0x%x d0x%x ctrl0x%x:0x%x cfg0x%x l0x%x\n",
+		channel_readl(atchan, SADDR),
+		channel_readl(atchan, DADDR),
+		channel_readl(atchan, CTRLA),
+		channel_readl(atchan, CTRLB),
+		channel_readl(atchan, CFG),
+		channel_readl(atchan, DSCR));
+}
+#else
+static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
+#endif
+
+static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
+{
+	dev_crit(chan2dev(&atchan->vc.chan),
+		 "desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n",
+		 &lli->saddr, &lli->daddr,
+		 lli->ctrla, lli->ctrlb, &lli->dscr);
+}
+
+
+static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on)
+{
+	u32 ebci;
+
+	/* enable interrupts on buffer transfer completion & error */
+	ebci = AT_DMA_BTC(chan_id)
+	     | AT_DMA_ERR(chan_id);
+	if (on)
+		dma_writel(atdma, EBCIER, ebci);
+	else
+		dma_writel(atdma, EBCIDR, ebci);
+}
+
+static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id)
+{
+	atc_setup_irq(atdma, chan_id, 1);
+}
+
+static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id)
+{
+	atc_setup_irq(atdma, chan_id, 0);
+}
+
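[Editor's note: convert_burst() above encodes the dmaengine maxburst value into the controller's 3-bit chunk-size field (1 -> 0, 4 -> 1, 8 -> 2, ..., 256 -> 7) by locating the most significant set bit. A standalone sanity check of that mapping follows; the fls() here is a userspace stand-in built on __builtin_clz(), while the convert_burst() body is copied from the driver.]

#include <assert.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): position of the highest set
 * bit, 1-based, 0 when no bit is set. */
static int fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Same logic as the driver's convert_burst(). */
static void convert_burst(unsigned int *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}

int main(void)
{
	/* Expected encoding: 1 -> 0, 4 -> 1, 8 -> 2, ..., 256 -> 7. */
	const unsigned int in[]  = { 1, 4, 8, 16, 32, 64, 128, 256 };
	const unsigned int out[] = { 0, 1, 2, 3, 4, 5, 6, 7 };

	for (unsigned int i = 0; i < 8; i++) {
		unsigned int b = in[i];

		convert_burst(&b);
		assert(b == out[i]);
		printf("%u -> %u\n", in[i], b);
	}
	return 0;
}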
+/**
+ * atc_chan_is_enabled - test if given channel is enabled
+ * @atchan: channel we want to test status
+ */
+static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
+{
+	struct at_dma *atdma = to_at_dma(atchan->vc.chan.device);
+
+	return !!(dma_readl(atdma, CHSR) & atchan->mask);
+}
+
+/**
+ * atc_chan_is_paused - test channel pause/resume status
+ * @atchan: channel we want to test status
+ */
+static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
+{
+	return test_bit(ATC_IS_PAUSED, &atchan->status);
+}
+
+/**
+ * atc_chan_is_cyclic - test if given channel has cyclic property set
+ * @atchan: channel we want to test status
+ */
+static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
+{
+	return test_bit(ATC_IS_CYCLIC, &atchan->status);
+}
+
+/**
+ * set_lli_eol - set end-of-link on a descriptor so it will end the transfer
+ * @desc: descriptor, single or the last of a chain, on which to end the chain
+ * @i: index of the atmel scatter gather entry that is at the end of the chain.
+ */
+static void set_lli_eol(struct at_desc *desc, unsigned int i)
+{
+	u32 ctrlb = desc->sg[i].lli->ctrlb;
+
+	ctrlb &= ~ATC_IEN;
+	ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
+
+	desc->sg[i].lli->ctrlb = ctrlb;
+	desc->sg[i].lli->dscr = 0;
+}
+
+#define ATC_DEFAULT_CFG		FIELD_PREP(ATC_FIFOCFG, ATC_FIFOCFG_HALFFIFO)
+#define ATC_DEFAULT_CTRLB	(FIELD_PREP(ATC_SIF, AT_DMA_MEM_IF) | \
+				 FIELD_PREP(ATC_DIF, AT_DMA_MEM_IF))
 #define ATC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
@@ -74,13 +517,6 @@ struct at_dma_slave {
 	u32 cfg;
 };
 
-/* prototypes */
-static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
-static void atc_issue_pending(struct dma_chan *chan);
-
-
-/*----------------------------------------------------------------------*/
-
 static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
					      size_t len)
 {
@@ -96,194 +532,72 @@ static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
 	return width;
 }
 
-static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
-{
-	return list_first_entry(&atchan->active_list,
-				struct at_desc, desc_node);
-}
-
-static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
-{
-	return list_first_entry(&atchan->queue,
-				struct at_desc, desc_node);
-}
-
-/**
- * atc_alloc_descriptor - allocate and return an initialized descriptor
- * @chan: the channel to allocate descriptors for
- * @gfp_flags: GFP allocation flags
- *
- * Note: The ack-bit is positioned in the descriptor flag at creation time
- * to make initial allocation more convenient. This bit will be cleared
- * and control will be given to client at usage time (during
- * preparation functions).
- */ -static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, - gfp_t gfp_flags) -{ - struct at_desc *desc = NULL; - struct at_dma *atdma = to_at_dma(chan->device); - dma_addr_t phys; - - desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys); - if (desc) { - INIT_LIST_HEAD(&desc->tx_list); - dma_async_tx_descriptor_init(&desc->txd, chan); - /* txd.flags will be overwritten in prep functions */ - desc->txd.flags = DMA_CTRL_ACK; - desc->txd.tx_submit = atc_tx_submit; - desc->txd.phys = phys; - } - - return desc; -} - -/** - * atc_desc_get - get an unused descriptor from free_list - * @atchan: channel we want a new descriptor for - */ -static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) +static void atdma_lli_chain(struct at_desc *desc, unsigned int i) { - struct at_desc *desc, *_desc; - struct at_desc *ret = NULL; - unsigned long flags; - unsigned int i = 0; - - spin_lock_irqsave(&atchan->lock, flags); - list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { - i++; - if (async_tx_test_ack(&desc->txd)) { - list_del(&desc->desc_node); - ret = desc; - break; - } - dev_dbg(chan2dev(&atchan->chan_common), - "desc %p not ACKed\n", desc); - } - spin_unlock_irqrestore(&atchan->lock, flags); - dev_vdbg(chan2dev(&atchan->chan_common), - "scanned %u descriptors on freelist\n", i); - - /* no more descriptor available in initial pool: create one more */ - if (!ret) - ret = atc_alloc_descriptor(&atchan->chan_common, GFP_NOWAIT); - - return ret; -} + struct atdma_sg *atdma_sg = &desc->sg[i]; -/** - * atc_desc_put - move a descriptor, including any children, to the free list - * @atchan: channel we work on - * @desc: descriptor, at the head of a chain, to move to free list - */ -static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) -{ - if (desc) { - struct at_desc *child; - unsigned long flags; - - spin_lock_irqsave(&atchan->lock, flags); - list_for_each_entry(child, &desc->tx_list, desc_node) - dev_vdbg(chan2dev(&atchan->chan_common), - "moving child desc %p to freelist\n", - child); - list_splice_init(&desc->tx_list, &atchan->free_list); - dev_vdbg(chan2dev(&atchan->chan_common), - "moving desc %p to freelist\n", desc); - list_add(&desc->desc_node, &atchan->free_list); - spin_unlock_irqrestore(&atchan->lock, flags); - } -} - -/** - * atc_desc_chain - build chain adding a descriptor - * @first: address of first descriptor of the chain - * @prev: address of previous descriptor of the chain - * @desc: descriptor to queue - * - * Called from prep_* functions - */ -static void atc_desc_chain(struct at_desc **first, struct at_desc **prev, - struct at_desc *desc) -{ - if (!(*first)) { - *first = desc; - } else { - /* inform the HW lli about chaining */ - (*prev)->lli.dscr = desc->txd.phys; - /* insert the link descriptor to the LD ring */ - list_add_tail(&desc->desc_node, - &(*first)->tx_list); - } - *prev = desc; + if (i) + desc->sg[i - 1].lli->dscr = atdma_sg->lli_phys; } /** * atc_dostart - starts the DMA engine for real * @atchan: the channel we want to start - * @first: first descriptor in the list we want to begin with - * - * Called with atchan->lock held and bh disabled */ -static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) +static void atc_dostart(struct at_dma_chan *atchan) { - struct at_dma *atdma = to_at_dma(atchan->chan_common.device); + struct virt_dma_desc *vd = vchan_next_desc(&atchan->vc); + struct at_desc *desc; - /* ASSERT: channel is idle */ - if (atc_chan_is_enabled(atchan)) { - 
dev_err(chan2dev(&atchan->chan_common), - "BUG: Attempted to start non-idle channel\n"); - dev_err(chan2dev(&atchan->chan_common), - " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", - channel_readl(atchan, SADDR), - channel_readl(atchan, DADDR), - channel_readl(atchan, CTRLA), - channel_readl(atchan, CTRLB), - channel_readl(atchan, DSCR)); - - /* The tasklet will hopefully advance the queue... */ + if (!vd) { + atchan->desc = NULL; return; } vdbg_dump_regs(atchan); + list_del(&vd->node); + atchan->desc = desc = to_atdma_desc(&vd->tx); + channel_writel(atchan, SADDR, 0); channel_writel(atchan, DADDR, 0); channel_writel(atchan, CTRLA, 0); channel_writel(atchan, CTRLB, 0); - channel_writel(atchan, DSCR, first->txd.phys); - channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) | - ATC_SPIP_BOUNDARY(first->boundary)); - channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) | - ATC_DPIP_BOUNDARY(first->boundary)); + channel_writel(atchan, DSCR, desc->sg[0].lli_phys); + channel_writel(atchan, SPIP, + FIELD_PREP(ATC_SPIP_HOLE, desc->src_hole) | + FIELD_PREP(ATC_SPIP_BOUNDARY, desc->boundary)); + channel_writel(atchan, DPIP, + FIELD_PREP(ATC_DPIP_HOLE, desc->dst_hole) | + FIELD_PREP(ATC_DPIP_BOUNDARY, desc->boundary)); + /* Don't allow CPU to reorder channel enable. */ wmb(); - dma_writel(atdma, CHER, atchan->mask); + dma_writel(atchan->atdma, CHER, atchan->mask); vdbg_dump_regs(atchan); } -/* - * atc_get_desc_by_cookie - get the descriptor of a cookie - * @atchan: the DMA channel - * @cookie: the cookie to get the descriptor for - */ -static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan, - dma_cookie_t cookie) +static void atdma_desc_free(struct virt_dma_desc *vd) { - struct at_desc *desc, *_desc; + struct at_dma *atdma = to_at_dma(vd->tx.chan->device); + struct at_desc *desc = to_atdma_desc(&vd->tx); + unsigned int i; - list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) { - if (desc->txd.cookie == cookie) - return desc; + for (i = 0; i < desc->sglen; i++) { + if (desc->sg[i].lli) + dma_pool_free(atdma->lli_pool, desc->sg[i].lli, + desc->sg[i].lli_phys); } - list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { - if (desc->txd.cookie == cookie) - return desc; + /* If the transfer was a memset, free our temporary buffer */ + if (desc->memset_buffer) { + dma_pool_free(atdma->memset_pool, desc->memset_vaddr, + desc->memset_paddr); + desc->memset_buffer = false; } - return NULL; + kfree(desc); } /** @@ -293,10 +607,10 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan, * @current_len: the number of bytes left before reading CTRLA * @ctrla: the value of CTRLA */ -static inline int atc_calc_bytes_left(int current_len, u32 ctrla) +static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla) { - u32 btsize = (ctrla & ATC_BTSIZE_MAX); - u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla); + u32 btsize = FIELD_GET(ATC_BTSIZE, ctrla); + u32 src_width = FIELD_GET(ATC_SRC_WIDTH, ctrla); /* * According to the datasheet, when reading the Control A Register @@ -308,246 +622,153 @@ static inline int atc_calc_bytes_left(int current_len, u32 ctrla) } /** - * atc_get_bytes_left - get the number of bytes residue for a cookie - * @chan: DMA channel - * @cookie: transaction identifier to check status of + * atc_get_llis_residue - Get residue for a hardware linked list transfer + * + * Calculate the residue by removing the length of the Linked List Item (LLI) + * already transferred from the total length. 
To get the current LLI we can use
+ * the value of the channel's DSCR register and compare it against the DSCR
+ * value of each LLI.
+ *
+ * The CTRLA register provides us with the amount of data already read from the
+ * source for the LLI. So we can compute a more accurate residue by also
+ * removing the number of bytes corresponding to this amount of data.
+ *
+ * However, the DSCR and CTRLA registers cannot both be read atomically, so a
+ * race condition may occur: the first register read may refer to one LLI
+ * whereas the second read may refer to a later LLI in the list, because the
+ * DMA transfer progresses in between the two reads.
+ *
+ * One solution could have been to pause the DMA transfer, read the DSCR and
+ * CTRLA, then resume the DMA transfer. Nonetheless, this approach presents
+ * some drawbacks:
+ * - If the DMA transfer is paused, RX overruns or TX underruns are more likely
+ *   to occur depending on the system latency. Taking the USART driver as an
+ *   example, it uses a cyclic DMA transfer to read data from the Receive
+ *   Holding Register (RHR) to avoid RX overruns since the RHR is not protected
+ *   by any FIFO on most Atmel SoCs. So pausing the DMA transfer to compute the
+ *   residue would break the USART driver design.
+ * - The atc_pause() function masks interrupts, which we would rather avoid for
+ *   system latency reasons.
+ *
+ * We therefore use another solution: the DSCR is read a first time, the CTRLA
+ * is read in turn, then the DSCR is read a second time. If the two consecutive
+ * DSCR reads return the same value, we assume that both refer to the very same
+ * LLI, and that the CTRLA value read in between does as well. For cyclic
+ * transfers, the assumption is that a full loop is "not so fast". If the two
+ * DSCR values differ, the CTRLA and then the DSCR are read again, until two
+ * consecutive DSCR reads are equal or the maximum number of trials is reached.
+ * This algorithm is very unlikely not to find a stable value for DSCR.
+ * @atchan: pointer to an atmel hdmac channel.
+ * @desc: pointer to the descriptor for which the residue is calculated.
+ * @residue: residue to be set to dma_tx_state.
+ * Returns 0 on success, -errno otherwise.
+ */
-static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
+static int atc_get_llis_residue(struct at_dma_chan *atchan,
+				struct at_desc *desc, u32 *residue)
 {
-	struct at_dma_chan *atchan = to_at_dma_chan(chan);
-	struct at_desc *desc_first = atc_first_active(atchan);
-	struct at_desc *desc;
-	int ret;
-	u32 ctrla, dscr;
+	u32 len, ctrla, dscr;
 	unsigned int i;
 
-	/*
-	 * If the cookie doesn't match to the currently running transfer then
-	 * we can return the total length of the associated DMA transfer,
-	 * because it is still queued.
-	 */
-	desc = atc_get_desc_by_cookie(atchan, cookie);
-	if (desc == NULL)
-		return -EINVAL;
-	else if (desc != desc_first)
-		return desc->total_len;
+	len = desc->total_len;
+	dscr = channel_readl(atchan, DSCR);
+	rmb(); /* ensure DSCR is read before CTRLA */
+	ctrla = channel_readl(atchan, CTRLA);
+	for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
+		u32 new_dscr;
-	/* cookie matches to the currently running transfer */
-	ret = desc_first->total_len;
-
-	if (desc_first->lli.dscr) {
-		/* hardware linked list transfer */
+		rmb(); /* ensure DSCR is read after CTRLA */
+		new_dscr = channel_readl(atchan, DSCR);
 
 		/*
- * To get the current child descriptor we can use the value of - * the channel's DSCR register and compare it against the value - * of the hardware linked list structure of each child - * descriptor. - * - * The CTRLA register provides us with the amount of data - * already read from the source for the current child - * descriptor. So we can compute a more accurate residue by also - * removing the number of bytes corresponding to this amount of - * data. - * - * However, the DSCR and CTRLA registers cannot be read both - * atomically. Hence a race condition may occur: the first read - * register may refer to one child descriptor whereas the second - * read may refer to a later child descriptor in the list - * because of the DMA transfer progression inbetween the two - * reads. - * - * One solution could have been to pause the DMA transfer, read - * the DSCR and CTRLA then resume the DMA transfer. Nonetheless, - * this approach presents some drawbacks: - * - If the DMA transfer is paused, RX overruns or TX underruns - * are more likey to occur depending on the system latency. - * Taking the USART driver as an example, it uses a cyclic DMA - * transfer to read data from the Receive Holding Register - * (RHR) to avoid RX overruns since the RHR is not protected - * by any FIFO on most Atmel SoCs. So pausing the DMA transfer - * to compute the residue would break the USART driver design. - * - The atc_pause() function masks interrupts but we'd rather - * avoid to do so for system latency purpose. - * - * Then we'd rather use another solution: the DSCR is read a - * first time, the CTRLA is read in turn, next the DSCR is read - * a second time. If the two consecutive read values of the DSCR - * are the same then we assume both refers to the very same - * child descriptor as well as the CTRLA value read inbetween - * does. For cyclic tranfers, the assumption is that a full loop - * is "not so fast". - * If the two DSCR values are different, we read again the CTRLA - * then the DSCR till two consecutive read values from DSCR are - * equal or till the maxium trials is reach. - * This algorithm is very unlikely not to find a stable value for - * DSCR. + * If the DSCR register value has not changed inside the DMA + * controller since the previous read, we assume that both the + * dscr and ctrla values refers to the very same descriptor. */ - - dscr = channel_readl(atchan, DSCR); - rmb(); /* ensure DSCR is read before CTRLA */ - ctrla = channel_readl(atchan, CTRLA); - for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) { - u32 new_dscr; - - rmb(); /* ensure DSCR is read after CTRLA */ - new_dscr = channel_readl(atchan, DSCR); - - /* - * If the DSCR register value has not changed inside the - * DMA controller since the previous read, we assume - * that both the dscr and ctrla values refers to the - * very same descriptor. - */ - if (likely(new_dscr == dscr)) - break; - - /* - * DSCR has changed inside the DMA controller, so the - * previouly read value of CTRLA may refer to an already - * processed descriptor hence could be outdated. - * We need to update ctrla to match the current - * descriptor. 
- */
-			dscr = new_dscr;
-			rmb(); /* ensure DSCR is read before CTRLA */
-			ctrla = channel_readl(atchan, CTRLA);
-		}
-		if (unlikely(i == ATC_MAX_DSCR_TRIALS))
-			return -ETIMEDOUT;
-
-		/* for the first descriptor we can be more accurate */
-		if (desc_first->lli.dscr == dscr)
-			return atc_calc_bytes_left(ret, ctrla);
-
-		ret -= desc_first->len;
-		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
-			if (desc->lli.dscr == dscr)
-				break;
-
-			ret -= desc->len;
-		}
+		if (likely(new_dscr == dscr))
+			break;
 
 		/*
-		 * For the current descriptor in the chain we can calculate
-		 * the remaining bytes using the channel's register.
+		 * DSCR has changed inside the DMA controller, so the previously
+		 * read value of CTRLA may refer to an already processed
+		 * descriptor hence could be outdated. We need to update ctrla
+		 * to match the current descriptor.
 		 */
-		ret = atc_calc_bytes_left(ret, ctrla);
-	} else {
-		/* single transfer */
+		dscr = new_dscr;
+		rmb(); /* ensure DSCR is read before CTRLA */
 		ctrla = channel_readl(atchan, CTRLA);
-		ret = atc_calc_bytes_left(ret, ctrla);
 	}
+	if (unlikely(i == ATC_MAX_DSCR_TRIALS))
+		return -ETIMEDOUT;
 
-	return ret;
-}
-
-/**
- * atc_chain_complete - finish work for one transaction chain
- * @atchan: channel we work on
- * @desc: descriptor at the head of the chain we want do complete
- */
-static void
-atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
-{
-	struct dma_async_tx_descriptor *txd = &desc->txd;
-	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
-	unsigned long flags;
-
-	dev_vdbg(chan2dev(&atchan->chan_common),
-		 "descriptor %u complete\n", txd->cookie);
-
-	spin_lock_irqsave(&atchan->lock, flags);
-
-	/* mark the descriptor as complete for non cyclic cases only */
-	if (!atc_chan_is_cyclic(atchan))
-		dma_cookie_complete(txd);
-
-	spin_unlock_irqrestore(&atchan->lock, flags);
-
-	dma_descriptor_unmap(txd);
-	/* for cyclic transfers,
-	 * no need to replay callback function while stopping */
-	if (!atc_chan_is_cyclic(atchan))
-		dmaengine_desc_get_callback_invoke(txd, NULL);
+	/* For the first descriptor we can be more accurate. */
+	if (desc->sg[0].lli->dscr == dscr) {
+		*residue = atc_calc_bytes_left(len, ctrla);
+		return 0;
+	}
+	len -= desc->sg[0].len;
 
-	dma_run_dependencies(txd);
+	for (i = 1; i < desc->sglen; i++) {
+		if (desc->sg[i].lli && desc->sg[i].lli->dscr == dscr)
+			break;
+		len -= desc->sg[i].len;
+	}
 
-	spin_lock_irqsave(&atchan->lock, flags);
-	/* move children to free_list */
-	list_splice_init(&desc->tx_list, &atchan->free_list);
-	/* add myself to free_list */
-	list_add(&desc->desc_node, &atchan->free_list);
-	spin_unlock_irqrestore(&atchan->lock, flags);
+	/*
+	 * For the current LLI in the chain we can calculate the remaining bytes
+	 * using the channel's CTRLA register.
	 */
+	*residue = atc_calc_bytes_left(len, ctrla);
+	return 0;
-	/* If the transfer was a memset, free our temporary buffer */
-	if (desc->memset_buffer) {
-		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
-			      desc->memset_paddr);
-		desc->memset_buffer = false;
-	}
 }
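[Editor's note: the retry loop that just closed is a general pattern for sampling two registers that cannot be read atomically: read DSCR, read CTRLA, re-read DSCR, and accept the pair only once two consecutive DSCR samples agree. A minimal userspace model of the same loop; the toy device below, whose fake_dscr advances for the first few reads, is invented for the example, and the kernel's rmb() barriers are omitted since plain function calls already order the accesses here.]

#include <stdio.h>

#define MAX_TRIALS 10

/* Toy device model: DSCR advances for a while, as if the DMA engine were
 * walking its LLI chain while we sample it. */
static unsigned int fake_dscr = 0x1000, fake_ctrla = 0x40, reads;

static unsigned int read_dscr(void)
{
	if (reads++ < 3)	/* device still moving on the first reads */
		fake_dscr += 0x10;
	return fake_dscr;
}

static unsigned int read_ctrla(void)
{
	return fake_ctrla;
}

/* Same shape as the driver's loop: DSCR, then CTRLA, then DSCR again,
 * retrying until two consecutive DSCR samples match. */
static int sample_pair(unsigned int *dscr, unsigned int *ctrla)
{
	unsigned int i, cur, next;

	cur = read_dscr();
	*ctrla = read_ctrla();
	for (i = 0; i < MAX_TRIALS; i++) {
		next = read_dscr();
		if (next == cur) {
			*dscr = cur;
			return 0;	/* stable: ctrla belongs to this LLI */
		}
		cur = next;		/* device moved on: resample ctrla */
		*ctrla = read_ctrla();
	}
	return -1;			/* no stable sample within budget */
}

int main(void)
{
	unsigned int dscr, ctrla;

	if (!sample_pair(&dscr, &ctrla))
		printf("stable dscr=0x%x ctrla=0x%x after %u reads\n",
		       dscr, ctrla, reads);
	return 0;
}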
 /**
- * atc_advance_work - at the end of a transaction, move forward
- * @atchan: channel where the transaction ended
+ * atc_get_residue - get the residue, in bytes, for a cookie.
+ * The residue is passed by address and updated on success.
+ * @chan: DMA channel
+ * @cookie: transaction identifier to check status of
+ * @residue: residue to be updated.
+ * Return 0 on success, -errno otherwise.
 */
-static void atc_advance_work(struct at_dma_chan *atchan)
+static int atc_get_residue(struct dma_chan *chan, dma_cookie_t cookie,
+			   u32 *residue)
 {
-	struct at_desc *desc;
-	unsigned long flags;
+	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	struct at_desc *desc = NULL;
+	u32 len, ctrla;
 
-	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
+	vd = vchan_find_desc(&atchan->vc, cookie);
+	if (vd)
+		desc = to_atdma_desc(&vd->tx);
+	else if (atchan->desc && atchan->desc->vd.tx.cookie == cookie)
+		desc = atchan->desc;
 
-	spin_lock_irqsave(&atchan->lock, flags);
-	if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
-		return spin_unlock_irqrestore(&atchan->lock, flags);
+	if (!desc)
+		return -EINVAL;
 
-	desc = atc_first_active(atchan);
-	/* Remove the transfer node from the active list. */
-	list_del_init(&desc->desc_node);
-	spin_unlock_irqrestore(&atchan->lock, flags);
-	atc_chain_complete(atchan, desc);
+	if (desc->sg[0].lli->dscr)
+		/* hardware linked list transfer */
+		return atc_get_llis_residue(atchan, desc, residue);
 
-	/* advance work */
-	spin_lock_irqsave(&atchan->lock, flags);
-	if (!list_empty(&atchan->active_list)) {
-		desc = atc_first_queued(atchan);
-		list_move_tail(&desc->desc_node, &atchan->active_list);
-		atc_dostart(atchan, desc);
-	}
-	spin_unlock_irqrestore(&atchan->lock, flags);
+	/* single transfer */
+	len = desc->total_len;
+	ctrla = channel_readl(atchan, CTRLA);
+	*residue = atc_calc_bytes_left(len, ctrla);
+	return 0;
 }
 
-
 /**
  * atc_handle_error - handle errors reported by DMA controller
- * @atchan: channel where error occurs
+ * @atchan: channel where error occurs.
+ * @i: channel index
 */
-static void atc_handle_error(struct at_dma_chan *atchan)
+static void atc_handle_error(struct at_dma_chan *atchan, unsigned int i)
 {
-	struct at_desc *bad_desc;
-	struct at_desc *desc;
-	struct at_desc *child;
-	unsigned long flags;
+	struct at_desc *desc = atchan->desc;
 
-	spin_lock_irqsave(&atchan->lock, flags);
-	/*
-	 * The descriptor currently at the head of the active list is
-	 * broked. Since we don't have any way to report errors, we'll
-	 * just have to scream loudly and try to carry on.
-	 */
-	bad_desc = atc_first_active(atchan);
-	list_del_init(&bad_desc->desc_node);
-
-	/* Try to restart the controller */
-	if (!list_empty(&atchan->active_list)) {
-		desc = atc_first_queued(atchan);
-		list_move_tail(&desc->desc_node, &atchan->active_list);
-		atc_dostart(atchan, desc);
-	}
+	/* Disable channel on AHB error */
+	dma_writel(atchan->atdma, CHDR, AT_DMA_RES(i) | atchan->mask);
 
 	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
@@ -556,54 +777,42 @@ static void atc_handle_error(struct at_dma_chan *atchan)
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
*/ - dev_crit(chan2dev(&atchan->chan_common), - "Bad descriptor submitted for DMA!\n"); - dev_crit(chan2dev(&atchan->chan_common), - " cookie: %d\n", bad_desc->txd.cookie); - atc_dump_lli(atchan, &bad_desc->lli); - list_for_each_entry(child, &bad_desc->tx_list, desc_node) - atc_dump_lli(atchan, &child->lli); - - spin_unlock_irqrestore(&atchan->lock, flags); - - /* Pretend the descriptor completed successfully */ - atc_chain_complete(atchan, bad_desc); + dev_crit(chan2dev(&atchan->vc.chan), "Bad descriptor submitted for DMA!\n"); + dev_crit(chan2dev(&atchan->vc.chan), "cookie: %d\n", + desc->vd.tx.cookie); + for (i = 0; i < desc->sglen; i++) + atc_dump_lli(atchan, desc->sg[i].lli); } -/** - * atc_handle_cyclic - at the end of a period, run callback function - * @atchan: channel used for cyclic operations - */ -static void atc_handle_cyclic(struct at_dma_chan *atchan) +static void atdma_handle_chan_done(struct at_dma_chan *atchan, u32 pending, + unsigned int i) { - struct at_desc *first = atc_first_active(atchan); - struct dma_async_tx_descriptor *txd = &first->txd; - - dev_vdbg(chan2dev(&atchan->chan_common), - "new cyclic period llp 0x%08x\n", - channel_readl(atchan, DSCR)); - - dmaengine_desc_get_callback_invoke(txd, NULL); -} - -/*-- IRQ & Tasklet ---------------------------------------------------*/ - -static void atc_tasklet(struct tasklet_struct *t) -{ - struct at_dma_chan *atchan = from_tasklet(atchan, t, tasklet); + struct at_desc *desc; - if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) - return atc_handle_error(atchan); + spin_lock(&atchan->vc.lock); + desc = atchan->desc; - if (atc_chan_is_cyclic(atchan)) - return atc_handle_cyclic(atchan); + if (desc) { + if (pending & AT_DMA_ERR(i)) { + atc_handle_error(atchan, i); + /* Pretend the descriptor completed successfully */ + } - atc_advance_work(atchan); + if (atc_chan_is_cyclic(atchan)) { + vchan_cyclic_callback(&desc->vd); + } else { + vchan_cookie_complete(&desc->vd); + atchan->desc = NULL; + if (!(atc_chan_is_enabled(atchan))) + atc_dostart(atchan); + } + } + spin_unlock(&atchan->vc.lock); } static irqreturn_t at_dma_interrupt(int irq, void *dev_id) { - struct at_dma *atdma = (struct at_dma *)dev_id; + struct at_dma *atdma = dev_id; struct at_dma_chan *atchan; int i; u32 status, pending, imr; @@ -617,23 +826,16 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) if (!pending) break; - dev_vdbg(atdma->dma_common.dev, + dev_vdbg(atdma->dma_device.dev, "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n", status, imr, pending); - for (i = 0; i < atdma->dma_common.chancnt; i++) { + for (i = 0; i < atdma->dma_device.chancnt; i++) { atchan = &atdma->chan[i]; - if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) { - if (pending & AT_DMA_ERR(i)) { - /* Disable channel on AHB error */ - dma_writel(atdma, CHDR, - AT_DMA_RES(i) | atchan->mask); - /* Give information to tasklet */ - set_bit(ATC_IS_ERROR, &atchan->status); - } - tasklet_schedule(&atchan->tasklet); - ret = IRQ_HANDLED; - } + if (!(pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i)))) + continue; + atdma_handle_chan_done(atchan, pending, i); + ret = IRQ_HANDLED; } } while (pending); @@ -641,35 +843,7 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) return ret; } - /*-- DMA Engine API --------------------------------------------------*/ - -/** - * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine - * @tx: descriptor at the head of the transaction chain - * - * Queue chain if DMA engine is working already - * - * Cookie increment and adding to 
active_list or queue must be atomic - */ -static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct at_desc *desc = txd_to_at_desc(tx); - struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); - dma_cookie_t cookie; - unsigned long flags; - - spin_lock_irqsave(&atchan->lock, flags); - cookie = dma_cookie_assign(tx); - - list_add_tail(&desc->desc_node, &atchan->queue); - spin_unlock_irqrestore(&atchan->lock, flags); - - dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", - desc->txd.cookie); - return cookie; -} - /** * atc_prep_dma_interleaved - prepare memory to memory interleaved operation * @chan: the channel to prepare operation on @@ -681,9 +855,12 @@ atc_prep_dma_interleaved(struct dma_chan *chan, struct dma_interleaved_template *xt, unsigned long flags) { + struct at_dma *atdma = to_at_dma(chan->device); struct at_dma_chan *atchan = to_at_dma_chan(chan); struct data_chunk *first; - struct at_desc *desc = NULL; + struct atdma_sg *atdma_sg; + struct at_desc *desc; + struct at_lli *lli; size_t xfer_count; unsigned int dwidth; u32 ctrla; @@ -722,8 +899,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan, len += chunk->size; } - dwidth = atc_get_xfer_width(xt->src_start, - xt->dst_start, len); + dwidth = atc_get_xfer_width(xt->src_start, xt->dst_start, len); xfer_count = len >> dwidth; if (xfer_count > ATC_BTSIZE_MAX) { @@ -731,42 +907,43 @@ atc_prep_dma_interleaved(struct dma_chan *chan, return NULL; } - ctrla = ATC_SRC_WIDTH(dwidth) | - ATC_DST_WIDTH(dwidth); - - ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN - | ATC_SRC_ADDR_MODE_INCR - | ATC_DST_ADDR_MODE_INCR - | ATC_SRC_PIP - | ATC_DST_PIP - | ATC_FC_MEM2MEM; - - /* create the transfer */ - desc = atc_desc_get(atchan); - if (!desc) { - dev_err(chan2dev(chan), - "%s: couldn't allocate our descriptor\n", __func__); + ctrla = FIELD_PREP(ATC_SRC_WIDTH, dwidth) | + FIELD_PREP(ATC_DST_WIDTH, dwidth); + + ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | + FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) | + FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) | + ATC_SRC_PIP | ATC_DST_PIP | + FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM); + + desc = kzalloc(struct_size(desc, sg, 1), GFP_ATOMIC); + if (!desc) + return NULL; + desc->sglen = 1; + + atdma_sg = desc->sg; + atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_NOWAIT, + &atdma_sg->lli_phys); + if (!atdma_sg->lli) { + kfree(desc); return NULL; } + lli = atdma_sg->lli; - desc->lli.saddr = xt->src_start; - desc->lli.daddr = xt->dst_start; - desc->lli.ctrla = ctrla | xfer_count; - desc->lli.ctrlb = ctrlb; + lli->saddr = xt->src_start; + lli->daddr = xt->dst_start; + lli->ctrla = ctrla | xfer_count; + lli->ctrlb = ctrlb; desc->boundary = first->size >> dwidth; desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1; desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1; - desc->txd.cookie = -EBUSY; - desc->total_len = desc->len = len; - - /* set end-of-link to the last link descriptor of list*/ - set_desc_eol(desc); - - desc->txd.flags = flags; /* client is in control of this ack */ + atdma_sg->len = len; + desc->total_len = len; - return &desc->txd; + set_lli_eol(desc, 0); + return vchan_tx_prep(&atchan->vc, &desc->vd, flags); } /** @@ -781,29 +958,36 @@ static struct dma_async_tx_descriptor * atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { + struct at_dma *atdma = to_at_dma(chan->device); struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_desc *desc = NULL; - struct at_desc *first = NULL; - 
struct at_desc *prev = NULL; size_t xfer_count; size_t offset; + size_t sg_len; unsigned int src_width; unsigned int dst_width; + unsigned int i; u32 ctrla; u32 ctrlb; - dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n", - &dest, &src, len, flags); + dev_dbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n", + &dest, &src, len, flags); if (unlikely(!len)) { - dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); + dev_err(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); return NULL; } - ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN - | ATC_SRC_ADDR_MODE_INCR - | ATC_DST_ADDR_MODE_INCR - | ATC_FC_MEM2MEM; + sg_len = DIV_ROUND_UP(len, ATC_BTSIZE_MAX); + desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC); + if (!desc) + return NULL; + desc->sglen = sg_len; + + ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | + FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) | + FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) | + FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM); /* * We can be a lot more clever here, but this should take care @@ -811,82 +995,78 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, */ src_width = dst_width = atc_get_xfer_width(src, dest, len); - ctrla = ATC_SRC_WIDTH(src_width) | - ATC_DST_WIDTH(dst_width); + ctrla = FIELD_PREP(ATC_SRC_WIDTH, src_width) | + FIELD_PREP(ATC_DST_WIDTH, dst_width); - for (offset = 0; offset < len; offset += xfer_count << src_width) { - xfer_count = min_t(size_t, (len - offset) >> src_width, - ATC_BTSIZE_MAX); + for (offset = 0, i = 0; offset < len; + offset += xfer_count << src_width, i++) { + struct atdma_sg *atdma_sg = &desc->sg[i]; + struct at_lli *lli; - desc = atc_desc_get(atchan); - if (!desc) + atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_NOWAIT, + &atdma_sg->lli_phys); + if (!atdma_sg->lli) goto err_desc_get; + lli = atdma_sg->lli; + + xfer_count = min_t(size_t, (len - offset) >> src_width, + ATC_BTSIZE_MAX); - desc->lli.saddr = src + offset; - desc->lli.daddr = dest + offset; - desc->lli.ctrla = ctrla | xfer_count; - desc->lli.ctrlb = ctrlb; + lli->saddr = src + offset; + lli->daddr = dest + offset; + lli->ctrla = ctrla | xfer_count; + lli->ctrlb = ctrlb; - desc->txd.cookie = 0; - desc->len = xfer_count << src_width; + desc->sg[i].len = xfer_count << src_width; - atc_desc_chain(&first, &prev, desc); + atdma_lli_chain(desc, i); } - /* First descriptor of the chain embedds additional information */ - first->txd.cookie = -EBUSY; - first->total_len = len; + desc->total_len = len; /* set end-of-link to the last link descriptor of list*/ - set_desc_eol(desc); - - first->txd.flags = flags; /* client is in control of this ack */ + set_lli_eol(desc, i - 1); - return &first->txd; + return vchan_tx_prep(&atchan->vc, &desc->vd, flags); err_desc_get: - atc_desc_put(atchan, first); + atdma_desc_free(&desc->vd); return NULL; } -static struct at_desc *atc_create_memset_desc(struct dma_chan *chan, - dma_addr_t psrc, - dma_addr_t pdst, - size_t len) +static int atdma_create_memset_lli(struct dma_chan *chan, + struct atdma_sg *atdma_sg, + dma_addr_t psrc, dma_addr_t pdst, size_t len) { - struct at_dma_chan *atchan = to_at_dma_chan(chan); - struct at_desc *desc; + struct at_dma *atdma = to_at_dma(chan->device); + struct at_lli *lli; size_t xfer_count; - - u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2); + u32 ctrla = FIELD_PREP(ATC_SRC_WIDTH, 2) | FIELD_PREP(ATC_DST_WIDTH, 2); u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | - ATC_SRC_ADDR_MODE_FIXED | - ATC_DST_ADDR_MODE_INCR | - ATC_FC_MEM2MEM; + 
FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_FIXED) | + FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) | + FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM); xfer_count = len >> 2; if (xfer_count > ATC_BTSIZE_MAX) { - dev_err(chan2dev(chan), "%s: buffer is too big\n", - __func__); - return NULL; + dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__); + return -EINVAL; } - desc = atc_desc_get(atchan); - if (!desc) { - dev_err(chan2dev(chan), "%s: can't get a descriptor\n", - __func__); - return NULL; - } + atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_NOWAIT, + &atdma_sg->lli_phys); + if (!atdma_sg->lli) + return -ENOMEM; + lli = atdma_sg->lli; - desc->lli.saddr = psrc; - desc->lli.daddr = pdst; - desc->lli.ctrla = ctrla | xfer_count; - desc->lli.ctrlb = ctrlb; + lli->saddr = psrc; + lli->daddr = pdst; + lli->ctrla = ctrla | xfer_count; + lli->ctrlb = ctrlb; - desc->txd.cookie = 0; - desc->len = len; + atdma_sg->len = len; - return desc; + return 0; } /** @@ -901,11 +1081,13 @@ static struct dma_async_tx_descriptor * atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) { + struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); struct at_desc *desc; void __iomem *vaddr; dma_addr_t paddr; char fill_pattern; + int ret; dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__, &dest, value, len, flags); @@ -936,27 +1118,28 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, (fill_pattern << 8) | fill_pattern; - desc = atc_create_memset_desc(chan, paddr, dest, len); - if (!desc) { - dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n", - __func__); + desc = kzalloc(struct_size(desc, sg, 1), GFP_ATOMIC); + if (!desc) goto err_free_buffer; - } + desc->sglen = 1; + + ret = atdma_create_memset_lli(chan, desc->sg, paddr, dest, len); + if (ret) + goto err_free_desc; desc->memset_paddr = paddr; desc->memset_vaddr = vaddr; desc->memset_buffer = true; - desc->txd.cookie = -EBUSY; desc->total_len = len; /* set end-of-link on the descriptor */ - set_desc_eol(desc); - - desc->txd.flags = flags; + set_lli_eol(desc, 0); - return &desc->txd; + return vchan_tx_prep(&atchan->vc, &desc->vd, flags); +err_free_desc: + kfree(desc); err_free_buffer: dma_pool_free(atdma->memset_pool, vaddr, paddr); return NULL; @@ -970,12 +1153,13 @@ atc_prep_dma_memset_sg(struct dma_chan *chan, { struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); - struct at_desc *desc = NULL, *first = NULL, *prev = NULL; + struct at_desc *desc; struct scatterlist *sg; void __iomem *vaddr; dma_addr_t paddr; size_t total_len = 0; int i; + int ret; dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__, value, sg_len, flags); @@ -994,6 +1178,11 @@ atc_prep_dma_memset_sg(struct dma_chan *chan, } *(u32*)vaddr = value; + desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC); + if (!desc) + goto err_free_dma_buf; + desc->sglen = sg_len; + for_each_sg(sgl, sg, sg_len, i) { dma_addr_t dest = sg_dma_address(sg); size_t len = sg_dma_len(sg); @@ -1004,38 +1193,33 @@ atc_prep_dma_memset_sg(struct dma_chan *chan, if (!is_dma_fill_aligned(chan->device, dest, 0, len)) { dev_err(chan2dev(chan), "%s: buffer is not aligned\n", __func__); - goto err_put_desc; + goto err_free_desc; } - desc = atc_create_memset_desc(chan, paddr, dest, len); - if (!desc) - goto err_put_desc; - - atc_desc_chain(&first, &prev, desc); + ret = atdma_create_memset_lli(chan, &desc->sg[i], paddr, 
dest, + len); + if (ret) + goto err_free_desc; + atdma_lli_chain(desc, i); total_len += len; } - /* - * Only set the buffer pointers on the last descriptor to - * avoid free'ing while we have our transfer still going - */ desc->memset_paddr = paddr; desc->memset_vaddr = vaddr; desc->memset_buffer = true; - first->txd.cookie = -EBUSY; - first->total_len = total_len; + desc->total_len = total_len; /* set end-of-link on the descriptor */ - set_desc_eol(desc); - - first->txd.flags = flags; + set_lli_eol(desc, i - 1); - return &first->txd; + return vchan_tx_prep(&atchan->vc, &desc->vd, flags); -err_put_desc: - atc_desc_put(atchan, first); +err_free_desc: + atdma_desc_free(&desc->vd); +err_free_dma_buf: + dma_pool_free(atdma->memset_pool, vaddr, paddr); return NULL; } @@ -1053,11 +1237,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { + struct at_dma *atdma = to_at_dma(chan->device); struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma_slave *atslave = chan->private; struct dma_slave_config *sconfig = &atchan->dma_sconfig; - struct at_desc *first = NULL; - struct at_desc *prev = NULL; + struct at_desc *desc; u32 ctrla; u32 ctrlb; dma_addr_t reg; @@ -1077,27 +1261,38 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, return NULL; } - ctrla = ATC_SCSIZE(sconfig->src_maxburst) - | ATC_DCSIZE(sconfig->dst_maxburst); + desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC); + if (!desc) + return NULL; + desc->sglen = sg_len; + + ctrla = FIELD_PREP(ATC_SCSIZE, sconfig->src_maxburst) | + FIELD_PREP(ATC_DCSIZE, sconfig->dst_maxburst); ctrlb = ATC_IEN; switch (direction) { case DMA_MEM_TO_DEV: reg_width = convert_buswidth(sconfig->dst_addr_width); - ctrla |= ATC_DST_WIDTH(reg_width); - ctrlb |= ATC_DST_ADDR_MODE_FIXED - | ATC_SRC_ADDR_MODE_INCR - | ATC_FC_MEM2PER - | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if); + ctrla |= FIELD_PREP(ATC_DST_WIDTH, reg_width); + ctrlb |= FIELD_PREP(ATC_DST_ADDR_MODE, + ATC_DST_ADDR_MODE_FIXED) | + FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) | + FIELD_PREP(ATC_FC, ATC_FC_MEM2PER) | + FIELD_PREP(ATC_SIF, atchan->mem_if) | + FIELD_PREP(ATC_DIF, atchan->per_if); reg = sconfig->dst_addr; for_each_sg(sgl, sg, sg_len, i) { - struct at_desc *desc; + struct atdma_sg *atdma_sg = &desc->sg[i]; + struct at_lli *lli; u32 len; u32 mem; - desc = atc_desc_get(atchan); - if (!desc) + atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, + GFP_NOWAIT, + &atdma_sg->lli_phys); + if (!atdma_sg->lli) goto err_desc_get; + lli = atdma_sg->lli; mem = sg_dma_address(sg); len = sg_dma_len(sg); @@ -1110,35 +1305,43 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (unlikely(mem & 3 || len & 3)) mem_width = 0; - desc->lli.saddr = mem; - desc->lli.daddr = reg; - desc->lli.ctrla = ctrla - | ATC_SRC_WIDTH(mem_width) - | len >> mem_width; - desc->lli.ctrlb = ctrlb; - desc->len = len; + lli->saddr = mem; + lli->daddr = reg; + lli->ctrla = ctrla | + FIELD_PREP(ATC_SRC_WIDTH, mem_width) | + len >> mem_width; + lli->ctrlb = ctrlb; - atc_desc_chain(&first, &prev, desc); + atdma_sg->len = len; total_len += len; + + desc->sg[i].len = len; + atdma_lli_chain(desc, i); } break; case DMA_DEV_TO_MEM: reg_width = convert_buswidth(sconfig->src_addr_width); - ctrla |= ATC_SRC_WIDTH(reg_width); - ctrlb |= ATC_DST_ADDR_MODE_INCR - | ATC_SRC_ADDR_MODE_FIXED - | ATC_FC_PER2MEM - | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if); + ctrla |= 
FIELD_PREP(ATC_SRC_WIDTH, reg_width); + ctrlb |= FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) | + FIELD_PREP(ATC_SRC_ADDR_MODE, + ATC_SRC_ADDR_MODE_FIXED) | + FIELD_PREP(ATC_FC, ATC_FC_PER2MEM) | + FIELD_PREP(ATC_SIF, atchan->per_if) | + FIELD_PREP(ATC_DIF, atchan->mem_if); reg = sconfig->src_addr; for_each_sg(sgl, sg, sg_len, i) { - struct at_desc *desc; + struct atdma_sg *atdma_sg = &desc->sg[i]; + struct at_lli *lli; u32 len; u32 mem; - desc = atc_desc_get(atchan); - if (!desc) + atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, + GFP_NOWAIT, + &atdma_sg->lli_phys); + if (!atdma_sg->lli) goto err_desc_get; + lli = atdma_sg->lli; mem = sg_dma_address(sg); len = sg_dma_len(sg); @@ -1151,16 +1354,17 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (unlikely(mem & 3 || len & 3)) mem_width = 0; - desc->lli.saddr = reg; - desc->lli.daddr = mem; - desc->lli.ctrla = ctrla - | ATC_DST_WIDTH(mem_width) - | len >> reg_width; - desc->lli.ctrlb = ctrlb; - desc->len = len; + lli->saddr = reg; + lli->daddr = mem; + lli->ctrla = ctrla | + FIELD_PREP(ATC_DST_WIDTH, mem_width) | + len >> reg_width; + lli->ctrlb = ctrlb; - atc_desc_chain(&first, &prev, desc); + desc->sg[i].len = len; total_len += len; + + atdma_lli_chain(desc, i); } break; default: @@ -1168,21 +1372,16 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, } /* set end-of-link to the last link descriptor of list*/ - set_desc_eol(prev); + set_lli_eol(desc, i - 1); - /* First descriptor of the chain embedds additional information */ - first->txd.cookie = -EBUSY; - first->total_len = total_len; + desc->total_len = total_len; - /* first link descriptor of list is responsible of flags */ - first->txd.flags = flags; /* client is in control of this ack */ - - return &first->txd; + return vchan_tx_prep(&atchan->vc, &desc->vd, flags); err_desc_get: dev_err(chan2dev(chan), "not enough descriptors available\n"); err: - atc_desc_put(atchan, first); + atdma_desc_free(&desc->vd); return NULL; } @@ -1212,50 +1411,59 @@ err_out: */ static int atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, - unsigned int period_index, dma_addr_t buf_addr, + unsigned int i, dma_addr_t buf_addr, unsigned int reg_width, size_t period_len, enum dma_transfer_direction direction) { + struct at_dma *atdma = to_at_dma(chan->device); struct at_dma_chan *atchan = to_at_dma_chan(chan); struct dma_slave_config *sconfig = &atchan->dma_sconfig; - u32 ctrla; + struct atdma_sg *atdma_sg = &desc->sg[i]; + struct at_lli *lli; - /* prepare common CRTLA value */ - ctrla = ATC_SCSIZE(sconfig->src_maxburst) - | ATC_DCSIZE(sconfig->dst_maxburst) - | ATC_DST_WIDTH(reg_width) - | ATC_SRC_WIDTH(reg_width) - | period_len >> reg_width; + atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_ATOMIC, + &atdma_sg->lli_phys); + if (!atdma_sg->lli) + return -ENOMEM; + lli = atdma_sg->lli; switch (direction) { case DMA_MEM_TO_DEV: - desc->lli.saddr = buf_addr + (period_len * period_index); - desc->lli.daddr = sconfig->dst_addr; - desc->lli.ctrla = ctrla; - desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED - | ATC_SRC_ADDR_MODE_INCR - | ATC_FC_MEM2PER - | ATC_SIF(atchan->mem_if) - | ATC_DIF(atchan->per_if); - desc->len = period_len; + lli->saddr = buf_addr + (period_len * i); + lli->daddr = sconfig->dst_addr; + lli->ctrlb = FIELD_PREP(ATC_DST_ADDR_MODE, + ATC_DST_ADDR_MODE_FIXED) | + FIELD_PREP(ATC_SRC_ADDR_MODE, + ATC_SRC_ADDR_MODE_INCR) | + FIELD_PREP(ATC_FC, ATC_FC_MEM2PER) | + FIELD_PREP(ATC_SIF, atchan->mem_if) | + FIELD_PREP(ATC_DIF, 
atchan->per_if); + break; case DMA_DEV_TO_MEM: - desc->lli.saddr = sconfig->src_addr; - desc->lli.daddr = buf_addr + (period_len * period_index); - desc->lli.ctrla = ctrla; - desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR - | ATC_SRC_ADDR_MODE_FIXED - | ATC_FC_PER2MEM - | ATC_SIF(atchan->per_if) - | ATC_DIF(atchan->mem_if); - desc->len = period_len; + lli->saddr = sconfig->src_addr; + lli->daddr = buf_addr + (period_len * i); + lli->ctrlb = FIELD_PREP(ATC_DST_ADDR_MODE, + ATC_DST_ADDR_MODE_INCR) | + FIELD_PREP(ATC_SRC_ADDR_MODE, + ATC_SRC_ADDR_MODE_FIXED) | + FIELD_PREP(ATC_FC, ATC_FC_PER2MEM) | + FIELD_PREP(ATC_SIF, atchan->per_if) | + FIELD_PREP(ATC_DIF, atchan->mem_if); break; default: return -EINVAL; } + lli->ctrla = FIELD_PREP(ATC_SCSIZE, sconfig->src_maxburst) | + FIELD_PREP(ATC_DCSIZE, sconfig->dst_maxburst) | + FIELD_PREP(ATC_DST_WIDTH, reg_width) | + FIELD_PREP(ATC_SRC_WIDTH, reg_width) | + period_len >> reg_width; + desc->sg[i].len = period_len; + return 0; } @@ -1276,8 +1484,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma_slave *atslave = chan->private; struct dma_slave_config *sconfig = &atchan->dma_sconfig; - struct at_desc *first = NULL; - struct at_desc *prev = NULL; + struct at_desc *desc; unsigned long was_cyclic; unsigned int reg_width; unsigned int periods = buf_len / period_len; @@ -1311,33 +1518,26 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len)) goto err_out; + desc = kzalloc(struct_size(desc, sg, periods), GFP_ATOMIC); + if (!desc) + goto err_out; + desc->sglen = periods; + /* build cyclic linked list */ for (i = 0; i < periods; i++) { - struct at_desc *desc; - - desc = atc_desc_get(atchan); - if (!desc) - goto err_desc_get; - if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, reg_width, period_len, direction)) - goto err_desc_get; - - atc_desc_chain(&first, &prev, desc); + goto err_fill_desc; + atdma_lli_chain(desc, i); } - + desc->total_len = buf_len; /* lets make a cyclic list */ - prev->lli.dscr = first->txd.phys; + desc->sg[i - 1].lli->dscr = desc->sg[0].lli_phys; - /* First descriptor of the chain embedds additional information */ - first->txd.cookie = -EBUSY; - first->total_len = buf_len; + return vchan_tx_prep(&atchan->vc, &desc->vd, flags); - return &first->txd; - -err_desc_get: - dev_err(chan2dev(chan), "not enough descriptors available\n"); - atc_desc_put(atchan, first); +err_fill_desc: + atdma_desc_free(&desc->vd); err_out: clear_bit(ATC_IS_CYCLIC, &atchan->status); return NULL; @@ -1366,17 +1566,17 @@ static int atc_pause(struct dma_chan *chan) { struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); - int chan_id = atchan->chan_common.chan_id; + int chan_id = atchan->vc.chan.chan_id; unsigned long flags; dev_vdbg(chan2dev(chan), "%s\n", __func__); - spin_lock_irqsave(&atchan->lock, flags); + spin_lock_irqsave(&atchan->vc.lock, flags); dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); set_bit(ATC_IS_PAUSED, &atchan->status); - spin_unlock_irqrestore(&atchan->lock, flags); + spin_unlock_irqrestore(&atchan->vc.lock, flags); return 0; } @@ -1385,7 +1585,7 @@ static int atc_resume(struct dma_chan *chan) { struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); - int chan_id = atchan->chan_common.chan_id; + int chan_id = atchan->vc.chan.chan_id; unsigned long flags; 
dev_vdbg(chan2dev(chan), "%s\n", __func__); @@ -1393,12 +1593,12 @@ static int atc_resume(struct dma_chan *chan) if (!atc_chan_is_paused(atchan)) return 0; - spin_lock_irqsave(&atchan->lock, flags); + spin_lock_irqsave(&atchan->vc.lock, flags); dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); clear_bit(ATC_IS_PAUSED, &atchan->status); - spin_unlock_irqrestore(&atchan->lock, flags); + spin_unlock_irqrestore(&atchan->vc.lock, flags); return 0; } @@ -1407,9 +1607,11 @@ static int atc_terminate_all(struct dma_chan *chan) { struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); - int chan_id = atchan->chan_common.chan_id; + int chan_id = atchan->vc.chan.chan_id; unsigned long flags; + LIST_HEAD(list); + dev_vdbg(chan2dev(chan), "%s\n", __func__); /* @@ -1418,7 +1620,7 @@ static int atc_terminate_all(struct dma_chan *chan) * channel. We still have to poll the channel enable bit due * to AHB/HSB limitations. */ - spin_lock_irqsave(&atchan->lock, flags); + spin_lock_irqsave(&atchan->vc.lock, flags); /* disabling channel: must also remove suspend state */ dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); @@ -1427,15 +1629,20 @@ static int atc_terminate_all(struct dma_chan *chan) while (dma_readl(atdma, CHSR) & atchan->mask) cpu_relax(); - /* active_list entries will end up before queued entries */ - list_splice_tail_init(&atchan->queue, &atchan->free_list); - list_splice_tail_init(&atchan->active_list, &atchan->free_list); + if (atchan->desc) { + vchan_terminate_vdesc(&atchan->desc->vd); + atchan->desc = NULL; + } + + vchan_get_all_descriptors(&atchan->vc, &list); clear_bit(ATC_IS_PAUSED, &atchan->status); /* if channel dedicated to cyclic operations, free it */ clear_bit(ATC_IS_CYCLIC, &atchan->status); - spin_unlock_irqrestore(&atchan->lock, flags); + spin_unlock_irqrestore(&atchan->vc.lock, flags); + + vchan_dma_desc_free_list(&atchan->vc, &list); return 0; } @@ -1457,60 +1664,43 @@ atc_tx_status(struct dma_chan *chan, { struct at_dma_chan *atchan = to_at_dma_chan(chan); unsigned long flags; - enum dma_status ret; - int bytes = 0; - - ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_COMPLETE) - return ret; - /* - * There's no point calculating the residue if there's - * no txstate to store the value. - */ - if (!txstate) - return DMA_ERROR; + enum dma_status dma_status; + u32 residue; + int ret; - spin_lock_irqsave(&atchan->lock, flags); + dma_status = dma_cookie_status(chan, cookie, txstate); + if (dma_status == DMA_COMPLETE || !txstate) + return dma_status; + spin_lock_irqsave(&atchan->vc.lock, flags); /* Get number of bytes left in the active transactions */ - bytes = atc_get_bytes_left(chan, cookie); - - spin_unlock_irqrestore(&atchan->lock, flags); + ret = atc_get_residue(chan, cookie, &residue); + spin_unlock_irqrestore(&atchan->vc.lock, flags); - if (unlikely(bytes < 0)) { + if (unlikely(ret < 0)) { dev_vdbg(chan2dev(chan), "get residual bytes error\n"); return DMA_ERROR; } else { - dma_set_residue(txstate, bytes); + dma_set_residue(txstate, residue); } - dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n", - ret, cookie, bytes); + dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %u\n", + dma_status, cookie, residue); - return ret; + return dma_status; } -/** - * atc_issue_pending - takes the first transaction descriptor in the pending - * queue and starts the transfer. 
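These pause/resume/terminate/tx_status hunks complete the move onto the core virt-dma layer: the private channel spinlock and the active/queue/free lists give way to struct virt_dma_chan, its vc.lock, and the vchan_*() helpers. A sketch of a terminate path in that idiom, with demo naming rather than the driver's exact code:

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static int demo_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	/* Collect everything submitted or issued on this channel. */
	vchan_get_all_descriptors(vc, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	/* Invokes vc->desc_free() on each entry, outside the lock. */
	vchan_dma_desc_free_list(vc, &head);

	return 0;
}

An in-flight descriptor is detached first with vchan_terminate_vdesc(), as atc_terminate_all() above does, so the core can reclaim it safely later.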
- * @chan: target DMA channel - */ static void atc_issue_pending(struct dma_chan *chan) { struct at_dma_chan *atchan = to_at_dma_chan(chan); - struct at_desc *desc; unsigned long flags; - dev_vdbg(chan2dev(chan), "issue_pending\n"); - - spin_lock_irqsave(&atchan->lock, flags); - if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue)) - return spin_unlock_irqrestore(&atchan->lock, flags); - - desc = atc_first_queued(atchan); - list_move_tail(&desc->desc_node, &atchan->active_list); - atc_dostart(atchan, desc); - spin_unlock_irqrestore(&atchan->lock, flags); + spin_lock_irqsave(&atchan->vc.lock, flags); + if (vchan_issue_pending(&atchan->vc) && !atchan->desc) { + if (!(atc_chan_is_enabled(atchan))) + atc_dostart(atchan); + } + spin_unlock_irqrestore(&atchan->vc.lock, flags); } /** @@ -1523,9 +1713,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) { struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); - struct at_desc *desc; struct at_dma_slave *atslave; - int i; u32 cfg; dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); @@ -1536,11 +1724,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) return -EIO; } - if (!list_empty(&atchan->free_list)) { - dev_dbg(chan2dev(chan), "can't allocate channel resources (channel not freed from a previous use)\n"); - return -EIO; - } - cfg = ATC_DEFAULT_CFG; atslave = chan->private; @@ -1549,33 +1732,17 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) * We need controller-specific data to set up slave * transfers. */ - BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); + BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_device.dev); /* if cfg configuration specified take it instead of default */ if (atslave->cfg) cfg = atslave->cfg; } - /* Allocate initial pool of descriptors */ - for (i = 0; i < init_nr_desc_per_channel; i++) { - desc = atc_alloc_descriptor(chan, GFP_KERNEL); - if (!desc) { - dev_err(atdma->dma_common.dev, - "Only %d initial descriptors\n", i); - break; - } - list_add_tail(&desc->desc_node, &atchan->free_list); - } - - dma_cookie_init(chan); - /* channel parameters */ channel_writel(atchan, CFG, cfg); - dev_dbg(chan2dev(chan), - "alloc_chan_resources: allocated %d descriptors\n", i); - - return i; + return 0; } /** @@ -1585,22 +1752,10 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) static void atc_free_chan_resources(struct dma_chan *chan) { struct at_dma_chan *atchan = to_at_dma_chan(chan); - struct at_dma *atdma = to_at_dma(chan->device); - struct at_desc *desc, *_desc; - LIST_HEAD(list); - /* ASSERT: channel is idle */ - BUG_ON(!list_empty(&atchan->active_list)); - BUG_ON(!list_empty(&atchan->queue)); BUG_ON(atc_chan_is_enabled(atchan)); - list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { - dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); - list_del(&desc->desc_node); - /* free link descriptor */ - dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); - } - list_splice_init(&atchan->free_list, &list); + vchan_free_chan_resources(to_virt_chan(chan)); atchan->status = 0; /* @@ -1651,14 +1806,13 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, return NULL; } - atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW; + atslave->cfg = ATC_DST_H2SEL | ATC_SRC_H2SEL; /* * We can fill both SRC_PER and DST_PER, one of these fields will be * ignored depending on DMA transfer direction. 
*/ per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK; - atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id) - | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id); + atslave->cfg |= ATC_DST_PER_ID(per_id) | ATC_SRC_PER_ID(per_id); /* * We have to translate the value we get from the device tree since * the half FIFO configuration value had to be 0 to keep backward @@ -1666,14 +1820,16 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, */ switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) { case AT91_DMA_CFG_FIFOCFG_ALAP: - atslave->cfg |= ATC_FIFOCFG_LARGESTBURST; + atslave->cfg |= FIELD_PREP(ATC_FIFOCFG, + ATC_FIFOCFG_LARGESTBURST); break; case AT91_DMA_CFG_FIFOCFG_ASAP: - atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE; + atslave->cfg |= FIELD_PREP(ATC_FIFOCFG, + ATC_FIFOCFG_ENOUGHSPACE); break; case AT91_DMA_CFG_FIFOCFG_HALF: default: - atslave->cfg |= ATC_FIFOCFG_HALFFIFO; + atslave->cfg |= FIELD_PREP(ATC_FIFOCFG, ATC_FIFOCFG_HALFFIFO); } atslave->dma_dev = &dmac_pdev->dev; @@ -1768,9 +1924,7 @@ static void at_dma_off(struct at_dma *atdma) static int __init at_dma_probe(struct platform_device *pdev) { - struct resource *io; struct at_dma *atdma; - size_t size; int irq; int err; int i; @@ -1790,44 +1944,31 @@ static int __init at_dma_probe(struct platform_device *pdev) if (!plat_dat) return -ENODEV; - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!io) - return -EINVAL; + atdma = devm_kzalloc(&pdev->dev, + struct_size(atdma, chan, plat_dat->nr_channels), + GFP_KERNEL); + if (!atdma) + return -ENOMEM; + + atdma->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(atdma->regs)) + return PTR_ERR(atdma->regs); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; - size = sizeof(struct at_dma); - size += plat_dat->nr_channels * sizeof(struct at_dma_chan); - atdma = kzalloc(size, GFP_KERNEL); - if (!atdma) - return -ENOMEM; - /* discover transaction capabilities */ - atdma->dma_common.cap_mask = plat_dat->cap_mask; + atdma->dma_device.cap_mask = plat_dat->cap_mask; atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1; - size = resource_size(io); - if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { - err = -EBUSY; - goto err_kfree; - } - - atdma->regs = ioremap(io->start, size); - if (!atdma->regs) { - err = -ENOMEM; - goto err_release_r; - } + atdma->clk = devm_clk_get(&pdev->dev, "dma_clk"); + if (IS_ERR(atdma->clk)) + return PTR_ERR(atdma->clk); - atdma->clk = clk_get(&pdev->dev, "dma_clk"); - if (IS_ERR(atdma->clk)) { - err = PTR_ERR(atdma->clk); - goto err_clk; - } err = clk_prepare_enable(atdma->clk); if (err) - goto err_clk_prepare; + return err; /* force dma off, just in case */ at_dma_off(atdma); @@ -1839,11 +1980,11 @@ static int __init at_dma_probe(struct platform_device *pdev) platform_set_drvdata(pdev, atdma); /* create a pool of consistent memory blocks for hardware descriptors */ - atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool", - &pdev->dev, sizeof(struct at_desc), - 4 /* word alignment */, 0); - if (!atdma->dma_desc_pool) { - dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); + atdma->lli_pool = dma_pool_create("at_hdmac_lli_pool", + &pdev->dev, sizeof(struct at_lli), + 4 /* word alignment */, 0); + if (!atdma->lli_pool) { + dev_err(&pdev->dev, "Unable to allocate DMA LLI descriptor pool\n"); err = -ENOMEM; goto err_desc_pool_create; } @@ -1862,73 +2003,66 @@ static int __init at_dma_probe(struct platform_device *pdev) cpu_relax(); /* initialize channels related values */ - 
INIT_LIST_HEAD(&atdma->dma_common.channels); + INIT_LIST_HEAD(&atdma->dma_device.channels); for (i = 0; i < plat_dat->nr_channels; i++) { struct at_dma_chan *atchan = &atdma->chan[i]; atchan->mem_if = AT_DMA_MEM_IF; atchan->per_if = AT_DMA_PER_IF; - atchan->chan_common.device = &atdma->dma_common; - dma_cookie_init(&atchan->chan_common); - list_add_tail(&atchan->chan_common.device_node, - &atdma->dma_common.channels); atchan->ch_regs = atdma->regs + ch_regs(i); - spin_lock_init(&atchan->lock); atchan->mask = 1 << i; - INIT_LIST_HEAD(&atchan->active_list); - INIT_LIST_HEAD(&atchan->queue); - INIT_LIST_HEAD(&atchan->free_list); - - tasklet_setup(&atchan->tasklet, atc_tasklet); + atchan->atdma = atdma; + atchan->vc.desc_free = atdma_desc_free; + vchan_init(&atchan->vc, &atdma->dma_device); atc_enable_chan_irq(atdma, i); } /* set base routines */ - atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; - atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; - atdma->dma_common.device_tx_status = atc_tx_status; - atdma->dma_common.device_issue_pending = atc_issue_pending; - atdma->dma_common.dev = &pdev->dev; + atdma->dma_device.device_alloc_chan_resources = atc_alloc_chan_resources; + atdma->dma_device.device_free_chan_resources = atc_free_chan_resources; + atdma->dma_device.device_tx_status = atc_tx_status; + atdma->dma_device.device_issue_pending = atc_issue_pending; + atdma->dma_device.dev = &pdev->dev; /* set prep routines based on capability */ - if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask)) - atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved; + if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_device.cap_mask)) + atdma->dma_device.device_prep_interleaved_dma = atc_prep_dma_interleaved; - if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) - atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; + if (dma_has_cap(DMA_MEMCPY, atdma->dma_device.cap_mask)) + atdma->dma_device.device_prep_dma_memcpy = atc_prep_dma_memcpy; - if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) { - atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset; - atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg; - atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES; + if (dma_has_cap(DMA_MEMSET, atdma->dma_device.cap_mask)) { + atdma->dma_device.device_prep_dma_memset = atc_prep_dma_memset; + atdma->dma_device.device_prep_dma_memset_sg = atc_prep_dma_memset_sg; + atdma->dma_device.fill_align = DMAENGINE_ALIGN_4_BYTES; } - if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { - atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; + if (dma_has_cap(DMA_SLAVE, atdma->dma_device.cap_mask)) { + atdma->dma_device.device_prep_slave_sg = atc_prep_slave_sg; /* controller can do slave DMA: can trigger cyclic transfers */ - dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); - atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; - atdma->dma_common.device_config = atc_config; - atdma->dma_common.device_pause = atc_pause; - atdma->dma_common.device_resume = atc_resume; - atdma->dma_common.device_terminate_all = atc_terminate_all; - atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS; - atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS; - atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + dma_cap_set(DMA_CYCLIC, atdma->dma_device.cap_mask); + atdma->dma_device.device_prep_dma_cyclic = 
atc_prep_dma_cyclic; + atdma->dma_device.device_config = atc_config; + atdma->dma_device.device_pause = atc_pause; + atdma->dma_device.device_resume = atc_resume; + atdma->dma_device.device_terminate_all = atc_terminate_all; + atdma->dma_device.src_addr_widths = ATC_DMA_BUSWIDTHS; + atdma->dma_device.dst_addr_widths = ATC_DMA_BUSWIDTHS; + atdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + atdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; } dma_writel(atdma, EN, AT_DMA_ENABLE); dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n", - dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", - dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "", - dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", + dma_has_cap(DMA_MEMCPY, atdma->dma_device.cap_mask) ? "cpy " : "", + dma_has_cap(DMA_MEMSET, atdma->dma_device.cap_mask) ? "set " : "", + dma_has_cap(DMA_SLAVE, atdma->dma_device.cap_mask) ? "slave " : "", plat_dat->nr_channels); - err = dma_async_device_register(&atdma->dma_common); + err = dma_async_device_register(&atdma->dma_device); if (err) { dev_err(&pdev->dev, "Unable to register: %d.\n", err); goto err_dma_async_device_register; @@ -1951,24 +2085,15 @@ static int __init at_dma_probe(struct platform_device *pdev) return 0; err_of_dma_controller_register: - dma_async_device_unregister(&atdma->dma_common); + dma_async_device_unregister(&atdma->dma_device); err_dma_async_device_register: dma_pool_destroy(atdma->memset_pool); err_memset_pool_create: - dma_pool_destroy(atdma->dma_desc_pool); + dma_pool_destroy(atdma->lli_pool); err_desc_pool_create: free_irq(platform_get_irq(pdev, 0), atdma); err_irq: clk_disable_unprepare(atdma->clk); -err_clk_prepare: - clk_put(atdma->clk); -err_clk: - iounmap(atdma->regs); - atdma->regs = NULL; -err_release_r: - release_mem_region(io->start, size); -err_kfree: - kfree(atdma); return err; } @@ -1976,38 +2101,24 @@ static int at_dma_remove(struct platform_device *pdev) { struct at_dma *atdma = platform_get_drvdata(pdev); struct dma_chan *chan, *_chan; - struct resource *io; at_dma_off(atdma); if (pdev->dev.of_node) of_dma_controller_free(pdev->dev.of_node); - dma_async_device_unregister(&atdma->dma_common); + dma_async_device_unregister(&atdma->dma_device); dma_pool_destroy(atdma->memset_pool); - dma_pool_destroy(atdma->dma_desc_pool); + dma_pool_destroy(atdma->lli_pool); free_irq(platform_get_irq(pdev, 0), atdma); - list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, + list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels, device_node) { - struct at_dma_chan *atchan = to_at_dma_chan(chan); - /* Disable interrupts */ atc_disable_chan_irq(atdma, chan->chan_id); - - tasklet_kill(&atchan->tasklet); list_del(&chan->device_node); } clk_disable_unprepare(atdma->clk); - clk_put(atdma->clk); - - iounmap(atdma->regs); - atdma->regs = NULL; - - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(io->start, resource_size(io)); - - kfree(atdma); return 0; } @@ -2025,7 +2136,7 @@ static int at_dma_prepare(struct device *dev) struct at_dma *atdma = dev_get_drvdata(dev); struct dma_chan *chan, *_chan; - list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, + list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels, device_node) { struct at_dma_chan *atchan = to_at_dma_chan(chan); /* wait for transaction completion (except in cyclic case) */ @@ -2037,7 +2148,7 @@ static int at_dma_prepare(struct device 
*dev) static void atc_suspend_cyclic(struct at_dma_chan *atchan) { - struct dma_chan *chan = &atchan->chan_common; + struct dma_chan *chan = &atchan->vc.chan; /* Channel should be paused by user * do it anyway even if it is not done already */ @@ -2060,7 +2171,7 @@ static int at_dma_suspend_noirq(struct device *dev) struct dma_chan *chan, *_chan; /* preserve data */ - list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, + list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels, device_node) { struct at_dma_chan *atchan = to_at_dma_chan(chan); @@ -2078,7 +2189,7 @@ static int at_dma_suspend_noirq(struct device *dev) static void atc_resume_cyclic(struct at_dma_chan *atchan) { - struct at_dma *atdma = to_at_dma(atchan->chan_common.device); + struct at_dma *atdma = to_at_dma(atchan->vc.chan.device); /* restore channel status for cyclic descriptors list: * next descriptor in the cyclic list at the time of suspend */ @@ -2110,7 +2221,7 @@ static int at_dma_resume_noirq(struct device *dev) /* restore saved data */ dma_writel(atdma, EBCIER, atdma->save_imr); - list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, + list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels, device_node) { struct at_dma_chan *atchan = to_at_dma_chan(chan); @@ -2121,7 +2232,7 @@ static int at_dma_resume_noirq(struct device *dev) return 0; } -static const struct dev_pm_ops at_dma_dev_pm_ops = { +static const struct dev_pm_ops __maybe_unused at_dma_dev_pm_ops = { .prepare = at_dma_prepare, .suspend_noirq = at_dma_suspend_noirq, .resume_noirq = at_dma_resume_noirq, @@ -2133,7 +2244,7 @@ static struct platform_driver at_dma_driver = { .id_table = atdma_devtypes, .driver = { .name = "at_hdmac", - .pm = &at_dma_dev_pm_ops, + .pm = pm_ptr(&at_dma_dev_pm_ops), .of_match_table = of_match_ptr(atmel_dma_dt_ids), }, }; @@ -2152,5 +2263,6 @@ module_exit(at_dma_exit); MODULE_DESCRIPTION("Atmel AHB DMA Controller driver"); MODULE_AUTHOR("Nicolas Ferre "); +MODULE_AUTHOR("Tudor Ambarus "); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:at_hdmac"); diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h deleted file mode 100644 index d4d382d746078eaee3c064f7f03cb49526cef7c7..0000000000000000000000000000000000000000 --- a/drivers/dma/at_hdmac_regs.h +++ /dev/null @@ -1,478 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Header file for the Atmel AHB DMA Controller driver - * - * Copyright (C) 2008 Atmel Corporation - */ -#ifndef AT_HDMAC_REGS_H -#define AT_HDMAC_REGS_H - -#define AT_DMA_MAX_NR_CHANNELS 8 - - -#define AT_DMA_GCFG 0x00 /* Global Configuration Register */ -#define AT_DMA_IF_BIGEND(i) (0x1 << (i)) /* AHB-Lite Interface i in Big-endian mode */ -#define AT_DMA_ARB_CFG (0x1 << 4) /* Arbiter mode. 
*/ -#define AT_DMA_ARB_CFG_FIXED (0x0 << 4) -#define AT_DMA_ARB_CFG_ROUND_ROBIN (0x1 << 4) - -#define AT_DMA_EN 0x04 /* Controller Enable Register */ -#define AT_DMA_ENABLE (0x1 << 0) - -#define AT_DMA_SREQ 0x08 /* Software Single Request Register */ -#define AT_DMA_SSREQ(x) (0x1 << ((x) << 1)) /* Request a source single transfer on channel x */ -#define AT_DMA_DSREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination single transfer on channel x */ - -#define AT_DMA_CREQ 0x0C /* Software Chunk Transfer Request Register */ -#define AT_DMA_SCREQ(x) (0x1 << ((x) << 1)) /* Request a source chunk transfer on channel x */ -#define AT_DMA_DCREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination chunk transfer on channel x */ - -#define AT_DMA_LAST 0x10 /* Software Last Transfer Flag Register */ -#define AT_DMA_SLAST(x) (0x1 << ((x) << 1)) /* This src rq is last tx of buffer on channel x */ -#define AT_DMA_DLAST(x) (0x1 << (1 + ((x) << 1))) /* This dst rq is last tx of buffer on channel x */ - -#define AT_DMA_SYNC 0x14 /* Request Synchronization Register */ -#define AT_DMA_SYR(h) (0x1 << (h)) /* Synchronize handshake line h */ - -/* Error, Chained Buffer transfer completed and Buffer transfer completed Interrupt registers */ -#define AT_DMA_EBCIER 0x18 /* Enable register */ -#define AT_DMA_EBCIDR 0x1C /* Disable register */ -#define AT_DMA_EBCIMR 0x20 /* Mask Register */ -#define AT_DMA_EBCISR 0x24 /* Status Register */ -#define AT_DMA_CBTC_OFFSET 8 -#define AT_DMA_ERR_OFFSET 16 -#define AT_DMA_BTC(x) (0x1 << (x)) -#define AT_DMA_CBTC(x) (0x1 << (AT_DMA_CBTC_OFFSET + (x))) -#define AT_DMA_ERR(x) (0x1 << (AT_DMA_ERR_OFFSET + (x))) - -#define AT_DMA_CHER 0x28 /* Channel Handler Enable Register */ -#define AT_DMA_ENA(x) (0x1 << (x)) -#define AT_DMA_SUSP(x) (0x1 << ( 8 + (x))) -#define AT_DMA_KEEP(x) (0x1 << (24 + (x))) - -#define AT_DMA_CHDR 0x2C /* Channel Handler Disable Register */ -#define AT_DMA_DIS(x) (0x1 << (x)) -#define AT_DMA_RES(x) (0x1 << ( 8 + (x))) - -#define AT_DMA_CHSR 0x30 /* Channel Handler Status Register */ -#define AT_DMA_EMPT(x) (0x1 << (16 + (x))) -#define AT_DMA_STAL(x) (0x1 << (24 + (x))) - - -#define AT_DMA_CH_REGS_BASE 0x3C /* Channel registers base address */ -#define ch_regs(x) (AT_DMA_CH_REGS_BASE + (x) * 0x28) /* Channel x base addr */ - -/* Hardware register offset for each channel */ -#define ATC_SADDR_OFFSET 0x00 /* Source Address Register */ -#define ATC_DADDR_OFFSET 0x04 /* Destination Address Register */ -#define ATC_DSCR_OFFSET 0x08 /* Descriptor Address Register */ -#define ATC_CTRLA_OFFSET 0x0C /* Control A Register */ -#define ATC_CTRLB_OFFSET 0x10 /* Control B Register */ -#define ATC_CFG_OFFSET 0x14 /* Configuration Register */ -#define ATC_SPIP_OFFSET 0x18 /* Src PIP Configuration Register */ -#define ATC_DPIP_OFFSET 0x1C /* Dst PIP Configuration Register */ - - -/* Bitfield definitions */ - -/* Bitfields in DSCR */ -#define ATC_DSCR_IF(i) (0x3 & (i)) /* Dsc feched via AHB-Lite Interface i */ - -/* Bitfields in CTRLA */ -#define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */ -#define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */ -#define ATC_SCSIZE_MASK (0x7 << 16) /* Source Chunk Transfer Size */ -#define ATC_SCSIZE(x) (ATC_SCSIZE_MASK & ((x) << 16)) -#define ATC_SCSIZE_1 (0x0 << 16) -#define ATC_SCSIZE_4 (0x1 << 16) -#define ATC_SCSIZE_8 (0x2 << 16) -#define ATC_SCSIZE_16 (0x3 << 16) -#define ATC_SCSIZE_32 (0x4 << 16) -#define ATC_SCSIZE_64 (0x5 << 16) -#define ATC_SCSIZE_128 (0x6 << 16) -#define ATC_SCSIZE_256 (0x7 << 
16) -#define ATC_DCSIZE_MASK (0x7 << 20) /* Destination Chunk Transfer Size */ -#define ATC_DCSIZE(x) (ATC_DCSIZE_MASK & ((x) << 20)) -#define ATC_DCSIZE_1 (0x0 << 20) -#define ATC_DCSIZE_4 (0x1 << 20) -#define ATC_DCSIZE_8 (0x2 << 20) -#define ATC_DCSIZE_16 (0x3 << 20) -#define ATC_DCSIZE_32 (0x4 << 20) -#define ATC_DCSIZE_64 (0x5 << 20) -#define ATC_DCSIZE_128 (0x6 << 20) -#define ATC_DCSIZE_256 (0x7 << 20) -#define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */ -#define ATC_SRC_WIDTH(x) ((x) << 24) -#define ATC_SRC_WIDTH_BYTE (0x0 << 24) -#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) -#define ATC_SRC_WIDTH_WORD (0x2 << 24) -#define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3) -#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ -#define ATC_DST_WIDTH(x) ((x) << 28) -#define ATC_DST_WIDTH_BYTE (0x0 << 28) -#define ATC_DST_WIDTH_HALFWORD (0x1 << 28) -#define ATC_DST_WIDTH_WORD (0x2 << 28) -#define ATC_DONE (0x1 << 31) /* Tx Done (only written back in descriptor) */ - -/* Bitfields in CTRLB */ -#define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */ -#define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */ - /* Specify AHB interfaces */ -#define AT_DMA_MEM_IF 0 /* interface 0 as memory interface */ -#define AT_DMA_PER_IF 1 /* interface 1 as peripheral interface */ - -#define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */ -#define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */ -#define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */ -#define ATC_DST_DSCR_DIS (0x1 << 20) /* Dst Descriptor fetch disable */ -#define ATC_FC_MASK (0x7 << 21) /* Choose Flow Controller */ -#define ATC_FC_MEM2MEM (0x0 << 21) /* Mem-to-Mem (DMA) */ -#define ATC_FC_MEM2PER (0x1 << 21) /* Mem-to-Periph (DMA) */ -#define ATC_FC_PER2MEM (0x2 << 21) /* Periph-to-Mem (DMA) */ -#define ATC_FC_PER2PER (0x3 << 21) /* Periph-to-Periph (DMA) */ -#define ATC_FC_PER2MEM_PER (0x4 << 21) /* Periph-to-Mem (Peripheral) */ -#define ATC_FC_MEM2PER_PER (0x5 << 21) /* Mem-to-Periph (Peripheral) */ -#define ATC_FC_PER2PER_SRCPER (0x6 << 21) /* Periph-to-Periph (Src Peripheral) */ -#define ATC_FC_PER2PER_DSTPER (0x7 << 21) /* Periph-to-Periph (Dst Peripheral) */ -#define ATC_SRC_ADDR_MODE_MASK (0x3 << 24) -#define ATC_SRC_ADDR_MODE_INCR (0x0 << 24) /* Incrementing Mode */ -#define ATC_SRC_ADDR_MODE_DECR (0x1 << 24) /* Decrementing Mode */ -#define ATC_SRC_ADDR_MODE_FIXED (0x2 << 24) /* Fixed Mode */ -#define ATC_DST_ADDR_MODE_MASK (0x3 << 28) -#define ATC_DST_ADDR_MODE_INCR (0x0 << 28) /* Incrementing Mode */ -#define ATC_DST_ADDR_MODE_DECR (0x1 << 28) /* Decrementing Mode */ -#define ATC_DST_ADDR_MODE_FIXED (0x2 << 28) /* Fixed Mode */ -#define ATC_IEN (0x1 << 30) /* BTC interrupt enable (active low) */ -#define ATC_AUTO (0x1 << 31) /* Auto multiple buffer tx enable */ - -/* Bitfields in CFG */ -#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */ - -#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */ -#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */ -#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */ -#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */ -#define ATC_SRC_H2SEL_SW (0x0 << 9) -#define ATC_SRC_H2SEL_HW (0x1 << 9) -#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */ -#define 
ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */ -#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */ -#define ATC_DST_H2SEL_SW (0x0 << 13) -#define ATC_DST_H2SEL_HW (0x1 << 13) -#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */ -#define ATC_SOD (0x1 << 16) /* Stop On Done */ -#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */ -#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */ -#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */ -#define ATC_LOCK_IF_L_CHUNK (0x0 << 22) -#define ATC_LOCK_IF_L_BUFFER (0x1 << 22) -#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */ -#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */ -#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28) -#define ATC_FIFOCFG_HALFFIFO (0x1 << 28) -#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28) - -/* Bitfields in SPIP */ -#define ATC_SPIP_HOLE(x) (0xFFFFU & (x)) -#define ATC_SPIP_BOUNDARY(x) ((0x3FF & (x)) << 16) - -/* Bitfields in DPIP */ -#define ATC_DPIP_HOLE(x) (0xFFFFU & (x)) -#define ATC_DPIP_BOUNDARY(x) ((0x3FF & (x)) << 16) - - -/*-- descriptors -----------------------------------------------------*/ - -/* LLI == Linked List Item; aka DMA buffer descriptor */ -struct at_lli { - /* values that are not changed by hardware */ - u32 saddr; - u32 daddr; - /* value that may get written back: */ - u32 ctrla; - /* more values that are not changed by hardware */ - u32 ctrlb; - u32 dscr; /* chain to next lli */ -}; - -/** - * struct at_desc - software descriptor - * @at_lli: hardware lli structure - * @txd: support for the async_tx api - * @desc_node: node on the channed descriptors list - * @len: descriptor byte count - * @total_len: total transaction byte count - */ -struct at_desc { - /* FIRST values the hardware uses */ - struct at_lli lli; - - /* THEN values for driver housekeeping */ - struct list_head tx_list; - struct dma_async_tx_descriptor txd; - struct list_head desc_node; - size_t len; - size_t total_len; - - /* Interleaved data */ - size_t boundary; - size_t dst_hole; - size_t src_hole; - - /* Memset temporary buffer */ - bool memset_buffer; - dma_addr_t memset_paddr; - int *memset_vaddr; -}; - -static inline struct at_desc * -txd_to_at_desc(struct dma_async_tx_descriptor *txd) -{ - return container_of(txd, struct at_desc, txd); -} - - -/*-- Channels --------------------------------------------------------*/ - -/** - * atc_status - information bits stored in channel status flag - * - * Manipulated with atomic operations. 
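The atc_status values defined just below are bit numbers, not masks: the channel's unsigned long status word is driven through the atomic bitops, which is why ATC_IS_CYCLIC can be 24. A short illustration of that convention (demo code, not from the driver):

#include <linux/bitops.h>
#include <linux/types.h>

static bool demo_status_bits(void)
{
	unsigned long status = 0;

	set_bit(1, &status);		/* takes a bit index, e.g. ATC_IS_PAUSED */
	return test_bit(1, &status);	/* atomic test of that same bit */
}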
- */ -enum atc_status { - ATC_IS_ERROR = 0, - ATC_IS_PAUSED = 1, - ATC_IS_CYCLIC = 24, -}; - -/** - * struct at_dma_chan - internal representation of an Atmel HDMAC channel - * @chan_common: common dmaengine channel object members - * @device: parent device - * @ch_regs: memory mapped register base - * @mask: channel index in a mask - * @per_if: peripheral interface - * @mem_if: memory interface - * @status: transmit status information from irq/prep* functions - * to tasklet (use atomic operations) - * @tasklet: bottom half to finish transaction work - * @save_cfg: configuration register that is saved on suspend/resume cycle - * @save_dscr: for cyclic operations, preserve next descriptor address in - * the cyclic list on suspend/resume cycle - * @dma_sconfig: configuration for slave transfers, passed via - * .device_config - * @lock: serializes enqueue/dequeue operations to descriptors lists - * @active_list: list of descriptors dmaengine is being running on - * @queue: list of descriptors ready to be submitted to engine - * @free_list: list of descriptors usable by the channel - */ -struct at_dma_chan { - struct dma_chan chan_common; - struct at_dma *device; - void __iomem *ch_regs; - u8 mask; - u8 per_if; - u8 mem_if; - unsigned long status; - struct tasklet_struct tasklet; - u32 save_cfg; - u32 save_dscr; - struct dma_slave_config dma_sconfig; - - spinlock_t lock; - - /* these other elements are all protected by lock */ - struct list_head active_list; - struct list_head queue; - struct list_head free_list; -}; - -#define channel_readl(atchan, name) \ - __raw_readl((atchan)->ch_regs + ATC_##name##_OFFSET) - -#define channel_writel(atchan, name, val) \ - __raw_writel((val), (atchan)->ch_regs + ATC_##name##_OFFSET) - -static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan) -{ - return container_of(dchan, struct at_dma_chan, chan_common); -} - -/* - * Fix sconfig's burst size according to at_hdmac. We need to convert them as: - * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4, 64 -> 5, 128 -> 6, 256 -> 7. - * - * This can be done by finding most significant bit set. - */ -static inline void convert_burst(u32 *maxburst) -{ - if (*maxburst > 1) - *maxburst = fls(*maxburst) - 2; - else - *maxburst = 0; -} - -/* - * Fix sconfig's bus width according to at_hdmac. - * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2. 
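A quick worked check of the fls()-based mapping convert_burst() implements above: fls() returns the 1-based index of the most significant set bit, so a maxburst of 16 gives fls(16) - 2 = 5 - 2 = 3, matching the 16 -> 3 entry, and 256 gives 9 - 2 = 7. A standalone restatement (demo name, same arithmetic):

#include <linux/bitops.h>
#include <linux/types.h>

/* Same mapping as convert_burst(): 1 -> 0, 4 -> 1, 8 -> 2, ..., 256 -> 7. */
static u32 demo_convert_burst(u32 maxburst)
{
	return maxburst > 1 ? fls(maxburst) - 2 : 0;
}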
- */ -static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width) -{ - switch (addr_width) { - case DMA_SLAVE_BUSWIDTH_2_BYTES: - return 1; - case DMA_SLAVE_BUSWIDTH_4_BYTES: - return 2; - default: - /* For 1 byte width or fallback */ - return 0; - } -} - -/*-- Controller ------------------------------------------------------*/ - -/** - * struct at_dma - internal representation of an Atmel HDMA Controller - * @chan_common: common dmaengine dma_device object members - * @atdma_devtype: identifier of DMA controller compatibility - * @ch_regs: memory mapped register base - * @clk: dma controller clock - * @save_imr: interrupt mask register that is saved on suspend/resume cycle - * @all_chan_mask: all channels availlable in a mask - * @dma_desc_pool: base of DMA descriptor region (DMA address) - * @chan: channels table to store at_dma_chan structures - */ -struct at_dma { - struct dma_device dma_common; - void __iomem *regs; - struct clk *clk; - u32 save_imr; - - u8 all_chan_mask; - - struct dma_pool *dma_desc_pool; - struct dma_pool *memset_pool; - /* AT THE END channels table */ - struct at_dma_chan chan[]; -}; - -#define dma_readl(atdma, name) \ - __raw_readl((atdma)->regs + AT_DMA_##name) -#define dma_writel(atdma, name, val) \ - __raw_writel((val), (atdma)->regs + AT_DMA_##name) - -static inline struct at_dma *to_at_dma(struct dma_device *ddev) -{ - return container_of(ddev, struct at_dma, dma_common); -} - - -/*-- Helper functions ------------------------------------------------*/ - -static struct device *chan2dev(struct dma_chan *chan) -{ - return &chan->dev->device; -} - -#if defined(VERBOSE_DEBUG) -static void vdbg_dump_regs(struct at_dma_chan *atchan) -{ - struct at_dma *atdma = to_at_dma(atchan->chan_common.device); - - dev_err(chan2dev(&atchan->chan_common), - " channel %d : imr = 0x%x, chsr = 0x%x\n", - atchan->chan_common.chan_id, - dma_readl(atdma, EBCIMR), - dma_readl(atdma, CHSR)); - - dev_err(chan2dev(&atchan->chan_common), - " channel: s0x%x d0x%x ctrl0x%x:0x%x cfg0x%x l0x%x\n", - channel_readl(atchan, SADDR), - channel_readl(atchan, DADDR), - channel_readl(atchan, CTRLA), - channel_readl(atchan, CTRLB), - channel_readl(atchan, CFG), - channel_readl(atchan, DSCR)); -} -#else -static void vdbg_dump_regs(struct at_dma_chan *atchan) {} -#endif - -static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) -{ - dev_crit(chan2dev(&atchan->chan_common), - "desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n", - &lli->saddr, &lli->daddr, - lli->ctrla, lli->ctrlb, &lli->dscr); -} - - -static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on) -{ - u32 ebci; - - /* enable interrupts on buffer transfer completion & error */ - ebci = AT_DMA_BTC(chan_id) - | AT_DMA_ERR(chan_id); - if (on) - dma_writel(atdma, EBCIER, ebci); - else - dma_writel(atdma, EBCIDR, ebci); -} - -static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id) -{ - atc_setup_irq(atdma, chan_id, 1); -} - -static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id) -{ - atc_setup_irq(atdma, chan_id, 0); -} - - -/** - * atc_chan_is_enabled - test if given channel is enabled - * @atchan: channel we want to test status - */ -static inline int atc_chan_is_enabled(struct at_dma_chan *atchan) -{ - struct at_dma *atdma = to_at_dma(atchan->chan_common.device); - - return !!(dma_readl(atdma, CHSR) & atchan->mask); -} - -/** - * atc_chan_is_paused - test channel pause/resume status - * @atchan: channel we want to test status - */ -static inline int atc_chan_is_paused(struct at_dma_chan *atchan) 
-{ - return test_bit(ATC_IS_PAUSED, &atchan->status); -} - -/** - * atc_chan_is_cyclic - test if given channel has cyclic property set - * @atchan: channel we want to test status - */ -static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan) -{ - return test_bit(ATC_IS_CYCLIC, &atchan->status); -} - -/** - * set_desc_eol - set end-of-link to descriptor so it will end transfer - * @desc: descriptor, signle or at the end of a chain, to end chain on - */ -static void set_desc_eol(struct at_desc *desc) -{ - u32 ctrlb = desc->lli.ctrlb; - - ctrlb &= ~ATC_IEN; - ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS; - - desc->lli.ctrlb = ctrlb; - desc->lli.dscr = 0; -} - -#endif /* AT_HDMAC_REGS_H */ diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 2a483802d9eeb476d414bf737ae6da166c4c3fc3..9c1a6e9a9c03084d622daef7d19bbd8955254547 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -1038,6 +1038,13 @@ static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = { JZ_SOC_DATA_BREAK_LINKS, }; +static const struct jz4780_dma_soc_data jz4755_dma_soc_data = { + .nb_channels = 4, + .transfer_ord_max = 5, + .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC | + JZ_SOC_DATA_BREAK_LINKS, +}; + static const struct jz4780_dma_soc_data jz4760_dma_soc_data = { .nb_channels = 5, .transfer_ord_max = 6, @@ -1101,6 +1108,7 @@ static const struct jz4780_dma_soc_data x1830_dma_soc_data = { static const struct of_device_id jz4780_dma_dt_match[] = { { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data }, { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data }, + { .compatible = "ingenic,jz4755-dma", .data = &jz4755_dma_soc_data }, { .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data }, { .compatible = "ingenic,jz4760-mdma", .data = &jz4760_mdma_soc_data }, { .compatible = "ingenic,jz4760-bdma", .data = &jz4760_bdma_soc_data }, diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index f4c07ad3be15b427a43b7c9a60e4e5d43f9a9fa2..c33087c5cd0214f23f5f595c86bedf1371f4912f 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -600,7 +600,7 @@ static int idma64_probe(struct idma64_chip *chip) return 0; } -static int idma64_remove(struct idma64_chip *chip) +static void idma64_remove(struct idma64_chip *chip) { struct idma64 *idma64 = chip->idma64; unsigned short i; @@ -618,8 +618,6 @@ static int idma64_remove(struct idma64_chip *chip) tasklet_kill(&idma64c->vchan.task); } - - return 0; } /* ---------------------------------------------------------------------- */ @@ -664,7 +662,9 @@ static int idma64_platform_remove(struct platform_device *pdev) { struct idma64_chip *chip = platform_get_drvdata(pdev); - return idma64_remove(chip); + idma64_remove(chip); + + return 0; } static int __maybe_unused idma64_pm_suspend(struct device *dev) diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index a9b96b18772f322ef4c74ba8acad04ae1f978c99..e13e92609943dbb91769a2c22b1bff57b228e2e6 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -100,7 +99,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) filp->private_data = ctx; if (device_user_pasid_enabled(idxd)) { - sva = iommu_sva_bind_device(dev, current->mm, NULL); + sva = iommu_sva_bind_device(dev, current->mm); if (IS_ERR(sva)) { rc = PTR_ERR(sva); dev_err(dev, "pasid allocation failed: %d\n", rc); diff --git a/drivers/dma/idxd/device.c 
b/drivers/dma/idxd/device.c index 6f44fa8f78a5d330d1c8239706df6b9a3474d559..06f5d3783d7719e01ad896818f5fa48a75cf945e 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include "../dmaengine.h" #include "idxd.h" diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 09cbf0c179ba9e29df1b2d835fe3cd08b9141ea5..529ea09c909408952fe32777bb7c71c95a2506ce 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -502,29 +501,7 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d static int idxd_enable_system_pasid(struct idxd_device *idxd) { - int flags; - unsigned int pasid; - struct iommu_sva *sva; - - flags = SVM_FLAG_SUPERVISOR_MODE; - - sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags); - if (IS_ERR(sva)) { - dev_warn(&idxd->pdev->dev, - "iommu sva bind failed: %ld\n", PTR_ERR(sva)); - return PTR_ERR(sva); - } - - pasid = iommu_sva_get_pasid(sva); - if (pasid == IOMMU_PASID_INVALID) { - iommu_sva_unbind_device(sva); - return -ENODEV; - } - - idxd->sva = sva; - idxd->pasid = pasid; - dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid); - return 0; + return -EOPNOTSUPP; } static void idxd_disable_system_pasid(struct idxd_device *idxd) diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 7269bd54554f60cb5b8915c9a1ed7f2a36ba66bf..3229dfc78650784733a261fa539e6de64bf1979d 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -528,6 +528,22 @@ static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr, !idxd->hw.group_cap.progress_limit; } +static bool idxd_group_attr_read_buffers_invisible(struct attribute *attr, + struct idxd_device *idxd) +{ + /* + * Intel IAA does not support Read Buffer allocation control, + * make these attributes invisible. 
+ */ + return (attr == &dev_attr_group_use_token_limit.attr || + attr == &dev_attr_group_use_read_buffer_limit.attr || + attr == &dev_attr_group_tokens_allowed.attr || + attr == &dev_attr_group_read_buffers_allowed.attr || + attr == &dev_attr_group_tokens_reserved.attr || + attr == &dev_attr_group_read_buffers_reserved.attr) && + idxd->data->type == IDXD_TYPE_IAX; +} + static umode_t idxd_group_attr_visible(struct kobject *kobj, struct attribute *attr, int n) { @@ -538,6 +554,9 @@ static umode_t idxd_group_attr_visible(struct kobject *kobj, if (idxd_group_attr_progress_limit_invisible(attr, idxd)) return 0; + if (idxd_group_attr_read_buffers_invisible(attr, idxd)) + return 0; + return attr->mode; } @@ -1233,6 +1252,14 @@ static bool idxd_wq_attr_op_config_invisible(struct attribute *attr, !idxd->hw.wq_cap.op_config; } +static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr, + struct idxd_device *idxd) +{ + /* Intel IAA does not support batch processing, make it invisible */ + return attr == &dev_attr_wq_max_batch_size.attr && + idxd->data->type == IDXD_TYPE_IAX; +} + static umode_t idxd_wq_attr_visible(struct kobject *kobj, struct attribute *attr, int n) { @@ -1243,6 +1270,9 @@ static umode_t idxd_wq_attr_visible(struct kobject *kobj, if (idxd_wq_attr_op_config_invisible(attr, idxd)) return 0; + if (idxd_wq_attr_max_batch_size_invisible(attr, idxd)) + return 0; + return attr->mode; } @@ -1533,6 +1563,43 @@ static ssize_t cmd_status_store(struct device *dev, struct device_attribute *att } static DEVICE_ATTR_RW(cmd_status); +static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr, + struct idxd_device *idxd) +{ + /* Intel IAA does not support batch processing, make it invisible */ + return attr == &dev_attr_max_batch_size.attr && + idxd->data->type == IDXD_TYPE_IAX; +} + +static bool idxd_device_attr_read_buffers_invisible(struct attribute *attr, + struct idxd_device *idxd) +{ + /* + * Intel IAA does not support Read Buffer allocation control, + * make these attributes invisible. 
+ */ + return (attr == &dev_attr_max_tokens.attr || + attr == &dev_attr_max_read_buffers.attr || + attr == &dev_attr_token_limit.attr || + attr == &dev_attr_read_buffer_limit.attr) && + idxd->data->type == IDXD_TYPE_IAX; +} + +static umode_t idxd_device_attr_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct idxd_device *idxd = confdev_to_idxd(dev); + + if (idxd_device_attr_max_batch_size_invisible(attr, idxd)) + return 0; + + if (idxd_device_attr_read_buffers_invisible(attr, idxd)) + return 0; + + return attr->mode; +} + static struct attribute *idxd_device_attributes[] = { &dev_attr_version.attr, &dev_attr_max_groups.attr, @@ -1560,6 +1627,7 @@ static struct attribute *idxd_device_attributes[] = { static const struct attribute_group idxd_device_attribute_group = { .attrs = idxd_device_attributes, + .is_visible = idxd_device_attr_visible, }; static const struct attribute_group *idxd_attribute_groups[] = { diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index e2070df6cad287bda75386961060339eb1b15d30..79d244011093cfb95e6195d0bfde045bcea3c8de 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -33,7 +33,7 @@ MODULE_PARM_DESC(completion_timeout, static int idle_timeout = 2000; module_param(idle_timeout, int, 0644); MODULE_PARM_DESC(idle_timeout, - "set ioat idel timeout [msec] (default 2000 [msec])"); + "set ioat idle timeout [msec] (default 2000 [msec])"); #define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout) #define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout) diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c deleted file mode 100644 index 310b899d581fa2dbbaa5abd0c97237d40b19177d..0000000000000000000000000000000000000000 --- a/drivers/dma/iop-adma.c +++ /dev/null @@ -1,1554 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * offload engine driver for the Intel Xscale series of i/o processors - * Copyright © 2006, Intel Corporation. 
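The idxd sysfs hunks above hide capability-dependent files through the attribute group's is_visible callback instead of registering separate groups per device type. A generic sketch of that pattern, with every demo_* name invented for illustration:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_feature_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "demo\n");
}
static DEVICE_ATTR_RO(demo_feature);

/* Stand-in for a real capability check such as idxd->data->type. */
static bool demo_device_has_feature(struct device *dev)
{
	return false;
}

static umode_t demo_attr_visible(struct kobject *kobj,
				 struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/* Returning 0 hides the file; attr->mode keeps it as declared. */
	if (attr == &dev_attr_demo_feature.attr &&
	    !demo_device_has_feature(dev))
		return 0;

	return attr->mode;
}

static struct attribute *demo_attrs[] = {
	&dev_attr_demo_feature.attr,
	NULL,
};

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = demo_attr_visible,
};

This is what makes e.g. max_batch_size vanish on IAX devices without maintaining a second attribute array.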
- */ - -/* - * This driver supports the asynchrounous DMA copy and RAID engines available - * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x) - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "iop-adma.h" -#include "dmaengine.h" - -#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common) -#define to_iop_adma_device(dev) \ - container_of(dev, struct iop_adma_device, common) -#define tx_to_iop_adma_slot(tx) \ - container_of(tx, struct iop_adma_desc_slot, async_tx) - -/** - * iop_adma_free_slots - flags descriptor slots for reuse - * @slot: Slot to free - * Caller must hold &iop_chan->lock while calling this function - */ -static void iop_adma_free_slots(struct iop_adma_desc_slot *slot) -{ - int stride = slot->slots_per_op; - - while (stride--) { - slot->slots_per_op = 0; - slot = list_entry(slot->slot_node.next, - struct iop_adma_desc_slot, - slot_node); - } -} - -static dma_cookie_t -iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *iop_chan, dma_cookie_t cookie) -{ - struct dma_async_tx_descriptor *tx = &desc->async_tx; - - BUG_ON(tx->cookie < 0); - if (tx->cookie > 0) { - cookie = tx->cookie; - tx->cookie = 0; - - /* call the callback (must not sleep or submit new - * operations to this channel) - */ - dmaengine_desc_get_callback_invoke(tx, NULL); - - dma_descriptor_unmap(tx); - if (desc->group_head) - desc->group_head = NULL; - } - - /* run dependent operations */ - dma_run_dependencies(tx); - - return cookie; -} - -static int -iop_adma_clean_slot(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *iop_chan) -{ - /* the client is allowed to attach dependent operations - * until 'ack' is set - */ - if (!async_tx_test_ack(&desc->async_tx)) - return 0; - - /* leave the last descriptor in the chain - * so we can append to it - */ - if (desc->chain_node.next == &iop_chan->chain) - return 1; - - dev_dbg(iop_chan->device->common.dev, - "\tfree slot: %d slots_per_op: %d\n", - desc->idx, desc->slots_per_op); - - list_del(&desc->chain_node); - iop_adma_free_slots(desc); - - return 0; -} - -static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) -{ - struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL; - dma_cookie_t cookie = 0; - u32 current_desc = iop_chan_get_current_descriptor(iop_chan); - int busy = iop_chan_is_busy(iop_chan); - int seen_current = 0, slot_cnt = 0, slots_per_op = 0; - - dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); - /* free completed slots from the chain starting with - * the oldest descriptor - */ - list_for_each_entry_safe(iter, _iter, &iop_chan->chain, - chain_node) { - pr_debug("\tcookie: %d slot: %d busy: %d " - "this_desc: %pad next_desc: %#llx ack: %d\n", - iter->async_tx.cookie, iter->idx, busy, - &iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter), - async_tx_test_ack(&iter->async_tx)); - prefetch(_iter); - prefetch(&_iter->async_tx); - - /* do not advance past the current descriptor loaded into the - * hardware channel, subsequent descriptors are either in - * process or have not been submitted - */ - if (seen_current) - break; - - /* stop the search if we reach the current descriptor and the - * channel is busy, or if it appears that the current descriptor - * needs to be re-read (i.e. 
has been appended to) - */ - if (iter->async_tx.phys == current_desc) { - BUG_ON(seen_current++); - if (busy || iop_desc_get_next_desc(iter)) - break; - } - - /* detect the start of a group transaction */ - if (!slot_cnt && !slots_per_op) { - slot_cnt = iter->slot_cnt; - slots_per_op = iter->slots_per_op; - if (slot_cnt <= slots_per_op) { - slot_cnt = 0; - slots_per_op = 0; - } - } - - if (slot_cnt) { - pr_debug("\tgroup++\n"); - if (!grp_start) - grp_start = iter; - slot_cnt -= slots_per_op; - } - - /* all the members of a group are complete */ - if (slots_per_op != 0 && slot_cnt == 0) { - struct iop_adma_desc_slot *grp_iter, *_grp_iter; - int end_of_chain = 0; - pr_debug("\tgroup end\n"); - - /* collect the total results */ - if (grp_start->xor_check_result) { - u32 zero_sum_result = 0; - slot_cnt = grp_start->slot_cnt; - grp_iter = grp_start; - - list_for_each_entry_from(grp_iter, - &iop_chan->chain, chain_node) { - zero_sum_result |= - iop_desc_get_zero_result(grp_iter); - pr_debug("\titer%d result: %d\n", - grp_iter->idx, zero_sum_result); - slot_cnt -= slots_per_op; - if (slot_cnt == 0) - break; - } - pr_debug("\tgrp_start->xor_check_result: %p\n", - grp_start->xor_check_result); - *grp_start->xor_check_result = zero_sum_result; - } - - /* clean up the group */ - slot_cnt = grp_start->slot_cnt; - grp_iter = grp_start; - list_for_each_entry_safe_from(grp_iter, _grp_iter, - &iop_chan->chain, chain_node) { - cookie = iop_adma_run_tx_complete_actions( - grp_iter, iop_chan, cookie); - - slot_cnt -= slots_per_op; - end_of_chain = iop_adma_clean_slot(grp_iter, - iop_chan); - - if (slot_cnt == 0 || end_of_chain) - break; - } - - /* the group should be complete at this point */ - BUG_ON(slot_cnt); - - slots_per_op = 0; - grp_start = NULL; - if (end_of_chain) - break; - else - continue; - } else if (slots_per_op) /* wait for group completion */ - continue; - - /* write back zero sum results (single descriptor case) */ - if (iter->xor_check_result && iter->async_tx.cookie) - *iter->xor_check_result = - iop_desc_get_zero_result(iter); - - cookie = iop_adma_run_tx_complete_actions( - iter, iop_chan, cookie); - - if (iop_adma_clean_slot(iter, iop_chan)) - break; - } - - if (cookie > 0) { - iop_chan->common.completed_cookie = cookie; - pr_debug("\tcompleted cookie %d\n", cookie); - } -} - -static void -iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) -{ - spin_lock_bh(&iop_chan->lock); - __iop_adma_slot_cleanup(iop_chan); - spin_unlock_bh(&iop_chan->lock); -} - -static void iop_adma_tasklet(struct tasklet_struct *t) -{ - struct iop_adma_chan *iop_chan = from_tasklet(iop_chan, t, - irq_tasklet); - - /* lockdep will flag depedency submissions as potentially - * recursive locking, this is not the case as a dependency - * submission will never recurse a channels submit routine. - * There are checks in async_tx.c to prevent this. 
- */ - spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING); - __iop_adma_slot_cleanup(iop_chan); - spin_unlock(&iop_chan->lock); -} - -static struct iop_adma_desc_slot * -iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots, - int slots_per_op) -{ - struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL; - LIST_HEAD(chain); - int slots_found, retry = 0; - - /* start search from the last allocated descrtiptor - * if a contiguous allocation can not be found start searching - * from the beginning of the list - */ -retry: - slots_found = 0; - if (retry == 0) - iter = iop_chan->last_used; - else - iter = list_entry(&iop_chan->all_slots, - struct iop_adma_desc_slot, - slot_node); - - list_for_each_entry_safe_continue( - iter, _iter, &iop_chan->all_slots, slot_node) { - prefetch(_iter); - prefetch(&_iter->async_tx); - if (iter->slots_per_op) { - /* give up after finding the first busy slot - * on the second pass through the list - */ - if (retry) - break; - - slots_found = 0; - continue; - } - - /* start the allocation if the slot is correctly aligned */ - if (!slots_found++) { - if (iop_desc_is_aligned(iter, slots_per_op)) - alloc_start = iter; - else { - slots_found = 0; - continue; - } - } - - if (slots_found == num_slots) { - struct iop_adma_desc_slot *alloc_tail = NULL; - struct iop_adma_desc_slot *last_used = NULL; - iter = alloc_start; - while (num_slots) { - int i; - dev_dbg(iop_chan->device->common.dev, - "allocated slot: %d " - "(desc %p phys: %#llx) slots_per_op %d\n", - iter->idx, iter->hw_desc, - (u64)iter->async_tx.phys, slots_per_op); - - /* pre-ack all but the last descriptor */ - if (num_slots != slots_per_op) - async_tx_ack(&iter->async_tx); - - list_add_tail(&iter->chain_node, &chain); - alloc_tail = iter; - iter->async_tx.cookie = 0; - iter->slot_cnt = num_slots; - iter->xor_check_result = NULL; - for (i = 0; i < slots_per_op; i++) { - iter->slots_per_op = slots_per_op - i; - last_used = iter; - iter = list_entry(iter->slot_node.next, - struct iop_adma_desc_slot, - slot_node); - } - num_slots -= slots_per_op; - } - alloc_tail->group_head = alloc_start; - alloc_tail->async_tx.cookie = -EBUSY; - list_splice(&chain, &alloc_tail->tx_list); - iop_chan->last_used = last_used; - iop_desc_clear_next_desc(alloc_start); - iop_desc_clear_next_desc(alloc_tail); - return alloc_tail; - } - } - if (!retry++) - goto retry; - - /* perform direct reclaim if the allocation fails */ - __iop_adma_slot_cleanup(iop_chan); - - return NULL; -} - -static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan) -{ - dev_dbg(iop_chan->device->common.dev, "pending: %d\n", - iop_chan->pending); - - if (iop_chan->pending >= IOP_ADMA_THRESHOLD) { - iop_chan->pending = 0; - iop_chan_append(iop_chan); - } -} - -static dma_cookie_t -iop_adma_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx); - struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan); - struct iop_adma_desc_slot *grp_start, *old_chain_tail; - int slot_cnt; - dma_cookie_t cookie; - dma_addr_t next_dma; - - grp_start = sw_desc->group_head; - slot_cnt = grp_start->slot_cnt; - - spin_lock_bh(&iop_chan->lock); - cookie = dma_cookie_assign(tx); - - old_chain_tail = list_entry(iop_chan->chain.prev, - struct iop_adma_desc_slot, chain_node); - list_splice_init(&sw_desc->tx_list, - &old_chain_tail->chain_node); - - /* fix up the hardware chain */ - next_dma = grp_start->async_tx.phys; - iop_desc_set_next_desc(old_chain_tail, next_dma); - 
BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */ - - /* check for pre-chained descriptors */ - iop_paranoia(iop_desc_get_next_desc(sw_desc)); - - /* increment the pending count by the number of slots - * memcpy operations have a 1:1 (slot:operation) relation - * other operations are heavier and will pop the threshold - * more often. - */ - iop_chan->pending += slot_cnt; - iop_adma_check_threshold(iop_chan); - spin_unlock_bh(&iop_chan->lock); - - dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n", - __func__, sw_desc->async_tx.cookie, sw_desc->idx); - - return cookie; -} - -static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan); -static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan); - -/** - * iop_adma_alloc_chan_resources - returns the number of allocated descriptors - * @chan: allocate descriptor resources for this channel - * - * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To - * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be - * greater than 2x the number slots needed to satisfy a device->max_xor - * request. - * */ -static int iop_adma_alloc_chan_resources(struct dma_chan *chan) -{ - char *hw_desc; - dma_addr_t dma_desc; - int idx; - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *slot = NULL; - int init = iop_chan->slots_allocated ? 0 : 1; - struct iop_adma_platform_data *plat_data = - dev_get_platdata(&iop_chan->device->pdev->dev); - int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE; - - /* Allocate descriptor slots */ - do { - idx = iop_chan->slots_allocated; - if (idx == num_descs_in_pool) - break; - - slot = kzalloc(sizeof(*slot), GFP_KERNEL); - if (!slot) { - printk(KERN_INFO "IOP ADMA Channel only initialized" - " %d descriptor slots", idx); - break; - } - hw_desc = (char *) iop_chan->device->dma_desc_pool_virt; - slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE]; - - dma_async_tx_descriptor_init(&slot->async_tx, chan); - slot->async_tx.tx_submit = iop_adma_tx_submit; - INIT_LIST_HEAD(&slot->tx_list); - INIT_LIST_HEAD(&slot->chain_node); - INIT_LIST_HEAD(&slot->slot_node); - dma_desc = iop_chan->device->dma_desc_pool; - slot->async_tx.phys = dma_desc + idx * IOP_ADMA_SLOT_SIZE; - slot->idx = idx; - - spin_lock_bh(&iop_chan->lock); - iop_chan->slots_allocated++; - list_add_tail(&slot->slot_node, &iop_chan->all_slots); - spin_unlock_bh(&iop_chan->lock); - } while (iop_chan->slots_allocated < num_descs_in_pool); - - if (idx && !iop_chan->last_used) - iop_chan->last_used = list_entry(iop_chan->all_slots.next, - struct iop_adma_desc_slot, - slot_node); - - dev_dbg(iop_chan->device->common.dev, - "allocated %d descriptor slots last_used: %p\n", - iop_chan->slots_allocated, iop_chan->last_used); - - /* initialize the channel and the chain with a null operation */ - if (init) { - if (dma_has_cap(DMA_MEMCPY, - iop_chan->device->common.cap_mask)) - iop_chan_start_null_memcpy(iop_chan); - else if (dma_has_cap(DMA_XOR, - iop_chan->device->common.cap_mask)) - iop_chan_start_null_xor(iop_chan); - else - BUG(); - } - - return (idx > 0) ? 
idx : -ENOMEM; -} - -static struct dma_async_tx_descriptor * -iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *sw_desc, *grp_start; - int slot_cnt, slots_per_op; - - dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - grp_start = sw_desc->group_head; - iop_desc_init_interrupt(grp_start, iop_chan); - sw_desc->async_tx.flags = flags; - } - spin_unlock_bh(&iop_chan->lock); - - return sw_desc ? &sw_desc->async_tx : NULL; -} - -static struct dma_async_tx_descriptor * -iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, - dma_addr_t dma_src, size_t len, unsigned long flags) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *sw_desc, *grp_start; - int slot_cnt, slots_per_op; - - if (unlikely(!len)) - return NULL; - BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT); - - dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n", - __func__, len); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - grp_start = sw_desc->group_head; - iop_desc_init_memcpy(grp_start, flags); - iop_desc_set_byte_count(grp_start, iop_chan, len); - iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); - iop_desc_set_memcpy_src_addr(grp_start, dma_src); - sw_desc->async_tx.flags = flags; - } - spin_unlock_bh(&iop_chan->lock); - - return sw_desc ? &sw_desc->async_tx : NULL; -} - -static struct dma_async_tx_descriptor * -iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, - dma_addr_t *dma_src, unsigned int src_cnt, size_t len, - unsigned long flags) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *sw_desc, *grp_start; - int slot_cnt, slots_per_op; - - if (unlikely(!len)) - return NULL; - BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); - - dev_dbg(iop_chan->device->common.dev, - "%s src_cnt: %d len: %zu flags: %lx\n", - __func__, src_cnt, len, flags); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - grp_start = sw_desc->group_head; - iop_desc_init_xor(grp_start, src_cnt, flags); - iop_desc_set_byte_count(grp_start, iop_chan, len); - iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); - sw_desc->async_tx.flags = flags; - while (src_cnt--) - iop_desc_set_xor_src_addr(grp_start, src_cnt, - dma_src[src_cnt]); - } - spin_unlock_bh(&iop_chan->lock); - - return sw_desc ? 
&sw_desc->async_tx : NULL; -} - -static struct dma_async_tx_descriptor * -iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src, - unsigned int src_cnt, size_t len, u32 *result, - unsigned long flags) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *sw_desc, *grp_start; - int slot_cnt, slots_per_op; - - if (unlikely(!len)) - return NULL; - - dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n", - __func__, src_cnt, len); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - grp_start = sw_desc->group_head; - iop_desc_init_zero_sum(grp_start, src_cnt, flags); - iop_desc_set_zero_sum_byte_count(grp_start, len); - grp_start->xor_check_result = result; - pr_debug("\t%s: grp_start->xor_check_result: %p\n", - __func__, grp_start->xor_check_result); - sw_desc->async_tx.flags = flags; - while (src_cnt--) - iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, - dma_src[src_cnt]); - } - spin_unlock_bh(&iop_chan->lock); - - return sw_desc ? &sw_desc->async_tx : NULL; -} - -static struct dma_async_tx_descriptor * -iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, - unsigned int src_cnt, const unsigned char *scf, size_t len, - unsigned long flags) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *sw_desc, *g; - int slot_cnt, slots_per_op; - int continue_srcs; - - if (unlikely(!len)) - return NULL; - BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); - - dev_dbg(iop_chan->device->common.dev, - "%s src_cnt: %d len: %zu flags: %lx\n", - __func__, src_cnt, len, flags); - - if (dmaf_p_disabled_continue(flags)) - continue_srcs = 1+src_cnt; - else if (dmaf_continue(flags)) - continue_srcs = 3+src_cnt; - else - continue_srcs = 0+src_cnt; - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - int i; - - g = sw_desc->group_head; - iop_desc_set_byte_count(g, iop_chan, len); - - /* even if P is disabled its destination address (bits - * [3:0]) must match Q. It is ok if P points to an - * invalid address, it won't be written. - */ - if (flags & DMA_PREP_PQ_DISABLE_P) - dst[0] = dst[1] & 0x7; - - iop_desc_set_pq_addr(g, dst); - sw_desc->async_tx.flags = flags; - for (i = 0; i < src_cnt; i++) - iop_desc_set_pq_src_addr(g, i, src[i], scf[i]); - - /* if we are continuing a previous operation factor in - * the old p and q values, see the comment for dma_maxpq - * in include/linux/dmaengine.h - */ - if (dmaf_p_disabled_continue(flags)) - iop_desc_set_pq_src_addr(g, i++, dst[1], 1); - else if (dmaf_continue(flags)) { - iop_desc_set_pq_src_addr(g, i++, dst[0], 0); - iop_desc_set_pq_src_addr(g, i++, dst[1], 1); - iop_desc_set_pq_src_addr(g, i++, dst[1], 0); - } - iop_desc_init_pq(g, i, flags); - } - spin_unlock_bh(&iop_chan->lock); - - return sw_desc ? 
&sw_desc->async_tx : NULL; -} - -static struct dma_async_tx_descriptor * -iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, - unsigned int src_cnt, const unsigned char *scf, - size_t len, enum sum_check_flags *pqres, - unsigned long flags) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *sw_desc, *g; - int slot_cnt, slots_per_op; - - if (unlikely(!len)) - return NULL; - BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); - - dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n", - __func__, src_cnt, len); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - /* for validate operations p and q are tagged onto the - * end of the source list - */ - int pq_idx = src_cnt; - - g = sw_desc->group_head; - iop_desc_init_pq_zero_sum(g, src_cnt+2, flags); - iop_desc_set_pq_zero_sum_byte_count(g, len); - g->pq_check_result = pqres; - pr_debug("\t%s: g->pq_check_result: %p\n", - __func__, g->pq_check_result); - sw_desc->async_tx.flags = flags; - while (src_cnt--) - iop_desc_set_pq_zero_sum_src_addr(g, src_cnt, - src[src_cnt], - scf[src_cnt]); - iop_desc_set_pq_zero_sum_addr(g, pq_idx, src); - } - spin_unlock_bh(&iop_chan->lock); - - return sw_desc ? &sw_desc->async_tx : NULL; -} - -static void iop_adma_free_chan_resources(struct dma_chan *chan) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - struct iop_adma_desc_slot *iter, *_iter; - int in_use_descs = 0; - - iop_adma_slot_cleanup(iop_chan); - - spin_lock_bh(&iop_chan->lock); - list_for_each_entry_safe(iter, _iter, &iop_chan->chain, - chain_node) { - in_use_descs++; - list_del(&iter->chain_node); - } - list_for_each_entry_safe_reverse( - iter, _iter, &iop_chan->all_slots, slot_node) { - list_del(&iter->slot_node); - kfree(iter); - iop_chan->slots_allocated--; - } - iop_chan->last_used = NULL; - - dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n", - __func__, iop_chan->slots_allocated); - spin_unlock_bh(&iop_chan->lock); - - /* one is ok since we left it on there on purpose */ - if (in_use_descs > 1) - printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n", - in_use_descs - 1); -} - -/** - * iop_adma_status - poll the status of an ADMA transaction - * @chan: ADMA channel handle - * @cookie: ADMA transaction identifier - * @txstate: a holder for the current state of the channel or NULL - */ -static enum dma_status iop_adma_status(struct dma_chan *chan, - dma_cookie_t cookie, - struct dma_tx_state *txstate) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - int ret; - - ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_COMPLETE) - return ret; - - iop_adma_slot_cleanup(iop_chan); - - return dma_cookie_status(chan, cookie, txstate); -} - -static irqreturn_t iop_adma_eot_handler(int irq, void *data) -{ - struct iop_adma_chan *chan = data; - - dev_dbg(chan->device->common.dev, "%s\n", __func__); - - tasklet_schedule(&chan->irq_tasklet); - - iop_adma_device_clear_eot_status(chan); - - return IRQ_HANDLED; -} - -static irqreturn_t iop_adma_eoc_handler(int irq, void *data) -{ - struct iop_adma_chan *chan = data; - - dev_dbg(chan->device->common.dev, "%s\n", __func__); - - tasklet_schedule(&chan->irq_tasklet); - - iop_adma_device_clear_eoc_status(chan); - - return IRQ_HANDLED; -} - -static irqreturn_t iop_adma_err_handler(int irq, void *data) -{ - struct iop_adma_chan *chan = data; - 
unsigned long status = iop_chan_get_status(chan); - - dev_err(chan->device->common.dev, - "error ( %s%s%s%s%s%s%s)\n", - iop_is_err_int_parity(status, chan) ? "int_parity " : "", - iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "", - iop_is_err_int_tabort(status, chan) ? "int_tabort " : "", - iop_is_err_int_mabort(status, chan) ? "int_mabort " : "", - iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "", - iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "", - iop_is_err_split_tx(status, chan) ? "split_tx " : ""); - - iop_adma_device_clear_err_status(chan); - - BUG(); - - return IRQ_HANDLED; -} - -static void iop_adma_issue_pending(struct dma_chan *chan) -{ - struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); - - if (iop_chan->pending) { - iop_chan->pending = 0; - iop_chan_append(iop_chan); - } -} - -/* - * Perform a transaction to verify the HW works. - */ -#define IOP_ADMA_TEST_SIZE 2000 - -static int iop_adma_memcpy_self_test(struct iop_adma_device *device) -{ - int i; - void *src, *dest; - dma_addr_t src_dma, dest_dma; - struct dma_chan *dma_chan; - dma_cookie_t cookie; - struct dma_async_tx_descriptor *tx; - int err = 0; - struct iop_adma_chan *iop_chan; - - dev_dbg(device->common.dev, "%s\n", __func__); - - src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL); - if (!src) - return -ENOMEM; - dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL); - if (!dest) { - kfree(src); - return -ENOMEM; - } - - /* Fill in src buffer */ - for (i = 0; i < IOP_ADMA_TEST_SIZE; i++) - ((u8 *) src)[i] = (u8)i; - - /* Start copy, using first DMA channel */ - dma_chan = container_of(device->common.channels.next, - struct dma_chan, - device_node); - if (iop_adma_alloc_chan_resources(dma_chan) < 1) { - err = -ENODEV; - goto out; - } - - dest_dma = dma_map_single(dma_chan->device->dev, dest, - IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); - src_dma = dma_map_single(dma_chan->device->dev, src, - IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE); - tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma, - IOP_ADMA_TEST_SIZE, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - - cookie = iop_adma_tx_submit(tx); - iop_adma_issue_pending(dma_chan); - msleep(1); - - if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_COMPLETE) { - dev_err(dma_chan->device->dev, - "Self-test copy timed out, disabling\n"); - err = -ENODEV; - goto free_resources; - } - - iop_chan = to_iop_adma_chan(dma_chan); - dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma, - IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); - if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) { - dev_err(dma_chan->device->dev, - "Self-test copy failed compare, disabling\n"); - err = -ENODEV; - goto free_resources; - } - -free_resources: - iop_adma_free_chan_resources(dma_chan); -out: - kfree(src); - kfree(dest); - return err; -} - -#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */ -static int -iop_adma_xor_val_self_test(struct iop_adma_device *device) -{ - int i, src_idx; - struct page *dest; - struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST]; - struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; - dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; - dma_addr_t dest_dma; - struct dma_async_tx_descriptor *tx; - struct dma_chan *dma_chan; - dma_cookie_t cookie; - u8 cmp_byte = 0; - u32 cmp_word; - u32 zero_sum_result; - int err = 0; - struct iop_adma_chan *iop_chan; - - dev_dbg(device->common.dev, "%s\n", __func__); - - for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { - xor_srcs[src_idx] = alloc_page(GFP_KERNEL); - if (!xor_srcs[src_idx]) { - while (src_idx--) - 
__free_page(xor_srcs[src_idx]);
-			return -ENOMEM;
-		}
-	}
-
-	dest = alloc_page(GFP_KERNEL);
-	if (!dest) {
-		while (src_idx--)
-			__free_page(xor_srcs[src_idx]);
-		return -ENOMEM;
-	}
-
-	/* Fill in src buffers */
-	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
-		u8 *ptr = page_address(xor_srcs[src_idx]);
-		for (i = 0; i < PAGE_SIZE; i++)
-			ptr[i] = (1 << src_idx);
-	}
-
-	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
-		cmp_byte ^= (u8) (1 << src_idx);
-
-	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
-			(cmp_byte << 8) | cmp_byte;
-
-	memset(page_address(dest), 0, PAGE_SIZE);
-
-	dma_chan = container_of(device->common.channels.next,
-				struct dma_chan,
-				device_node);
-	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
-		err = -ENODEV;
-		goto out;
-	}
-
-	/* test xor */
-	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
-				PAGE_SIZE, DMA_FROM_DEVICE);
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
-		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
-					   0, PAGE_SIZE, DMA_TO_DEVICE);
-	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
-				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
-	cookie = iop_adma_tx_submit(tx);
-	iop_adma_issue_pending(dma_chan);
-	msleep(8);
-
-	if (iop_adma_status(dma_chan, cookie, NULL) !=
-		DMA_COMPLETE) {
-		dev_err(dma_chan->device->dev,
-			"Self-test xor timed out, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	iop_chan = to_iop_adma_chan(dma_chan);
-	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
-				PAGE_SIZE, DMA_FROM_DEVICE);
-	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
-		u32 *ptr = page_address(dest);
-		if (ptr[i] != cmp_word) {
-			dev_err(dma_chan->device->dev,
-				"Self-test xor failed compare, disabling\n");
-			err = -ENODEV;
-			goto free_resources;
-		}
-	}
-	dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
-				   PAGE_SIZE, DMA_TO_DEVICE);
-
-	/* skip zero sum if the capability is not present */
-	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
-		goto free_resources;
-
-	/* zero sum the sources with the destination page */
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
-		zero_sum_srcs[i] = xor_srcs[i];
-	zero_sum_srcs[i] = dest;
-
-	zero_sum_result = 1;
-
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
-		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
-					   zero_sum_srcs[i], 0, PAGE_SIZE,
-					   DMA_TO_DEVICE);
-	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
-				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
-				       &zero_sum_result,
-				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
-	cookie = iop_adma_tx_submit(tx);
-	iop_adma_issue_pending(dma_chan);
-	msleep(8);
-
-	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
-		dev_err(dma_chan->device->dev,
-			"Self-test zero sum timed out, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	if (zero_sum_result != 0) {
-		dev_err(dma_chan->device->dev,
-			"Self-test zero sum failed compare, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	/* test for non-zero parity sum */
-	zero_sum_result = 0;
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
-		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
-					   zero_sum_srcs[i], 0, PAGE_SIZE,
-					   DMA_TO_DEVICE);
-	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
-				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
-				       &zero_sum_result,
-				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
-	cookie = iop_adma_tx_submit(tx);
-	iop_adma_issue_pending(dma_chan);
-	msleep(8);
-
-	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
-		dev_err(dma_chan->device->dev,
-			"Self-test non-zero sum timed out, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	if (zero_sum_result != 1) {
-		dev_err(dma_chan->device->dev,
-			"Self-test non-zero sum failed compare, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-free_resources:
-	iop_adma_free_chan_resources(dma_chan);
-out:
-	src_idx = IOP_ADMA_NUM_SRC_TEST;
-	while (src_idx--)
-		__free_page(xor_srcs[src_idx]);
-	__free_page(dest);
-	return err;
-}
-
-#ifdef CONFIG_RAID6_PQ
-static int
-iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
-{
-	/* combined sources, software pq results, and extra hw pq results */
-	struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
-	/* ptr to the extra hw pq buffers defined above */
-	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
-	/* address conversion buffers (dma_map / page_address) */
-	void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
-	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
-	dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
-
-	int i;
-	struct dma_async_tx_descriptor *tx;
-	struct dma_chan *dma_chan;
-	dma_cookie_t cookie;
-	u32 zero_sum_result;
-	int err = 0;
-	struct device *dev;
-
-	dev_dbg(device->common.dev, "%s\n", __func__);
-
-	for (i = 0; i < ARRAY_SIZE(pq); i++) {
-		pq[i] = alloc_page(GFP_KERNEL);
-		if (!pq[i]) {
-			while (i--)
-				__free_page(pq[i]);
-			return -ENOMEM;
-		}
-	}
-
-	/* Fill in src buffers */
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
-		pq_sw[i] = page_address(pq[i]);
-		memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
-	}
-	pq_sw[i] = page_address(pq[i]);
-	pq_sw[i+1] = page_address(pq[i+1]);
-
-	dma_chan = container_of(device->common.channels.next,
-				struct dma_chan,
-				device_node);
-	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
-		err = -ENODEV;
-		goto out;
-	}
-
-	dev = dma_chan->device->dev;
-
-	/* initialize the dests */
-	memset(page_address(pq_hw[0]), 0 , PAGE_SIZE);
-	memset(page_address(pq_hw[1]), 0 , PAGE_SIZE);
-
-	/* test pq */
-	pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
-		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
-					 DMA_TO_DEVICE);
-
-	tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
-				  IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
-				  PAGE_SIZE,
-				  DMA_PREP_INTERRUPT |
-				  DMA_CTRL_ACK);
-
-	cookie = iop_adma_tx_submit(tx);
-	iop_adma_issue_pending(dma_chan);
-	msleep(8);
-
-	if (iop_adma_status(dma_chan, cookie, NULL) !=
-		DMA_COMPLETE) {
-		dev_err(dev, "Self-test pq timed out, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);
-
-	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
-		   page_address(pq_hw[0]), PAGE_SIZE) != 0) {
-		dev_err(dev, "Self-test p failed compare, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
-		   page_address(pq_hw[1]), PAGE_SIZE) != 0) {
-		dev_err(dev, "Self-test q failed compare, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	/* test correct zero sum using the software generated pq values */
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
-		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
-					 DMA_TO_DEVICE);
-
-	zero_sum_result = ~0;
-	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
-				      pq_src, IOP_ADMA_NUM_SRC_TEST,
-				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
-				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
-
-	cookie = iop_adma_tx_submit(tx);
-	iop_adma_issue_pending(dma_chan);
-	msleep(8);
-
-	if (iop_adma_status(dma_chan, cookie, NULL) !=
-		DMA_COMPLETE) {
-		dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	if (zero_sum_result != 0) {
-		dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
-			zero_sum_result);
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	/* test incorrect zero sum */
-	i = IOP_ADMA_NUM_SRC_TEST;
-	memset(pq_sw[i] + 100, 0, 100);
-	memset(pq_sw[i+1] + 200, 0, 200);
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
-		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
-					 DMA_TO_DEVICE);
-
-	zero_sum_result = 0;
-	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
-				      pq_src, IOP_ADMA_NUM_SRC_TEST,
-				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
-				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
-
-	cookie = iop_adma_tx_submit(tx);
-	iop_adma_issue_pending(dma_chan);
-	msleep(8);
-
-	if (iop_adma_status(dma_chan, cookie, NULL) !=
-		DMA_COMPLETE) {
-		dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
-		dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
-			zero_sum_result);
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-free_resources:
-	iop_adma_free_chan_resources(dma_chan);
-out:
-	i = ARRAY_SIZE(pq);
-	while (i--)
-		__free_page(pq[i]);
-	return err;
-}
-#endif
-
-static int iop_adma_remove(struct platform_device *dev)
-{
-	struct iop_adma_device *device = platform_get_drvdata(dev);
-	struct dma_chan *chan, *_chan;
-	struct iop_adma_chan *iop_chan;
-	struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);
-
-	dma_async_device_unregister(&device->common);
-
-	dma_free_coherent(&dev->dev, plat_data->pool_size,
-			device->dma_desc_pool_virt, device->dma_desc_pool);
-
-	list_for_each_entry_safe(chan, _chan, &device->common.channels,
-				device_node) {
-		iop_chan = to_iop_adma_chan(chan);
-		list_del(&chan->device_node);
-		kfree(iop_chan);
-	}
-	kfree(device);
-
-	return 0;
-}
-
-static int iop_adma_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-	int ret = 0, i;
-	struct iop_adma_device *adev;
-	struct iop_adma_chan *iop_chan;
-	struct dma_device *dma_dev;
-	struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
-	if (!devm_request_mem_region(&pdev->dev, res->start,
-				resource_size(res), pdev->name))
-		return -EBUSY;
-
-	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
-	if (!adev)
-		return -ENOMEM;
-	dma_dev = &adev->common;
-
-	/* allocate coherent memory for hardware descriptors
-	 * note: writecombine gives slightly better performance, but
-	 * requires that we explicitly flush the writes
-	 */
-	adev->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev,
-						plat_data->pool_size,
-						&adev->dma_desc_pool,
-						GFP_KERNEL);
-	if (!adev->dma_desc_pool_virt) {
-		ret = -ENOMEM;
-		goto err_free_adev;
-	}
-
-	dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %pad\n",
-		__func__, adev->dma_desc_pool_virt, &adev->dma_desc_pool);
-
-	adev->id = plat_data->hw_id;
-
-	/* discover transaction capabilities from the platform data */
-	dma_dev->cap_mask = plat_data->cap_mask;
-
-	adev->pdev = pdev;
-	platform_set_drvdata(pdev, adev);
-
-	INIT_LIST_HEAD(&dma_dev->channels);
-
-	/* set base routines */
-	dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
-	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
-	dma_dev->device_tx_status = iop_adma_status;
-
dma_dev->device_issue_pending = iop_adma_issue_pending; - dma_dev->dev = &pdev->dev; - - /* set prep routines based on capability */ - if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) - dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy; - if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { - dma_dev->max_xor = iop_adma_get_max_xor(); - dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor; - } - if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask)) - dma_dev->device_prep_dma_xor_val = - iop_adma_prep_dma_xor_val; - if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { - dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0); - dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq; - } - if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) - dma_dev->device_prep_dma_pq_val = - iop_adma_prep_dma_pq_val; - if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) - dma_dev->device_prep_dma_interrupt = - iop_adma_prep_dma_interrupt; - - iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL); - if (!iop_chan) { - ret = -ENOMEM; - goto err_free_dma; - } - iop_chan->device = adev; - - iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); - if (!iop_chan->mmr_base) { - ret = -ENOMEM; - goto err_free_iop_chan; - } - tasklet_setup(&iop_chan->irq_tasklet, iop_adma_tasklet); - - /* clear errors before enabling interrupts */ - iop_adma_device_clear_err_status(iop_chan); - - for (i = 0; i < 3; i++) { - static const irq_handler_t handler[] = { - iop_adma_eot_handler, - iop_adma_eoc_handler, - iop_adma_err_handler - }; - int irq = platform_get_irq(pdev, i); - if (irq < 0) { - ret = -ENXIO; - goto err_free_iop_chan; - } else { - ret = devm_request_irq(&pdev->dev, irq, - handler[i], 0, pdev->name, iop_chan); - if (ret) - goto err_free_iop_chan; - } - } - - spin_lock_init(&iop_chan->lock); - INIT_LIST_HEAD(&iop_chan->chain); - INIT_LIST_HEAD(&iop_chan->all_slots); - iop_chan->common.device = dma_dev; - dma_cookie_init(&iop_chan->common); - list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); - - if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { - ret = iop_adma_memcpy_self_test(adev); - dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); - if (ret) - goto err_free_iop_chan; - } - - if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { - ret = iop_adma_xor_val_self_test(adev); - dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); - if (ret) - goto err_free_iop_chan; - } - - if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && - dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { - #ifdef CONFIG_RAID6_PQ - ret = iop_adma_pq_zero_sum_self_test(adev); - dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); - #else - /* can not test raid6, so do not publish capability */ - dma_cap_clear(DMA_PQ, dma_dev->cap_mask); - dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask); - ret = 0; - #endif - if (ret) - goto err_free_iop_chan; - } - - dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s)\n", - dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", - dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", - dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", - dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "", - dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", - dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? 
"intr " : ""); - - dma_async_device_register(dma_dev); - goto out; - - err_free_iop_chan: - kfree(iop_chan); - err_free_dma: - dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, - adev->dma_desc_pool_virt, adev->dma_desc_pool); - err_free_adev: - kfree(adev); - out: - return ret; -} - -static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) -{ - struct iop_adma_desc_slot *sw_desc, *grp_start; - dma_cookie_t cookie; - int slot_cnt, slots_per_op; - - dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - grp_start = sw_desc->group_head; - - list_splice_init(&sw_desc->tx_list, &iop_chan->chain); - async_tx_ack(&sw_desc->async_tx); - iop_desc_init_memcpy(grp_start, 0); - iop_desc_set_byte_count(grp_start, iop_chan, 0); - iop_desc_set_dest_addr(grp_start, iop_chan, 0); - iop_desc_set_memcpy_src_addr(grp_start, 0); - - cookie = dma_cookie_assign(&sw_desc->async_tx); - - /* initialize the completed cookie to be less than - * the most recently used cookie - */ - iop_chan->common.completed_cookie = cookie - 1; - - /* channel should not be busy */ - BUG_ON(iop_chan_is_busy(iop_chan)); - - /* clear any prior error-status bits */ - iop_adma_device_clear_err_status(iop_chan); - - /* disable operation */ - iop_chan_disable(iop_chan); - - /* set the descriptor address */ - iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys); - - /* 1/ don't add pre-chained descriptors - * 2/ dummy read to flush next_desc write - */ - BUG_ON(iop_desc_get_next_desc(sw_desc)); - - /* run the descriptor */ - iop_chan_enable(iop_chan); - } else - dev_err(iop_chan->device->common.dev, - "failed to allocate null descriptor\n"); - spin_unlock_bh(&iop_chan->lock); -} - -static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) -{ - struct iop_adma_desc_slot *sw_desc, *grp_start; - dma_cookie_t cookie; - int slot_cnt, slots_per_op; - - dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); - - spin_lock_bh(&iop_chan->lock); - slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op); - sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); - if (sw_desc) { - grp_start = sw_desc->group_head; - list_splice_init(&sw_desc->tx_list, &iop_chan->chain); - async_tx_ack(&sw_desc->async_tx); - iop_desc_init_null_xor(grp_start, 2, 0); - iop_desc_set_byte_count(grp_start, iop_chan, 0); - iop_desc_set_dest_addr(grp_start, iop_chan, 0); - iop_desc_set_xor_src_addr(grp_start, 0, 0); - iop_desc_set_xor_src_addr(grp_start, 1, 0); - - cookie = dma_cookie_assign(&sw_desc->async_tx); - - /* initialize the completed cookie to be less than - * the most recently used cookie - */ - iop_chan->common.completed_cookie = cookie - 1; - - /* channel should not be busy */ - BUG_ON(iop_chan_is_busy(iop_chan)); - - /* clear any prior error-status bits */ - iop_adma_device_clear_err_status(iop_chan); - - /* disable operation */ - iop_chan_disable(iop_chan); - - /* set the descriptor address */ - iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys); - - /* 1/ don't add pre-chained descriptors - * 2/ dummy read to flush next_desc write - */ - BUG_ON(iop_desc_get_next_desc(sw_desc)); - - /* run the descriptor */ - iop_chan_enable(iop_chan); - } else - dev_err(iop_chan->device->common.dev, - "failed to allocate null descriptor\n"); - spin_unlock_bh(&iop_chan->lock); -} - -static struct platform_driver iop_adma_driver = { - 
.probe = iop_adma_probe,
-	.remove = iop_adma_remove,
-	.driver = {
-		.name = "iop-adma",
-	},
-};
-
-module_platform_driver(iop_adma_driver);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("IOP ADMA Engine Driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:iop-adma");
diff --git a/drivers/dma/iop-adma.h b/drivers/dma/iop-adma.h
deleted file mode 100644
index d44eabb6f5ebd6800bf70eba800c4b0f0d7bad47..0000000000000000000000000000000000000000
--- a/drivers/dma/iop-adma.h
+++ /dev/null
@@ -1,914 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright © 2006, Intel Corporation.
- */
-#ifndef _ADMA_H
-#define _ADMA_H
-#include <linux/types.h>
-#include <linux/io.h>
-#include <linux/platform_data/dma-iop32x.h>
-
-/* Memory copy units */
-#define DMA_CCR(chan) (chan->mmr_base + 0x0)
-#define DMA_CSR(chan) (chan->mmr_base + 0x4)
-#define DMA_DAR(chan) (chan->mmr_base + 0xc)
-#define DMA_NDAR(chan) (chan->mmr_base + 0x10)
-#define DMA_PADR(chan) (chan->mmr_base + 0x14)
-#define DMA_PUADR(chan) (chan->mmr_base + 0x18)
-#define DMA_LADR(chan) (chan->mmr_base + 0x1c)
-#define DMA_BCR(chan) (chan->mmr_base + 0x20)
-#define DMA_DCR(chan) (chan->mmr_base + 0x24)
-
-/* Application accelerator unit */
-#define AAU_ACR(chan) (chan->mmr_base + 0x0)
-#define AAU_ASR(chan) (chan->mmr_base + 0x4)
-#define AAU_ADAR(chan) (chan->mmr_base + 0x8)
-#define AAU_ANDAR(chan) (chan->mmr_base + 0xc)
-#define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
-#define AAU_DAR(chan) (chan->mmr_base + 0x20)
-#define AAU_ABCR(chan) (chan->mmr_base + 0x24)
-#define AAU_ADCR(chan) (chan->mmr_base + 0x28)
-#define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
-#define AAU_EDCR0_IDX 8
-#define AAU_EDCR1_IDX 17
-#define AAU_EDCR2_IDX 26
-
-struct iop3xx_aau_desc_ctrl {
-	unsigned int int_en:1;
-	unsigned int blk1_cmd_ctrl:3;
-	unsigned int blk2_cmd_ctrl:3;
-	unsigned int blk3_cmd_ctrl:3;
-	unsigned int blk4_cmd_ctrl:3;
-	unsigned int blk5_cmd_ctrl:3;
-	unsigned int blk6_cmd_ctrl:3;
-	unsigned int blk7_cmd_ctrl:3;
-	unsigned int blk8_cmd_ctrl:3;
-	unsigned int blk_ctrl:2;
-	unsigned int dual_xor_en:1;
-	unsigned int tx_complete:1;
-	unsigned int zero_result_err:1;
-	unsigned int zero_result_en:1;
-	unsigned int dest_write_en:1;
-};
-
-struct iop3xx_aau_e_desc_ctrl {
-	unsigned int reserved:1;
-	unsigned int blk1_cmd_ctrl:3;
-	unsigned int blk2_cmd_ctrl:3;
-	unsigned int blk3_cmd_ctrl:3;
-	unsigned int blk4_cmd_ctrl:3;
-	unsigned int blk5_cmd_ctrl:3;
-	unsigned int blk6_cmd_ctrl:3;
-	unsigned int blk7_cmd_ctrl:3;
-	unsigned int blk8_cmd_ctrl:3;
-	unsigned int reserved2:7;
-};
-
-struct iop3xx_dma_desc_ctrl {
-	unsigned int pci_transaction:4;
-	unsigned int int_en:1;
-	unsigned int dac_cycle_en:1;
-	unsigned int mem_to_mem_en:1;
-	unsigned int crc_data_tx_en:1;
-	unsigned int crc_gen_en:1;
-	unsigned int crc_seed_dis:1;
-	unsigned int reserved:21;
-	unsigned int crc_tx_complete:1;
-};
-
-struct iop3xx_desc_dma {
-	u32 next_desc;
-	union {
-		u32 pci_src_addr;
-		u32 pci_dest_addr;
-		u32 src_addr;
-	};
-	union {
-		u32 upper_pci_src_addr;
-		u32 upper_pci_dest_addr;
-	};
-	union {
-		u32 local_pci_src_addr;
-		u32 local_pci_dest_addr;
-		u32 dest_addr;
-	};
-	u32 byte_count;
-	union {
-		u32 desc_ctrl;
-		struct iop3xx_dma_desc_ctrl desc_ctrl_field;
-	};
-	u32 crc_addr;
-};
-
-struct iop3xx_desc_aau {
-	u32 next_desc;
-	u32 src[4];
-	u32 dest_addr;
-	u32 byte_count;
-	union {
-		u32 desc_ctrl;
-		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
-	};
-	union {
-		u32 src_addr;
-		u32 e_desc_ctrl;
-		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
-
} src_edc[31]; -}; - -struct iop3xx_aau_gfmr { - unsigned int gfmr1:8; - unsigned int gfmr2:8; - unsigned int gfmr3:8; - unsigned int gfmr4:8; -}; - -struct iop3xx_desc_pq_xor { - u32 next_desc; - u32 src[3]; - union { - u32 data_mult1; - struct iop3xx_aau_gfmr data_mult1_field; - }; - u32 dest_addr; - u32 byte_count; - union { - u32 desc_ctrl; - struct iop3xx_aau_desc_ctrl desc_ctrl_field; - }; - union { - u32 src_addr; - u32 e_desc_ctrl; - struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field; - u32 data_multiplier; - struct iop3xx_aau_gfmr data_mult_field; - u32 reserved; - } src_edc_gfmr[19]; -}; - -struct iop3xx_desc_dual_xor { - u32 next_desc; - u32 src0_addr; - u32 src1_addr; - u32 h_src_addr; - u32 d_src_addr; - u32 h_dest_addr; - u32 byte_count; - union { - u32 desc_ctrl; - struct iop3xx_aau_desc_ctrl desc_ctrl_field; - }; - u32 d_dest_addr; -}; - -union iop3xx_desc { - struct iop3xx_desc_aau *aau; - struct iop3xx_desc_dma *dma; - struct iop3xx_desc_pq_xor *pq_xor; - struct iop3xx_desc_dual_xor *dual_xor; - void *ptr; -}; - -/* No support for p+q operations */ -static inline int -iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op) -{ - BUG(); - return 0; -} - -static inline void -iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt, - unsigned long flags) -{ - BUG(); -} - -static inline void -iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr) -{ - BUG(); -} - -static inline void -iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx, - dma_addr_t addr, unsigned char coef) -{ - BUG(); -} - -static inline int -iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op) -{ - BUG(); - return 0; -} - -static inline void -iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, - unsigned long flags) -{ - BUG(); -} - -static inline void -iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) -{ - BUG(); -} - -#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr - -static inline void -iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx, - dma_addr_t *src) -{ - BUG(); -} - -static inline int iop_adma_get_max_xor(void) -{ - return 32; -} - -static inline int iop_adma_get_max_pq(void) -{ - BUG(); - return 0; -} - -static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan) -{ - int id = chan->device->id; - - switch (id) { - case DMA0_ID: - case DMA1_ID: - return __raw_readl(DMA_DAR(chan)); - case AAU_ID: - return __raw_readl(AAU_ADAR(chan)); - default: - BUG(); - } - return 0; -} - -static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan, - u32 next_desc_addr) -{ - int id = chan->device->id; - - switch (id) { - case DMA0_ID: - case DMA1_ID: - __raw_writel(next_desc_addr, DMA_NDAR(chan)); - break; - case AAU_ID: - __raw_writel(next_desc_addr, AAU_ANDAR(chan)); - break; - } - -} - -#define IOP_ADMA_STATUS_BUSY (1 << 10) -#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024) -#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024) -#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024) - -static inline int iop_chan_is_busy(struct iop_adma_chan *chan) -{ - u32 status = __raw_readl(DMA_CSR(chan)); - return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0; -} - -static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc, - int num_slots) -{ - /* num_slots will only ever be 1, 2, 4, or 8 */ - return (desc->idx & (num_slots - 1)) ? 0 : 1; -} - -/* to do: support large (i.e. 
> hw max) buffer sizes */ -static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op) -{ - *slots_per_op = 1; - return 1; -} - -/* to do: support large (i.e. > hw max) buffer sizes */ -static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op) -{ - *slots_per_op = 1; - return 1; -} - -static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt, - int *slots_per_op) -{ - static const char slot_count_table[] = { - 1, 1, 1, 1, /* 01 - 04 */ - 2, 2, 2, 2, /* 05 - 08 */ - 4, 4, 4, 4, /* 09 - 12 */ - 4, 4, 4, 4, /* 13 - 16 */ - 8, 8, 8, 8, /* 17 - 20 */ - 8, 8, 8, 8, /* 21 - 24 */ - 8, 8, 8, 8, /* 25 - 28 */ - 8, 8, 8, 8, /* 29 - 32 */ - }; - *slots_per_op = slot_count_table[src_cnt - 1]; - return *slots_per_op; -} - -static inline int -iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan) -{ - switch (chan->device->id) { - case DMA0_ID: - case DMA1_ID: - return iop_chan_memcpy_slot_count(0, slots_per_op); - case AAU_ID: - return iop3xx_aau_xor_slot_count(0, 2, slots_per_op); - default: - BUG(); - } - return 0; -} - -static inline int iop_chan_xor_slot_count(size_t len, int src_cnt, - int *slots_per_op) -{ - int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op); - - if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT) - return slot_cnt; - - len -= IOP_ADMA_XOR_MAX_BYTE_COUNT; - while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) { - len -= IOP_ADMA_XOR_MAX_BYTE_COUNT; - slot_cnt += *slots_per_op; - } - - slot_cnt += *slots_per_op; - - return slot_cnt; -} - -/* zero sum on iop3xx is limited to 1k at a time so it requires multiple - * descriptors - */ -static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt, - int *slots_per_op) -{ - int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op); - - if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) - return slot_cnt; - - len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; - while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) { - len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; - slot_cnt += *slots_per_op; - } - - slot_cnt += *slots_per_op; - - return slot_cnt; -} - -static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *chan) -{ - union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - - switch (chan->device->id) { - case DMA0_ID: - case DMA1_ID: - return hw_desc.dma->byte_count; - case AAU_ID: - return hw_desc.aau->byte_count; - default: - BUG(); - } - return 0; -} - -/* translate the src_idx to a descriptor word index */ -static inline int __desc_idx(int src_idx) -{ - static const int desc_idx_table[] = { 0, 0, 0, 0, - 0, 1, 2, 3, - 5, 6, 7, 8, - 9, 10, 11, 12, - 14, 15, 16, 17, - 18, 19, 20, 21, - 23, 24, 25, 26, - 27, 28, 29, 30, - }; - - return desc_idx_table[src_idx]; -} - -static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *chan, - int src_idx) -{ - union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - - switch (chan->device->id) { - case DMA0_ID: - case DMA1_ID: - return hw_desc.dma->src_addr; - case AAU_ID: - break; - default: - BUG(); - } - - if (src_idx < 4) - return hw_desc.aau->src[src_idx]; - else - return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr; -} - -static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc, - int src_idx, dma_addr_t addr) -{ - if (src_idx < 4) - hw_desc->src[src_idx] = addr; - else - hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr; -} - -static inline void -iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags) -{ - 
struct iop3xx_desc_dma *hw_desc = desc->hw_desc; - union { - u32 value; - struct iop3xx_dma_desc_ctrl field; - } u_desc_ctrl; - - u_desc_ctrl.value = 0; - u_desc_ctrl.field.mem_to_mem_en = 1; - u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */ - u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT; - hw_desc->desc_ctrl = u_desc_ctrl.value; - hw_desc->upper_pci_src_addr = 0; - hw_desc->crc_addr = 0; -} - -static inline void -iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags) -{ - struct iop3xx_desc_aau *hw_desc = desc->hw_desc; - union { - u32 value; - struct iop3xx_aau_desc_ctrl field; - } u_desc_ctrl; - - u_desc_ctrl.value = 0; - u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */ - u_desc_ctrl.field.dest_write_en = 1; - u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT; - hw_desc->desc_ctrl = u_desc_ctrl.value; -} - -static inline u32 -iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, - unsigned long flags) -{ - int i, shift; - u32 edcr; - union { - u32 value; - struct iop3xx_aau_desc_ctrl field; - } u_desc_ctrl; - - u_desc_ctrl.value = 0; - switch (src_cnt) { - case 25 ... 32: - u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */ - edcr = 0; - shift = 1; - for (i = 24; i < src_cnt; i++) { - edcr |= (1 << shift); - shift += 3; - } - hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr; - src_cnt = 24; - fallthrough; - case 17 ... 24: - if (!u_desc_ctrl.field.blk_ctrl) { - hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0; - u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */ - } - edcr = 0; - shift = 1; - for (i = 16; i < src_cnt; i++) { - edcr |= (1 << shift); - shift += 3; - } - hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr; - src_cnt = 16; - fallthrough; - case 9 ... 16: - if (!u_desc_ctrl.field.blk_ctrl) - u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */ - edcr = 0; - shift = 1; - for (i = 8; i < src_cnt; i++) { - edcr |= (1 << shift); - shift += 3; - } - hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr; - src_cnt = 8; - fallthrough; - case 2 ... 
8: - shift = 1; - for (i = 0; i < src_cnt; i++) { - u_desc_ctrl.value |= (1 << shift); - shift += 3; - } - - if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4) - u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */ - } - - u_desc_ctrl.field.dest_write_en = 1; - u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */ - u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT; - hw_desc->desc_ctrl = u_desc_ctrl.value; - - return u_desc_ctrl.value; -} - -static inline void -iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, - unsigned long flags) -{ - iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags); -} - -/* return the number of operations */ -static inline int -iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, - unsigned long flags) -{ - int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; - struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter; - union { - u32 value; - struct iop3xx_aau_desc_ctrl field; - } u_desc_ctrl; - int i, j; - - hw_desc = desc->hw_desc; - - for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0; - i += slots_per_op, j++) { - iter = iop_hw_desc_slot_idx(hw_desc, i); - u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags); - u_desc_ctrl.field.dest_write_en = 0; - u_desc_ctrl.field.zero_result_en = 1; - u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT; - iter->desc_ctrl = u_desc_ctrl.value; - - /* for the subsequent descriptors preserve the store queue - * and chain them together - */ - if (i) { - prev_hw_desc = - iop_hw_desc_slot_idx(hw_desc, i - slots_per_op); - prev_hw_desc->next_desc = - (u32) (desc->async_tx.phys + (i << 5)); - } - } - - return j; -} - -static inline void -iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, - unsigned long flags) -{ - struct iop3xx_desc_aau *hw_desc = desc->hw_desc; - union { - u32 value; - struct iop3xx_aau_desc_ctrl field; - } u_desc_ctrl; - - u_desc_ctrl.value = 0; - switch (src_cnt) { - case 25 ... 32: - u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */ - hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0; - fallthrough; - case 17 ... 24: - if (!u_desc_ctrl.field.blk_ctrl) { - hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0; - u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */ - } - hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0; - fallthrough; - case 9 ... 16: - if (!u_desc_ctrl.field.blk_ctrl) - u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */ - hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0; - fallthrough; - case 1 ... 
8: - if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4) - u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */ - } - - u_desc_ctrl.field.dest_write_en = 0; - u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT; - hw_desc->desc_ctrl = u_desc_ctrl.value; -} - -static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *chan, - u32 byte_count) -{ - union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - - switch (chan->device->id) { - case DMA0_ID: - case DMA1_ID: - hw_desc.dma->byte_count = byte_count; - break; - case AAU_ID: - hw_desc.aau->byte_count = byte_count; - break; - default: - BUG(); - } -} - -static inline void -iop_desc_init_interrupt(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *chan) -{ - union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - - switch (chan->device->id) { - case DMA0_ID: - case DMA1_ID: - iop_desc_init_memcpy(desc, 1); - hw_desc.dma->byte_count = 0; - hw_desc.dma->dest_addr = 0; - hw_desc.dma->src_addr = 0; - break; - case AAU_ID: - iop_desc_init_null_xor(desc, 2, 1); - hw_desc.aau->byte_count = 0; - hw_desc.aau->dest_addr = 0; - hw_desc.aau->src[0] = 0; - hw_desc.aau->src[1] = 0; - break; - default: - BUG(); - } -} - -static inline void -iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) -{ - int slots_per_op = desc->slots_per_op; - struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; - int i = 0; - - if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) { - hw_desc->byte_count = len; - } else { - do { - iter = iop_hw_desc_slot_idx(hw_desc, i); - iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; - len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; - i += slots_per_op; - } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT); - - iter = iop_hw_desc_slot_idx(hw_desc, i); - iter->byte_count = len; - } -} - -static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *chan, - dma_addr_t addr) -{ - union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - - switch (chan->device->id) { - case DMA0_ID: - case DMA1_ID: - hw_desc.dma->dest_addr = addr; - break; - case AAU_ID: - hw_desc.aau->dest_addr = addr; - break; - default: - BUG(); - } -} - -static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc, - dma_addr_t addr) -{ - struct iop3xx_desc_dma *hw_desc = desc->hw_desc; - hw_desc->src_addr = addr; -} - -static inline void -iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx, - dma_addr_t addr) -{ - - struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; - int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; - int i; - - for (i = 0; (slot_cnt -= slots_per_op) >= 0; - i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) { - iter = iop_hw_desc_slot_idx(hw_desc, i); - iop3xx_aau_desc_set_src_addr(iter, src_idx, addr); - } -} - -static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc, - int src_idx, dma_addr_t addr) -{ - - struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; - int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; - int i; - - for (i = 0; (slot_cnt -= slots_per_op) >= 0; - i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) { - iter = iop_hw_desc_slot_idx(hw_desc, i); - iop3xx_aau_desc_set_src_addr(iter, src_idx, addr); - } -} - -static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc, - u32 next_desc_addr) -{ - /* hw_desc->next_desc is the same location for all channels */ - union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - - 
iop_paranoia(hw_desc.dma->next_desc); - hw_desc.dma->next_desc = next_desc_addr; -} - -static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc) -{ - /* hw_desc->next_desc is the same location for all channels */ - union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - return hw_desc.dma->next_desc; -} - -static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc) -{ - /* hw_desc->next_desc is the same location for all channels */ - union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - hw_desc.dma->next_desc = 0; -} - -static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc, - u32 val) -{ - struct iop3xx_desc_aau *hw_desc = desc->hw_desc; - hw_desc->src[0] = val; -} - -static inline enum sum_check_flags -iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) -{ - struct iop3xx_desc_aau *hw_desc = desc->hw_desc; - struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field; - - iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en)); - return desc_ctrl.zero_result_err << SUM_CHECK_P; -} - -static inline void iop_chan_append(struct iop_adma_chan *chan) -{ - u32 dma_chan_ctrl; - - dma_chan_ctrl = __raw_readl(DMA_CCR(chan)); - dma_chan_ctrl |= 0x2; - __raw_writel(dma_chan_ctrl, DMA_CCR(chan)); -} - -static inline u32 iop_chan_get_status(struct iop_adma_chan *chan) -{ - return __raw_readl(DMA_CSR(chan)); -} - -static inline void iop_chan_disable(struct iop_adma_chan *chan) -{ - u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan)); - dma_chan_ctrl &= ~1; - __raw_writel(dma_chan_ctrl, DMA_CCR(chan)); -} - -static inline void iop_chan_enable(struct iop_adma_chan *chan) -{ - u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan)); - - dma_chan_ctrl |= 1; - __raw_writel(dma_chan_ctrl, DMA_CCR(chan)); -} - -static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan) -{ - u32 status = __raw_readl(DMA_CSR(chan)); - status &= (1 << 9); - __raw_writel(status, DMA_CSR(chan)); -} - -static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan) -{ - u32 status = __raw_readl(DMA_CSR(chan)); - status &= (1 << 8); - __raw_writel(status, DMA_CSR(chan)); -} - -static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan) -{ - u32 status = __raw_readl(DMA_CSR(chan)); - - switch (chan->device->id) { - case DMA0_ID: - case DMA1_ID: - status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1); - break; - case AAU_ID: - status &= (1 << 5); - break; - default: - BUG(); - } - - __raw_writel(status, DMA_CSR(chan)); -} - -static inline int -iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan) -{ - return 0; -} - -static inline int -iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan) -{ - return 0; -} - -static inline int -iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan) -{ - return 0; -} - -static inline int -iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan) -{ - return test_bit(5, &status); -} - -static inline int -iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan) -{ - switch (chan->device->id) { - case DMA0_ID: - case DMA1_ID: - return test_bit(2, &status); - default: - return 0; - } -} - -static inline int -iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan) -{ - switch (chan->device->id) { - case DMA0_ID: - case DMA1_ID: - return test_bit(3, &status); - default: - return 0; - } -} - -static inline int -iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan) -{ - switch 
(chan->device->id) { - case DMA0_ID: - case DMA1_ID: - return test_bit(1, &status); - default: - return 0; - } -} -#endif /* _ADMA_H */ diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c index 3f56514bbef8f9d3178b2a67fdaf2fb6e0bb530d..061add83229512e07f6ed90701789e395f02522d 100644 --- a/drivers/dma/qcom/gpi.c +++ b/drivers/dma/qcom/gpi.c @@ -2286,9 +2286,14 @@ static int gpi_probe(struct platform_device *pdev) } static const struct of_device_id gpi_of_match[] = { - { .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 }, { .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 }, { .compatible = "qcom,sm6350-gpi-dma", .data = (void *)0x10000 }, + /* + * Do not grow the list for compatible devices. Instead use + * qcom,sdm845-gpi-dma (for ee_offset = 0x0) or qcom,sm6350-gpi-dma + * (for ee_offset = 0x10000). + */ + { .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 }, { .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 }, { .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 }, { .compatible = "qcom,sm8350-gpi-dma", .data = (void *)0x10000 }, diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h deleted file mode 100644 index 7459f9a13b5becc6bdaf42222504e9ece49e1804..0000000000000000000000000000000000000000 --- a/drivers/dma/sh/shdma-arm.h +++ /dev/null @@ -1,48 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Renesas SuperH DMA Engine support - * - * Copyright (C) 2013 Renesas Electronics, Inc. - */ - -#ifndef SHDMA_ARM_H -#define SHDMA_ARM_H - -#include "shdma.h" - -/* Transmit sizes and respective CHCR register values */ -enum { - XMIT_SZ_8BIT = 0, - XMIT_SZ_16BIT = 1, - XMIT_SZ_32BIT = 2, - XMIT_SZ_64BIT = 7, - XMIT_SZ_128BIT = 3, - XMIT_SZ_256BIT = 4, - XMIT_SZ_512BIT = 5, -}; - -/* log2(size / 8) - used to calculate number of transfers */ -#define SH_DMAE_TS_SHIFT { \ - [XMIT_SZ_8BIT] = 0, \ - [XMIT_SZ_16BIT] = 1, \ - [XMIT_SZ_32BIT] = 2, \ - [XMIT_SZ_64BIT] = 3, \ - [XMIT_SZ_128BIT] = 4, \ - [XMIT_SZ_256BIT] = 5, \ - [XMIT_SZ_512BIT] = 6, \ -} - -#define TS_LOW_BIT 0x3 /* --xx */ -#define TS_HI_BIT 0xc /* xx-- */ - -#define TS_LOW_SHIFT (3) -#define TS_HI_SHIFT (20 - 2) /* 2 bits for shifted low TS */ - -#define TS_INDEX2VAL(i) \ - ((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\ - (((i) & TS_HI_BIT) << TS_HI_SHIFT)) - -#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL((xmit_sz))) -#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL((xmit_sz))) - -#endif diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c index fa9bda4a2bc6fe4453276fdca95c6194eaca84bb..1d1180db6d4ecdca4e540da718fcf733aa42438f 100644 --- a/drivers/dma/tegra186-gpc-dma.c +++ b/drivers/dma/tegra186-gpc-dma.c @@ -161,7 +161,10 @@ #define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 5000 /* 5 msec */ /* Channel base address offset from GPCDMA base address */ -#define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET 0x20000 +#define TEGRA_GPCDMA_CHANNEL_BASE_ADDR_OFFSET 0x10000 + +/* Default channel mask reserving channel0 */ +#define TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK 0xfffffffe struct tegra_dma; struct tegra_dma_channel; @@ -246,6 +249,7 @@ struct tegra_dma { const struct tegra_dma_chip_data *chip_data; unsigned long sid_m2d_reserved; unsigned long sid_d2m_reserved; + u32 chan_mask; void __iomem *base_addr; struct device *dev; struct dma_device dma_dev; @@ -1288,7 +1292,7 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, } static const struct tegra_dma_chip_data tegra186_dma_chip_data = { - 
.nr_channels = 31, + .nr_channels = 32, .channel_reg_size = SZ_64K, .max_dma_count = SZ_1G, .hw_support_pause = false, @@ -1296,7 +1300,7 @@ static const struct tegra_dma_chip_data tegra186_dma_chip_data = { }; static const struct tegra_dma_chip_data tegra194_dma_chip_data = { - .nr_channels = 31, + .nr_channels = 32, .channel_reg_size = SZ_64K, .max_dma_count = SZ_1G, .hw_support_pause = true, @@ -1304,7 +1308,7 @@ static const struct tegra_dma_chip_data tegra194_dma_chip_data = { }; static const struct tegra_dma_chip_data tegra234_dma_chip_data = { - .nr_channels = 31, + .nr_channels = 32, .channel_reg_size = SZ_64K, .max_dma_count = SZ_1G, .hw_support_pause = true, @@ -1380,15 +1384,28 @@ static int tegra_dma_probe(struct platform_device *pdev) } stream_id = iommu_spec->ids[0] & 0xffff; + ret = device_property_read_u32(&pdev->dev, "dma-channel-mask", + &tdma->chan_mask); + if (ret) { + dev_warn(&pdev->dev, + "Missing dma-channel-mask property, using default channel mask %#x\n", + TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK); + tdma->chan_mask = TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK; + } + INIT_LIST_HEAD(&tdma->dma_dev.channels); for (i = 0; i < cdata->nr_channels; i++) { struct tegra_dma_channel *tdc = &tdma->channels[i]; + /* Check for channel mask */ + if (!(tdma->chan_mask & BIT(i))) + continue; + tdc->irq = platform_get_irq(pdev, i); if (tdc->irq < 0) return tdc->irq; - tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET + + tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADDR_OFFSET + i * cdata->channel_reg_size; snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i); tdc->tdma = tdma; @@ -1449,8 +1466,8 @@ static int tegra_dma_probe(struct platform_device *pdev) return ret; } - dev_info(&pdev->dev, "GPC DMA driver register %d channels\n", - cdata->nr_channels); + dev_info(&pdev->dev, "GPC DMA driver register %lu channels\n", + hweight_long(tdma->chan_mask)); return 0; } @@ -1473,6 +1490,9 @@ static int __maybe_unused tegra_dma_pm_suspend(struct device *dev) for (i = 0; i < tdma->chip_data->nr_channels; i++) { struct tegra_dma_channel *tdc = &tdma->channels[i]; + if (!(tdma->chan_mask & BIT(i))) + continue; + if (tdc->dma_desc) { dev_err(tdma->dev, "channel %u busy\n", i); return -EBUSY; @@ -1492,6 +1512,9 @@ static int __maybe_unused tegra_dma_pm_resume(struct device *dev) for (i = 0; i < tdma->chip_data->nr_channels; i++) { struct tegra_dma_channel *tdc = &tdma->channels[i]; + if (!(tdma->chan_mask & BIT(i))) + continue; + tegra_dma_program_sid(tdc, tdc->stream_id); } diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig index 79618fac119a7259a36528e5537268c976f0e8ba..2adc2cca10e926b5a2be57a3271ee6374ff9e297 100644 --- a/drivers/dma/ti/Kconfig +++ b/drivers/dma/ti/Kconfig @@ -35,7 +35,7 @@ config DMA_OMAP DMA engine is found on OMAP and DRA7xx parts. config TI_K3_UDMA - bool "Texas Instruments UDMA support" + tristate "Texas Instruments UDMA support" depends on ARCH_K3 depends on TI_SCI_PROTOCOL depends on TI_SCI_INTA_IRQCHIP @@ -48,7 +48,7 @@ config TI_K3_UDMA DMA engine is used in AM65x and j721e. config TI_K3_UDMA_GLUE_LAYER - bool "Texas Instruments UDMA Glue layer for non DMAengine users" + tristate "Texas Instruments UDMA Glue layer for non DMAengine users" depends on ARCH_K3 depends on TI_K3_UDMA help @@ -56,7 +56,8 @@ config TI_K3_UDMA_GLUE_LAYER If unsure, say N. 
config TI_K3_PSIL - bool + tristate + default TI_K3_UDMA config TI_DMA_CROSSBAR bool diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile index d3a303f0d7c621b962af3cb56401fa10c1126e89..b53d05b11ca5ef9a2cb840db6a693a244bb91a08 100644 --- a/drivers/dma/ti/Makefile +++ b/drivers/dma/ti/Makefile @@ -4,11 +4,12 @@ obj-$(CONFIG_TI_EDMA) += edma.o obj-$(CONFIG_DMA_OMAP) += omap-dma.o obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o -obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o \ - k3-psil-am654.o \ - k3-psil-j721e.o \ - k3-psil-j7200.o \ - k3-psil-am64.o \ - k3-psil-j721s2.o \ - k3-psil-am62.o +k3-psil-lib-objs := k3-psil.o \ + k3-psil-am654.o \ + k3-psil-j721e.o \ + k3-psil-j7200.o \ + k3-psil-am64.o \ + k3-psil-j721s2.o \ + k3-psil-am62.o +obj-$(CONFIG_TI_K3_PSIL) += k3-psil-lib.o obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c index 761a384093d20e6345283331be3975a42246f593..8b6533a1eeeb99c0525c1dadd1135cfe5a8d7abc 100644 --- a/drivers/dma/ti/k3-psil.c +++ b/drivers/dma/ti/k3-psil.c @@ -5,6 +5,7 @@ */ #include +#include #include #include #include @@ -101,3 +102,4 @@ int psil_set_new_ep_config(struct device *dev, const char *name, return 0; } EXPORT_SYMBOL_GPL(psil_set_new_ep_config); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c index 4f1aeb81e9c7f280bec0a55b98eaa5c7e1013912..789193ed038650b5d73f64fd8db8cab84c031936 100644 --- a/drivers/dma/ti/k3-udma-glue.c +++ b/drivers/dma/ti/k3-udma-glue.c @@ -6,6 +6,7 @@ * */ +#include #include #include #include @@ -1436,4 +1437,6 @@ static int __init k3_udma_glue_class_init(void) { return class_register(&k3_udma_glue_devclass); } -arch_initcall(k3_udma_glue_class_init); + +module_init(k3_udma_glue_class_init); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 7b5081989b3d6f76b6657b901362fc219ffd7396..ce8b80bb34d7d1f2f0d1c537d776cda691590193 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -5,6 +5,7 @@ */ #include +#include #include #include #include @@ -4335,18 +4336,10 @@ static const struct of_device_id udma_of_match[] = { .compatible = "ti,j721e-navss-mcu-udmap", .data = &j721e_mcu_data, }, - { /* Sentinel */ }, -}; - -static const struct of_device_id bcdma_of_match[] = { { .compatible = "ti,am64-dmss-bcdma", .data = &am64_bcdma_data, }, - { /* Sentinel */ }, -}; - -static const struct of_device_id pktdma_of_match[] = { { .compatible = "ti,am64-dmss-pktdma", .data = &am64_pktdma_data, @@ -5271,14 +5264,9 @@ static int udma_probe(struct platform_device *pdev) return -ENOMEM; match = of_match_node(udma_of_match, dev->of_node); - if (!match) - match = of_match_node(bcdma_of_match, dev->of_node); if (!match) { - match = of_match_node(pktdma_of_match, dev->of_node); - if (!match) { - dev_err(dev, "No compatible match found\n"); - return -ENODEV; - } + dev_err(dev, "No compatible match found\n"); + return -ENODEV; } ud->match_data = match->data; @@ -5511,27 +5499,9 @@ static struct platform_driver udma_driver = { }, .probe = udma_probe, }; -builtin_platform_driver(udma_driver); -static struct platform_driver bcdma_driver = { - .driver = { - .name = "ti-bcdma", - .of_match_table = bcdma_of_match, - .suppress_bind_attrs = true, - }, - .probe = udma_probe, -}; -builtin_platform_driver(bcdma_driver); - -static struct platform_driver pktdma_driver = { - .driver = { - .name = "ti-pktdma", - .of_match_table = pktdma_of_match, - 
.suppress_bind_attrs = true, - }, - .probe = udma_probe, -}; -builtin_platform_driver(pktdma_driver); +module_platform_driver(udma_driver); +MODULE_LICENSE("GPL v2"); /* Private interfaces to UDMA */ #include "k3-udma-private.c" diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 8cd4e69dc7b4c24b334c450562c2a88925b0f452..a8d23cdf883e5b825a3a66576903b1043280deae 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -1659,6 +1659,8 @@ static void xilinx_dma_issue_pending(struct dma_chan *dchan) * xilinx_dma_device_config - Configure the DMA channel * @dchan: DMA channel * @config: channel configuration + * + * Return: 0 always. */ static int xilinx_dma_device_config(struct dma_chan *dchan, struct dma_slave_config *config) @@ -2924,7 +2926,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, * @xdev: Driver specific device structure * @node: Device node * - * Return: 0 always. + * Return: '0' on success and failure value on error. */ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, struct device_node *node) diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c index 7cff66c29907eca031adef46c95f6987bdf0f559..e8b2671eb29bce67e660fa05c04cafd75b77fb2d 100644 --- a/drivers/extcon/extcon-fsa9480.c +++ b/drivers/extcon/extcon-fsa9480.c @@ -257,8 +257,7 @@ static irqreturn_t fsa9480_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static int fsa9480_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int fsa9480_probe(struct i2c_client *client) { struct fsa9480_usbsw *info; int ret; @@ -370,7 +369,7 @@ static struct i2c_driver fsa9480_i2c_driver = { .pm = &fsa9480_pm_ops, .of_match_table = fsa9480_of_match, }, - .probe = fsa9480_probe, + .probe_new = fsa9480_probe, .id_table = fsa9480_id, }; diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c index 8e6e97ec65a8a3e7e6673c9c4c84322ff9a5023e..1bc0426ce3f1e0959521f5bf21338a38480e3078 100644 --- a/drivers/extcon/extcon-max77843.c +++ b/drivers/extcon/extcon-max77843.c @@ -189,8 +189,7 @@ static const struct regmap_irq max77843_muic_irq[] = { static const struct regmap_irq_chip max77843_muic_irq_chip = { .name = "max77843-muic", .status_base = MAX77843_MUIC_REG_INT1, - .mask_base = MAX77843_MUIC_REG_INTMASK1, - .mask_invert = true, + .unmask_base = MAX77843_MUIC_REG_INTMASK1, .num_regs = 3, .irqs = max77843_muic_irq, .num_irqs = ARRAY_SIZE(max77843_muic_irq), diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c index e6e448f6ea2f66c225fc659b6ae253b0dbbfd7f2..afc9b405d103010936a7881771bf57473d684f92 100644 --- a/drivers/extcon/extcon-rt8973a.c +++ b/drivers/extcon/extcon-rt8973a.c @@ -548,8 +548,7 @@ static void rt8973a_init_dev_type(struct rt8973a_muic_info *info) } } -static int rt8973a_muic_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int rt8973a_muic_i2c_probe(struct i2c_client *i2c) { struct device_node *np = i2c->dev.of_node; struct rt8973a_muic_info *info; @@ -696,7 +695,7 @@ static struct i2c_driver rt8973a_muic_i2c_driver = { .pm = &rt8973a_muic_pm_ops, .of_match_table = rt8973a_dt_match, }, - .probe = rt8973a_muic_i2c_probe, + .probe_new = rt8973a_muic_i2c_probe, .remove = rt8973a_muic_i2c_remove, .id_table = rt8973a_i2c_id, }; diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c index 2a120d8d3c2723525efdaf9a3989997fbb5d058e..b408ce989c223900878be758ba44c9237a47966c 100644 
--- a/drivers/extcon/extcon-usbc-tusb320.c +++ b/drivers/extcon/extcon-usbc-tusb320.c @@ -313,9 +313,9 @@ static void tusb320_typec_irq_handler(struct tusb320_priv *priv, u8 reg9) typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB); } -static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) +static irqreturn_t tusb320_state_update_handler(struct tusb320_priv *priv, + bool force_update) { - struct tusb320_priv *priv = dev_id; unsigned int reg; if (regmap_read(priv->regmap, TUSB320_REG9, ®)) { @@ -323,7 +323,7 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) return IRQ_NONE; } - if (!(reg & TUSB320_REG9_INTERRUPT_STATUS)) + if (!force_update && !(reg & TUSB320_REG9_INTERRUPT_STATUS)) return IRQ_NONE; tusb320_extcon_irq_handler(priv, reg); @@ -340,6 +340,13 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } +static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) +{ + struct tusb320_priv *priv = dev_id; + + return tusb320_state_update_handler(priv, false); +} + static const struct regmap_config tusb320_regmap_config = { .reg_bits = 8, .val_bits = 8, @@ -421,8 +428,7 @@ static int tusb320_typec_probe(struct i2c_client *client, return 0; } -static int tusb320_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int tusb320_probe(struct i2c_client *client) { struct tusb320_priv *priv; const void *match_data; @@ -466,7 +472,7 @@ static int tusb320_probe(struct i2c_client *client, return ret; /* update initial state */ - tusb320_irq_handler(client->irq, priv); + tusb320_state_update_handler(priv, true); /* Reset chip to its default state */ ret = tusb320_reset(priv); @@ -477,7 +483,7 @@ static int tusb320_probe(struct i2c_client *client, * State and polarity might change after a reset, so update * them again and make sure the interrupt status bit is cleared. 
*/ - tusb320_irq_handler(client->irq, priv); + tusb320_state_update_handler(priv, true); ret = devm_request_threaded_irq(priv->dev, client->irq, NULL, tusb320_irq_handler, @@ -495,7 +501,7 @@ static const struct of_device_id tusb320_extcon_dt_match[] = { MODULE_DEVICE_TABLE(of, tusb320_extcon_dt_match); static struct i2c_driver tusb320_extcon_driver = { - .probe = tusb320_probe, + .probe_new = tusb320_probe, .driver = { .name = "extcon-tusb320", .of_match_table = tusb320_extcon_dt_match, diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c index 940ddf916202afeb7e1c7ab71d635a2a58f4fb76..5f3a3e913d28fb912df5e0ce87c1ea36140aaa96 100644 --- a/drivers/firmware/dmi-id.c +++ b/drivers/firmware/dmi-id.c @@ -155,7 +155,7 @@ static const struct attribute_group* sys_dmi_attribute_groups[] = { NULL }; -static int dmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env) +static int dmi_dev_uevent(const struct device *dev, struct kobj_uevent_env *env) { ssize_t len; diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 31a4090c66b36c34fa9d99bd4d3f0cdb4e5d24bf..09716eebe8ac3adb2d395fa037284d9971e394bc 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -429,7 +429,9 @@ static int __init efisubsys_init(void) platform_device_register_simple("efi_secret", 0, NULL, 0); #endif - execute_with_initialized_rng(&refresh_nv_rng_seed_nb); + if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) + execute_with_initialized_rng(&refresh_nv_rng_seed_nb); + return 0; err_remove_group: diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig index 983e07dc022ede84376add92c25bc651b0b16bb2..9f190eab43ed1ac1fd365a0195000fda589df0fb 100644 --- a/drivers/firmware/google/Kconfig +++ b/drivers/firmware/google/Kconfig @@ -19,6 +19,21 @@ config GOOGLE_SMI driver provides an interface for reading and writing NVRAM variables. +config GOOGLE_CBMEM + tristate "CBMEM entries in sysfs" + depends on GOOGLE_COREBOOT_TABLE + help + CBMEM is a downwards-growing memory region created by the + Coreboot BIOS containing tagged data structures from the + BIOS. These data structures expose things like the verified + boot firmware variables, flash layout, firmware event log, + and more. + + This option enables the cbmem module, which causes the + kernel to search for Coreboot CBMEM entries, and expose the + memory for each entry in sysfs under + /sys/bus/coreboot/devices/cbmem-. + config GOOGLE_COREBOOT_TABLE tristate "Coreboot Table Access" depends on HAS_IOMEM && (ACPI || OF) diff --git a/drivers/firmware/google/Makefile b/drivers/firmware/google/Makefile index d17caded5d883f76c7936af75767e6ca37eea556..8151e323cc434673b0fe56f1df1cec2fb7873295 100644 --- a/drivers/firmware/google/Makefile +++ b/drivers/firmware/google/Makefile @@ -7,5 +7,8 @@ obj-$(CONFIG_GOOGLE_MEMCONSOLE) += memconsole.o obj-$(CONFIG_GOOGLE_MEMCONSOLE_COREBOOT) += memconsole-coreboot.o obj-$(CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY) += memconsole-x86-legacy.o +# Must come after coreboot_table.o, as this driver depends on that bus type. 
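Once GOOGLE_CBMEM is enabled, each tagged CBMEM entry appears as a coreboot bus device with address, size, and mem attributes, so an entry can be inspected without special tooling. A hedged userspace sketch of dumping one entry follows; the entry id in the path is a made-up placeholder, and real ids come from the cbmem-<id> device names.

/* Hedged sketch: dump a CBMEM entry exposed by the cbmem driver. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Placeholder id; list /sys/bus/coreboot/devices for real ones. */
	const char *path = "/sys/bus/coreboot/devices/cbmem-c0ffee11/mem";
	unsigned char buf[4096];
	size_t n;
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}

	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);

	fclose(f);
	return EXIT_SUCCESS;
}

The mem attribute is created with BIN_ATTR_ADMIN_RW in the driver below, so the dump has to run as root.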
+obj-$(CONFIG_GOOGLE_CBMEM) += cbmem.o + vpd-sysfs-y := vpd.o vpd_decode.o obj-$(CONFIG_GOOGLE_VPD) += vpd-sysfs.o diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c new file mode 100644 index 0000000000000000000000000000000000000000..88e587ba1e0da3f5e9287c3ac42c7a0f4fb82809 --- /dev/null +++ b/drivers/firmware/google/cbmem.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * cbmem.c + * + * Driver for exporting cbmem entries in sysfs. + * + * Copyright 2022 Google LLC + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "coreboot_table.h" + +struct cbmem_entry { + char *mem_file_buf; + u32 size; +}; + +static struct cbmem_entry *to_cbmem_entry(struct kobject *kobj) +{ + return dev_get_drvdata(kobj_to_dev(kobj)); +} + +static ssize_t mem_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, loff_t pos, + size_t count) +{ + struct cbmem_entry *entry = to_cbmem_entry(kobj); + + return memory_read_from_buffer(buf, count, &pos, entry->mem_file_buf, + entry->size); +} + +static ssize_t mem_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, loff_t pos, + size_t count) +{ + struct cbmem_entry *entry = to_cbmem_entry(kobj); + + if (pos < 0 || pos >= entry->size) + return -EINVAL; + if (count > entry->size - pos) + count = entry->size - pos; + + memcpy(entry->mem_file_buf + pos, buf, count); + return count; +} +static BIN_ATTR_ADMIN_RW(mem, 0); + +static ssize_t address_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct coreboot_device *cbdev = dev_to_coreboot_device(dev); + + return sysfs_emit(buf, "0x%llx\n", cbdev->cbmem_entry.address); +} +static DEVICE_ATTR_RO(address); + +static ssize_t size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct coreboot_device *cbdev = dev_to_coreboot_device(dev); + + return sysfs_emit(buf, "0x%x\n", cbdev->cbmem_entry.entry_size); +} +static DEVICE_ATTR_RO(size); + +static struct attribute *attrs[] = { + &dev_attr_address.attr, + &dev_attr_size.attr, + NULL, +}; + +static struct bin_attribute *bin_attrs[] = { + &bin_attr_mem, + NULL, +}; + +static const struct attribute_group cbmem_entry_group = { + .attrs = attrs, + .bin_attrs = bin_attrs, +}; + +static const struct attribute_group *dev_groups[] = { + &cbmem_entry_group, + NULL, +}; + +static int cbmem_entry_probe(struct coreboot_device *dev) +{ + struct cbmem_entry *entry; + + entry = devm_kzalloc(&dev->dev, sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + dev_set_drvdata(&dev->dev, entry); + entry->mem_file_buf = devm_memremap(&dev->dev, dev->cbmem_entry.address, + dev->cbmem_entry.entry_size, + MEMREMAP_WB); + if (IS_ERR(entry->mem_file_buf)) + return PTR_ERR(entry->mem_file_buf); + + entry->size = dev->cbmem_entry.entry_size; + + return 0; +} + +static struct coreboot_driver cbmem_entry_driver = { + .probe = cbmem_entry_probe, + .drv = { + .name = "cbmem", + .owner = THIS_MODULE, + .dev_groups = dev_groups, + }, + .tag = LB_TAG_CBMEM_ENTRY, +}; +module_coreboot_driver(cbmem_entry_driver); + +MODULE_AUTHOR("Jack Rosenthal "); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c index 9ca21feb9d454976fd2abb7cfe1da613ec5f9804..2652c396c42368e2342bfca6814a9117ed2b745f 100644 --- a/drivers/firmware/google/coreboot_table.c +++ b/drivers/firmware/google/coreboot_table.c @@ -97,12 +97,21 @@ static 
int coreboot_table_populate(struct device *dev, void *ptr) if (!device) return -ENOMEM; - dev_set_name(&device->dev, "coreboot%d", i); device->dev.parent = dev; device->dev.bus = &coreboot_bus_type; device->dev.release = coreboot_device_release; memcpy(&device->entry, ptr_entry, entry->size); + switch (device->entry.tag) { + case LB_TAG_CBMEM_ENTRY: + dev_set_name(&device->dev, "cbmem-%08x", + device->cbmem_entry.id); + break; + default: + dev_set_name(&device->dev, "coreboot%d", i); + break; + } + ret = device_register(&device->dev); if (ret) { put_device(&device->dev); diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h index beb778674acdc0601248158d46a18d0bb6c1ce74..37f4d335a606d715fde2324b12e70e1e18179e2b 100644 --- a/drivers/firmware/google/coreboot_table.h +++ b/drivers/firmware/google/coreboot_table.h @@ -39,6 +39,18 @@ struct lb_cbmem_ref { u64 cbmem_addr; }; +#define LB_TAG_CBMEM_ENTRY 0x31 + +/* Corresponds to LB_TAG_CBMEM_ENTRY */ +struct lb_cbmem_entry { + u32 tag; + u32 size; + + u64 address; + u32 entry_size; + u32 id; +}; + /* Describes framebuffer setup by coreboot */ struct lb_framebuffer { u32 tag; @@ -65,10 +77,16 @@ struct coreboot_device { union { struct coreboot_table_entry entry; struct lb_cbmem_ref cbmem_ref; + struct lb_cbmem_entry cbmem_entry; struct lb_framebuffer framebuffer; }; }; +static inline struct coreboot_device *dev_to_coreboot_device(struct device *dev) +{ + return container_of(dev, struct coreboot_device, dev); +} + /* A driver for handling devices described in coreboot tables. */ struct coreboot_driver { int (*probe)(struct coreboot_device *); diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index ec07bf26e5eb6876a50da117c8b1806242f104fc..c3bc29e0a4880b80fd361ae7abfccb9e46556fa0 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -288,9 +288,11 @@ static int rpi_firmware_probe(struct platform_device *pdev) fw->cl.tx_block = true; fw->chan = mbox_request_channel(&fw->cl, 0); - if (IS_ERR(fw->chan)) - return dev_err_probe(dev, PTR_ERR(fw->chan), - "Failed to get mbox channel\n"); + if (IS_ERR(fw->chan)) { + int ret = PTR_ERR(fw->chan); + kfree(fw); + return dev_err_probe(dev, ret, "Failed to get mbox channel\n"); + } init_completion(&fw->c); kref_init(&fw->consumers); diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c index c2d34dc8ba462828a428ee26899193c40b8c8889..6ea5789a89e2be9626928847456a3bc3b7cae47c 100644 --- a/drivers/firmware/turris-mox-rwtm.c +++ b/drivers/firmware/turris-mox-rwtm.c @@ -528,7 +528,6 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev) rwtm->hwrng.name = DRIVER_NAME "_hwrng"; rwtm->hwrng.read = mox_hwrng_read; rwtm->hwrng.priv = (unsigned long) rwtm; - rwtm->hwrng.quality = 1024; ret = devm_hwrng_register(dev, &rwtm->hwrng); if (ret < 0) { diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index bbe0a7cabb75fb55ba8f8f5c7632fee120b84488..6ce143dafd049540a81b5dc57048b9a1dda8778a 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig @@ -265,4 +265,15 @@ config FPGA_MGR_MICROCHIP_SPI programming over slave SPI interface with .dat formatted bitstream image. +config FPGA_MGR_LATTICE_SYSCONFIG + tristate + +config FPGA_MGR_LATTICE_SYSCONFIG_SPI + tristate "Lattice sysCONFIG SPI FPGA manager" + depends on SPI + select FPGA_MGR_LATTICE_SYSCONFIG + help + FPGA manager driver support for programming Lattice FPGAs over the + slave SPI sysCONFIG interface.
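The Lattice driver introduced below has to keep the sysCONFIG bitstream burst glued together on the wire: the burst command and all data writes happen with the SPI bus locked, so no other client can interleave a transaction mid-programming. A minimal sketch of that locking pattern follows, with illustrative names and trimmed error handling; it is not the driver itself.

/* Hedged sketch: exclusive SPI bus use across a multi-message burst. */
#include <linux/spi/spi.h>

static int example_locked_burst(struct spi_device *spi,
				const void *cmd, size_t cmd_len,
				const void *data, size_t data_len)
{
	struct spi_transfer cmd_xfer = {
		.tx_buf = cmd,
		.len = cmd_len,
		.cs_change = 1,	/* keep chip select asserted afterwards */
	};
	struct spi_transfer data_xfer = {
		.tx_buf = data,
		.len = data_len,
	};
	struct spi_message msg;
	int ret;

	spi_bus_lock(spi->controller);	/* block other SPI clients */

	spi_message_init_with_transfers(&msg, &cmd_xfer, 1);
	ret = spi_sync_locked(spi, &msg);
	if (!ret) {
		spi_message_init_with_transfers(&msg, &data_xfer, 1);
		ret = spi_sync_locked(spi, &msg);
	}

	spi_bus_unlock(spi->controller);

	return ret;
}

The real driver spreads these steps across the FPGA manager's write_init, write, and write_complete callbacks, which is why it cannot simply issue one spi_sync() call.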
+ endif # FPGA diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile index 42ae8b58abce7d284906faa24895b576a6c99a12..72e554b4d2f76e7374490dbdb7076d474f79bae7 100644 --- a/drivers/fpga/Makefile +++ b/drivers/fpga/Makefile @@ -20,6 +20,8 @@ obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA) += versal-fpga.o obj-$(CONFIG_FPGA_MGR_MICROCHIP_SPI) += microchip-spi.o +obj-$(CONFIG_FPGA_MGR_LATTICE_SYSCONFIG) += lattice-sysconfig.o +obj-$(CONFIG_FPGA_MGR_LATTICE_SYSCONFIG_SPI) += lattice-sysconfig-spi.o obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o diff --git a/drivers/fpga/lattice-sysconfig-spi.c b/drivers/fpga/lattice-sysconfig-spi.c new file mode 100644 index 0000000000000000000000000000000000000000..2702b26b7f55e804cdee442369e24b56e4d3e1ea --- /dev/null +++ b/drivers/fpga/lattice-sysconfig-spi.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Lattice FPGA programming over slave SPI sysCONFIG interface. + */ + +#include + +#include "lattice-sysconfig.h" + +static const u32 ecp5_spi_max_speed_hz = 60000000; + +static int sysconfig_spi_cmd_transfer(struct sysconfig_priv *priv, + const void *tx_buf, size_t tx_len, + void *rx_buf, size_t rx_len) +{ + struct spi_device *spi = to_spi_device(priv->dev); + + return spi_write_then_read(spi, tx_buf, tx_len, rx_buf, rx_len); +} + +static int sysconfig_spi_bitstream_burst_init(struct sysconfig_priv *priv) +{ + const u8 lsc_bitstream_burst[] = SYSCONFIG_LSC_BITSTREAM_BURST; + struct spi_device *spi = to_spi_device(priv->dev); + struct spi_transfer xfer = {}; + struct spi_message msg; + size_t buf_len; + void *buf; + int ret; + + buf_len = sizeof(lsc_bitstream_burst); + + buf = kmemdup(lsc_bitstream_burst, buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + xfer.len = buf_len; + xfer.tx_buf = buf; + xfer.cs_change = 1; + + spi_message_init_with_transfers(&msg, &xfer, 1); + + /* + * Lock SPI bus for exclusive usage until FPGA programming is done. + * SPI bus will be released in sysconfig_spi_bitstream_burst_complete(). 
+ */ + spi_bus_lock(spi->controller); + + ret = spi_sync_locked(spi, &msg); + if (ret) + spi_bus_unlock(spi->controller); + + kfree(buf); + + return ret; +} + +static int sysconfig_spi_bitstream_burst_write(struct sysconfig_priv *priv, + const char *buf, size_t len) +{ + struct spi_device *spi = to_spi_device(priv->dev); + struct spi_transfer xfer = { + .tx_buf = buf, + .len = len, + .cs_change = 1, + }; + struct spi_message msg; + + spi_message_init_with_transfers(&msg, &xfer, 1); + + return spi_sync_locked(spi, &msg); +} + +static int sysconfig_spi_bitstream_burst_complete(struct sysconfig_priv *priv) +{ + struct spi_device *spi = to_spi_device(priv->dev); + + /* Bitstream burst write is done, release SPI bus */ + spi_bus_unlock(spi->controller); + + /* Toggle CS to finish bitstream write */ + return spi_write(spi, NULL, 0); +} + +static int sysconfig_spi_probe(struct spi_device *spi) +{ + const struct spi_device_id *dev_id; + struct device *dev = &spi->dev; + struct sysconfig_priv *priv; + const u32 *spi_max_speed; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spi_max_speed = device_get_match_data(dev); + if (!spi_max_speed) { + dev_id = spi_get_device_id(spi); + if (!dev_id) + return -ENODEV; + + spi_max_speed = (const u32 *)dev_id->driver_data; + } + + if (!spi_max_speed) + return -EINVAL; + + if (spi->max_speed_hz > *spi_max_speed) { + dev_err(dev, "SPI speed %u is too high, maximum speed is %u\n", + spi->max_speed_hz, *spi_max_speed); + return -EINVAL; + } + + priv->dev = dev; + priv->command_transfer = sysconfig_spi_cmd_transfer; + priv->bitstream_burst_write_init = sysconfig_spi_bitstream_burst_init; + priv->bitstream_burst_write = sysconfig_spi_bitstream_burst_write; + priv->bitstream_burst_write_complete = sysconfig_spi_bitstream_burst_complete; + + return sysconfig_probe(priv); +} + +static const struct spi_device_id sysconfig_spi_ids[] = { + { + .name = "sysconfig-ecp5", + .driver_data = (kernel_ulong_t)&ecp5_spi_max_speed_hz, + }, {}, +}; +MODULE_DEVICE_TABLE(spi, sysconfig_spi_ids); + +#if IS_ENABLED(CONFIG_OF) +static const struct of_device_id sysconfig_of_ids[] = { + { + .compatible = "lattice,sysconfig-ecp5", + .data = &ecp5_spi_max_speed_hz, + }, {}, +}; +MODULE_DEVICE_TABLE(of, sysconfig_of_ids); +#endif /* IS_ENABLED(CONFIG_OF) */ + +static struct spi_driver lattice_sysconfig_driver = { + .probe = sysconfig_spi_probe, + .id_table = sysconfig_spi_ids, + .driver = { + .name = "lattice_sysconfig_spi_fpga_mgr", + .of_match_table = of_match_ptr(sysconfig_of_ids), + }, +}; +module_spi_driver(lattice_sysconfig_driver); + +MODULE_DESCRIPTION("Lattice sysCONFIG Slave SPI FPGA Manager"); +MODULE_LICENSE("GPL"); diff --git a/drivers/fpga/lattice-sysconfig.c b/drivers/fpga/lattice-sysconfig.c new file mode 100644 index 0000000000000000000000000000000000000000..ba51a60f672feed1ca988f28341a55e4227ca827 --- /dev/null +++ b/drivers/fpga/lattice-sysconfig.c @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Lattice FPGA sysCONFIG interface functions independent of port type. 
+ */ + +#include +#include +#include +#include + +#include "lattice-sysconfig.h" + +static int sysconfig_cmd_write(struct sysconfig_priv *priv, const void *buf, + size_t buf_len) +{ + return priv->command_transfer(priv, buf, buf_len, NULL, 0); +} + +static int sysconfig_cmd_read(struct sysconfig_priv *priv, const void *tx_buf, + size_t tx_len, void *rx_buf, size_t rx_len) +{ + return priv->command_transfer(priv, tx_buf, tx_len, rx_buf, rx_len); +} + +static int sysconfig_read_busy(struct sysconfig_priv *priv) +{ + const u8 lsc_check_busy[] = SYSCONFIG_LSC_CHECK_BUSY; + u8 busy; + int ret; + + ret = sysconfig_cmd_read(priv, lsc_check_busy, sizeof(lsc_check_busy), + &busy, sizeof(busy)); + + return ret ? : busy; +} + +static int sysconfig_poll_busy(struct sysconfig_priv *priv) +{ + int ret, busy; + + ret = read_poll_timeout(sysconfig_read_busy, busy, busy <= 0, + SYSCONFIG_POLL_INTERVAL_US, + SYSCONFIG_POLL_BUSY_TIMEOUT_US, false, priv); + + return ret ? : busy; +} + +static int sysconfig_read_status(struct sysconfig_priv *priv, u32 *status) +{ + const u8 lsc_read_status[] = SYSCONFIG_LSC_READ_STATUS; + __be32 device_status; + int ret; + + ret = sysconfig_cmd_read(priv, lsc_read_status, sizeof(lsc_read_status), + &device_status, sizeof(device_status)); + if (ret) + return ret; + + *status = be32_to_cpu(device_status); + + return 0; +} + +static int sysconfig_poll_status(struct sysconfig_priv *priv, u32 *status) +{ + int ret = sysconfig_poll_busy(priv); + + if (ret) + return ret; + + return sysconfig_read_status(priv, status); +} + +static int sysconfig_poll_gpio(struct gpio_desc *gpio, bool is_active) +{ + int ret, val; + + ret = read_poll_timeout(gpiod_get_value, val, + val < 0 || !!val == is_active, + SYSCONFIG_POLL_INTERVAL_US, + SYSCONFIG_POLL_GPIO_TIMEOUT_US, false, gpio); + + if (val < 0) + return val; + + return ret; +} + +static int sysconfig_gpio_refresh(struct sysconfig_priv *priv) +{ + struct gpio_desc *program = priv->program; + struct gpio_desc *init = priv->init; + struct gpio_desc *done = priv->done; + int ret; + + /* Enter init mode */ + gpiod_set_value(program, 1); + + ret = sysconfig_poll_gpio(init, true); + if (!ret) + ret = sysconfig_poll_gpio(done, false); + + if (ret) + return ret; + + /* Enter program mode */ + gpiod_set_value(program, 0); + + return sysconfig_poll_gpio(init, false); +} + +static int sysconfig_lsc_refresh(struct sysconfig_priv *priv) +{ + static const u8 lsc_refresh[] = SYSCONFIG_LSC_REFRESH; + int ret; + + ret = sysconfig_cmd_write(priv, lsc_refresh, sizeof(lsc_refresh)); + if (ret) + return ret; + + usleep_range(4000, 8000); + + return 0; +} + +static int sysconfig_refresh(struct sysconfig_priv *priv) +{ + struct gpio_desc *program = priv->program; + struct gpio_desc *init = priv->init; + struct gpio_desc *done = priv->done; + + if (program && init && done) + return sysconfig_gpio_refresh(priv); + + return sysconfig_lsc_refresh(priv); +} + +static int sysconfig_isc_enable(struct sysconfig_priv *priv) +{ + u8 isc_enable[] = SYSCONFIG_ISC_ENABLE; + u32 status; + int ret; + + ret = sysconfig_cmd_write(priv, isc_enable, sizeof(isc_enable)); + if (ret) + return ret; + + ret = sysconfig_poll_status(priv, &status); + if (ret) + return ret; + + if (status & SYSCONFIG_STATUS_FAIL) + return -EFAULT; + + return 0; +} + +static int sysconfig_isc_erase(struct sysconfig_priv *priv) +{ + u8 isc_erase[] = SYSCONFIG_ISC_ERASE; + u32 status; + int ret; + + ret = sysconfig_cmd_write(priv, isc_erase, sizeof(isc_erase)); + if (ret) + return ret; + + ret = 
sysconfig_poll_status(priv, &status); + if (ret) + return ret; + + if (status & SYSCONFIG_STATUS_FAIL) + return -EFAULT; + + return 0; +} + +static int sysconfig_isc_init(struct sysconfig_priv *priv) +{ + int ret = sysconfig_isc_enable(priv); + + if (ret) + return ret; + + return sysconfig_isc_erase(priv); +} + +static int sysconfig_lsc_init_addr(struct sysconfig_priv *priv) +{ + const u8 lsc_init_addr[] = SYSCONFIG_LSC_INIT_ADDR; + + return sysconfig_cmd_write(priv, lsc_init_addr, sizeof(lsc_init_addr)); +} + +static int sysconfig_burst_write_init(struct sysconfig_priv *priv) +{ + return priv->bitstream_burst_write_init(priv); +} + +static int sysconfig_burst_write_complete(struct sysconfig_priv *priv) +{ + return priv->bitstream_burst_write_complete(priv); +} + +static int sysconfig_bitstream_burst_write(struct sysconfig_priv *priv, + const char *buf, size_t count) +{ + int ret = priv->bitstream_burst_write(priv, buf, count); + + if (ret) + sysconfig_burst_write_complete(priv); + + return ret; +} + +static int sysconfig_isc_disable(struct sysconfig_priv *priv) +{ + const u8 isc_disable[] = SYSCONFIG_ISC_DISABLE; + + return sysconfig_cmd_write(priv, isc_disable, sizeof(isc_disable)); +} + +static void sysconfig_cleanup(struct sysconfig_priv *priv) +{ + sysconfig_isc_erase(priv); + sysconfig_refresh(priv); +} + +static int sysconfig_isc_finish(struct sysconfig_priv *priv) +{ + struct gpio_desc *done_gpio = priv->done; + u32 status; + int ret; + + if (done_gpio) { + ret = sysconfig_isc_disable(priv); + if (ret) + return ret; + + return sysconfig_poll_gpio(done_gpio, true); + } + + ret = sysconfig_poll_status(priv, &status); + if (ret) + return ret; + + if ((status & SYSCONFIG_STATUS_DONE) && + !(status & SYSCONFIG_STATUS_BUSY) && + !(status & SYSCONFIG_STATUS_ERR)) + return sysconfig_isc_disable(priv); + + return -EFAULT; +} + +static enum fpga_mgr_states sysconfig_ops_state(struct fpga_manager *mgr) +{ + struct sysconfig_priv *priv = mgr->priv; + struct gpio_desc *done = priv->done; + u32 status; + int ret; + + if (done && (gpiod_get_value(done) > 0)) + return FPGA_MGR_STATE_OPERATING; + + ret = sysconfig_read_status(priv, &status); + if (!ret && (status & SYSCONFIG_STATUS_DONE)) + return FPGA_MGR_STATE_OPERATING; + + return FPGA_MGR_STATE_UNKNOWN; +} + +static int sysconfig_ops_write_init(struct fpga_manager *mgr, + struct fpga_image_info *info, + const char *buf, size_t count) +{ + struct sysconfig_priv *priv = mgr->priv; + struct device *dev = &mgr->dev; + int ret; + + if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) { + dev_err(dev, "Partial reconfiguration is not supported\n"); + return -EOPNOTSUPP; + } + + /* Enter program mode */ + ret = sysconfig_refresh(priv); + if (ret) { + dev_err(dev, "Failed to go to program mode\n"); + return ret; + } + + /* Enter ISC mode */ + ret = sysconfig_isc_init(priv); + if (ret) { + dev_err(dev, "Failed to go to ISC mode\n"); + return ret; + } + + /* Initialize the Address Shift Register */ + ret = sysconfig_lsc_init_addr(priv); + if (ret) { + dev_err(dev, + "Failed to initialize the Address Shift Register\n"); + return ret; + } + + /* Prepare for bitstream burst write */ + ret = sysconfig_burst_write_init(priv); + if (ret) + dev_err(dev, "Failed to prepare for bitstream burst write\n"); + + return ret; +} + +static int sysconfig_ops_write(struct fpga_manager *mgr, const char *buf, + size_t count) +{ + return sysconfig_bitstream_burst_write(mgr->priv, buf, count); +} + +static int sysconfig_ops_write_complete(struct fpga_manager *mgr, + struct 
fpga_image_info *info) +{ + struct sysconfig_priv *priv = mgr->priv; + struct device *dev = &mgr->dev; + int ret; + + ret = sysconfig_burst_write_complete(priv); + if (!ret) + ret = sysconfig_poll_busy(priv); + + if (ret) { + dev_err(dev, "Error while waiting for bitstream write to finish\n"); + goto fail; + } + + ret = sysconfig_isc_finish(priv); + +fail: + if (ret) + sysconfig_cleanup(priv); + + return ret; +} + +static const struct fpga_manager_ops sysconfig_fpga_mgr_ops = { + .state = sysconfig_ops_state, + .write_init = sysconfig_ops_write_init, + .write = sysconfig_ops_write, + .write_complete = sysconfig_ops_write_complete, +}; + +int sysconfig_probe(struct sysconfig_priv *priv) +{ + struct gpio_desc *program, *init, *done; + struct device *dev = priv->dev; + struct fpga_manager *mgr; + + if (!dev) + return -ENODEV; + + if (!priv->command_transfer || + !priv->bitstream_burst_write_init || + !priv->bitstream_burst_write || + !priv->bitstream_burst_write_complete) { + dev_err(dev, "Essential callback is missing\n"); + return -EINVAL; + } + + program = devm_gpiod_get_optional(dev, "program", GPIOD_OUT_LOW); + if (IS_ERR(program)) + return dev_err_probe(dev, PTR_ERR(program), + "Failed to get PROGRAM GPIO\n"); + + init = devm_gpiod_get_optional(dev, "init", GPIOD_IN); + if (IS_ERR(init)) + return dev_err_probe(dev, PTR_ERR(init), + "Failed to get INIT GPIO\n"); + + done = devm_gpiod_get_optional(dev, "done", GPIOD_IN); + if (IS_ERR(done)) + return dev_err_probe(dev, PTR_ERR(done), + "Failed to get DONE GPIO\n"); + + priv->program = program; + priv->init = init; + priv->done = done; + + mgr = devm_fpga_mgr_register(dev, "Lattice sysCONFIG FPGA Manager", + &sysconfig_fpga_mgr_ops, priv); + + return PTR_ERR_OR_ZERO(mgr); +} +EXPORT_SYMBOL(sysconfig_probe); + +MODULE_DESCRIPTION("Lattice sysCONFIG FPGA Manager Core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/fpga/lattice-sysconfig.h b/drivers/fpga/lattice-sysconfig.h new file mode 100644 index 0000000000000000000000000000000000000000..df47d9a524f63dc8bb453490a9692c602f77313f --- /dev/null +++ b/drivers/fpga/lattice-sysconfig.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LATTICE_SYSCONFIG_H +#define __LATTICE_SYSCONFIG_H + +#define SYSCONFIG_ISC_ENABLE {0xC6, 0x00, 0x00, 0x00} +#define SYSCONFIG_ISC_DISABLE {0x26, 0x00, 0x00, 0x00} +#define SYSCONFIG_ISC_ERASE {0x0E, 0x01, 0x00, 0x00} +#define SYSCONFIG_LSC_READ_STATUS {0x3C, 0x00, 0x00, 0x00} +#define SYSCONFIG_LSC_CHECK_BUSY {0xF0, 0x00, 0x00, 0x00} +#define SYSCONFIG_LSC_REFRESH {0x79, 0x00, 0x00, 0x00} +#define SYSCONFIG_LSC_INIT_ADDR {0x46, 0x00, 0x00, 0x00} +#define SYSCONFIG_LSC_BITSTREAM_BURST {0x7a, 0x00, 0x00, 0x00} + +#define SYSCONFIG_STATUS_DONE BIT(8) +#define SYSCONFIG_STATUS_BUSY BIT(12) +#define SYSCONFIG_STATUS_FAIL BIT(13) +#define SYSCONFIG_STATUS_ERR GENMASK(25, 23) + +#define SYSCONFIG_POLL_INTERVAL_US 30 +#define SYSCONFIG_POLL_BUSY_TIMEOUT_US 1000000 +#define SYSCONFIG_POLL_GPIO_TIMEOUT_US 100000 + +struct sysconfig_priv { + struct gpio_desc *program; + struct gpio_desc *init; + struct gpio_desc *done; + struct device *dev; + int (*command_transfer)(struct sysconfig_priv *priv, const void *tx_buf, + size_t tx_len, void *rx_buf, size_t rx_len); + int (*bitstream_burst_write_init)(struct sysconfig_priv *priv); + int (*bitstream_burst_write)(struct sysconfig_priv *priv, + const char *tx_buf, size_t tx_len); + int (*bitstream_burst_write_complete)(struct sysconfig_priv *priv); +}; + +int sysconfig_probe(struct sysconfig_priv *priv); + +#endif /* 
__LATTICE_SYSCONFIG_H */ diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c index 426aa34c6a0dcdc96a2a13caa6ee13613e7e6bec..ae0da361e6c6282448995155fa2aa6e8cd5f11be 100644 --- a/drivers/fpga/zynq-fpga.c +++ b/drivers/fpga/zynq-fpga.c @@ -582,11 +582,9 @@ static int zynq_fpga_probe(struct platform_device *pdev) return priv->irq; priv->clk = devm_clk_get(dev, "ref_clk"); - if (IS_ERR(priv->clk)) { - if (PTR_ERR(priv->clk) != -EPROBE_DEFER) - dev_err(dev, "input clock not found\n"); - return PTR_ERR(priv->clk); - } + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "input clock not found\n"); err = clk_prepare_enable(priv->clk); if (err) { diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c index 1e82b7967570a8f38b2411e76686ea93b4c84d40..77a4b280c552abd77fcaed1998b41daa611205a1 100644 --- a/drivers/gnss/core.c +++ b/drivers/gnss/core.c @@ -337,7 +337,7 @@ static const char * const gnss_type_names[GNSS_TYPE_COUNT] = { [GNSS_TYPE_MTK] = "MTK", }; -static const char *gnss_type_name(struct gnss_device *gdev) +static const char *gnss_type_name(const struct gnss_device *gdev) { const char *name = NULL; @@ -365,9 +365,9 @@ static struct attribute *gnss_attrs[] = { }; ATTRIBUTE_GROUPS(gnss); -static int gnss_uevent(struct device *dev, struct kobj_uevent_env *env) +static int gnss_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct gnss_device *gdev = to_gnss_device(dev); + const struct gnss_device *gdev = to_gnss_device(dev); int ret; ret = add_uevent_var(env, "GNSS_TYPE=%s", gnss_type_name(gdev)); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index a01af11806164c42680e2eb2a917fecca9a9f6f5..ec7cfd4f52b1eac199ffb9c58199ad6e64d6c138 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -109,6 +109,15 @@ config GPIO_REGMAP config GPIO_MAX730X tristate +config GPIO_IDIO_16 + tristate + help + Enables support for the idio-16 library functions. The idio-16 library + provides functions to facilitate communication with devices within the + ACCES IDIO-16 family such as the 104-IDIO-16 and the PCI-IDIO-16. + + If built as a module its name will be gpio-idio-16. + menu "Memory mapped GPIO drivers" depends on HAS_IOMEM @@ -219,7 +228,7 @@ config GPIO_CLPS711X Say yes here to support GPIO on CLPS711X SoCs. config GPIO_DAVINCI - bool "TI Davinci/Keystone GPIO support" + tristate "TI Davinci/Keystone GPIO support" default y if ARCH_DAVINCI depends on (ARM || ARM64) && (ARCH_DAVINCI || ARCH_KEYSTONE || ARCH_K3) help @@ -310,7 +319,7 @@ config GPIO_GRGPIO config GPIO_HISI tristate "HiSilicon GPIO controller driver" - depends on (ARM64 && ACPI) || COMPILE_TEST + depends on ARM64 || COMPILE_TEST select GPIO_GENERIC select GPIOLIB_IRQCHIP help @@ -600,14 +609,6 @@ config GPIO_SPRD help Say yes here to support Spreadtrum GPIO device. -config GPIO_STA2X11 - bool "STA2x11/ConneXt GPIO support" - depends on MFD_STA2X11 - select GENERIC_IRQ_CHIP - help - Say yes here to support the STA2x11/ConneXt GPIO device. - The GPIO module has 128 GPIO pins with alternate functions. - config GPIO_STP_XWAY bool "XWAY STP GPIOs" depends on SOC_XWAY || COMPILE_TEST @@ -857,6 +858,7 @@ config GPIO_104_IDIO_16 depends on PC104 select ISA_BUS_API select GPIOLIB_IRQCHIP + select GPIO_IDIO_16 help Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16, 104-IDIO-16E, 104-IDO-16, 104-IDIO-8, 104-IDIO-8E, 104-IDO-8). 
The @@ -1561,6 +1563,7 @@ config GPIO_PCI_IDIO_16 tristate "ACCES PCI-IDIO-16 GPIO support" select GPIOLIB_IRQCHIP + select GPIO_IDIO_16 help Enables GPIO support for the ACCES PCI-IDIO-16. An interrupt is generated when any of the inputs change state (low to high or high to @@ -1681,6 +1684,12 @@ config GPIO_AGGREGATOR industrial control context, to be operated from userspace using the GPIO chardev interface. +config GPIO_LATCH + tristate "GPIO latch driver" + help + Say yes here to enable a driver for GPIO multiplexers based on latches + connected to other GPIOs. + config GPIO_MOCKUP tristate "GPIO Testing Driver" select IRQ_SIM diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 29e3beb6548cb0361979722158179c884b4bab5e..010587025fc87197fa837511ab646a088a343530 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_OF_GPIO) += gpiolib-of.o obj-$(CONFIG_GPIO_CDEV) += gpiolib-cdev.o obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o +obj-$(CONFIG_GPIOLIB) += gpiolib-swnode.o # Device drivers. Generally keep list sorted alphabetically obj-$(CONFIG_GPIO_REGMAP) += gpio-regmap.o @@ -68,6 +69,7 @@ obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o obj-$(CONFIG_GPIO_I8255) += gpio-i8255.o obj-$(CONFIG_GPIO_ICH) += gpio-ich.o +obj-$(CONFIG_GPIO_IDIO_16) += gpio-idio-16.o obj-$(CONFIG_GPIO_IDT3243X) += gpio-idt3243x.o obj-$(CONFIG_GPIO_IMX_SCU) += gpio-imx-scu.o obj-$(CONFIG_GPIO_IOP) += gpio-iop.o @@ -75,6 +77,7 @@ obj-$(CONFIG_GPIO_IT87) += gpio-it87.o obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o +obj-$(CONFIG_GPIO_LATCH) += gpio-latch.o obj-$(CONFIG_GPIO_LOGICVC) += gpio-logicvc.o obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o obj-$(CONFIG_GPIO_LOONGSON) += gpio-loongson.o @@ -140,7 +143,6 @@ obj-$(CONFIG_GPIO_SL28CPLD) += gpio-sl28cpld.o obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o obj-$(CONFIG_GPIO_SPRD) += gpio-sprd.o -obj-$(CONFIG_GPIO_STA2X11) += gpio-sta2x11.o obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o obj-$(CONFIG_GPIO_STP_XWAY) += gpio-stp-xway.o obj-$(CONFIG_GPIO_SYSCON) += gpio-syscon.o diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO index f87ff3fa8a531d27ac537d7f1a3eb4dd894dcad2..76560744587a5fff7235a689733bc72a9073fbbf 100644 --- a/drivers/gpio/TODO +++ b/drivers/gpio/TODO @@ -124,6 +124,13 @@ Work items: this with dry-coding and sending to maintainers to test +Generic regmap GPIO + +In a way very similar to Generic MMIO GPIO, convert the users which can +take advantage of using regmap over direct IO accessors. Note that even in +the MMIO case, regmap MMIO with gpio-regmap.c is preferable over gpio-mmio.c. + + GPIOLIB irqchip The GPIOLIB irqchip is a helper irqchip for "simple cases" that should diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c index 718bd54e2a25511da21604e0b3942d98fda278b0..098fbefdbe226891db337eefe77206344d5f1a8e 100644 --- a/drivers/gpio/gpio-104-idio-16.c +++ b/drivers/gpio/gpio-104-idio-16.c @@ -6,7 +6,7 @@ * This driver supports the following ACCES devices: 104-IDIO-16, * 104-IDIO-16E, 104-IDO-16, 104-IDIO-8, 104-IDIO-8E, and 104-IDO-8.
*/ -#include +#include #include #include #include @@ -21,6 +21,8 @@ #include #include +#include "gpio-idio-16.h" + #define IDIO_16_EXTENT 8 #define MAX_NUM_IDIO_16 max_num_isa_dev(IDIO_16_EXTENT) @@ -34,49 +36,26 @@ static unsigned int num_irq; module_param_hw_array(irq, uint, irq, &num_irq, 0); MODULE_PARM_DESC(irq, "ACCES 104-IDIO-16 interrupt line numbers"); -/** - * struct idio_16_reg - device registers structure - * @out0_7: Read: N/A - * Write: FET Drive Outputs 0-7 - * @in0_7: Read: Isolated Inputs 0-7 - * Write: Clear Interrupt - * @irq_ctl: Read: Enable IRQ - * Write: Disable IRQ - * @unused: N/A - * @out8_15: Read: N/A - * Write: FET Drive Outputs 8-15 - * @in8_15: Read: Isolated Inputs 8-15 - * Write: N/A - */ -struct idio_16_reg { - u8 out0_7; - u8 in0_7; - u8 irq_ctl; - u8 unused; - u8 out8_15; - u8 in8_15; -}; - /** * struct idio_16_gpio - GPIO device private data structure * @chip: instance of the gpio_chip * @lock: synchronization lock to prevent I/O race conditions * @irq_mask: I/O bits affected by interrupts * @reg: I/O address offset for the device registers - * @out_state: output bits state + * @state: ACCES IDIO-16 device state */ struct idio_16_gpio { struct gpio_chip chip; raw_spinlock_t lock; unsigned long irq_mask; - struct idio_16_reg __iomem *reg; - unsigned int out_state; + struct idio_16 __iomem *reg; + struct idio_16_state state; }; static int idio_16_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { - if (offset > 15) + if (idio_16_get_direction(offset)) return GPIO_LINE_DIRECTION_IN; return GPIO_LINE_DIRECTION_OUT; @@ -98,15 +77,8 @@ static int idio_16_gpio_direction_output(struct gpio_chip *chip, static int idio_16_gpio_get(struct gpio_chip *chip, unsigned int offset) { struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip); - const unsigned int mask = BIT(offset-16); - if (offset < 16) - return -EINVAL; - - if (offset < 24) - return !!(ioread8(&idio16gpio->reg->in0_7) & mask); - - return !!(ioread8(&idio16gpio->reg->in8_15) & (mask>>8)); + return idio_16_get(idio16gpio->reg, &idio16gpio->state, offset); } static int idio_16_gpio_get_multiple(struct gpio_chip *chip, @@ -114,11 +86,7 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip, { struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip); - *bits = 0; - if (*mask & GENMASK(23, 16)) - *bits |= (unsigned long)ioread8(&idio16gpio->reg->in0_7) << 16; - if (*mask & GENMASK(31, 24)) - *bits |= (unsigned long)ioread8(&idio16gpio->reg->in8_15) << 24; + idio_16_get_multiple(idio16gpio->reg, &idio16gpio->state, mask, bits); return 0; } @@ -127,44 +95,16 @@ static void idio_16_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip); - const unsigned int mask = BIT(offset); - unsigned long flags; - if (offset > 15) - return; - - raw_spin_lock_irqsave(&idio16gpio->lock, flags); - - if (value) - idio16gpio->out_state |= mask; - else - idio16gpio->out_state &= ~mask; - - if (offset > 7) - iowrite8(idio16gpio->out_state >> 8, &idio16gpio->reg->out8_15); - else - iowrite8(idio16gpio->out_state, &idio16gpio->reg->out0_7); - - raw_spin_unlock_irqrestore(&idio16gpio->lock, flags); + idio_16_set(idio16gpio->reg, &idio16gpio->state, offset, value); } static void idio_16_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask, unsigned long *bits) { struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip); - unsigned long flags; - raw_spin_lock_irqsave(&idio16gpio->lock, flags); - - 
idio16gpio->out_state &= ~*mask; - idio16gpio->out_state |= *mask & *bits; - - if (*mask & 0xFF) - iowrite8(idio16gpio->out_state, &idio16gpio->reg->out0_7); - if ((*mask >> 8) & 0xFF) - iowrite8(idio16gpio->out_state >> 8, &idio16gpio->reg->out8_15); - - raw_spin_unlock_irqrestore(&idio16gpio->lock, flags); + idio_16_set_multiple(idio16gpio->reg, &idio16gpio->state, mask, bits); } static void idio_16_irq_ack(struct irq_data *data) @@ -301,7 +241,10 @@ static int idio_16_probe(struct device *dev, unsigned int id) idio16gpio->chip.get_multiple = idio_16_gpio_get_multiple; idio16gpio->chip.set = idio_16_gpio_set; idio16gpio->chip.set_multiple = idio_16_gpio_set_multiple; - idio16gpio->out_state = 0xFFFF; + + idio_16_state_init(&idio16gpio->state); + /* FET off states are represented by bit values of "1" */ + bitmap_fill(idio16gpio->state.out_state, IDIO_16_NOUT); girq = &idio16gpio->chip.irq; gpio_irq_chip_set_chip(girq, &idio_16_irqchip); @@ -343,3 +286,4 @@ module_isa_driver_with_irq(idio_16_driver, num_idio_16, num_irq); MODULE_AUTHOR("William Breathitt Gray "); MODULE_DESCRIPTION("ACCES 104-IDIO-16 GPIO driver"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(GPIO_IDIO_16); diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c index 0cb2664085cf83144a5a78253b255653d5a6a03a..6d17d262ad9170dc278fdc691b516b86d90f9a8f 100644 --- a/drivers/gpio/gpio-aggregator.c +++ b/drivers/gpio/gpio-aggregator.c @@ -23,6 +23,7 @@ #include #include +#define AGGREGATOR_MAX_GPIOS 512 /* * GPIO Aggregator sysfs interface @@ -64,7 +65,7 @@ static int aggr_parse(struct gpio_aggregator *aggr) unsigned int i, n = 0; int error = 0; - bitmap = bitmap_alloc(ARCH_NR_GPIOS, GFP_KERNEL); + bitmap = bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL); if (!bitmap) return -ENOMEM; @@ -84,13 +85,13 @@ static int aggr_parse(struct gpio_aggregator *aggr) } /* GPIO chip + offset(s) */ - error = bitmap_parselist(offsets, bitmap, ARCH_NR_GPIOS); + error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS); if (error) { pr_err("Cannot parse %s: %d\n", offsets, error); goto free_bitmap; } - for_each_set_bit(i, bitmap, ARCH_NR_GPIOS) { + for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) { error = aggr_add_gpio(aggr, name, i, &n); if (error) goto free_bitmap; diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index 59c4c48d8296b870fd7bd243eb5df4f74213914f..fa51a91afa54fd28dff649198bc8218cf9d6fc9f 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -217,9 +217,6 @@ static int davinci_gpio_probe(struct platform_device *pdev) return -EINVAL; } - if (WARN_ON(ARCH_NR_GPIOS < ngpio)) - ngpio = ARCH_NR_GPIOS; - /* * If there are unbanked interrupts then the number of * interrupts is equal to number of gpios else all are banked so @@ -730,3 +727,14 @@ static int __init davinci_gpio_drv_reg(void) return platform_driver_register(&davinci_gpio_driver); } postcore_initcall(davinci_gpio_drv_reg); + +static void __exit davinci_gpio_exit(void) +{ + platform_driver_unregister(&davinci_gpio_driver); +} +module_exit(davinci_gpio_exit); + +MODULE_AUTHOR("Jan Kotas "); +MODULE_DESCRIPTION("DAVINCI GPIO driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:gpio-davinci"); diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c index 482f678c893e5ebed813508e4e6d4d030be33971..df1bdaae441c5e5ae50a223eebd3aa14a3293f90 100644 --- a/drivers/gpio/gpio-exar.c +++ b/drivers/gpio/gpio-exar.c @@ -141,6 +141,7 @@ static const struct regmap_config exar_regmap_config = { .name = 
"exar-gpio", .reg_bits = 16, .val_bits = 8, + .io_port = true, }; static int gpio_exar_probe(struct platform_device *pdev) diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c index f77a965f5780d8d9817f2546df480e3341cb3b68..2728672ef9f839c9ed441f5a9c55b7b052f77120 100644 --- a/drivers/gpio/gpio-ftgpio010.c +++ b/drivers/gpio/gpio-ftgpio010.c @@ -277,7 +277,7 @@ static int ftgpio_gpio_probe(struct platform_device *pdev) dev_err(dev, "unable to init generic GPIO\n"); goto dis_clk; } - g->gc.label = "FTGPIO010"; + g->gc.label = dev_name(dev); g->gc.base = -1; g->gc.parent = dev; g->gc.owner = THIS_MODULE; diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c index 2109803ffb386af227af4ca9613db2f359bf27e6..5057fa9ad6108a26f093f24eafbc51e91fd4b5b9 100644 --- a/drivers/gpio/gpio-gw-pld.c +++ b/drivers/gpio/gpio-gw-pld.c @@ -67,8 +67,7 @@ static void gw_pld_set8(struct gpio_chip *gc, unsigned offset, int value) gw_pld_output8(gc, offset, value); } -static int gw_pld_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int gw_pld_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct gw_pld *gw; @@ -126,7 +125,7 @@ static struct i2c_driver gw_pld_driver = { .name = "gw_pld", .of_match_table = gw_pld_dt_ids, }, - .probe = gw_pld_probe, + .probe_new = gw_pld_probe, .id_table = gw_pld_id, }; module_i2c_driver(gw_pld_driver); diff --git a/drivers/gpio/gpio-hisi.c b/drivers/gpio/gpio-hisi.c index 3caabef5c7a2ea069a8f9d8dd18cae40d5b1b4c5..55bd69043bf429cd9a40b3509082af2c3d134ce2 100644 --- a/drivers/gpio/gpio-hisi.c +++ b/drivers/gpio/gpio-hisi.c @@ -221,6 +221,12 @@ static const struct acpi_device_id hisi_gpio_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, hisi_gpio_acpi_match); +static const struct of_device_id hisi_gpio_dts_match[] = { + { .compatible = "hisilicon,ascend910-gpio", }, + { } +}; +MODULE_DEVICE_TABLE(of, hisi_gpio_dts_match); + static void hisi_gpio_get_pdata(struct device *dev, struct hisi_gpio *hisi_gpio) { @@ -311,6 +317,7 @@ static struct platform_driver hisi_gpio_driver = { .driver = { .name = HISI_GPIO_DRIVER_NAME, .acpi_match_table = hisi_gpio_acpi_match, + .of_match_table = hisi_gpio_dts_match, }, .probe = hisi_gpio_probe, }; diff --git a/drivers/gpio/gpio-idio-16.c b/drivers/gpio/gpio-idio-16.c new file mode 100644 index 0000000000000000000000000000000000000000..13315242d2209e7487105cff001d21146b9dfbd1 --- /dev/null +++ b/drivers/gpio/gpio-idio-16.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * GPIO library for the ACCES IDIO-16 family + * Copyright (C) 2022 William Breathitt Gray + */ +#include +#include +#include +#include +#include +#include + +#include "gpio-idio-16.h" + +#define DEFAULT_SYMBOL_NAMESPACE GPIO_IDIO_16 + +/** + * idio_16_get - get signal value at signal offset + * @reg: ACCES IDIO-16 device registers + * @state: ACCES IDIO-16 device state + * @offset: offset of signal to get + * + * Returns the signal value (0=low, 1=high) for the signal at @offset. 
+ */ +int idio_16_get(struct idio_16 __iomem *const reg, + struct idio_16_state *const state, const unsigned long offset) +{ + const unsigned long mask = BIT(offset); + + if (offset < IDIO_16_NOUT) + return test_bit(offset, state->out_state); + + if (offset < 24) + return !!(ioread8(®->in0_7) & (mask >> IDIO_16_NOUT)); + + if (offset < 32) + return !!(ioread8(®->in8_15) & (mask >> 24)); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(idio_16_get); + +/** + * idio_16_get_multiple - get multiple signal values at multiple signal offsets + * @reg: ACCES IDIO-16 device registers + * @state: ACCES IDIO-16 device state + * @mask: mask of signals to get + * @bits: bitmap to store signal values + * + * Stores in @bits the values (0=low, 1=high) for the signals defined by @mask. + */ +void idio_16_get_multiple(struct idio_16 __iomem *const reg, + struct idio_16_state *const state, + const unsigned long *const mask, + unsigned long *const bits) +{ + unsigned long flags; + const unsigned long out_mask = GENMASK(IDIO_16_NOUT - 1, 0); + + spin_lock_irqsave(&state->lock, flags); + + bitmap_replace(bits, bits, state->out_state, &out_mask, IDIO_16_NOUT); + if (*mask & GENMASK(23, 16)) + bitmap_set_value8(bits, ioread8(®->in0_7), 16); + if (*mask & GENMASK(31, 24)) + bitmap_set_value8(bits, ioread8(®->in8_15), 24); + + spin_unlock_irqrestore(&state->lock, flags); +} +EXPORT_SYMBOL_GPL(idio_16_get_multiple); + +/** + * idio_16_set - set signal value at signal offset + * @reg: ACCES IDIO-16 device registers + * @state: ACCES IDIO-16 device state + * @offset: offset of signal to set + * @value: value of signal to set + * + * Assigns output @value for the signal at @offset. + */ +void idio_16_set(struct idio_16 __iomem *const reg, + struct idio_16_state *const state, const unsigned long offset, + const unsigned long value) +{ + unsigned long flags; + + if (offset >= IDIO_16_NOUT) + return; + + spin_lock_irqsave(&state->lock, flags); + + __assign_bit(offset, state->out_state, value); + if (offset < 8) + iowrite8(bitmap_get_value8(state->out_state, 0), ®->out0_7); + else + iowrite8(bitmap_get_value8(state->out_state, 8), ®->out8_15); + + spin_unlock_irqrestore(&state->lock, flags); +} +EXPORT_SYMBOL_GPL(idio_16_set); + +/** + * idio_16_set_multiple - set signal values at multiple signal offsets + * @reg: ACCES IDIO-16 device registers + * @state: ACCES IDIO-16 device state + * @mask: mask of signals to set + * @bits: bitmap of signal output values + * + * Assigns output values defined by @bits for the signals defined by @mask. + */ +void idio_16_set_multiple(struct idio_16 __iomem *const reg, + struct idio_16_state *const state, + const unsigned long *const mask, + const unsigned long *const bits) +{ + unsigned long flags; + + spin_lock_irqsave(&state->lock, flags); + + bitmap_replace(state->out_state, state->out_state, bits, mask, + IDIO_16_NOUT); + if (*mask & GENMASK(7, 0)) + iowrite8(bitmap_get_value8(state->out_state, 0), ®->out0_7); + if (*mask & GENMASK(15, 8)) + iowrite8(bitmap_get_value8(state->out_state, 8), ®->out8_15); + + spin_unlock_irqrestore(&state->lock, flags); +} +EXPORT_SYMBOL_GPL(idio_16_set_multiple); + +/** + * idio_16_state_init - initialize idio_16_state structure + * @state: ACCES IDIO-16 device state + * + * Initializes the ACCES IDIO-16 device @state for use in idio-16 library + * functions. 
+ */ +void idio_16_state_init(struct idio_16_state *const state) +{ + spin_lock_init(&state->lock); +} +EXPORT_SYMBOL_GPL(idio_16_state_init); + +MODULE_AUTHOR("William Breathitt Gray"); +MODULE_DESCRIPTION("ACCES IDIO-16 GPIO Library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/gpio-idio-16.h b/drivers/gpio/gpio-idio-16.h new file mode 100644 index 0000000000000000000000000000000000000000..928f8251a2bd9e206026ccef197b7d8c2b2ab2c8 --- /dev/null +++ b/drivers/gpio/gpio-idio-16.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright 2022 William Breathitt Gray */ +#ifndef _IDIO_16_H_ +#define _IDIO_16_H_ + +#include +#include + +/** + * struct idio_16 - IDIO-16 registers structure + * @out0_7: Read: FET Drive Outputs 0-7 + * Write: FET Drive Outputs 0-7 + * @in0_7: Read: Isolated Inputs 0-7 + * Write: Clear Interrupt + * @irq_ctl: Read: Enable IRQ + * Write: Disable IRQ + * @filter_ctl: Read: Activate Input Filters 0-15 + * Write: Deactivate Input Filters 0-15 + * @out8_15: Read: FET Drive Outputs 8-15 + * Write: FET Drive Outputs 8-15 + * @in8_15: Read: Isolated Inputs 8-15 + * Write: Unused + * @irq_status: Read: Interrupt status + * Write: Unused + */ +struct idio_16 { + u8 out0_7; + u8 in0_7; + u8 irq_ctl; + u8 filter_ctl; + u8 out8_15; + u8 in8_15; + u8 irq_status; +}; + +#define IDIO_16_NOUT 16 + +/** + * struct idio_16_state - IDIO-16 state structure + * @lock: synchronization lock for accessing device state + * @out_state: output signals state + */ +struct idio_16_state { + spinlock_t lock; + DECLARE_BITMAP(out_state, IDIO_16_NOUT); +}; + +/** + * idio_16_get_direction - get the I/O direction for a signal offset + * @offset: offset of signal to get direction + * + * Returns the signal direction (0=output, 1=input) for the signal at @offset. + */ +static inline int idio_16_get_direction(const unsigned long offset) +{ + return (offset >= IDIO_16_NOUT) ? 1 : 0; +} + +int idio_16_get(struct idio_16 __iomem *reg, struct idio_16_state *state, + unsigned long offset); +void idio_16_get_multiple(struct idio_16 __iomem *reg, + struct idio_16_state *state, + const unsigned long *mask, unsigned long *bits); +void idio_16_set(struct idio_16 __iomem *reg, struct idio_16_state *state, + unsigned long offset, unsigned long value); +void idio_16_set_multiple(struct idio_16 __iomem *reg, + struct idio_16_state *state, + const unsigned long *mask, const unsigned long *bits); +void idio_16_state_init(struct idio_16_state *state); + +#endif /* _IDIO_16_H_ */ diff --git a/drivers/gpio/gpio-latch.c b/drivers/gpio/gpio-latch.c new file mode 100644 index 0000000000000000000000000000000000000000..d7c3b20c8482ec2e9594a81e2f5254ed5ce1943e --- /dev/null +++ b/drivers/gpio/gpio-latch.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * GPIO latch driver + * + * Copyright (C) 2022 Sascha Hauer + * + * This driver implements a GPIO (or better GPO as there is no input) + * multiplexer based on latches like this: + * + * CLK0 ----------------------. ,--------. + * CLK1 -------------------. 
`--------|> #0 | + | | | + OUT0 ----------------+--|-----------|D0 Q0|-----|< + OUT1 --------------+-|--|-----------|D1 Q1|-----|< + OUT2 ------------+-|-|--|-----------|D2 Q2|-----|< + OUT3 ----------+-|-|-|--|-----------|D3 Q3|-----|< + OUT4 --------+-|-|-|-|--|-----------|D4 Q4|-----|< + OUT5 ------+-|-|-|-|-|--|-----------|D5 Q5|-----|< + OUT6 ----+-|-|-|-|-|-|--|-----------|D6 Q6|-----|< + OUT7 --+-|-|-|-|-|-|-|--|-----------|D7 Q7|-----|< + | | | | | | | | | `--------' + | | | | | | | | | + | | | | | | | | | ,--------. + | | | | | | | | `-----------|> #1 | + | | | | | | | | | | + | | | | | | | `--------------|D0 Q0|-----|< + | | | | | | `----------------|D1 Q1|-----|< + | | | | | `------------------|D2 Q2|-----|< + | | | | `--------------------|D3 Q3|-----|< + | | | `----------------------|D4 Q4|-----|< + | | `------------------------|D5 Q5|-----|< + | `--------------------------|D6 Q6|-----|< + `----------------------------|D7 Q7|-----|< + `--------' + + * The above is just an example. The actual number of latches and + * the number of inputs per latch is derived from the number of GPIOs given + * in the corresponding device tree properties. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "gpiolib.h" + +struct gpio_latch_priv { + struct gpio_chip gc; + struct gpio_descs *clk_gpios; + struct gpio_descs *latched_gpios; + int n_latched_gpios; + unsigned int setup_duration_ns; + unsigned int clock_duration_ns; + unsigned long *shadow; + /* + * Depending on whether any of the underlying GPIOs may sleep we either + * use a mutex or a spinlock to protect our shadow map. + */ + union { + struct mutex mutex; /* protects @shadow */ + spinlock_t spinlock; /* protects @shadow */ + }; +}; + +static int gpio_latch_get_direction(struct gpio_chip *gc, unsigned int offset) +{ + return GPIO_LINE_DIRECTION_OUT; +} + +static void gpio_latch_set_unlocked(struct gpio_latch_priv *priv, + void (*set)(struct gpio_desc *desc, int value), + unsigned int offset, bool val) +{ + int latch = offset / priv->n_latched_gpios; + int i; + + assign_bit(offset, priv->shadow, val); + + for (i = 0; i < priv->n_latched_gpios; i++) + set(priv->latched_gpios->desc[i], + test_bit(latch * priv->n_latched_gpios + i, priv->shadow)); + + ndelay(priv->setup_duration_ns); + set(priv->clk_gpios->desc[latch], 1); + ndelay(priv->clock_duration_ns); + set(priv->clk_gpios->desc[latch], 0); +} + +static void gpio_latch_set(struct gpio_chip *gc, unsigned int offset, int val) +{ + struct gpio_latch_priv *priv = gpiochip_get_data(gc); + unsigned long flags; + + spin_lock_irqsave(&priv->spinlock, flags); + + gpio_latch_set_unlocked(priv, gpiod_set_value, offset, val); + + spin_unlock_irqrestore(&priv->spinlock, flags); +} + +static void gpio_latch_set_can_sleep(struct gpio_chip *gc, unsigned int offset, int val) +{ + struct gpio_latch_priv *priv = gpiochip_get_data(gc); + + mutex_lock(&priv->mutex); + + gpio_latch_set_unlocked(priv, gpiod_set_value_cansleep, offset, val); + + mutex_unlock(&priv->mutex); +} + +static bool gpio_latch_can_sleep(struct gpio_latch_priv *priv, unsigned int n_latches) +{ + int i; + + for (i = 0; i < n_latches; i++) + if (gpiod_cansleep(priv->clk_gpios->desc[i])) + return true; + + for (i = 0; i < priv->n_latched_gpios; i++) + if (gpiod_cansleep(priv->latched_gpios->desc[i])) + return true; + + return false; +} + +/* + * Some value which is still acceptable to delay in atomic context.
+ * If we need to go higher we might have to switch to usleep_range(), + * but that cannot be used in atomic context and the driver would have + * to be adjusted to support that. + */ +#define DURATION_NS_MAX 5000 + +static int gpio_latch_probe(struct platform_device *pdev) +{ + struct gpio_latch_priv *priv; + unsigned int n_latches; + struct device_node *np = pdev->dev.of_node; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->clk_gpios = devm_gpiod_get_array(&pdev->dev, "clk", GPIOD_OUT_LOW); + if (IS_ERR(priv->clk_gpios)) + return PTR_ERR(priv->clk_gpios); + + priv->latched_gpios = devm_gpiod_get_array(&pdev->dev, "latched", GPIOD_OUT_LOW); + if (IS_ERR(priv->latched_gpios)) + return PTR_ERR(priv->latched_gpios); + + n_latches = priv->clk_gpios->ndescs; + priv->n_latched_gpios = priv->latched_gpios->ndescs; + + priv->shadow = devm_bitmap_zalloc(&pdev->dev, n_latches * priv->n_latched_gpios, + GFP_KERNEL); + if (!priv->shadow) + return -ENOMEM; + + if (gpio_latch_can_sleep(priv, n_latches)) { + priv->gc.can_sleep = true; + priv->gc.set = gpio_latch_set_can_sleep; + mutex_init(&priv->mutex); + } else { + priv->gc.can_sleep = false; + priv->gc.set = gpio_latch_set; + spin_lock_init(&priv->spinlock); + } + + of_property_read_u32(np, "setup-duration-ns", &priv->setup_duration_ns); + if (priv->setup_duration_ns > DURATION_NS_MAX) { + dev_warn(&pdev->dev, "setup-duration-ns too high, limit to %d\n", + DURATION_NS_MAX); + priv->setup_duration_ns = DURATION_NS_MAX; + } + + of_property_read_u32(np, "clock-duration-ns", &priv->clock_duration_ns); + if (priv->clock_duration_ns > DURATION_NS_MAX) { + dev_warn(&pdev->dev, "clock-duration-ns too high, limit to %d\n", + DURATION_NS_MAX); + priv->clock_duration_ns = DURATION_NS_MAX; + } + + priv->gc.get_direction = gpio_latch_get_direction; + priv->gc.ngpio = n_latches * priv->n_latched_gpios; + priv->gc.owner = THIS_MODULE; + priv->gc.base = -1; + priv->gc.parent = &pdev->dev; + + platform_set_drvdata(pdev, priv); + + return devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv); +} + +static const struct of_device_id gpio_latch_ids[] = { + { + .compatible = "gpio-latch", + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, gpio_latch_ids); + +static struct platform_driver gpio_latch_driver = { + .driver = { + .name = "gpio-latch", + .of_match_table = gpio_latch_ids, + }, + .probe = gpio_latch_probe, +}; +module_platform_driver(gpio_latch_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Sascha Hauer "); +MODULE_DESCRIPTION("GPIO latch driver"); diff --git a/drivers/gpio/gpio-max7300.c b/drivers/gpio/gpio-max7300.c index 43da381a4d7e7ed322a25fdc5304b9014d0345ef..cf482f4f0098297a3ab59348082ee051634fdd37 100644 --- a/drivers/gpio/gpio-max7300.c +++ b/drivers/gpio/gpio-max7300.c @@ -28,8 +28,7 @@ static int max7300_i2c_read(struct device *dev, unsigned int reg) return i2c_smbus_read_byte_data(client, reg); } -static int max7300_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max7300_probe(struct i2c_client *client) { struct max7301 *ts; @@ -63,7 +62,7 @@ static struct i2c_driver max7300_driver = { .driver = { .name = "max7300", }, - .probe = max7300_probe, + .probe_new = max7300_probe, .remove = max7300_remove, .id_table = max7300_id, }; diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c index da69721170308e3629a59809f9427addc15bbaf8..68e982cdee734ddf17761793575979e8d7fcc7e2 100644 --- a/drivers/gpio/gpio-max732x.c +++ b/drivers/gpio/gpio-max732x.c
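
The max7300 hunk above, and the max732x, pca953x, pcf857x, tpic2810 and ts4900 hunks that follow, all apply the same tree-wide I2C conversion: probe() loses its second i2c_device_id argument and the driver registers the callback via .probe_new. A minimal sketch of the pattern with a hypothetical "foo" driver (every foo_* name below is a placeholder, not code from this patch):

#include <linux/i2c.h>
#include <linux/module.h>

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static int foo_probe(struct i2c_client *client)
{
	/* Recover the matched id only if the driver still needs it. */
	const struct i2c_device_id *id = i2c_client_get_device_id(client);

	dev_info(&client->dev, "bound via id table entry %s\n", id->name);
	return 0;
}

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.probe_new = foo_probe,	/* was: .probe = foo_probe */
	.id_table = foo_id,
};
module_i2c_driver(foo_driver);

Drivers that never looked at the id (max7300, tpic2810, ts4900) simply drop the parameter; the others recover it through i2c_client_get_device_id(), as the max732x and pca953x hunks do.
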
@@ -608,9 +608,9 @@ static struct max732x_platform_data *of_gpio_max732x(struct device *dev) return pdata; } -static int max732x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max732x_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct max732x_platform_data *pdata; struct device_node *node; struct max732x_chip *chip; @@ -707,7 +707,7 @@ static struct i2c_driver max732x_driver = { .name = "max732x", .of_match_table = of_match_ptr(max732x_of_table), }, - .probe = max732x_probe, + .probe_new = max732x_probe, .id_table = max732x_id, }; diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c index 72ac09a597029ff87e7004c27a25020afa217b75..92ea8411050dda9ae5ed1c93338287443a1848e2 100644 --- a/drivers/gpio/gpio-merrifield.c +++ b/drivers/gpio/gpio-merrifield.c @@ -14,6 +14,7 @@ #include #include #include +#include #define GCCR 0x000 /* controller configuration */ #define GPLR 0x004 /* pin level r/o */ @@ -331,7 +332,7 @@ static int mrfld_irq_set_wake(struct irq_data *d, unsigned int on) raw_spin_unlock_irqrestore(&priv->lock, flags); - dev_dbg(priv->dev, "%sable wake for gpio %u\n", on ? "en" : "dis", gpio); + dev_dbg(priv->dev, "%s wake for gpio %u\n", str_enable_disable(on), gpio); return 0; } diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 6e67867e1dcd48ecaf919e44c6e6b8ed53649247..a59d61cd44b2e5eceac9bf97c41945ac77bde0f2 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -1050,9 +1050,9 @@ out: return ret; } -static int pca953x_probe(struct i2c_client *client, - const struct i2c_device_id *i2c_id) +static int pca953x_probe(struct i2c_client *client) { + const struct i2c_device_id *i2c_id = i2c_client_get_device_id(client); struct pca953x_platform_data *pdata; struct pca953x_chip *chip; int irq_base = 0; @@ -1376,7 +1376,7 @@ static struct i2c_driver pca953x_driver = { .of_match_table = pca953x_dt_ids, .acpi_match_table = pca953x_acpi_ids, }, - .probe = pca953x_probe, + .probe_new = pca953x_probe, .remove = pca953x_remove, .id_table = pca953x_id, }; diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c index ab2a652964ec2ff93e0cbe38cda455a749fb96eb..6c07a8811a7a5e740c9421504f1f781bd221c7a3 100644 --- a/drivers/gpio/gpio-pca9570.c +++ b/drivers/gpio/gpio-pca9570.c @@ -15,14 +15,28 @@ #include #include +#define SLG7XL45106_GPO_REG 0xDB + +/** + * struct pca9570_platform_data - GPIO platform data + * @ngpio: number of GPIOs + * @command: Command to be sent + */ +struct pca9570_platform_data { + u16 ngpio; + u32 command; +}; + /** * struct pca9570 - GPIO driver data * @chip: GPIO controller chip + * @p_data: GPIO controller platform data * @lock: Protects write sequences * @out: Buffer for device register */ struct pca9570 { struct gpio_chip chip; + const struct pca9570_platform_data *p_data; struct mutex lock; u8 out; }; @@ -32,7 +46,11 @@ static int pca9570_read(struct pca9570 *gpio, u8 *value) struct i2c_client *client = to_i2c_client(gpio->chip.parent); int ret; - ret = i2c_smbus_read_byte(client); + if (gpio->p_data->command != 0) + ret = i2c_smbus_read_byte_data(client, gpio->p_data->command); + else + ret = i2c_smbus_read_byte(client); + if (ret < 0) return ret; @@ -44,6 +62,9 @@ static int pca9570_write(struct pca9570 *gpio, u8 value) { struct i2c_client *client = to_i2c_client(gpio->chip.parent); + if (gpio->p_data->command != 0) + return i2c_smbus_write_byte_data(client, gpio->p_data->command, value); + return
i2c_smbus_write_byte(client, value); } @@ -106,7 +127,8 @@ static int pca9570_probe(struct i2c_client *client) gpio->chip.get = pca9570_get; gpio->chip.set = pca9570_set; gpio->chip.base = -1; - gpio->chip.ngpio = (uintptr_t)device_get_match_data(&client->dev); + gpio->p_data = device_get_match_data(&client->dev); + gpio->chip.ngpio = gpio->p_data->ngpio; gpio->chip.can_sleep = true; mutex_init(&gpio->lock); @@ -119,16 +141,31 @@ static int pca9570_probe(struct i2c_client *client) return devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio); } +static const struct pca9570_platform_data pca9570_gpio = { + .ngpio = 4, +}; + +static const struct pca9570_platform_data pca9571_gpio = { + .ngpio = 8, +}; + +static const struct pca9570_platform_data slg7xl45106_gpio = { + .ngpio = 8, + .command = SLG7XL45106_GPO_REG, +}; + static const struct i2c_device_id pca9570_id_table[] = { - { "pca9570", 4 }, - { "pca9571", 8 }, + { "pca9570", (kernel_ulong_t)&pca9570_gpio}, + { "pca9571", (kernel_ulong_t)&pca9571_gpio }, + { "slg7xl45106", (kernel_ulong_t)&slg7xl45106_gpio }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(i2c, pca9570_id_table); static const struct of_device_id pca9570_of_match_table[] = { - { .compatible = "nxp,pca9570", .data = (void *)4 }, - { .compatible = "nxp,pca9571", .data = (void *)8 }, + { .compatible = "dlg,slg7xl45106", .data = &slg7xl45106_gpio}, + { .compatible = "nxp,pca9570", .data = &pca9570_gpio }, + { .compatible = "nxp,pca9571", .data = &pca9571_gpio }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, pca9570_of_match_table); diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index e98ea47d7237985e51dda9016c2193f4ae58fc05..cec2f2c78255f5207de75d11b7a85ed8d69b01a1 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -247,9 +247,9 @@ static const struct irq_chip pcf857x_irq_chip = { /*-------------------------------------------------------------------------*/ -static int pcf857x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int pcf857x_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct pcf857x_platform_data *pdata = dev_get_platdata(&client->dev); struct device_node *np = client->dev.of_node; struct pcf857x *gpio; @@ -422,7 +422,7 @@ static struct i2c_driver pcf857x_driver = { .name = "pcf857x", .of_match_table = of_match_ptr(pcf857x_of_table), }, - .probe = pcf857x_probe, + .probe_new = pcf857x_probe, .remove = pcf857x_remove, .shutdown = pcf857x_shutdown, .id_table = pcf857x_id, diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c index 71a13a394050ffe65d1932e60e8d28ee9409af3c..a86ce748384bba0b0a9603566b068fe098f805a5 100644 --- a/drivers/gpio/gpio-pci-idio-16.c +++ b/drivers/gpio/gpio-pci-idio-16.c @@ -3,8 +3,7 @@ * GPIO driver for the ACCES PCI-IDIO-16 * Copyright (C) 2017 William Breathitt Gray */ -#include -#include +#include #include #include #include @@ -16,51 +15,28 @@ #include #include -/** - * struct idio_16_gpio_reg - GPIO device registers structure - * @out0_7: Read: FET Drive Outputs 0-7 - * Write: FET Drive Outputs 0-7 - * @in0_7: Read: Isolated Inputs 0-7 - * Write: Clear Interrupt - * @irq_ctl: Read: Enable IRQ - * Write: Disable IRQ - * @filter_ctl: Read: Activate Input Filters 0-15 - * Write: Deactivate Input Filters 0-15 - * @out8_15: Read: FET Drive Outputs 8-15 - * Write: FET Drive Outputs 8-15 - * @in8_15: Read: Isolated Inputs 8-15 - * Write: Unused - * @irq_status: Read: Interrupt status - * 
Write: Unused - */ -struct idio_16_gpio_reg { - u8 out0_7; - u8 in0_7; - u8 irq_ctl; - u8 filter_ctl; - u8 out8_15; - u8 in8_15; - u8 irq_status; -}; +#include "gpio-idio-16.h" /** * struct idio_16_gpio - GPIO device private data structure * @chip: instance of the gpio_chip * @lock: synchronization lock to prevent I/O race conditions * @reg: I/O address offset for the GPIO device registers + * @state: ACCES IDIO-16 device state * @irq_mask: I/O bits affected by interrupts */ struct idio_16_gpio { struct gpio_chip chip; raw_spinlock_t lock; - struct idio_16_gpio_reg __iomem *reg; + struct idio_16 __iomem *reg; + struct idio_16_state state; unsigned long irq_mask; }; static int idio_16_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { - if (offset > 15) + if (idio_16_get_direction(offset)) return GPIO_LINE_DIRECTION_IN; return GPIO_LINE_DIRECTION_OUT; @@ -82,43 +58,16 @@ static int idio_16_gpio_direction_output(struct gpio_chip *chip, static int idio_16_gpio_get(struct gpio_chip *chip, unsigned int offset) { struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip); - unsigned long mask = BIT(offset); - - if (offset < 8) - return !!(ioread8(&idio16gpio->reg->out0_7) & mask); - - if (offset < 16) - return !!(ioread8(&idio16gpio->reg->out8_15) & (mask >> 8)); - - if (offset < 24) - return !!(ioread8(&idio16gpio->reg->in0_7) & (mask >> 16)); - return !!(ioread8(&idio16gpio->reg->in8_15) & (mask >> 24)); + return idio_16_get(idio16gpio->reg, &idio16gpio->state, offset); } static int idio_16_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask, unsigned long *bits) { struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip); - unsigned long offset; - unsigned long gpio_mask; - void __iomem *ports[] = { - &idio16gpio->reg->out0_7, &idio16gpio->reg->out8_15, - &idio16gpio->reg->in0_7, &idio16gpio->reg->in8_15, - }; - void __iomem *port_addr; - unsigned long port_state; - - /* clear bits array to a clean slate */ - bitmap_zero(bits, chip->ngpio); - - for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) { - port_addr = ports[offset / 8]; - port_state = ioread8(port_addr) & gpio_mask; - - bitmap_set_value8(bits, port_state, offset); - } + idio_16_get_multiple(idio16gpio->reg, &idio16gpio->state, mask, bits); return 0; } @@ -126,61 +75,16 @@ static void idio_16_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip); - unsigned int mask = BIT(offset); - void __iomem *base; - unsigned long flags; - unsigned int out_state; - - if (offset > 15) - return; - - if (offset > 7) { - mask >>= 8; - base = &idio16gpio->reg->out8_15; - } else - base = &idio16gpio->reg->out0_7; - - raw_spin_lock_irqsave(&idio16gpio->lock, flags); - if (value) - out_state = ioread8(base) | mask; - else - out_state = ioread8(base) & ~mask; - - iowrite8(out_state, base); - - raw_spin_unlock_irqrestore(&idio16gpio->lock, flags); + idio_16_set(idio16gpio->reg, &idio16gpio->state, offset, value); } static void idio_16_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask, unsigned long *bits) { struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip); - unsigned long offset; - unsigned long gpio_mask; - void __iomem *ports[] = { - &idio16gpio->reg->out0_7, &idio16gpio->reg->out8_15, - }; - size_t index; - void __iomem *port_addr; - unsigned long bitmask; - unsigned long flags; - unsigned long out_state; - for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) { - index = offset / 8; - 
port_addr = ports[index]; - - bitmask = bitmap_get_value8(bits, offset) & gpio_mask; - - raw_spin_lock_irqsave(&idio16gpio->lock, flags); - - out_state = ioread8(port_addr) & ~gpio_mask; - out_state |= bitmask; - iowrite8(out_state, port_addr); - - raw_spin_unlock_irqrestore(&idio16gpio->lock, flags); - } + idio_16_set_multiple(idio16gpio->reg, &idio16gpio->state, mask, bits); } static void idio_16_irq_ack(struct irq_data *data) @@ -335,6 +239,8 @@ static int idio_16_probe(struct pci_dev *pdev, const struct pci_device_id *id) idio16gpio->chip.set = idio_16_gpio_set; idio16gpio->chip.set_multiple = idio_16_gpio_set_multiple; + idio_16_state_init(&idio16gpio->state); + girq = &idio16gpio->chip.irq; girq->chip = &idio_16_irqchip; /* This will let us handle the parent IRQ in the driver */ @@ -379,3 +285,4 @@ module_pci_driver(idio_16_driver); MODULE_AUTHOR("William Breathitt Gray "); MODULE_DESCRIPTION("ACCES PCI-IDIO-16 GPIO driver"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(GPIO_IDIO_16); diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c index 1020c2feb24964303c279fa352af7839991a8775..60514bc5454f8ceb258e0647910b91fd1642cd8e 100644 --- a/drivers/gpio/gpio-sim.c +++ b/drivers/gpio/gpio-sim.c @@ -31,6 +31,7 @@ #include "gpiolib.h" +#define GPIO_SIM_NGPIO_MAX 1024 #define GPIO_SIM_PROP_MAX 4 /* Max 3 properties + sentinel. */ #define GPIO_SIM_NUM_ATTRS 3 /* value, pull and sentinel */ @@ -371,6 +372,9 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev) if (ret) return ret; + if (num_lines > GPIO_SIM_NGPIO_MAX) + return -ERANGE; + ret = fwnode_property_read_string(swnode, "gpio-sim,label", &label); if (ret) { label = devm_kasprintf(dev, GFP_KERNEL, "%s-%s", diff --git a/drivers/gpio/gpio-sl28cpld.c b/drivers/gpio/gpio-sl28cpld.c index 52404736ac861bb16ea8e9bc7ef2bfb70f54bd56..2195f88c204855a6797841f13e6eb47cccac2859 100644 --- a/drivers/gpio/gpio-sl28cpld.c +++ b/drivers/gpio/gpio-sl28cpld.c @@ -70,8 +70,7 @@ static int sl28cpld_gpio_irq_init(struct platform_device *pdev, irq_chip->num_irqs = ARRAY_SIZE(sl28cpld_gpio_irqs); irq_chip->num_regs = 1; irq_chip->status_base = base + GPIO_REG_IP; - irq_chip->mask_base = base + GPIO_REG_IE; - irq_chip->mask_invert = true; + irq_chip->unmask_base = base + GPIO_REG_IE; irq_chip->ack_base = base + GPIO_REG_IP; ret = devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev), diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c deleted file mode 100644 index e07cca0f8d35388dafd7a6374b95f054b07063cd..0000000000000000000000000000000000000000 --- a/drivers/gpio/gpio-sta2x11.c +++ /dev/null @@ -1,411 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * STMicroelectronics ConneXt (STA2X11) GPIO driver - * - * Copyright 2012 ST Microelectronics (Alessandro Rubini) - * Based on gpio-ml-ioh.c, Copyright 2010 OKI Semiconductors Ltd. - * Also based on previous sta2x11 work, Copyright 2011 Wind River Systems, Inc. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct gsta_regs { - u32 dat; /* 0x00 */ - u32 dats; - u32 datc; - u32 pdis; - u32 dir; /* 0x10 */ - u32 dirs; - u32 dirc; - u32 unused_1c; - u32 afsela; /* 0x20 */ - u32 unused_24[7]; - u32 rimsc; /* 0x40 */ - u32 fimsc; - u32 is; - u32 ic; -}; - -struct gsta_gpio { - spinlock_t lock; - struct device *dev; - void __iomem *reg_base; - struct gsta_regs __iomem *regs[GSTA_NR_BLOCKS]; - struct gpio_chip gpio; - int irq_base; - /* FIXME: save the whole config here (AF, ...) 
*/ - unsigned irq_type[GSTA_NR_GPIO]; -}; - -/* - * gpio methods - */ - -static void gsta_gpio_set(struct gpio_chip *gpio, unsigned nr, int val) -{ - struct gsta_gpio *chip = gpiochip_get_data(gpio); - struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK]; - u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK); - - if (val) - writel(bit, &regs->dats); - else - writel(bit, &regs->datc); -} - -static int gsta_gpio_get(struct gpio_chip *gpio, unsigned nr) -{ - struct gsta_gpio *chip = gpiochip_get_data(gpio); - struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK]; - u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK); - - return !!(readl(&regs->dat) & bit); -} - -static int gsta_gpio_direction_output(struct gpio_chip *gpio, unsigned nr, - int val) -{ - struct gsta_gpio *chip = gpiochip_get_data(gpio); - struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK]; - u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK); - - writel(bit, &regs->dirs); - /* Data register after direction, otherwise pullup/down is selected */ - if (val) - writel(bit, &regs->dats); - else - writel(bit, &regs->datc); - return 0; -} - -static int gsta_gpio_direction_input(struct gpio_chip *gpio, unsigned nr) -{ - struct gsta_gpio *chip = gpiochip_get_data(gpio); - struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK]; - u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK); - - writel(bit, &regs->dirc); - return 0; -} - -static int gsta_gpio_to_irq(struct gpio_chip *gpio, unsigned offset) -{ - struct gsta_gpio *chip = gpiochip_get_data(gpio); - return chip->irq_base + offset; -} - -static void gsta_gpio_setup(struct gsta_gpio *chip) /* called from probe */ -{ - struct gpio_chip *gpio = &chip->gpio; - - /* - * ARCH_NR_GPIOS is currently 256 and dynamic allocation starts - * from the end. However, for compatibility, we need the first - * ConneXt device to start from gpio 0: it's the main chipset - * on most boards so documents and drivers assume gpio0..gpio127 - */ - static int gpio_base; - - gpio->label = dev_name(chip->dev); - gpio->owner = THIS_MODULE; - gpio->direction_input = gsta_gpio_direction_input; - gpio->get = gsta_gpio_get; - gpio->direction_output = gsta_gpio_direction_output; - gpio->set = gsta_gpio_set; - gpio->dbg_show = NULL; - gpio->base = gpio_base; - gpio->ngpio = GSTA_NR_GPIO; - gpio->can_sleep = false; - gpio->to_irq = gsta_gpio_to_irq; - - /* - * After the first device, turn to dynamic gpio numbers. - * For example, with ARCH_NR_GPIOS = 256 we can fit two cards - */ - if (!gpio_base) - gpio_base = -1; -} - -/* - * Special method: alternate functions and pullup/pulldown. This is only - * invoked on startup to configure gpio's according to platform data. - * FIXME : this functionality shall be managed (and exported to other drivers) - * via the pin control subsystem. - */ -static void gsta_set_config(struct gsta_gpio *chip, int nr, unsigned cfg) -{ - struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK]; - unsigned long flags; - u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK); - u32 val; - int err = 0; - - pr_info("%s: %p %i %i\n", __func__, chip, nr, cfg); - - if (cfg == PINMUX_TYPE_NONE) - return; - - /* Alternate function or not?
*/ - spin_lock_irqsave(&chip->lock, flags); - val = readl(&regs->afsela); - if (cfg == PINMUX_TYPE_FUNCTION) - val |= bit; - else - val &= ~bit; - writel(val | bit, &regs->afsela); - if (cfg == PINMUX_TYPE_FUNCTION) { - spin_unlock_irqrestore(&chip->lock, flags); - return; - } - - /* not alternate function: set details */ - switch (cfg) { - case PINMUX_TYPE_OUTPUT_LOW: - writel(bit, &regs->dirs); - writel(bit, &regs->datc); - break; - case PINMUX_TYPE_OUTPUT_HIGH: - writel(bit, &regs->dirs); - writel(bit, &regs->dats); - break; - case PINMUX_TYPE_INPUT: - writel(bit, &regs->dirc); - val = readl(&regs->pdis) | bit; - writel(val, &regs->pdis); - break; - case PINMUX_TYPE_INPUT_PULLUP: - writel(bit, &regs->dirc); - val = readl(&regs->pdis) & ~bit; - writel(val, &regs->pdis); - writel(bit, &regs->dats); - break; - case PINMUX_TYPE_INPUT_PULLDOWN: - writel(bit, &regs->dirc); - val = readl(&regs->pdis) & ~bit; - writel(val, &regs->pdis); - writel(bit, &regs->datc); - break; - default: - err = 1; - } - spin_unlock_irqrestore(&chip->lock, flags); - if (err) - pr_err("%s: chip %p, pin %i, cfg %i is invalid\n", - __func__, chip, nr, cfg); -} - -/* - * Irq methods - */ - -static void gsta_irq_disable(struct irq_data *data) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); - struct gsta_gpio *chip = gc->private; - int nr = data->irq - chip->irq_base; - struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK]; - u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK); - u32 val; - unsigned long flags; - - spin_lock_irqsave(&chip->lock, flags); - if (chip->irq_type[nr] & IRQ_TYPE_EDGE_RISING) { - val = readl(&regs->rimsc) & ~bit; - writel(val, &regs->rimsc); - } - if (chip->irq_type[nr] & IRQ_TYPE_EDGE_FALLING) { - val = readl(&regs->fimsc) & ~bit; - writel(val, &regs->fimsc); - } - spin_unlock_irqrestore(&chip->lock, flags); - return; -} - -static void gsta_irq_enable(struct irq_data *data) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); - struct gsta_gpio *chip = gc->private; - int nr = data->irq - chip->irq_base; - struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK]; - u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK); - u32 val; - int type; - unsigned long flags; - - type = chip->irq_type[nr]; - - spin_lock_irqsave(&chip->lock, flags); - val = readl(&regs->rimsc); - if (type & IRQ_TYPE_EDGE_RISING) - writel(val | bit, &regs->rimsc); - else - writel(val & ~bit, &regs->rimsc); - val = readl(&regs->rimsc); - if (type & IRQ_TYPE_EDGE_FALLING) - writel(val | bit, &regs->fimsc); - else - writel(val & ~bit, &regs->fimsc); - spin_unlock_irqrestore(&chip->lock, flags); - return; -} - -static int gsta_irq_type(struct irq_data *d, unsigned int type) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - struct gsta_gpio *chip = gc->private; - int nr = d->irq - chip->irq_base; - - /* We only support edge interrupts */ - if (!(type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))) { - pr_debug("%s: unsupported type 0x%x\n", __func__, type); - return -EINVAL; - } - - chip->irq_type[nr] = type; /* used for enable/disable */ - - gsta_irq_enable(d); - return 0; -} - -static irqreturn_t gsta_gpio_handler(int irq, void *dev_id) -{ - struct gsta_gpio *chip = dev_id; - struct gsta_regs __iomem *regs; - u32 is; - int i, nr, base; - irqreturn_t ret = IRQ_NONE; - - for (i = 0; i < GSTA_NR_BLOCKS; i++) { - regs = chip->regs[i]; - base = chip->irq_base + i * GSTA_GPIO_PER_BLOCK; - while ((is = readl(&regs->is))) { - nr = __ffs(is); - irq = base + nr; - generic_handle_irq(irq); - writel(1 << nr, &regs->ic); - ret = IRQ_HANDLED; - } - } - return ret; -}
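
The removed gsta_gpio_handler() above demultiplexes one shared parent interrupt across 128 GPIO lines by re-reading each block's status register, dispatching the lowest pending bit, and acking it, until the register reads zero. A stripped-down sketch of that idiom (the demo_* names are illustrative, not from the deleted driver):

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>

static irqreturn_t demo_gpio_demux(void __iomem *status_reg,
				   void __iomem *ack_reg, int irq_base)
{
	irqreturn_t ret = IRQ_NONE;
	u32 pending;
	int nr;

	/* Loop until no interrupt is pending in the status register. */
	while ((pending = readl(status_reg))) {
		nr = __ffs(pending);			/* lowest pending line */
		generic_handle_irq(irq_base + nr);	/* run its handler */
		writel(BIT(nr), ack_reg);		/* ack only that line */
		ret = IRQ_HANDLED;
	}

	return ret;
}

Returning IRQ_NONE when nothing was pending matters in this scheme because the deleted driver requested its PCI interrupt with IRQF_SHARED, as the probe code below shows.
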
- -static int gsta_alloc_irq_chip(struct gsta_gpio *chip) -{ - struct irq_chip_generic *gc; - struct irq_chip_type *ct; - int rv; - - gc = devm_irq_alloc_generic_chip(chip->dev, KBUILD_MODNAME, 1, - chip->irq_base, - chip->reg_base, handle_simple_irq); - if (!gc) - return -ENOMEM; - - gc->private = chip; - ct = gc->chip_types; - - ct->chip.irq_set_type = gsta_irq_type; - ct->chip.irq_disable = gsta_irq_disable; - ct->chip.irq_enable = gsta_irq_enable; - - /* FIXME: this makes at most 32 interrupts. Request 0 by now */ - rv = devm_irq_setup_generic_chip(chip->dev, gc, - 0 /* IRQ_MSK(GSTA_GPIO_PER_BLOCK) */, - 0, IRQ_NOREQUEST | IRQ_NOPROBE, 0); - if (rv) - return rv; - - /* Set up all 128 interrupts: code from setup_generic_chip */ - { - struct irq_chip_type *ct = gc->chip_types; - int i, j; - for (j = 0; j < GSTA_NR_GPIO; j++) { - i = chip->irq_base + j; - irq_set_chip_and_handler(i, &ct->chip, ct->handler); - irq_set_chip_data(i, gc); - irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE); - } - gc->irq_cnt = i - gc->irq_base; - } - - return 0; -} - -/* The platform device used here is instantiated by the MFD device */ -static int gsta_probe(struct platform_device *dev) -{ - int i, err; - struct pci_dev *pdev; - struct sta2x11_gpio_pdata *gpio_pdata; - struct gsta_gpio *chip; - - pdev = *(struct pci_dev **)dev_get_platdata(&dev->dev); - gpio_pdata = dev_get_platdata(&pdev->dev); - - if (gpio_pdata == NULL) - dev_err(&dev->dev, "no gpio config\n"); - pr_debug("gpio config: %p\n", gpio_pdata); - - chip = devm_kzalloc(&dev->dev, sizeof(*chip), GFP_KERNEL); - if (!chip) - return -ENOMEM; - chip->dev = &dev->dev; - chip->reg_base = devm_platform_ioremap_resource(dev, 0); - if (IS_ERR(chip->reg_base)) - return PTR_ERR(chip->reg_base); - - for (i = 0; i < GSTA_NR_BLOCKS; i++) { - chip->regs[i] = chip->reg_base + i * 4096; - /* disable all irqs */ - writel(0, &chip->regs[i]->rimsc); - writel(0, &chip->regs[i]->fimsc); - writel(~0, &chip->regs[i]->ic); - } - spin_lock_init(&chip->lock); - gsta_gpio_setup(chip); - if (gpio_pdata) - for (i = 0; i < GSTA_NR_GPIO; i++) - gsta_set_config(chip, i, gpio_pdata->pinconfig[i]); - - /* 384 was used in previous code: be compatible for other drivers */ - err = devm_irq_alloc_descs(&dev->dev, -1, 384, - GSTA_NR_GPIO, NUMA_NO_NODE); - if (err < 0) { - dev_warn(&dev->dev, "sta2x11 gpio: Can't get irq base (%i)\n", - -err); - return err; - } - chip->irq_base = err; - - err = gsta_alloc_irq_chip(chip); - if (err) - return err; - - err = devm_request_irq(&dev->dev, pdev->irq, gsta_gpio_handler, - IRQF_SHARED, KBUILD_MODNAME, chip); - if (err < 0) { - dev_err(&dev->dev, "sta2x11 gpio: Can't request irq (%i)\n", - -err); - return err; - } - - return devm_gpiochip_add_data(&dev->dev, &chip->gpio, chip); -} - -static struct platform_driver sta2x11_gpio_platform_driver = { - .driver = { - .name = "sta2x11-gpio", - .suppress_bind_attrs = true, - }, - .probe = gsta_probe, -}; -builtin_platform_driver(sta2x11_gpio_platform_driver); diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c index d642c35cb97c0ce61bd40cc2a3c97ce7507e31c3..349c5fbd9b0274334dc549de78b0826a75d0a9f0 100644 --- a/drivers/gpio/gpio-tpic2810.c +++ b/drivers/gpio/gpio-tpic2810.c @@ -98,8 +98,7 @@ static const struct of_device_id tpic2810_of_match_table[] = { }; MODULE_DEVICE_TABLE(of, tpic2810_of_match_table); -static int tpic2810_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int tpic2810_probe(struct i2c_client *client) { struct tpic2810 *gpio; int ret; 
@@ -144,7 +143,7 @@ static struct i2c_driver tpic2810_driver = { .name = "tpic2810", .of_match_table = tpic2810_of_match_table, }, - .probe = tpic2810_probe, + .probe_new = tpic2810_probe, .remove = tpic2810_remove, .id_table = tpic2810_id_table, }; diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c index 416725c26e949e65a90dbb0bf6065c1a82107880..43e8b66e04f781deb7f466e299ead30e6f449a99 100644 --- a/drivers/gpio/gpio-ts4900.c +++ b/drivers/gpio/gpio-ts4900.c @@ -136,8 +136,7 @@ static const struct of_device_id ts4900_gpio_of_match_table[] = { }; MODULE_DEVICE_TABLE(of, ts4900_gpio_of_match_table); -static int ts4900_gpio_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ts4900_gpio_probe(struct i2c_client *client) { struct ts4900_gpio_priv *priv; u32 ngpio; @@ -186,7 +185,7 @@ static struct i2c_driver ts4900_gpio_driver = { .name = "ts4900-gpio", .of_match_table = ts4900_gpio_of_match_table, }, - .probe = ts4900_gpio_probe, + .probe_new = ts4900_gpio_probe, .id_table = ts4900_gpio_id_table, }; module_i2c_driver(ts4900_gpio_driver); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index a7d2358736fe761bfdd08bc30f9c8b611f80fdb2..bed0380c51360a4e5e8cbd15959f685b4d2922fd 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -89,6 +89,30 @@ struct acpi_gpio_chip { struct list_head deferred_req_irqs_list_entry; }; +/** + * struct acpi_gpio_info - ACPI GPIO specific information + * @adev: reference to ACPI device which consumes GPIO resource + * @flags: GPIO initialization flags + * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo + * @pin_config: pin bias as provided by ACPI + * @polarity: interrupt polarity as provided by ACPI + * @triggering: triggering type as provided by ACPI + * @wake_capable: wake capability as provided by ACPI + * @debounce: debounce timeout as provided by ACPI + * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping + */ +struct acpi_gpio_info { + struct acpi_device *adev; + enum gpiod_flags flags; + bool gpioint; + int pin_config; + int polarity; + int triggering; + bool wake_capable; + unsigned int debounce; + unsigned int quirks; +}; + /* * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a @@ -512,7 +536,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) if (ACPI_FAILURE(status)) return; - acpi_walk_resources(handle, "_AEI", + acpi_walk_resources(handle, METHOD_NAME__AEI, acpi_gpiochip_alloc_event, acpi_gpio); mutex_lock(&acpi_gpio_deferred_req_irqs_lock); @@ -670,8 +694,8 @@ __acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update) return ret; } -int -acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *info) +static int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, + struct acpi_gpio_info *info) { struct device *dev = &info->adev->dev; enum gpiod_flags old = *flags; @@ -690,8 +714,8 @@ acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *inf return ret; } -int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags, - struct acpi_gpio_info *info) +static int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags, + struct acpi_gpio_info *info) { switch (info->pin_config) { case ACPI_PIN_CONFIG_PULLUP: @@ -864,8 +888,9 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode, * function only returns the first. 
*/ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev, - const char *propname, int index, - struct acpi_gpio_info *info) + const char *propname, + int index, + struct acpi_gpio_info *info) { struct acpi_gpio_lookup lookup; int ret; @@ -896,6 +921,44 @@ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev, return ret ? ERR_PTR(ret) : lookup.desc; } +/** + * acpi_get_gpiod_from_data() - get a GPIO descriptor from ACPI data node + * @fwnode: pointer to an ACPI firmware node to get the GPIO information from + * @propname: Property name of the GPIO + * @index: index of GpioIo/GpioInt resource (starting from %0) + * @info: info pointer to fill in (optional) + * + * This function uses the property-based GPIO lookup to get to the GPIO + * resource with the relevant information from a data-only ACPI firmware node + * and uses that to obtain the GPIO descriptor to return. + * + * If the GPIO cannot be translated or there is an error an ERR_PTR is + * returned. + */ +static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode, + const char *propname, + int index, + struct acpi_gpio_info *info) +{ + struct acpi_gpio_lookup lookup; + int ret; + + if (!is_acpi_data_node(fwnode)) + return ERR_PTR(-ENODEV); + + if (!propname) + return ERR_PTR(-EINVAL); + + memset(&lookup, 0, sizeof(lookup)); + lookup.index = index; + + ret = acpi_gpio_property_lookup(fwnode, propname, index, &lookup); + if (ret) + return ERR_PTR(ret); + + ret = acpi_gpio_resource_lookup(&lookup, info); + return ret ? ERR_PTR(ret) : lookup.desc; +} + static bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id) { @@ -906,13 +969,13 @@ static bool acpi_can_fallback_to_crs(struct acpi_device *adev, return con_id == NULL; } -struct gpio_desc *acpi_find_gpio(struct device *dev, +struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int idx, enum gpiod_flags *dflags, unsigned long *lookupflags) { - struct acpi_device *adev = ACPI_COMPANION(dev); + struct acpi_device *adev = to_acpi_device_node(fwnode); struct acpi_gpio_info info; struct gpio_desc *desc; char propname[32]; @@ -928,7 +991,12 @@ struct gpio_desc *acpi_find_gpio(struct device *dev, gpio_suffixes[i]); } - desc = acpi_get_gpiod_by_index(adev, propname, idx, &info); + if (adev) + desc = acpi_get_gpiod_by_index(adev, + propname, idx, &info); + else + desc = acpi_get_gpiod_from_data(fwnode, + propname, idx, &info); if (!IS_ERR(desc)) break; if (PTR_ERR(desc) == -EPROBE_DEFER) @@ -937,7 +1005,7 @@ struct gpio_desc *acpi_find_gpio(struct device *dev, /* Then from plain _CRS GPIOs */ if (IS_ERR(desc)) { - if (!acpi_can_fallback_to_crs(adev, con_id)) + if (!adev || !acpi_can_fallback_to_crs(adev, con_id)) return ERR_PTR(-ENOENT); desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info); @@ -956,50 +1024,6 @@ struct gpio_desc *acpi_find_gpio(struct device *dev, return desc; } -/** - * acpi_node_get_gpiod() - get a GPIO descriptor from ACPI resources - * @fwnode: pointer to an ACPI firmware node to get the GPIO information from - * @propname: Property name of the GPIO - * @index: index of GpioIo/GpioInt resource (starting from %0) - * @info: info pointer to fill in (optional) - * - * If @fwnode is an ACPI device object, call acpi_get_gpiod_by_index() for it. - * Otherwise (i.e. it is a data-only non-device object), use the property-based - * GPIO lookup to get to the GPIO resource with the relevant information and use - * that to obtain the GPIO descriptor to return.
- * - * If the GPIO cannot be translated or there is an error an ERR_PTR is - * returned. - */ -struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode, - const char *propname, int index, - struct acpi_gpio_info *info) -{ - struct acpi_gpio_lookup lookup; - struct acpi_device *adev; - int ret; - - adev = to_acpi_device_node(fwnode); - if (adev) - return acpi_get_gpiod_by_index(adev, propname, index, info); - - if (!is_acpi_data_node(fwnode)) - return ERR_PTR(-ENODEV); - - if (!propname) - return ERR_PTR(-EINVAL); - - memset(&lookup, 0, sizeof(lookup)); - lookup.index = index; - - ret = acpi_gpio_property_lookup(fwnode, propname, index, &lookup); - if (ret) - return ERR_PTR(ret); - - ret = acpi_gpio_resource_lookup(&lookup, info); - return ret ? ERR_PTR(ret) : lookup.desc; -} - /** * acpi_dev_gpio_irq_wake_get_by() - Find GpioInt and translate it to Linux IRQ number * @adev: pointer to an ACPI device to get IRQ from diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h index 01e0cb480a00cfba13c9a80fcbcbf36bd6316e4e..9475f99a9694148b0f22a761614fdab7de793053 100644 --- a/drivers/gpio/gpiolib-acpi.h +++ b/drivers/gpio/gpiolib-acpi.h @@ -22,30 +22,6 @@ struct gpio_chip; struct gpio_desc; struct gpio_device; -/** - * struct acpi_gpio_info - ACPI GPIO specific information - * @adev: reference to ACPI device which consumes GPIO resource - * @flags: GPIO initialization flags - * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo - * @pin_config: pin bias as provided by ACPI - * @polarity: interrupt polarity as provided by ACPI - * @triggering: triggering type as provided by ACPI - * @wake_capable: wake capability as provided by ACPI - * @debounce: debounce timeout as provided by ACPI - * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping - */ -struct acpi_gpio_info { - struct acpi_device *adev; - enum gpiod_flags flags; - bool gpioint; - int pin_config; - int polarity; - int triggering; - bool wake_capable; - unsigned int debounce; - unsigned int quirks; -}; - #ifdef CONFIG_ACPI void acpi_gpiochip_add(struct gpio_chip *chip); void acpi_gpiochip_remove(struct gpio_chip *chip); @@ -55,19 +31,11 @@ void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev); void acpi_gpiochip_request_interrupts(struct gpio_chip *chip); void acpi_gpiochip_free_interrupts(struct gpio_chip *chip); -int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, - struct acpi_gpio_info *info); -int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags, - struct acpi_gpio_info *info); - -struct gpio_desc *acpi_find_gpio(struct device *dev, +struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int idx, enum gpiod_flags *dflags, unsigned long *lookupflags); -struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode, - const char *propname, int index, - struct acpi_gpio_info *info); int acpi_gpio_count(struct device *dev, const char *con_id); #else @@ -82,31 +50,13 @@ acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { } static inline void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { } -static inline int -acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *info) -{ - return 0; -} -static inline int -acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags, - struct acpi_gpio_info *info) -{ - return 0; -} - static inline struct gpio_desc * -acpi_find_gpio(struct device *dev, const char *con_id, +acpi_find_gpio(struct fwnode_handle *fwnode, const
char *con_id, unsigned int idx, enum gpiod_flags *dflags, unsigned long *lookupflags) { return ERR_PTR(-ENOENT); } -static inline struct gpio_desc * -acpi_node_get_gpiod(struct fwnode_handle *fwnode, const char *propname, - int index, struct acpi_gpio_info *info) -{ - return ERR_PTR(-ENXIO); -} static inline int acpi_gpio_count(struct device *dev, const char *con_id) { return -ENODEV; diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index 65b2c09a4576a1c6d680ba4c8849bc16a3caef16..e878e3f22b0e48e989c88d291f6f8de39980ba1a 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -57,6 +57,50 @@ static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8)); * interface to gpiolib GPIOs via ioctl()s. */ +typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *); +typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long); +typedef ssize_t (*read_fn)(struct file *, char __user *, + size_t count, loff_t *); + +static __poll_t call_poll_locked(struct file *file, + struct poll_table_struct *wait, + struct gpio_device *gdev, poll_fn func) +{ + __poll_t ret; + + down_read(&gdev->sem); + ret = func(file, wait); + up_read(&gdev->sem); + + return ret; +} + +static long call_ioctl_locked(struct file *file, unsigned int cmd, + unsigned long arg, struct gpio_device *gdev, + ioctl_fn func) +{ + long ret; + + down_read(&gdev->sem); + ret = func(file, cmd, arg); + up_read(&gdev->sem); + + return ret; +} + +static ssize_t call_read_locked(struct file *file, char __user *buf, + size_t count, loff_t *f_ps, + struct gpio_device *gdev, read_fn func) +{ + ssize_t ret; + + down_read(&gdev->sem); + ret = func(file, buf, count, f_ps); + up_read(&gdev->sem); + + return ret; +} + /* * GPIO line handle management */ @@ -193,8 +237,8 @@ static long linehandle_set_config(struct linehandle_state *lh, return 0; } -static long linehandle_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd, + unsigned long arg) { struct linehandle_state *lh = file->private_data; void __user *ip = (void __user *)arg; @@ -203,6 +247,9 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd, unsigned int i; int ret; + if (!lh->gdev->chip) + return -ENODEV; + switch (cmd) { case GPIOHANDLE_GET_LINE_VALUES_IOCTL: /* NOTE: It's okay to read values of output lines */ @@ -249,6 +296,15 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd, } } +static long linehandle_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct linehandle_state *lh = file->private_data; + + return call_ioctl_locked(file, cmd, arg, lh->gdev, + linehandle_ioctl_unlocked); +} + #ifdef CONFIG_COMPAT static long linehandle_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) @@ -412,7 +468,7 @@ out_free_lh: * @desc: the GPIO descriptor for this line. 
* @req: the corresponding line request * @irq: the interrupt triggered in response to events on this GPIO - * @eflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or + * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied * @timestamp_ns: cache for the timestamp storing it between hardirq and * IRQ thread, used to bring the timestamp close to the actual event @@ -1380,12 +1436,15 @@ static long linereq_set_config(struct linereq *lr, void __user *ip) return ret; } -static long linereq_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd, + unsigned long arg) { struct linereq *lr = file->private_data; void __user *ip = (void __user *)arg; + if (!lr->gdev->chip) + return -ENODEV; + switch (cmd) { case GPIO_V2_LINE_GET_VALUES_IOCTL: return linereq_get_values(lr, ip); @@ -1398,6 +1457,15 @@ static long linereq_ioctl(struct file *file, unsigned int cmd, } } +static long linereq_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct linereq *lr = file->private_data; + + return call_ioctl_locked(file, cmd, arg, lr->gdev, + linereq_ioctl_unlocked); +} + #ifdef CONFIG_COMPAT static long linereq_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) @@ -1406,12 +1474,15 @@ static long linereq_ioctl_compat(struct file *file, unsigned int cmd, } #endif -static __poll_t linereq_poll(struct file *file, - struct poll_table_struct *wait) +static __poll_t linereq_poll_unlocked(struct file *file, + struct poll_table_struct *wait) { struct linereq *lr = file->private_data; __poll_t events = 0; + if (!lr->gdev->chip) + return EPOLLHUP | EPOLLERR; + poll_wait(file, &lr->wait, wait); if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events, @@ -1421,16 +1492,25 @@ static __poll_t linereq_poll(struct file *file, return events; } -static ssize_t linereq_read(struct file *file, - char __user *buf, - size_t count, - loff_t *f_ps) +static __poll_t linereq_poll(struct file *file, + struct poll_table_struct *wait) +{ + struct linereq *lr = file->private_data; + + return call_poll_locked(file, wait, lr->gdev, linereq_poll_unlocked); +} + +static ssize_t linereq_read_unlocked(struct file *file, char __user *buf, + size_t count, loff_t *f_ps) { struct linereq *lr = file->private_data; struct gpio_v2_line_event le; ssize_t bytes_read = 0; int ret; + if (!lr->gdev->chip) + return -ENODEV; + if (count < sizeof(le)) return -EINVAL; @@ -1475,6 +1555,15 @@ static ssize_t linereq_read(struct file *file, return bytes_read; } +static ssize_t linereq_read(struct file *file, char __user *buf, + size_t count, loff_t *f_ps) +{ + struct linereq *lr = file->private_data; + + return call_read_locked(file, buf, count, f_ps, lr->gdev, + linereq_read_unlocked); +} + static void linereq_free(struct linereq *lr) { unsigned int i; @@ -1712,12 +1801,15 @@ struct lineevent_state { (GPIOEVENT_REQUEST_RISING_EDGE | \ GPIOEVENT_REQUEST_FALLING_EDGE) -static __poll_t lineevent_poll(struct file *file, - struct poll_table_struct *wait) +static __poll_t lineevent_poll_unlocked(struct file *file, + struct poll_table_struct *wait) { struct lineevent_state *le = file->private_data; __poll_t events = 0; + if (!le->gdev->chip) + return EPOLLHUP | EPOLLERR; + poll_wait(file, &le->wait, wait); if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock)) @@ -1726,15 +1818,21 @@ static __poll_t lineevent_poll(struct file *file, return events; } +static 
__poll_t lineevent_poll(struct file *file, + struct poll_table_struct *wait) +{ + struct lineevent_state *le = file->private_data; + + return call_poll_locked(file, wait, le->gdev, lineevent_poll_unlocked); +} + struct compat_gpioeevent_data { compat_u64 timestamp; u32 id; }; -static ssize_t lineevent_read(struct file *file, - char __user *buf, - size_t count, - loff_t *f_ps) +static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf, + size_t count, loff_t *f_ps) { struct lineevent_state *le = file->private_data; struct gpioevent_data ge; @@ -1742,6 +1840,9 @@ static ssize_t lineevent_read(struct file *file, ssize_t ge_size; int ret; + if (!le->gdev->chip) + return -ENODEV; + /* * When compatible system call is being used the struct gpioevent_data, * in case of at least ia32, has different size due to the alignment @@ -1799,6 +1900,15 @@ static ssize_t lineevent_read(struct file *file, return bytes_read; } +static ssize_t lineevent_read(struct file *file, char __user *buf, + size_t count, loff_t *f_ps) +{ + struct lineevent_state *le = file->private_data; + + return call_read_locked(file, buf, count, f_ps, le->gdev, + lineevent_read_unlocked); +} + static void lineevent_free(struct lineevent_state *le) { if (le->irq) @@ -1816,13 +1926,16 @@ static int lineevent_release(struct inode *inode, struct file *file) return 0; } -static long lineevent_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd, + unsigned long arg) { struct lineevent_state *le = file->private_data; void __user *ip = (void __user *)arg; struct gpiohandle_data ghd; + if (!le->gdev->chip) + return -ENODEV; + /* * We can get the value for an event line but not set it, * because it is input by definition. 
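
The gpiolib-cdev hunks around this point all follow one scheme: each file operation is split into an *_unlocked() body that first checks that the backing chip is still registered, plus a thin wrapper that holds gdev->sem for reading (via the call_*_locked() helpers added at the top of the file) so the chip cannot be unregistered while the operation runs. A hedged sketch of the shape, with demo_* as placeholder names rather than code from the patch:

struct demo_state {
	struct gpio_device *gdev;	/* backing GPIO device */
};

static long demo_ioctl_unlocked(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct demo_state *state = file->private_data;

	/* The chardev can outlive the chip; bail out if it is gone. */
	if (!state->gdev->chip)
		return -ENODEV;

	return 0;	/* real command handling elided */
}

static long demo_ioctl(struct file *file, unsigned int cmd,
		       unsigned long arg)
{
	struct demo_state *state = file->private_data;

	/* Hold gdev->sem as reader for the duration of the call. */
	return call_ioctl_locked(file, cmd, arg, state->gdev,
				 demo_ioctl_unlocked);
}

The poll wrappers return EPOLLHUP | EPOLLERR instead of -ENODEV for the chip-gone case, since __poll_t carries no error codes.
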
@@ -1845,6 +1958,15 @@ static long lineevent_ioctl(struct file *file, unsigned int cmd, return -EINVAL; } +static long lineevent_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct lineevent_state *le = file->private_data; + + return call_ioctl_locked(file, cmd, arg, le->gdev, + lineevent_ioctl_unlocked); +} + #ifdef CONFIG_COMPAT static long lineevent_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) @@ -2403,12 +2525,15 @@ static int lineinfo_changed_notify(struct notifier_block *nb, return NOTIFY_OK; } -static __poll_t lineinfo_watch_poll(struct file *file, - struct poll_table_struct *pollt) +static __poll_t lineinfo_watch_poll_unlocked(struct file *file, + struct poll_table_struct *pollt) { struct gpio_chardev_data *cdev = file->private_data; __poll_t events = 0; + if (!cdev->gdev->chip) + return EPOLLHUP | EPOLLERR; + poll_wait(file, &cdev->wait, pollt); if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events, @@ -2418,8 +2543,17 @@ static __poll_t lineinfo_watch_poll(struct file *file, return events; } -static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, - size_t count, loff_t *off) +static __poll_t lineinfo_watch_poll(struct file *file, + struct poll_table_struct *pollt) +{ + struct gpio_chardev_data *cdev = file->private_data; + + return call_poll_locked(file, pollt, cdev->gdev, + lineinfo_watch_poll_unlocked); +} + +static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf, + size_t count, loff_t *off) { struct gpio_chardev_data *cdev = file->private_data; struct gpio_v2_line_info_changed event; @@ -2427,6 +2561,9 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, int ret; size_t event_size; + if (!cdev->gdev->chip) + return -ENODEV; + #ifndef CONFIG_GPIO_CDEV_V1 event_size = sizeof(struct gpio_v2_line_info_changed); if (count < event_size) @@ -2494,6 +2631,15 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, return bytes_read; } +static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, + size_t count, loff_t *off) +{ + struct gpio_chardev_data *cdev = file->private_data; + + return call_read_locked(file, buf, count, off, cdev->gdev, + lineinfo_watch_read_unlocked); +} + /** * gpio_chrdev_open() - open the chardev for ioctl operations * @inode: inode for this chardev @@ -2507,13 +2653,17 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file) struct gpio_chardev_data *cdev; int ret = -ENOMEM; + down_read(&gdev->sem); + /* Fail on open if the backing gpiochip is gone */ - if (!gdev->chip) - return -ENODEV; + if (!gdev->chip) { + ret = -ENODEV; + goto out_unlock; + } cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (!cdev) - return -ENOMEM; + goto out_unlock; cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL); if (!cdev->watched_lines) @@ -2536,6 +2686,8 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file) if (ret) goto out_unregister_notifier; + up_read(&gdev->sem); + return ret; out_unregister_notifier: @@ -2545,6 +2697,8 @@ out_free_bitmap: bitmap_free(cdev->watched_lines); out_free_cdev: kfree(cdev); +out_unlock: + up_read(&gdev->sem); return ret; } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 0e4e1291604d67da9fcfd3f9f1c4da86f963eb90..4fff7258ee41a6f511d303ff4e5f364ee7a71c80 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -85,7 +85,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data) { struct 
of_phandle_args *gpiospec = data; - return chip->gpiodev->dev.of_node == gpiospec->np && + return device_match_of_node(&chip->gpiodev->dev, gpiospec->np) && chip->of_xlate && chip->of_xlate(chip, gpiospec, NULL) >= 0; } @@ -112,55 +112,133 @@ static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip, return gpiochip_get_desc(chip, ret); } -/** - * of_gpio_need_valid_mask() - figure out if the OF GPIO driver needs - * to set the .valid_mask - * @gc: the target gpio_chip - * - * Return: true if the valid mask needs to be set +/* + * Overrides stated polarity of a gpio line and warns when there is a + * discrepancy. */ -bool of_gpio_need_valid_mask(const struct gpio_chip *gc) +static void of_gpio_quirk_polarity(const struct device_node *np, + bool active_high, + enum of_gpio_flags *flags) { - int size; - const struct device_node *np = gc->of_node; + if (active_high) { + if (*flags & OF_GPIO_ACTIVE_LOW) { + pr_warn("%s GPIO handle specifies active low - ignored\n", + of_node_full_name(np)); + *flags &= ~OF_GPIO_ACTIVE_LOW; + } + } else { + if (!(*flags & OF_GPIO_ACTIVE_LOW)) + pr_info("%s enforce active low on GPIO handle\n", + of_node_full_name(np)); + *flags |= OF_GPIO_ACTIVE_LOW; + } +} - size = of_property_count_u32_elems(np, "gpio-reserved-ranges"); - if (size > 0 && size % 2 == 0) - return true; - return false; +/* + * This quirk does static polarity overrides in cases where existing + * DTS specified incorrect polarity. + */ +static void of_gpio_try_fixup_polarity(const struct device_node *np, + const char *propname, + enum of_gpio_flags *flags) +{ + static const struct { + const char *compatible; + const char *propname; + bool active_high; + } gpios[] = { +#if !IS_ENABLED(CONFIG_LCD_HX8357) + /* + * Himax LCD controllers used incorrectly named + * "gpios-reset" property and also specified wrong + * polarity. + */ + { "himax,hx8357", "gpios-reset", false }, + { "himax,hx8369", "gpios-reset", false }, +#endif + }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(gpios); i++) { + if (of_device_is_compatible(np, gpios[i].compatible) && + !strcmp(propname, gpios[i].propname)) { + of_gpio_quirk_polarity(np, gpios[i].active_high, flags); + break; + } + } } -static void of_gpio_flags_quirks(const struct device_node *np, - const char *propname, - enum of_gpio_flags *flags, - int index) +static void of_gpio_set_polarity_by_property(const struct device_node *np, + const char *propname, + enum of_gpio_flags *flags) { - /* - * Some GPIO fixed regulator quirks. - * Note that active low is the default. 
- */ - if (IS_ENABLED(CONFIG_REGULATOR) && - (of_device_is_compatible(np, "regulator-fixed") || - of_device_is_compatible(np, "reg-fixed-voltage") || - (!(strcmp(propname, "enable-gpio") && - strcmp(propname, "enable-gpios")) && - of_device_is_compatible(np, "regulator-gpio")))) { - bool active_low = !of_property_read_bool(np, - "enable-active-high"); + static const struct { + const char *compatible; + const char *gpio_propname; + const char *polarity_propname; + } gpios[] = { +#if IS_ENABLED(CONFIG_FEC) + /* Freescale Fast Ethernet Controller */ + { "fsl,imx25-fec", "phy-reset-gpios", "phy-reset-active-high" }, + { "fsl,imx27-fec", "phy-reset-gpios", "phy-reset-active-high" }, + { "fsl,imx28-fec", "phy-reset-gpios", "phy-reset-active-high" }, + { "fsl,imx6q-fec", "phy-reset-gpios", "phy-reset-active-high" }, + { "fsl,mvf600-fec", "phy-reset-gpios", "phy-reset-active-high" }, + { "fsl,imx6sx-fec", "phy-reset-gpios", "phy-reset-active-high" }, + { "fsl,imx6ul-fec", "phy-reset-gpios", "phy-reset-active-high" }, + { "fsl,imx8mq-fec", "phy-reset-gpios", "phy-reset-active-high" }, + { "fsl,imx8qm-fec", "phy-reset-gpios", "phy-reset-active-high" }, + { "fsl,s32v234-fec", "phy-reset-gpios", "phy-reset-active-high" }, +#endif +#if IS_ENABLED(CONFIG_PCI_IMX6) + { "fsl,imx6q-pcie", "reset-gpio", "reset-gpio-active-high" }, + { "fsl,imx6sx-pcie", "reset-gpio", "reset-gpio-active-high" }, + { "fsl,imx6qp-pcie", "reset-gpio", "reset-gpio-active-high" }, + { "fsl,imx7d-pcie", "reset-gpio", "reset-gpio-active-high" }, + { "fsl,imx8mq-pcie", "reset-gpio", "reset-gpio-active-high" }, + { "fsl,imx8mm-pcie", "reset-gpio", "reset-gpio-active-high" }, + { "fsl,imx8mp-pcie", "reset-gpio", "reset-gpio-active-high" }, +#endif + /* * The regulator GPIO handles are specified such that the * presence or absence of "enable-active-high" solely controls * the polarity of the GPIO line. Any phandle flags must * be actively ignored. */ - if ((*flags & OF_GPIO_ACTIVE_LOW) && !active_low) { - pr_warn("%s GPIO handle specifies active low - ignored\n", - of_node_full_name(np)); - *flags &= ~OF_GPIO_ACTIVE_LOW; +#if IS_ENABLED(CONFIG_REGULATOR_FIXED_VOLTAGE) + { "regulator-fixed", "gpios", "enable-active-high" }, + { "regulator-fixed", "gpio", "enable-active-high" }, + { "reg-fixed-voltage", "gpios", "enable-active-high" }, + { "reg-fixed-voltage", "gpio", "enable-active-high" }, +#endif +#if IS_ENABLED(CONFIG_REGULATOR_GPIO) + { "regulator-gpio", "enable-gpio", "enable-active-high" }, + { "regulator-gpio", "enable-gpios", "enable-active-high" }, +#endif + }; + unsigned int i; + bool active_high; + + for (i = 0; i < ARRAY_SIZE(gpios); i++) { + if (of_device_is_compatible(np, gpios[i].compatible) && + !strcmp(propname, gpios[i].gpio_propname)) { + active_high = of_property_read_bool(np, + gpios[i].polarity_propname); + of_gpio_quirk_polarity(np, active_high, flags); + break; } - if (active_low) - *flags |= OF_GPIO_ACTIVE_LOW; } +} + +static void of_gpio_flags_quirks(const struct device_node *np, + const char *propname, + enum of_gpio_flags *flags, + int index) +{ + of_gpio_try_fixup_polarity(np, propname, flags); + of_gpio_set_polarity_by_property(np, propname, flags); + /* * Legacy open drain handling for fixed voltage regulators. */ @@ -200,18 +278,10 @@ static void of_gpio_flags_quirks(const struct device_node *np, * conflict and the "spi-cs-high" flag will * take precedence. 
*/ - if (of_property_read_bool(child, "spi-cs-high")) { - if (*flags & OF_GPIO_ACTIVE_LOW) { - pr_warn("%s GPIO handle specifies active low - ignored\n", - of_node_full_name(child)); - *flags &= ~OF_GPIO_ACTIVE_LOW; - } - } else { - if (!(*flags & OF_GPIO_ACTIVE_LOW)) - pr_info("%s enforce active low on chipselect handle\n", - of_node_full_name(child)); - *flags |= OF_GPIO_ACTIVE_LOW; - } + bool active_high = of_property_read_bool(child, + "spi-cs-high"); + of_gpio_quirk_polarity(child, active_high, + flags); of_node_put(child); break; } @@ -365,127 +435,164 @@ struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node, } EXPORT_SYMBOL_GPL(gpiod_get_from_of_node); -/* - * The SPI GPIO bindings happened before we managed to establish that GPIO - * properties should be named "foo-gpios" so we have this special kludge for - * them. - */ -static struct gpio_desc *of_find_spi_gpio(struct device_node *np, - const char *con_id, - unsigned int idx, - enum of_gpio_flags *of_flags) -{ - char prop_name[32]; /* 32 is max size of property name */ - - /* - * Hopefully the compiler stubs the rest of the function if this - * is false. - */ - if (!IS_ENABLED(CONFIG_SPI_MASTER)) - return ERR_PTR(-ENOENT); - - /* Allow this specifically for "spi-gpio" devices */ - if (!of_device_is_compatible(np, "spi-gpio") || !con_id) - return ERR_PTR(-ENOENT); - - /* Will be "gpio-sck", "gpio-mosi" or "gpio-miso" */ - snprintf(prop_name, sizeof(prop_name), "%s-%s", "gpio", con_id); - - return of_get_named_gpiod_flags(np, prop_name, idx, of_flags); -} - -/* - * The old Freescale bindings use simply "gpios" as name for the chip select - * lines rather than "cs-gpios" like all other SPI hardware. Account for this - * with a special quirk. - */ -static struct gpio_desc *of_find_spi_cs_gpio(struct device_node *np, +static struct gpio_desc *of_find_gpio_rename(struct device_node *np, const char *con_id, unsigned int idx, enum of_gpio_flags *of_flags) { - if (!IS_ENABLED(CONFIG_SPI_MASTER)) - return ERR_PTR(-ENOENT); + static const struct of_rename_gpio { + const char *con_id; + const char *legacy_id; /* NULL - same as con_id */ + /* + * Compatible string can be set to NULL in case where + * matching to a particular compatible is not practical, + * but it should only be done for gpio names that have + * vendor prefix to reduce risk of false positives. + * Addition of such entries is strongly discouraged. 
+ */ + const char *compatible; + } gpios[] = { +#if !IS_ENABLED(CONFIG_LCD_HX8357) + /* Himax LCD controllers used "gpios-reset" */ + { "reset", "gpios-reset", "himax,hx8357" }, + { "reset", "gpios-reset", "himax,hx8369" }, +#endif +#if IS_ENABLED(CONFIG_MFD_ARIZONA) + { "wlf,reset", NULL, NULL }, +#endif +#if IS_ENABLED(CONFIG_RTC_DRV_MOXART) + { "rtc-data", "gpio-rtc-data", "moxa,moxart-rtc" }, + { "rtc-sclk", "gpio-rtc-sclk", "moxa,moxart-rtc" }, + { "rtc-reset", "gpio-rtc-reset", "moxa,moxart-rtc" }, +#endif +#if IS_ENABLED(CONFIG_NFC_MRVL_I2C) + { "reset", "reset-n-io", "marvell,nfc-i2c" }, +#endif +#if IS_ENABLED(CONFIG_NFC_MRVL_SPI) + { "reset", "reset-n-io", "marvell,nfc-spi" }, +#endif +#if IS_ENABLED(CONFIG_NFC_MRVL_UART) + { "reset", "reset-n-io", "marvell,nfc-uart" }, + { "reset", "reset-n-io", "mrvl,nfc-uart" }, +#endif +#if !IS_ENABLED(CONFIG_PCI_LANTIQ) + /* MIPS Lantiq PCI */ + { "reset", "gpios-reset", "lantiq,pci-xway" }, +#endif - /* Allow this specifically for Freescale and PPC devices */ - if (!of_device_is_compatible(np, "fsl,spi") && - !of_device_is_compatible(np, "aeroflexgaisler,spictrl") && - !of_device_is_compatible(np, "ibm,ppc4xx-spi")) - return ERR_PTR(-ENOENT); - /* Allow only if asking for "cs-gpios" */ - if (!con_id || strcmp(con_id, "cs")) - return ERR_PTR(-ENOENT); + /* + * Some regulator bindings happened before we managed to + * establish that GPIO properties should be named + * "foo-gpios" so we have this special kludge for them. + */ +#if IS_ENABLED(CONFIG_REGULATOR_ARIZONA_LDO1) + { "wlf,ldoena", NULL, NULL }, /* Arizona */ +#endif +#if IS_ENABLED(CONFIG_REGULATOR_WM8994) + { "wlf,ldo1ena", NULL, NULL }, /* WM8994 */ + { "wlf,ldo2ena", NULL, NULL }, /* WM8994 */ +#endif - /* - * While all other SPI controllers use "cs-gpios" the Freescale - * uses just "gpios" so translate to that when "cs-gpios" is - * requested. - */ - return of_get_named_gpiod_flags(np, "gpios", idx, of_flags); -} +#if IS_ENABLED(CONFIG_SND_SOC_CS42L56) + { "reset", "cirrus,gpio-nreset", "cirrus,cs42l56" }, +#endif +#if IS_ENABLED(CONFIG_SND_SOC_TLV320AIC3X) + { "reset", "gpio-reset", "ti,tlv320aic3x" }, + { "reset", "gpio-reset", "ti,tlv320aic33" }, + { "reset", "gpio-reset", "ti,tlv320aic3007" }, + { "reset", "gpio-reset", "ti,tlv320aic3104" }, + { "reset", "gpio-reset", "ti,tlv320aic3106" }, +#endif +#if IS_ENABLED(CONFIG_SPI_GPIO) + /* + * The SPI GPIO bindings happened before we managed to + * establish that GPIO properties should be named + * "foo-gpios" so we have this special kludge for them. + */ + { "miso", "gpio-miso", "spi-gpio" }, + { "mosi", "gpio-mosi", "spi-gpio" }, + { "sck", "gpio-sck", "spi-gpio" }, +#endif -/* - * Some regulator bindings happened before we managed to establish that GPIO - * properties should be named "foo-gpios" so we have this special kludge for - * them. - */ -static struct gpio_desc *of_find_regulator_gpio(struct device_node *np, - const char *con_id, - unsigned int idx, - enum of_gpio_flags *of_flags) -{ - /* These are the connection IDs we accept as legacy GPIO phandles */ - const char *whitelist[] = { - "wlf,ldoena", /* Arizona */ - "wlf,ldo1ena", /* WM8994 */ - "wlf,ldo2ena", /* WM8994 */ - }; - int i; + /* + * The old Freescale bindings use simply "gpios" as name + * for the chip select lines rather than "cs-gpios" like + * all other SPI hardware. Allow this specifically for + * Freescale and PPC devices. 
+ */ +#if IS_ENABLED(CONFIG_SPI_FSL_SPI) + { "cs", "gpios", "fsl,spi" }, + { "cs", "gpios", "aeroflexgaisler,spictrl" }, +#endif +#if IS_ENABLED(CONFIG_SPI_PPC4xx) + { "cs", "gpios", "ibm,ppc4xx-spi" }, +#endif - if (!IS_ENABLED(CONFIG_REGULATOR)) - return ERR_PTR(-ENOENT); +#if IS_ENABLED(CONFIG_TYPEC_FUSB302) + /* + * Fairchild FUSB302 host is using undocumented "fcs,int_n" + * property without the compulsory "-gpios" suffix. + */ + { "fcs,int_n", NULL, "fcs,fusb302" }, +#endif + }; + struct gpio_desc *desc; + const char *legacy_id; + unsigned int i; if (!con_id) return ERR_PTR(-ENOENT); - i = match_string(whitelist, ARRAY_SIZE(whitelist), con_id); - if (i < 0) - return ERR_PTR(-ENOENT); + for (i = 0; i < ARRAY_SIZE(gpios); i++) { + if (strcmp(con_id, gpios[i].con_id)) + continue; + + if (gpios[i].compatible && + !of_device_is_compatible(np, gpios[i].compatible)) + continue; + + legacy_id = gpios[i].legacy_id ?: gpios[i].con_id; + desc = of_get_named_gpiod_flags(np, legacy_id, idx, of_flags); + if (!gpiod_not_found(desc)) { + pr_info("%s uses legacy gpio name '%s' instead of '%s-gpios'\n", + of_node_full_name(np), legacy_id, con_id); + return desc; + } + } - return of_get_named_gpiod_flags(np, con_id, idx, of_flags); + return ERR_PTR(-ENOENT); } -static struct gpio_desc *of_find_arizona_gpio(struct device_node *np, - const char *con_id, - unsigned int idx, - enum of_gpio_flags *of_flags) +static struct gpio_desc *of_find_mt2701_gpio(struct device_node *np, + const char *con_id, + unsigned int idx, + enum of_gpio_flags *of_flags) { - if (!IS_ENABLED(CONFIG_MFD_ARIZONA)) - return ERR_PTR(-ENOENT); + struct gpio_desc *desc; + const char *legacy_id; - if (!con_id || strcmp(con_id, "wlf,reset")) + if (!IS_ENABLED(CONFIG_SND_SOC_MT2701_CS42448)) return ERR_PTR(-ENOENT); - return of_get_named_gpiod_flags(np, con_id, idx, of_flags); -} + if (!of_device_is_compatible(np, "mediatek,mt2701-cs42448-machine")) + return ERR_PTR(-ENOENT); -static struct gpio_desc *of_find_usb_gpio(struct device_node *np, - const char *con_id, - unsigned int idx, - enum of_gpio_flags *of_flags) -{ - /* - * Currently this USB quirk is only for the Fairchild FUSB302 host - * which is using an undocumented DT GPIO line named "fcs,int_n" - * without the compulsory "-gpios" suffix. 
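of_find_gpio_rename() above collapses what used to be four separate quirk functions into a single data-driven walk: match con_id, optionally match the compatible, then retry the lookup under the legacy property name. One detail worth calling out is the GNU C "elvis" operator a ?: b, which evaluates a once and falls back to b when a is null; the kernel uses it for the "legacy_id is NULL means same as con_id" convention. A small standalone model of that fallback logic, with entries shortened from the table above:

#include <stdio.h>
#include <string.h>

struct rename_entry {
	const char *con_id;	/* modern name, looked up as "<con_id>-gpios" */
	const char *legacy_id;	/* NULL: legacy name equals con_id */
};

static const struct rename_entry table[] = {
	{ "reset",     "gpios-reset" },
	{ "cs",        "gpios"       },
	{ "wlf,reset", NULL          },
};

static const char *legacy_name(const char *con_id)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (strcmp(con_id, table[i].con_id))
			continue;
		/* GNU "elvis": a ?: b == a ? a : b, with a evaluated once */
		return table[i].legacy_id ?: table[i].con_id;
	}
	return NULL;
}

int main(void)
{
	printf("reset     -> %s\n", legacy_name("reset"));
	printf("wlf,reset -> %s\n", legacy_name("wlf,reset"));
	return 0;
}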
- */ - if (!IS_ENABLED(CONFIG_TYPEC_FUSB302)) + if (!con_id || strcmp(con_id, "i2s1-in-sel")) return ERR_PTR(-ENOENT); - if (!con_id || strcmp(con_id, "fcs,int_n")) + if (idx == 0) + legacy_id = "i2s1-in-sel-gpio1"; + else if (idx == 1) + legacy_id = "i2s1-in-sel-gpio2"; + else return ERR_PTR(-ENOENT); - return of_get_named_gpiod_flags(np, con_id, idx, of_flags); + desc = of_get_named_gpiod_flags(np, legacy_id, 0, of_flags); + if (!gpiod_not_found(desc)) + pr_info("%s is using legacy gpio name '%s' instead of '%s-gpios'\n", + of_node_full_name(np), legacy_id, con_id); + + return desc; } typedef struct gpio_desc *(*of_find_gpio_quirk)(struct device_node *np, @@ -493,15 +600,12 @@ typedef struct gpio_desc *(*of_find_gpio_quirk)(struct device_node *np, unsigned int idx, enum of_gpio_flags *of_flags); static const of_find_gpio_quirk of_find_gpio_quirks[] = { - of_find_spi_gpio, - of_find_spi_cs_gpio, - of_find_regulator_gpio, - of_find_arizona_gpio, - of_find_usb_gpio, + of_find_gpio_rename, + of_find_mt2701_gpio, NULL }; -struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, +struct gpio_desc *of_find_gpio(struct device_node *np, const char *con_id, unsigned int idx, unsigned long *flags) { char prop_name[32]; /* 32 is max size of property name */ @@ -519,8 +623,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, snprintf(prop_name, sizeof(prop_name), "%s", gpio_suffixes[i]); - desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, - &of_flags); + desc = of_get_named_gpiod_flags(np, prop_name, idx, &of_flags); if (!gpiod_not_found(desc)) break; @@ -528,7 +631,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, /* Properly named GPIO was not found, try workarounds */ for (q = of_find_gpio_quirks; gpiod_not_found(desc) && *q; q++) - desc = (*q)(dev->of_node, con_id, idx, &of_flags); + desc = (*q)(np, con_id, idx, &of_flags); if (IS_ERR(desc)) return desc; @@ -831,8 +934,8 @@ int of_mm_gpiochip_add_data(struct device_node *np, if (mm_gc->save_regs) mm_gc->save_regs(mm_gc); - of_node_put(mm_gc->gc.of_node); - mm_gc->gc.of_node = of_node_get(np); + fwnode_handle_put(mm_gc->gc.fwnode); + mm_gc->gc.fwnode = fwnode_handle_get(of_fwnode_handle(np)); ret = gpiochip_add_data(gc, data); if (ret) @@ -858,37 +961,12 @@ void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc) { struct gpio_chip *gc = &mm_gc->gc; - if (!mm_gc) - return; - gpiochip_remove(gc); iounmap(mm_gc->regs); kfree(gc->label); } EXPORT_SYMBOL_GPL(of_mm_gpiochip_remove); -static void of_gpiochip_init_valid_mask(struct gpio_chip *chip) -{ - int len, i; - u32 start, count; - struct device_node *np = chip->of_node; - - len = of_property_count_u32_elems(np, "gpio-reserved-ranges"); - if (len < 0 || len % 2 != 0) - return; - - for (i = 0; i < len; i += 2) { - of_property_read_u32_index(np, "gpio-reserved-ranges", - i, &start); - of_property_read_u32_index(np, "gpio-reserved-ranges", - i + 1, &count); - if (start >= chip->ngpio || start + count > chip->ngpio) - continue; - - bitmap_clear(chip->valid_mask, start, count); - } -}; - #ifdef CONFIG_PINCTRL static int of_gpiochip_add_pin_range(struct gpio_chip *chip) { @@ -982,9 +1060,11 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip) { return 0; } int of_gpiochip_add(struct gpio_chip *chip) { + struct device_node *np; int ret; - if (!chip->of_node) + np = to_of_node(dev_fwnode(&chip->gpiodev->dev)); + if (!np) return 0; if (!chip->of_xlate) { @@ -995,24 +1075,22 @@ int of_gpiochip_add(struct gpio_chip *chip) if 
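With the consolidation, the quirk list shrinks to two entries, but the dispatch mechanism is unchanged: a NULL-terminated array of function pointers that of_find_gpio() walks until one of them produces a descriptor. The runnable sketch below models that chain-of-responsibility loop; the return value -1 plays the role of gpiod_not_found().

#include <stdio.h>
#include <stddef.h>

typedef int (*quirk_fn)(const char *con_id);

static int quirk_rename(const char *con_id) { (void)con_id; return -1; /* not found */ }
static int quirk_mt2701(const char *con_id) { (void)con_id; return 42; /* resolved */ }

static const quirk_fn quirks[] = {
	quirk_rename,
	quirk_mt2701,
	NULL,			/* sentinel ends the walk */
};

static int find_with_quirks(const char *con_id)
{
	const quirk_fn *q;
	int ret = -1;

	/* Keep trying quirks until one resolves the request. */
	for (q = quirks; ret == -1 && *q; q++)
		ret = (*q)(con_id);
	return ret;
}

int main(void)
{
	printf("resolved to %d\n", find_with_quirks("i2s1-in-sel"));
	return 0;
}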
(chip->of_gpio_n_cells > MAX_PHANDLE_ARGS) return -EINVAL; - of_gpiochip_init_valid_mask(chip); - ret = of_gpiochip_add_pin_range(chip); if (ret) return ret; - of_node_get(chip->of_node); + fwnode_handle_get(chip->fwnode); ret = of_gpiochip_scan_gpios(chip); if (ret) - of_node_put(chip->of_node); + fwnode_handle_put(chip->fwnode); return ret; } void of_gpiochip_remove(struct gpio_chip *chip) { - of_node_put(chip->of_node); + fwnode_handle_put(chip->fwnode); } void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev) diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h index 1b5df39a952e0753c865eee823cc4944b6a223fe..a6c593e6766c59d03fc7060beeefbb9b36990763 100644 --- a/drivers/gpio/gpiolib-of.h +++ b/drivers/gpio/gpiolib-of.h @@ -16,17 +16,16 @@ struct gpio_desc; struct gpio_device; #ifdef CONFIG_OF_GPIO -struct gpio_desc *of_find_gpio(struct device *dev, +struct gpio_desc *of_find_gpio(struct device_node *np, const char *con_id, unsigned int idx, unsigned long *lookupflags); int of_gpiochip_add(struct gpio_chip *gc); void of_gpiochip_remove(struct gpio_chip *gc); int of_gpio_get_count(struct device *dev, const char *con_id); -bool of_gpio_need_valid_mask(const struct gpio_chip *gc); void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev); #else -static inline struct gpio_desc *of_find_gpio(struct device *dev, +static inline struct gpio_desc *of_find_gpio(struct device_node *np, const char *con_id, unsigned int idx, unsigned long *lookupflags) @@ -39,10 +38,6 @@ static inline int of_gpio_get_count(struct device *dev, const char *con_id) { return 0; } -static inline bool of_gpio_need_valid_mask(const struct gpio_chip *gc) -{ - return false; -} static inline void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev) { diff --git a/drivers/gpio/gpiolib-swnode.c b/drivers/gpio/gpiolib-swnode.c new file mode 100644 index 0000000000000000000000000000000000000000..dd9ccac214d19ec581f609f1709e57769175b931 --- /dev/null +++ b/drivers/gpio/gpiolib-swnode.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Software Node helpers for the GPIO API + * + * Copyright 2022 Google LLC + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gpiolib.h" +#include "gpiolib-swnode.h" + +static void swnode_format_propname(const char *con_id, char *propname, + size_t max_size) +{ + /* + * Note we do not need to try both -gpios and -gpio suffixes, + * as, unlike OF and ACPI, we can fix software nodes to conform + * to the proper binding. 
+ */ + if (con_id) + snprintf(propname, max_size, "%s-gpios", con_id); + else + strscpy(propname, "gpios", max_size); +} + +static int swnode_gpiochip_match_name(struct gpio_chip *chip, void *data) +{ + return !strcmp(chip->label, data); +} + +static struct gpio_chip *swnode_get_chip(struct fwnode_handle *fwnode) +{ + const struct software_node *chip_node; + struct gpio_chip *chip; + + chip_node = to_software_node(fwnode); + if (!chip_node || !chip_node->name) + return ERR_PTR(-EINVAL); + + chip = gpiochip_find((void *)chip_node->name, swnode_gpiochip_match_name); + return chip ?: ERR_PTR(-EPROBE_DEFER); +} + +struct gpio_desc *swnode_find_gpio(struct fwnode_handle *fwnode, + const char *con_id, unsigned int idx, + unsigned long *flags) +{ + const struct software_node *swnode; + struct fwnode_reference_args args; + struct gpio_chip *chip; + struct gpio_desc *desc; + char propname[32]; /* 32 is max size of property name */ + int error; + + swnode = to_software_node(fwnode); + if (!swnode) + return ERR_PTR(-EINVAL); + + swnode_format_propname(con_id, propname, sizeof(propname)); + + /* + * We expect all swnode-described GPIOs have GPIO number and + * polarity arguments, hence nargs is set to 2. + */ + error = fwnode_property_get_reference_args(fwnode, propname, NULL, 2, idx, &args); + if (error) { + pr_debug("%s: can't parse '%s' property of node '%pfwP[%d]'\n", + __func__, propname, fwnode, idx); + return ERR_PTR(error); + } + + chip = swnode_get_chip(args.fwnode); + fwnode_handle_put(args.fwnode); + if (IS_ERR(chip)) + return ERR_CAST(chip); + + desc = gpiochip_get_desc(chip, args.args[0]); + *flags = args.args[1]; /* We expect native GPIO flags */ + + pr_debug("%s: parsed '%s' property of node '%pfwP[%d]' - status (%d)\n", + __func__, propname, fwnode, idx, PTR_ERR_OR_ZERO(desc)); + + return desc; +} + +/** + * swnode_gpio_count - count the GPIOs associated with a device / function + * @fwnode: firmware node of the GPIO consumer, can be %NULL for + * system-global GPIOs + * @con_id: function within the GPIO consumer + * + * Return: + * The number of GPIOs associated with a device / function or %-ENOENT, + * if no GPIO has been assigned to the requested function. + */ +int swnode_gpio_count(const struct fwnode_handle *fwnode, const char *con_id) +{ + struct fwnode_reference_args args; + char propname[32]; + int count; + + swnode_format_propname(con_id, propname, sizeof(propname)); + + /* + * This is not very efficient, but GPIO lists usually have only + * 1 or 2 entries. 
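swnode_get_chip() above compresses two failure modes into one pointer: gpiochip_find() returning NULL becomes ERR_PTR(-EPROBE_DEFER), and callers test with IS_ERR() or re-brand the error with ERR_CAST(). The convention works because errno values -1..-4095 alias the last, never-mapped page of the address space. A minimal userspace rendition of the idea (the real helpers live in include/linux/err.h; EPROBE_DEFER is a kernel-internal errno, defined here by hand):

#include <stdio.h>

#define MAX_ERRNO	4095
#define EPROBE_DEFER	517	/* kernel-internal errno value */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* Errors occupy the top MAX_ERRNO addresses, which no valid
	 * allocation ever uses. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *chip = NULL;	/* pretend the chip is not registered yet */
	void *ret = chip ?: ERR_PTR(-EPROBE_DEFER);

	if (IS_ERR(ret))
		printf("defer probe, error %ld\n", PTR_ERR(ret));
	return 0;
}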
+ */ + count = 0; + while (fwnode_property_get_reference_args(fwnode, propname, NULL, 0, + count, &args) == 0) { + fwnode_handle_put(args.fwnode); + count++; + } + + return count ?: -ENOENT; +} diff --git a/drivers/gpio/gpiolib-swnode.h b/drivers/gpio/gpiolib-swnode.h new file mode 100644 index 0000000000000000000000000000000000000000..af849e56f6bce99454328117fa8cadd4397396f3 --- /dev/null +++ b/drivers/gpio/gpiolib-swnode.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef GPIOLIB_SWNODE_H +#define GPIOLIB_SWNODE_H + +struct fwnode_handle; +struct gpio_desc; + +struct gpio_desc *swnode_find_gpio(struct fwnode_handle *fwnode, + const char *con_id, unsigned int idx, + unsigned long *flags); +int swnode_gpio_count(const struct fwnode_handle *fwnode, const char *con_id); + +#endif /* GPIOLIB_SWNODE_H */ diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index a70522aef3557e3e1b63aa6f68bab187ea969b6f..5a66d9616d7ccedf5e8a43235203760f29f942ee 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -26,6 +26,7 @@ #include "gpiolib.h" #include "gpiolib-of.h" #include "gpiolib-acpi.h" +#include "gpiolib-swnode.h" #include "gpiolib-cdev.h" #include "gpiolib-sysfs.h" @@ -183,14 +184,14 @@ EXPORT_SYMBOL_GPL(gpiod_to_chip); static int gpiochip_find_base(int ngpio) { struct gpio_device *gdev; - int base = ARCH_NR_GPIOS - ngpio; + int base = GPIO_DYNAMIC_BASE; - list_for_each_entry_reverse(gdev, &gpio_devices, list) { + list_for_each_entry(gdev, &gpio_devices, list) { /* found a free space? */ - if (gdev->base + gdev->ngpio <= base) + if (gdev->base >= base + ngpio) break; - /* nope, check the space right before the chip */ - base = gdev->base - ngpio; + /* nope, check the space right after the chip */ + base = gdev->base + gdev->ngpio; } if (gpio_is_valid(base)) { @@ -366,12 +367,12 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc) static int devprop_gpiochip_set_names(struct gpio_chip *chip) { struct gpio_device *gdev = chip->gpiodev; - struct fwnode_handle *fwnode = dev_fwnode(&gdev->dev); + struct device *dev = &gdev->dev; const char **names; int ret, i; int count; - count = fwnode_property_string_array_count(fwnode, "gpio-line-names"); + count = device_property_string_array_count(dev, "gpio-line-names"); if (count < 0) return 0; @@ -384,7 +385,7 @@ static int devprop_gpiochip_set_names(struct gpio_chip *chip) * gpiochips. */ if (count <= chip->offset) { - dev_warn(&gdev->dev, "gpio-line-names too short (length %d), cannot map names for the gpiochip at offset %u\n", + dev_warn(dev, "gpio-line-names too short (length %d), cannot map names for the gpiochip at offset %u\n", count, chip->offset); return 0; } @@ -393,10 +394,10 @@ static int devprop_gpiochip_set_names(struct gpio_chip *chip) if (!names) return -ENOMEM; - ret = fwnode_property_read_string_array(fwnode, "gpio-line-names", + ret = device_property_read_string_array(dev, "gpio-line-names", names, count); if (ret < 0) { - dev_warn(&gdev->dev, "failed to read GPIO line names\n"); + dev_warn(dev, "failed to read GPIO line names\n"); kfree(names); return ret; } @@ -445,9 +446,22 @@ static unsigned long *gpiochip_allocate_mask(struct gpio_chip *gc) return p; } +static unsigned int gpiochip_count_reserved_ranges(struct gpio_chip *gc) +{ + struct device *dev = &gc->gpiodev->dev; + int size; + + /* Format is "start, count, ..." 
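gpiochip_find_base() is inverted by this series: instead of handing out static bases downward from ARCH_NR_GPIOS, it walks the sorted device list upward from GPIO_DYNAMIC_BASE and takes the first hole wide enough for the new chip. Below is a runnable model of that first-fit scan, with a sorted array standing in for the kernel's gpio_devices list; the base constant is a stand-in value, not the kernel's.

#include <stdio.h>

#define DYNAMIC_BASE 512	/* stand-in for GPIO_DYNAMIC_BASE */

struct range { int base, ngpio; };

/* Ranges must be sorted by base, mirroring gpio_devices list order. */
static int find_base(const struct range *r, int n, int ngpio)
{
	int base = DYNAMIC_BASE;
	int i;

	for (i = 0; i < n; i++) {
		if (r[i].base >= base + ngpio)
			break;			/* found a wide-enough hole */
		base = r[i].base + r[i].ngpio;	/* skip past this chip */
	}
	return base;
}

int main(void)
{
	const struct range taken[] = { { 512, 32 }, { 544, 8 }, { 600, 16 } };

	/* 552..599 is free, so a 16-line chip lands at 552. */
	printf("new base: %d\n", find_base(taken, 3, 16));
	return 0;
}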
*/ + size = device_property_count_u32(dev, "gpio-reserved-ranges"); + if (size > 0 && size % 2 == 0) + return size; + + return 0; +} + static int gpiochip_alloc_valid_mask(struct gpio_chip *gc) { - if (!(of_gpio_need_valid_mask(gc) || gc->init_valid_mask)) + if (!(gpiochip_count_reserved_ranges(gc) || gc->init_valid_mask)) return 0; gc->valid_mask = gpiochip_allocate_mask(gc); @@ -457,8 +471,50 @@ static int gpiochip_alloc_valid_mask(struct gpio_chip *gc) return 0; } +static int gpiochip_apply_reserved_ranges(struct gpio_chip *gc) +{ + struct device *dev = &gc->gpiodev->dev; + unsigned int size; + u32 *ranges; + int ret; + + size = gpiochip_count_reserved_ranges(gc); + if (size == 0) + return 0; + + ranges = kmalloc_array(size, sizeof(*ranges), GFP_KERNEL); + if (!ranges) + return -ENOMEM; + + ret = device_property_read_u32_array(dev, "gpio-reserved-ranges", + ranges, size); + if (ret) { + kfree(ranges); + return ret; + } + + while (size) { + u32 count = ranges[--size]; + u32 start = ranges[--size]; + + if (start >= gc->ngpio || start + count > gc->ngpio) + continue; + + bitmap_clear(gc->valid_mask, start, count); + } + + kfree(ranges); + return 0; +} + static int gpiochip_init_valid_mask(struct gpio_chip *gc) { + int ret; + + ret = gpiochip_apply_reserved_ranges(gc); + if (ret) + return ret; + if (gc->init_valid_mask) return gc->init_valid_mask(gc, gc->valid_mask, @@ -493,7 +549,7 @@ EXPORT_SYMBOL_GPL(gpiochip_line_is_valid); static void gpiodevice_release(struct device *dev) { - struct gpio_device *gdev = container_of(dev, struct gpio_device, dev); + struct gpio_device *gdev = to_gpio_device(dev); unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); @@ -627,7 +683,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, * Assign fwnode depending on the result of the previous calls, * if none of them succeed, assign it to the parent's one. */ - gdev->dev.fwnode = dev_fwnode(&gdev->dev) ?: fwnode; + gc->fwnode = gdev->dev.fwnode = dev_fwnode(&gdev->dev) ?: fwnode; gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL); if (gdev->id < 0) { @@ -719,6 +775,9 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, * a poison instead. */ gc->base = base; + } else { + dev_warn(&gdev->dev, + "Static allocation of GPIO base is deprecated, use dynamic allocation.\n"); } gdev->base = base; @@ -735,6 +794,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, spin_unlock_irqrestore(&gpio_lock, flags); BLOCKING_INIT_NOTIFIER_HEAD(&gdev->notifier); + init_rwsem(&gdev->sem); #ifdef CONFIG_PINCTRL INIT_LIST_HEAD(&gdev->pin_ranges); @@ -875,6 +935,8 @@ void gpiochip_remove(struct gpio_chip *gc) unsigned long flags; unsigned int i; + down_write(&gdev->sem); + /* FIXME: should the legacy sysfs handling be moved to gpio_device? */ gpiochip_sysfs_unregister(gdev); gpiochip_free_hogs(gc); @@ -909,6 +971,7 @@ void gpiochip_remove(struct gpio_chip *gc) * gone. 
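gpiochip_apply_reserved_ranges() replaces the OF-only valid-mask setup with a firmware-agnostic one: the "gpio-reserved-ranges" property is a flat array of (start, count) pairs, read via device_property_read_u32_array() and punched out of valid_mask with bitmap_clear(). The standalone sketch below reproduces the pair-consuming loop, including its walking of the array from the tail, against a 32-bit mask instead of a kernel bitmap:

#include <stdio.h>
#include <stdint.h>

#define NGPIO 32

static void apply_reserved(uint32_t *valid, const uint32_t *ranges,
			   unsigned int size)
{
	/* Consume the flat (start, count) pairs back to front, as the
	 * kernel loop does with "count = ranges[--size]; start = ...". */
	while (size) {
		uint32_t count = ranges[--size];
		uint32_t start = ranges[--size];
		uint32_t mask;

		if (start >= NGPIO || start + count > NGPIO)
			continue;	/* silently skip bogus pairs */

		mask = (count >= 32) ? ~0u : ((1u << count) - 1);
		*valid &= ~(mask << start);
	}
}

int main(void)
{
	uint32_t valid = ~0u;			/* all 32 lines usable */
	uint32_t ranges[] = { 4, 2, 10, 1 };	/* reserve lines 4-5 and 10 */

	apply_reserved(&valid, ranges, 4);
	printf("valid mask: %#010x\n", valid);	/* prints 0xfffffbcf */
	return 0;
}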
*/ gcdev_unregister(gdev); + up_write(&gdev->sem); put_device(&gdev->dev); } EXPORT_SYMBOL_GPL(gpiochip_remove); @@ -3808,62 +3871,88 @@ static int platform_gpio_count(struct device *dev, const char *con_id) return count; } -/** - * fwnode_get_named_gpiod - obtain a GPIO from firmware node - * @fwnode: handle of the firmware node - * @propname: name of the firmware property representing the GPIO - * @index: index of the GPIO to obtain for the consumer - * @dflags: GPIO initialization flags - * @label: label to attach to the requested GPIO - * - * This function can be used for drivers that get their configuration - * from opaque firmware. - * - * The function properly finds the corresponding GPIO using whatever is the - * underlying firmware interface and then makes sure that the GPIO - * descriptor is requested before it is returned to the caller. - * - * Returns: - * On successful request the GPIO pin is configured in accordance with - * provided @dflags. - * - * In case of error an ERR_PTR() is returned. - */ -static struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, - const char *propname, int index, - enum gpiod_flags dflags, - const char *label) +static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode, + struct device *consumer, + const char *con_id, + unsigned int idx, + enum gpiod_flags *flags, + unsigned long *lookupflags) { - unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT; - struct gpio_desc *desc = ERR_PTR(-ENODEV); - int ret; + struct gpio_desc *desc = ERR_PTR(-ENOENT); if (is_of_node(fwnode)) { - desc = gpiod_get_from_of_node(to_of_node(fwnode), - propname, index, - dflags, - label); - return desc; + dev_dbg(consumer, "using DT '%pfw' for '%s' GPIO lookup\n", + fwnode, con_id); + desc = of_find_gpio(to_of_node(fwnode), con_id, idx, lookupflags); } else if (is_acpi_node(fwnode)) { - struct acpi_gpio_info info; + dev_dbg(consumer, "using ACPI '%pfw' for '%s' GPIO lookup\n", + fwnode, con_id); + desc = acpi_find_gpio(fwnode, con_id, idx, flags, lookupflags); + } else if (is_software_node(fwnode)) { + dev_dbg(consumer, "using swnode '%pfw' for '%s' GPIO lookup\n", + fwnode, con_id); + desc = swnode_find_gpio(fwnode, con_id, idx, lookupflags); + } - desc = acpi_node_get_gpiod(fwnode, propname, index, &info); - if (IS_ERR(desc)) - return desc; + return desc; +} - acpi_gpio_update_gpiod_flags(&dflags, &info); - acpi_gpio_update_gpiod_lookup_flags(&lflags, &info); - } else { - return ERR_PTR(-EINVAL); +static struct gpio_desc *gpiod_find_and_request(struct device *consumer, + struct fwnode_handle *fwnode, + const char *con_id, + unsigned int idx, + enum gpiod_flags flags, + const char *label, + bool platform_lookup_allowed) +{ + struct gpio_desc *desc = ERR_PTR(-ENOENT); + unsigned long lookupflags; + int ret; + + if (!IS_ERR_OR_NULL(fwnode)) + desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, + &flags, &lookupflags); + + if (gpiod_not_found(desc) && platform_lookup_allowed) { + /* + * Either we are not using DT or ACPI, or their lookup did not + * return a result. In that case, use platform lookup as a + * fallback. 
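gpiochip_remove() now brackets teardown with down_write(&gdev->sem). The semaphore earns its keep on the reader side: request paths driven from user space can take it shared and re-test gdev->chip before dereferencing, so a hot-unplug that clears the chip pointer can no longer race them. A hedged sketch of that reader-side pattern follows; it is illustrative of what the new field enables, not the literal character-device code.

/* Illustrative only: how a user-space-triggered operation can use
 * gdev->sem to survive a concurrent gpiochip_remove(). */
static int gpio_device_op(struct gpio_device *gdev)
{
	int ret = 0;

	down_read(&gdev->sem);
	if (!gdev->chip) {
		/* Device was unregistered while we raced with it. */
		ret = -ENODEV;
		goto out;
	}
	/* ... safe to dereference gdev->chip here ... */
out:
	up_read(&gdev->sem);
	return ret;
}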
+ */ + dev_dbg(consumer, "using lookup tables for GPIO lookup\n"); + desc = gpiod_find(consumer, con_id, idx, &lookupflags); } - /* Currently only ACPI takes this path */ + if (IS_ERR(desc)) { + dev_dbg(consumer, "No GPIO consumer %s found\n", con_id); + return desc; + } + + /* + * If a connection label was passed use that, else attempt to use + * the device name as label + */ ret = gpiod_request(desc, label); - if (ret) - return ERR_PTR(ret); + if (ret) { + if (!(ret == -EBUSY && flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE)) + return ERR_PTR(ret); + + /* + * This happens when there are several consumers for + * the same GPIO line: we just return here without + * further initialization. It is a bit of a hack. + * This is necessary to support fixed regulators. + * + * FIXME: Make this more sane and safe. + */ + dev_info(consumer, + "nonexclusive access to GPIO for %s\n", con_id); + return desc; + } - ret = gpiod_configure_flags(desc, propname, lflags, dflags); + ret = gpiod_configure_flags(desc, con_id, lookupflags, flags); if (ret < 0) { + dev_dbg(consumer, "setup of GPIO %s failed\n", con_id); gpiod_put(desc); return ERR_PTR(ret); } @@ -3896,29 +3985,12 @@ static struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, * In case of error an ERR_PTR() is returned. */ struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode, - const char *con_id, int index, + const char *con_id, + int index, enum gpiod_flags flags, const char *label) { - struct gpio_desc *desc; - char prop_name[32]; /* 32 is max size of property name */ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) { - if (con_id) - snprintf(prop_name, sizeof(prop_name), "%s-%s", - con_id, gpio_suffixes[i]); - else - snprintf(prop_name, sizeof(prop_name), "%s", - gpio_suffixes[i]); - - desc = fwnode_get_named_gpiod(fwnode, prop_name, index, flags, - label); - if (!gpiod_not_found(desc)) - break; - } - - return desc; + return gpiod_find_and_request(NULL, fwnode, con_id, index, flags, label, false); } EXPORT_SYMBOL_GPL(fwnode_gpiod_get_index); @@ -3937,6 +4009,8 @@ int gpiod_count(struct device *dev, const char *con_id) count = of_gpio_get_count(dev, con_id); else if (is_acpi_node(fwnode)) count = acpi_gpio_count(dev, con_id); + else if (is_software_node(fwnode)) + count = swnode_gpio_count(fwnode, con_id); if (count < 0) count = platform_gpio_count(dev, con_id); @@ -4072,70 +4146,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, unsigned int idx, enum gpiod_flags flags) { - unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT; - struct gpio_desc *desc = NULL; - int ret; - /* Maybe we have a device name, maybe not */ + struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL; const char *devname = dev ? dev_name(dev) : "?"; - const struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL; - - dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id); - - /* Using device tree? */ - if (is_of_node(fwnode)) { - dev_dbg(dev, "using device tree for GPIO lookup\n"); - desc = of_find_gpio(dev, con_id, idx, &lookupflags); - } else if (is_acpi_node(fwnode)) { - dev_dbg(dev, "using ACPI for GPIO lookup\n"); - desc = acpi_find_gpio(dev, con_id, idx, &flags, &lookupflags); - } - - /* - * Either we are not using DT or ACPI, or their lookup did not return - * a result. In that case, use platform lookup as a fallback. 
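gpiod_find_and_request() is the new common tail for both gpiod_get_index() and fwnode_gpiod_get_index(), and it preserves the long-standing -EBUSY carve-out: when the requester passed GPIOD_FLAGS_BIT_NONEXCLUSIVE, an already-claimed descriptor is handed back as-is instead of failing, which is what lets several fixed regulators share one enable line. A hedged consumer-side sketch of such a nonexclusive request (the function name and con_id are illustrative):

/* Illustrative consumer: request an enable GPIO that other devices may
 * legitimately hold as well.  For the second caller, gpiod_request()
 * fails with -EBUSY internally and the same descriptor is reused. */
static int shared_enable_get(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get(dev, "enable",
			       GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	*out = gpiod;
	return 0;
}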
- */ - if (!desc || gpiod_not_found(desc)) { - dev_dbg(dev, "using lookup tables for GPIO lookup\n"); - desc = gpiod_find(dev, con_id, idx, &lookupflags); - } - - if (IS_ERR(desc)) { - dev_dbg(dev, "No GPIO consumer %s found\n", con_id); - return desc; - } - - /* - * If a connection label was passed use that, else attempt to use - * the device name as label - */ - ret = gpiod_request(desc, con_id ?: devname); - if (ret) { - if (!(ret == -EBUSY && flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE)) - return ERR_PTR(ret); - - /* - * This happens when there are several consumers for - * the same GPIO line: we just return here without - * further initialization. It is a bit of a hack. - * This is necessary to support fixed regulators. - * - * FIXME: Make this more sane and safe. - */ - dev_info(dev, "nonexclusive access to GPIO for %s\n", con_id ?: devname); - return desc; - } - - ret = gpiod_configure_flags(desc, con_id, lookupflags, flags); - if (ret < 0) { - dev_dbg(dev, "setup of GPIO %s failed\n", con_id); - gpiod_put(desc); - return ERR_PTR(ret); - } + const char *label = con_id ?: devname; - blocking_notifier_call_chain(&desc->gdev->notifier, - GPIOLINE_CHANGED_REQUESTED, desc); - - return desc; + return gpiod_find_and_request(dev, fwnode, con_id, idx, flags, label, true); } EXPORT_SYMBOL_GPL(gpiod_get_index); diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index d900ecdbac46dfd4258b1e536c1f0d2fe57a874e..b3c2db6eba80cbb05f1490b476b0e7f761fa070a 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -15,14 +15,15 @@ #include #include #include +#include #define GPIOCHIP_NAME "gpiochip" /** * struct gpio_device - internal state container for GPIO devices - * @id: numerical ID number for the GPIO chip * @dev: the GPIO device struct * @chrdev: character device for the GPIO device + * @id: numerical ID number for the GPIO chip * @mockdev: class device used by the deprecated sysfs interface (may be * NULL) * @owner: helps prevent removal of modules exporting active GPIOs @@ -39,6 +40,9 @@ * @list: links gpio_device:s together for traversal * @notifier: used to notify subscribers about lines being requested, released * or reconfigured + * @sem: protects the structure from a NULL-pointer dereference of @chip by + * user-space operations when the device gets unregistered during + * a hot-unplug event * @pin_ranges: range of pins served by the GPIO driver * * This state container holds most of the runtime variable data @@ -47,9 +51,9 @@ * userspace. 
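gpiolib.h (just below) also gains a to_gpio_device() helper so call sites such as gpiodevice_release() stop open-coding container_of(). The pattern recovers the enclosing structure from a pointer to an embedded member purely with offset arithmetic; here it is in standalone, runnable form with simplified stand-in types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { const char *name; };

struct gpio_device_model {
	int id;
	struct device dev;	/* embedded, not a pointer */
};

static struct gpio_device_model *to_gpio_device_model(struct device *dev)
{
	return container_of(dev, struct gpio_device_model, dev);
}

int main(void)
{
	struct gpio_device_model gdev = { .id = 7, .dev = { "gpiochip7" } };
	struct device *dev = &gdev.dev;	/* what driver-core callbacks receive */

	printf("recovered id: %d\n", to_gpio_device_model(dev)->id);
	return 0;
}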
*/ struct gpio_device { - int id; struct device dev; struct cdev chrdev; + int id; struct device *mockdev; struct module *owner; struct gpio_chip *chip; @@ -60,6 +64,7 @@ struct gpio_device { void *data; struct list_head list; struct blocking_notifier_head notifier; + struct rw_semaphore sem; #ifdef CONFIG_PINCTRL /* @@ -72,6 +77,11 @@ struct gpio_device { #endif }; +static inline struct gpio_device *to_gpio_device(struct device *dev) +{ + return container_of(dev, struct gpio_device, dev); +} + /* gpio suffixes used for ACPI and device tree lookup */ static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 62e98f1ad770b1ec0621ef46d8739b4b7188ea35..bee38c326537de308e0b3bdf47bcbb1884add21b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -256,7 +256,7 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str * becoming writable and makes is_cow_mapping(vm_flags) false. */ if (is_cow_mapping(vma->vm_flags) && - !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) + !(vma->vm_flags & VM_ACCESS_FLAGS)) vma->vm_flags &= ~VM_MAYWRITE; return drm_gem_ttm_mmap(obj, vma); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index b8da978d85bb72db7cfb9e28d8e7123e650449a8..1831303559972526b87234af34c364def21b74bf 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -91,7 +91,7 @@ static void drm_sysfs_acpi_register(void) { } static void drm_sysfs_acpi_unregister(void) { } #endif -static char *drm_devnode(struct device *dev, umode_t *mode) +static char *drm_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev)); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 68e4446a94ad7c955d8d5ca5c05871d0952c8cc2..c5ae5492e1af066e5ddcdc760fc8b13647c0efa7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -643,6 +643,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj) struct page **pvec = NULL; struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr; int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT; + unsigned int gup_flags = FOLL_LONGTERM; might_lock_read(¤t->mm->mmap_lock); @@ -653,14 +654,15 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj) if (!pvec) return -ENOMEM; + if (!userptr->ro) + gup_flags |= FOLL_WRITE; + do { unsigned num_pages = npages - pinned; uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE; struct page **pages = pvec + pinned; - ret = pin_user_pages_fast(ptr, num_pages, - FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM, - pages); + ret = pin_user_pages_fast(ptr, num_pages, gup_flags, pages); if (ret < 0) { unpin_user_pages(pvec, pinned); kvfree(pvec); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 471fd6c8135f2c996acb77c7b43d1fbdba0b6570..e19c2ceb3759e2f8989e4b81feca2d287d15bb96 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -477,7 +477,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d, } ret = pin_user_pages_fast(start, npages, - FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM, + FOLL_WRITE | FOLL_LONGTERM, g2d_userptr->pages); if (ret != npages) { DRM_DEV_ERROR(g2d->dev, diff --git 
a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index 4d4a715b429d1d4461b7a79debe829c940385655..2c2b92324a2e90fe13dcfa889986f00419c4be60 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -60,8 +60,9 @@ static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector) return drm_panel_get_modes(fsl_connector->panel, connector); } -static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { if (mode->hdisplay & 0xf) return MODE_ERROR; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 077892a9aa8fdcb1588a81ebf4f47f89ef627df2..f5451adcd4890c08cb121f1e1d00d58ce64d010f 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -669,9 +669,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev) vgpu->attached = true; - kvmgt_protect_table_init(vgpu); - gvt_cache_init(vgpu); - vgpu->track_node.track_write = kvmgt_page_track_write; vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot; kvm_get_kvm(vgpu->vfio_device.kvm); @@ -715,6 +712,11 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev) kvmgt_protect_table_destroy(vgpu); gvt_cache_destroy(vgpu); + WARN_ON(vgpu->nr_cache_entries); + + vgpu->gfn_cache = RB_ROOT; + vgpu->dma_addr_cache = RB_ROOT; + intel_vgpu_release_msi_eventfd_ctx(vgpu); vgpu->attached = false; @@ -1445,9 +1447,17 @@ static int intel_vgpu_init_dev(struct vfio_device *vfio_dev) struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); struct intel_vgpu_type *type = container_of(mdev->type, struct intel_vgpu_type, type); + int ret; vgpu->gvt = kdev_to_i915(mdev->type->parent->dev)->gvt; - return intel_gvt_create_vgpu(vgpu, type->conf); + ret = intel_gvt_create_vgpu(vgpu, type->conf); + if (ret) + return ret; + + kvmgt_protect_table_init(vgpu); + gvt_cache_init(vgpu); + + return 0; } static void intel_vgpu_release_dev(struct vfio_device *vfio_dev) @@ -1455,7 +1465,6 @@ static void intel_vgpu_release_dev(struct vfio_device *vfio_dev) struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); intel_gvt_destroy_vgpu(vgpu); - vfio_free_device(vfio_dev); } static const struct vfio_device_ops intel_vgpu_dev_ops = { @@ -1468,6 +1477,9 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = { .mmap = intel_vgpu_mmap, .ioctl = intel_vgpu_ioctl, .dma_unmap = intel_vgpu_dma_unmap, + .bind_iommufd = vfio_iommufd_emulated_bind, + .unbind_iommufd = vfio_iommufd_emulated_unbind, + .attach_ioas = vfio_iommufd_emulated_attach_ioas, }; static int intel_vgpu_probe(struct mdev_device *mdev) diff --git a/drivers/gpu/drm/i915/i915_user_extensions.c b/drivers/gpu/drm/i915/i915_user_extensions.c index c822d0aafd2dc7d9d480e208a386c41458b4d5c9..e3f808372c47b87bf9a91eb21e3003b9d595b2d3 100644 --- a/drivers/gpu/drm/i915/i915_user_extensions.c +++ b/drivers/gpu/drm/i915/i915_user_extensions.c @@ -51,7 +51,7 @@ int i915_user_extensions(struct i915_user_extension __user *ext, return err; if (get_user(next, &ext->next_extension) || - overflows_type(next, ext)) + overflows_type(next, uintptr_t)) return -EFAULT; ext = u64_to_user_ptr(next); diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 6c14d13364bf789a30edcf2b162e4557aaf32a2f..67a66d4d5c70cd8652bf762eba8ec4dcd785a9d4 100644 --- 
a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -111,10 +111,6 @@ bool i915_error_injected(void); #define range_overflows_end_t(type, start, size, max) \ range_overflows_end((type)(start), (type)(size), (type)(max)) -/* Note we don't consider signbits :| */ -#define overflows_type(x, T) \ - (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T)) - #define ptr_mask_bits(ptr, n) ({ \ unsigned long __v = (unsigned long)(ptr); \ (typeof(ptr))(__v & -BIT(n)); \ diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index f3a5616b7daf9310b73758f99863479679b7f76b..577c477b5f467c77e139f43c5d82e3d386a1832f 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -346,8 +346,9 @@ static int sti_dvo_connector_get_modes(struct drm_connector *connector) #define CLK_TOLERANCE_HZ 50 -static int sti_dvo_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +sti_dvo_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { int target = mode->clock * 1000; int target_min = target - CLK_TOLERANCE_HZ; diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index ec6656b9ee7cc82957a080b45f87344e03af04b0..15097ac6793146bf4be75dc21da4d8303ada30b9 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -601,8 +601,9 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector) #define CLK_TOLERANCE_HZ 50 -static int sti_hda_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +sti_hda_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { int target = mode->clock * 1000; int target_min = target - CLK_TOLERANCE_HZ; diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index fcc2194869d68cd6978e74f7e09b01a152b9bc5c..8539fe1fedc4c6f91a8cd1196ec3a39af34fbd6e 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -1004,8 +1004,9 @@ fail: #define CLK_TOLERANCE_HZ 50 -static int sti_hdmi_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +sti_hdmi_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { int target = mode->clock * 1000; int target_min = target - CLK_TOLERANCE_HZ; diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c index 56d2b44d6fefc3ee104266fe4d699d1c43ae9a50..16cced80867a6ad7d0bba699c985d4cc6af7082d 100644 --- a/drivers/greybus/svc.c +++ b/drivers/greybus/svc.c @@ -7,6 +7,7 @@ */ #include +#include #include #include @@ -83,7 +84,7 @@ static ssize_t watchdog_store(struct device *dev, int retval; bool user_request; - retval = strtobool(buf, &user_request); + retval = kstrtobool(buf, &user_request); if (retval) return retval; diff --git a/drivers/hid/hid-u2fzero.c b/drivers/hid/hid-u2fzero.c index ad489caf53ad80e60d2a9f2f64d47143816d139c..744a91e6e78c5d9d0a41a17b1687101463edaa62 100644 --- a/drivers/hid/hid-u2fzero.c +++ b/drivers/hid/hid-u2fzero.c @@ -261,7 +261,6 @@ static int u2fzero_init_hwrng(struct u2fzero_device *dev, dev->hwrng.name = dev->rng_name; dev->hwrng.read = u2fzero_rng_read; - dev->hwrng.quality = 1; return devm_hwrng_register(&dev->hdev->dev, &dev->hwrng); } diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 2fb2991dbe4c36f8f451456c6bcb845b24caa261..59cf3ddfdf78c15be74142d81cc2677c16d41b98 
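The i915 hunks above retire the driver-local overflows_type(), whose own comment conceded that it ignores sign bits, in favor of a generic, signedness-aware helper from the common headers (part of this cycle's overflow-checking additions, as I read the series), and the user-extension walker now validates the u64 against uintptr_t before u64_to_user_ptr(). The compilable fragment below reproduces the old unsigned-only check to show what it did and where it fell short:

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_TYPE(T) (sizeof(T) * 8)

/* The removed i915 macro: nonzero iff x has set bits above the width
 * of T.  The size test guards the shift, and signedness is ignored,
 * so a negative value that "fits" bit-wise would pass undetected. */
#define overflows_type_unsigned(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))

int main(void)
{
	uint64_t next = 0x100000001ULL;

	printf("fits in u32? %s\n",
	       overflows_type_unsigned(next, uint32_t) ? "no" : "yes");
	return 0;
}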
100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -857,7 +857,7 @@ static const struct file_operations hiddev_fops = { .llseek = noop_llseek, }; -static char *hiddev_devnode(struct device *dev, umode_t *mode) +static char *hiddev_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); } diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index 274ad8443f8c282dea6af460cfa71af763dea766..38e572faff4357808d80df9cf25571651337deb6 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ b/drivers/hsi/clients/ssi_protocol.c @@ -968,7 +968,7 @@ static void ssip_xmit_work(struct work_struct *work) ssip_xmit(cl); } -static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) { struct hsi_client *cl = to_hsi_client(dev->dev.parent); struct ssi_protocol *ssi = hsi_client_drvdata(cl); @@ -1027,7 +1027,7 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; - return 0; + return NETDEV_TX_OK; drop2: hsi_free_msg(msg); drop: @@ -1035,7 +1035,7 @@ drop: inc_dropped: dev->stats.tx_dropped++; - return 0; + return NETDEV_TX_OK; } /* CMT reset event handler */ diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c index eb982015831858b7725f85c26bce24f9c2f9c98f..26f2c3c0129783e045e7d26243a9a1493e81d976 100644 --- a/drivers/hsi/controllers/omap_ssi_core.c +++ b/drivers/hsi/controllers/omap_ssi_core.c @@ -502,8 +502,10 @@ static int ssi_probe(struct platform_device *pd) platform_set_drvdata(pd, ssi); err = ssi_add_controller(ssi, pd); - if (err < 0) + if (err < 0) { + hsi_put_controller(ssi); goto out1; + } pm_runtime_enable(&pd->dev); @@ -536,9 +538,9 @@ out3: device_for_each_child(&pd->dev, NULL, ssi_remove_ports); out2: ssi_remove_controller(ssi); + pm_runtime_disable(&pd->dev); out1: platform_set_drvdata(pd, NULL); - pm_runtime_disable(&pd->dev); return err; } @@ -629,7 +631,13 @@ static int __init ssi_init(void) { if (ret) return ret; - return platform_driver_register(&ssi_port_pdriver); + ret = platform_driver_register(&ssi_port_pdriver); + if (ret) { + platform_driver_unregister(&ssi_pdriver); + return ret; + } + + return 0; } module_init(ssi_init); diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c index c6e8c6542f24bfa6741f4b4ef41c550d82d20be4..d2cf4f4848e1bfbdc4881389d7adf14e1d36b094 100644 --- a/drivers/hwtracing/coresight/coresight-cti-core.c +++ b/drivers/hwtracing/coresight/coresight-cti-core.c @@ -564,7 +564,7 @@ static void cti_add_assoc_to_csdev(struct coresight_device *csdev) * if we found a matching csdev then update the ECT * association pointer for the device with this CTI. 
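The omap_ssi_core changes above are a reminder of how fragile goto-ladder unwinding is: pm_runtime_disable() had to move under out2 so it only runs once enabling has actually happened, and the probe failure path gained the missing hsi_put_controller(). The standalone skeleton below shows the canonical shape, where each label releases exactly what was acquired before the jump, in strict reverse order of acquisition:

#include <stdio.h>

static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -1; /* force the error path */ }
static void release_a(void) { puts("release a"); }
static void release_b(void) { puts("release b"); }

static int probe(void)
{
	int err;

	err = acquire_a();
	if (err)
		goto out;
	err = acquire_b();
	if (err)
		goto err_a;
	err = acquire_c();
	if (err)
		goto err_b;
	return 0;

err_b:
	release_b();	/* undo b, then fall through to undo a */
err_a:
	release_a();
out:
	return err;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}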
*/ - coresight_set_assoc_ectdev_mutex(csdev->ect_dev, + coresight_set_assoc_ectdev_mutex(csdev, ect_item->csdev); break; } diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 80fefaba58eebea9f8252d1884ecee5a84e25798..1cc052979e0163e9adc442013c64c3eedb30d466 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -66,10 +66,13 @@ static enum cpuhp_state hp_online; struct etm4_init_arg { unsigned int pid; - struct etmv4_drvdata *drvdata; + struct device *dev; struct csdev_access *csa; }; +static DEFINE_PER_CPU(struct etm4_init_arg *, delayed_probe); +static int etm4_probe_cpu(unsigned int cpu); + /* * Check if TRCSSPCICRn(i) is implemented for a given instance. * @@ -1085,7 +1088,7 @@ static void etm4_init_arch_data(void *info) struct csdev_access *csa; int i; - drvdata = init_arg->drvdata; + drvdata = dev_get_drvdata(init_arg->dev); csa = init_arg->csa; /* @@ -1478,7 +1481,7 @@ static int etm4_set_event_filters(struct etmv4_drvdata *drvdata, /* * If filters::ssstatus == 1, trace acquisition was * started but the process was yanked away before the - * the stop address was hit. As such the start/stop + * stop address was hit. As such the start/stop * logic needs to be re-started so that tracing can * resume where it left. * @@ -1528,7 +1531,7 @@ void etm4_config_trace_mode(struct etmv4_config *config) static int etm4_online_cpu(unsigned int cpu) { if (!etmdrvdata[cpu]) - return 0; + return etm4_probe_cpu(cpu); if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable) coresight_enable(etmdrvdata[cpu]->csdev); @@ -1904,48 +1907,20 @@ static void etm4_pm_clear(void) } } -static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid) +static int etm4_add_coresight_dev(struct etm4_init_arg *init_arg) { int ret; struct coresight_platform_data *pdata = NULL; - struct etmv4_drvdata *drvdata; + struct device *dev = init_arg->dev; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev); struct coresight_desc desc = { 0 }; - struct etm4_init_arg init_arg = { 0 }; u8 major, minor; char *type_name; - drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) - return -ENOMEM; - - dev_set_drvdata(dev, drvdata); - - if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE) - pm_save_enable = coresight_loses_context_with_cpu(dev) ? 
- PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER; - - if (pm_save_enable != PARAM_PM_SAVE_NEVER) { - drvdata->save_state = devm_kmalloc(dev, - sizeof(struct etmv4_save_state), GFP_KERNEL); - if (!drvdata->save_state) - return -ENOMEM; - } - - drvdata->base = base; - - spin_lock_init(&drvdata->spinlock); - - drvdata->cpu = coresight_get_cpu(dev); - if (drvdata->cpu < 0) - return drvdata->cpu; - - init_arg.drvdata = drvdata; - init_arg.csa = &desc.access; - init_arg.pid = etm_pid; + return -EINVAL; - if (smp_call_function_single(drvdata->cpu, - etm4_init_arch_data, &init_arg, 1)) - dev_err(dev, "ETM arch init failed\n"); + desc.access = *init_arg->csa; if (!drvdata->arch) return -EINVAL; @@ -2016,6 +1991,68 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid) return 0; } +static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid) +{ + struct etmv4_drvdata *drvdata; + struct csdev_access access = { 0 }; + struct etm4_init_arg init_arg = { 0 }; + struct etm4_init_arg *delayed; + + drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + dev_set_drvdata(dev, drvdata); + + if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE) + pm_save_enable = coresight_loses_context_with_cpu(dev) ? + PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER; + + if (pm_save_enable != PARAM_PM_SAVE_NEVER) { + drvdata->save_state = devm_kmalloc(dev, + sizeof(struct etmv4_save_state), GFP_KERNEL); + if (!drvdata->save_state) + return -ENOMEM; + } + + drvdata->base = base; + + spin_lock_init(&drvdata->spinlock); + + drvdata->cpu = coresight_get_cpu(dev); + if (drvdata->cpu < 0) + return drvdata->cpu; + + init_arg.dev = dev; + init_arg.csa = &access; + init_arg.pid = etm_pid; + + /* + * Serialize against CPUHP callbacks to avoid race condition + * between the smp call and saving the delayed probe. + */ + cpus_read_lock(); + if (smp_call_function_single(drvdata->cpu, + etm4_init_arch_data, &init_arg, 1)) { + /* The CPU was offline, try again once it comes online. 
*/ + delayed = devm_kmalloc(dev, sizeof(*delayed), GFP_KERNEL); + if (!delayed) { + cpus_read_unlock(); + return -ENOMEM; + } + + *delayed = init_arg; + + per_cpu(delayed_probe, drvdata->cpu) = delayed; + + cpus_read_unlock(); + return 0; + } + cpus_read_unlock(); + + return etm4_add_coresight_dev(&init_arg); +} + static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id) { void __iomem *base; @@ -2054,6 +2091,35 @@ static int etm4_probe_platform_dev(struct platform_device *pdev) return ret; } +static int etm4_probe_cpu(unsigned int cpu) +{ + int ret; + struct etm4_init_arg init_arg; + struct csdev_access access = { 0 }; + struct etm4_init_arg *iap = *this_cpu_ptr(&delayed_probe); + + if (!iap) + return 0; + + init_arg = *iap; + devm_kfree(init_arg.dev, iap); + *this_cpu_ptr(&delayed_probe) = NULL; + + ret = pm_runtime_resume_and_get(init_arg.dev); + if (ret < 0) { + dev_err(init_arg.dev, "Failed to get PM runtime!\n"); + return 0; + } + + init_arg.csa = &access; + etm4_init_arch_data(&init_arg); + + etm4_add_coresight_dev(&init_arg); + + pm_runtime_put(init_arg.dev); + return 0; +} + static struct amba_cs_uci_id uci_id_etm4[] = { { /* ETMv4 UCI data */ @@ -2068,16 +2134,20 @@ static void clear_etmdrvdata(void *info) int cpu = *(int *)info; etmdrvdata[cpu] = NULL; + per_cpu(delayed_probe, cpu) = NULL; } static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata) { - etm_perf_symlink(drvdata->csdev, false); + bool had_delayed_probe; /* * Taking hotplug lock here to avoid racing between etm4_remove_dev() * and CPU hotplug call backs. */ cpus_read_lock(); + + had_delayed_probe = per_cpu(delayed_probe, drvdata->cpu); + /* * The readers for etmdrvdata[] are CPU hotplug call backs * and PM notification call backs. Change etmdrvdata[i] on @@ -2085,12 +2155,15 @@ static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata) * inside one call back function. */ if (smp_call_function_single(drvdata->cpu, clear_etmdrvdata, &drvdata->cpu, 1)) - etmdrvdata[drvdata->cpu] = NULL; + clear_etmdrvdata(&drvdata->cpu); cpus_read_unlock(); - cscfg_unregister_csdev(drvdata->csdev); - coresight_unregister(drvdata->csdev); + if (!had_delayed_probe) { + etm_perf_symlink(drvdata->csdev, false); + cscfg_unregister_csdev(drvdata->csdev); + coresight_unregister(drvdata->csdev); + } return 0; } diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c index 2b386bb848f8d7cf3b1cf32c07977a440c1b814c..1fc4fd79a1c69c7a094f47acebb8f91ba9adffab 100644 --- a/drivers/hwtracing/coresight/coresight-trbe.c +++ b/drivers/hwtracing/coresight/coresight-trbe.c @@ -1434,6 +1434,7 @@ static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata) static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata) { + cpuhp_state_remove_instance(drvdata->trbe_online, &drvdata->hotplug_node); cpuhp_remove_multi_state(drvdata->trbe_online); } diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index e50f9603d189e8c0edbf1324398eb3cf2f636cb3..a7bfddf08fa7bab139ef1e87a0d74f2965a221fb 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -673,7 +673,7 @@ config I2C_HIGHLANDER config I2C_HISI tristate "HiSilicon I2C controller" - depends on (ARM64 && ACPI) || COMPILE_TEST + depends on ARM64 || COMPILE_TEST help Say Y here if you want to have Hisilicon I2C controller support available on the Kunpeng Server. 
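The ETM4x rework spanning the last few hunks handles probing a tracer whose CPU is offline: rather than failing the smp_call_function_single(), etm4_probe() parks its init arguments in a per-CPU slot (under cpus_read_lock() to keep the stash coherent with hotplug callbacks), and etm4_online_cpu() finishes registration via etm4_probe_cpu() once the core comes up. A toy, single-threaded model of that deferred handshake, with plain arrays standing in for per-CPU variables and locking omitted:

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4

struct init_arg { int cpu; const char *name; };

static bool cpu_online[NR_CPUS];
static struct init_arg *delayed[NR_CPUS];	/* per-CPU parking slot */
static struct init_arg storage[NR_CPUS];

static void add_device(const struct init_arg *arg)
{
	printf("cpu%d: %s registered\n", arg->cpu, arg->name);
}

static void probe(int cpu, const char *name)
{
	storage[cpu] = (struct init_arg){ cpu, name };
	if (!cpu_online[cpu]) {
		delayed[cpu] = &storage[cpu];	/* finish later */
		printf("cpu%d: offline, probe deferred\n", cpu);
		return;
	}
	add_device(&storage[cpu]);
}

static void online_callback(int cpu)
{
	cpu_online[cpu] = true;
	if (delayed[cpu]) {
		add_device(delayed[cpu]);
		delayed[cpu] = NULL;
	}
}

int main(void)
{
	cpu_online[0] = true;
	probe(0, "etm0");	/* completes immediately */
	probe(2, "etm2");	/* deferred: cpu2 is offline */
	online_callback(2);	/* finishes the deferred probe */
	return 0;
}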
diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c index f57077a7448d1f7391104f4ea6ccfcd88561e417..14316530094968ab5a25ba03eecd0f132c2fc3ca 100644 --- a/drivers/i2c/busses/i2c-amd-mp2-pci.c +++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c @@ -288,7 +288,7 @@ static void amd_mp2_clear_reg(struct amd_mp2_dev *privdata) static int amd_mp2_pci_init(struct amd_mp2_dev *privdata, struct pci_dev *pci_dev) { - int rc; + int irq_flag = 0, rc; pci_set_drvdata(pci_dev, privdata); @@ -311,17 +311,29 @@ static int amd_mp2_pci_init(struct amd_mp2_dev *privdata, if (rc) goto err_dma_mask; - /* Set up intx irq */ + /* request and enable interrupt */ writel(0, privdata->mmio + AMD_P2C_MSG_INTEN); - pci_intx(pci_dev, 1); - rc = devm_request_irq(&pci_dev->dev, pci_dev->irq, amd_mp2_irq_isr, - IRQF_SHARED, dev_name(&pci_dev->dev), privdata); - if (rc) - pci_err(pci_dev, "Failure requesting irq %i: %d\n", - pci_dev->irq, rc); + rc = pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_ALL_TYPES); + if (rc < 0) { + dev_err(&pci_dev->dev, "Failed to allocate single IRQ err=%d\n", rc); + goto err_dma_mask; + } + + privdata->dev_irq = pci_irq_vector(pci_dev, 0); + if (!pci_dev->msix_enabled && !pci_dev->msi_enabled) + irq_flag = IRQF_SHARED; + + rc = devm_request_irq(&pci_dev->dev, privdata->dev_irq, + amd_mp2_irq_isr, irq_flag, dev_name(&pci_dev->dev), privdata); + if (rc) { + pci_err(pci_dev, "Failure requesting irq %i: %d\n", privdata->dev_irq, rc); + goto free_irq_vectors; + } return rc; +free_irq_vectors: + free_irq(privdata->dev_irq, privdata); err_dma_mask: pci_clear_master(pci_dev); err_pci_enable: @@ -364,7 +376,7 @@ static void amd_mp2_pci_remove(struct pci_dev *pci_dev) pm_runtime_forbid(&pci_dev->dev); pm_runtime_get_noresume(&pci_dev->dev); - pci_intx(pci_dev, 0); + free_irq(privdata->dev_irq, privdata); pci_clear_master(pci_dev); amd_mp2_clear_reg(privdata); diff --git a/drivers/i2c/busses/i2c-amd-mp2.h b/drivers/i2c/busses/i2c-amd-mp2.h index ddecd0c8865608456b84a1a5fd8c3c57393075c3..018a42de8b1e3c53ea17ce0dcc38be98bb3b6b92 100644 --- a/drivers/i2c/busses/i2c-amd-mp2.h +++ b/drivers/i2c/busses/i2c-amd-mp2.h @@ -183,6 +183,7 @@ struct amd_mp2_dev { struct mutex c2p_lock; u8 c2p_lock_busid; unsigned int probed; + int dev_irq; }; /* PCIe communication driver */ diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index c023b691441ea7ca6570e87c3b6095089f591071..a3240ece55b2b189433c7a0e09653a4ea5caece5 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -625,10 +625,5 @@ void i2c_dw_disable(struct dw_i2c_dev *dev) i2c_dw_release_lock(dev); } -void i2c_dw_disable_int(struct dw_i2c_dev *dev) -{ - regmap_write(dev->map, DW_IC_INTR_MASK, 0); -} - MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 4d3a3b464ecd8160b659f053e22f4c6bff80e4d9..95ebc5eaa5d12cf6008e5587996950a67bd20a5e 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -18,12 +18,12 @@ #include #include -#define DW_IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \ - I2C_FUNC_SMBUS_BYTE | \ - I2C_FUNC_SMBUS_BYTE_DATA | \ - I2C_FUNC_SMBUS_WORD_DATA | \ - I2C_FUNC_SMBUS_BLOCK_DATA | \ - I2C_FUNC_SMBUS_I2C_BLOCK) +#define DW_IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \ + I2C_FUNC_SMBUS_BYTE | \ + I2C_FUNC_SMBUS_BYTE_DATA | \ + 
I2C_FUNC_SMBUS_WORD_DATA | \ + I2C_FUNC_SMBUS_BLOCK_DATA | \ + I2C_FUNC_SMBUS_I2C_BLOCK) #define DW_IC_CON_MASTER BIT(0) #define DW_IC_CON_SPEED_STD (1 << 1) @@ -43,98 +43,98 @@ /* * Registers offset */ -#define DW_IC_CON 0x00 -#define DW_IC_TAR 0x04 -#define DW_IC_SAR 0x08 -#define DW_IC_DATA_CMD 0x10 -#define DW_IC_SS_SCL_HCNT 0x14 -#define DW_IC_SS_SCL_LCNT 0x18 -#define DW_IC_FS_SCL_HCNT 0x1c -#define DW_IC_FS_SCL_LCNT 0x20 -#define DW_IC_HS_SCL_HCNT 0x24 -#define DW_IC_HS_SCL_LCNT 0x28 -#define DW_IC_INTR_STAT 0x2c -#define DW_IC_INTR_MASK 0x30 -#define DW_IC_RAW_INTR_STAT 0x34 -#define DW_IC_RX_TL 0x38 -#define DW_IC_TX_TL 0x3c -#define DW_IC_CLR_INTR 0x40 -#define DW_IC_CLR_RX_UNDER 0x44 -#define DW_IC_CLR_RX_OVER 0x48 -#define DW_IC_CLR_TX_OVER 0x4c -#define DW_IC_CLR_RD_REQ 0x50 -#define DW_IC_CLR_TX_ABRT 0x54 -#define DW_IC_CLR_RX_DONE 0x58 -#define DW_IC_CLR_ACTIVITY 0x5c -#define DW_IC_CLR_STOP_DET 0x60 -#define DW_IC_CLR_START_DET 0x64 -#define DW_IC_CLR_GEN_CALL 0x68 -#define DW_IC_ENABLE 0x6c -#define DW_IC_STATUS 0x70 -#define DW_IC_TXFLR 0x74 -#define DW_IC_RXFLR 0x78 -#define DW_IC_SDA_HOLD 0x7c -#define DW_IC_TX_ABRT_SOURCE 0x80 -#define DW_IC_ENABLE_STATUS 0x9c -#define DW_IC_CLR_RESTART_DET 0xa8 -#define DW_IC_COMP_PARAM_1 0xf4 -#define DW_IC_COMP_VERSION 0xf8 -#define DW_IC_SDA_HOLD_MIN_VERS 0x3131312A -#define DW_IC_COMP_TYPE 0xfc -#define DW_IC_COMP_TYPE_VALUE 0x44570140 - -#define DW_IC_INTR_RX_UNDER BIT(0) -#define DW_IC_INTR_RX_OVER BIT(1) -#define DW_IC_INTR_RX_FULL BIT(2) -#define DW_IC_INTR_TX_OVER BIT(3) -#define DW_IC_INTR_TX_EMPTY BIT(4) -#define DW_IC_INTR_RD_REQ BIT(5) -#define DW_IC_INTR_TX_ABRT BIT(6) -#define DW_IC_INTR_RX_DONE BIT(7) -#define DW_IC_INTR_ACTIVITY BIT(8) -#define DW_IC_INTR_STOP_DET BIT(9) -#define DW_IC_INTR_START_DET BIT(10) -#define DW_IC_INTR_GEN_CALL BIT(11) -#define DW_IC_INTR_RESTART_DET BIT(12) - -#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \ - DW_IC_INTR_TX_ABRT | \ - DW_IC_INTR_STOP_DET) -#define DW_IC_INTR_MASTER_MASK (DW_IC_INTR_DEFAULT_MASK | \ - DW_IC_INTR_TX_EMPTY) -#define DW_IC_INTR_SLAVE_MASK (DW_IC_INTR_DEFAULT_MASK | \ - DW_IC_INTR_RX_DONE | \ - DW_IC_INTR_RX_UNDER | \ - DW_IC_INTR_RD_REQ) - -#define DW_IC_STATUS_ACTIVITY BIT(0) -#define DW_IC_STATUS_TFE BIT(2) -#define DW_IC_STATUS_MASTER_ACTIVITY BIT(5) -#define DW_IC_STATUS_SLAVE_ACTIVITY BIT(6) - -#define DW_IC_SDA_HOLD_RX_SHIFT 16 -#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, 16) - -#define DW_IC_ERR_TX_ABRT 0x1 - -#define DW_IC_TAR_10BITADDR_MASTER BIT(12) +#define DW_IC_CON 0x00 +#define DW_IC_TAR 0x04 +#define DW_IC_SAR 0x08 +#define DW_IC_DATA_CMD 0x10 +#define DW_IC_SS_SCL_HCNT 0x14 +#define DW_IC_SS_SCL_LCNT 0x18 +#define DW_IC_FS_SCL_HCNT 0x1c +#define DW_IC_FS_SCL_LCNT 0x20 +#define DW_IC_HS_SCL_HCNT 0x24 +#define DW_IC_HS_SCL_LCNT 0x28 +#define DW_IC_INTR_STAT 0x2c +#define DW_IC_INTR_MASK 0x30 +#define DW_IC_RAW_INTR_STAT 0x34 +#define DW_IC_RX_TL 0x38 +#define DW_IC_TX_TL 0x3c +#define DW_IC_CLR_INTR 0x40 +#define DW_IC_CLR_RX_UNDER 0x44 +#define DW_IC_CLR_RX_OVER 0x48 +#define DW_IC_CLR_TX_OVER 0x4c +#define DW_IC_CLR_RD_REQ 0x50 +#define DW_IC_CLR_TX_ABRT 0x54 +#define DW_IC_CLR_RX_DONE 0x58 +#define DW_IC_CLR_ACTIVITY 0x5c +#define DW_IC_CLR_STOP_DET 0x60 +#define DW_IC_CLR_START_DET 0x64 +#define DW_IC_CLR_GEN_CALL 0x68 +#define DW_IC_ENABLE 0x6c +#define DW_IC_STATUS 0x70 +#define DW_IC_TXFLR 0x74 +#define DW_IC_RXFLR 0x78 +#define DW_IC_SDA_HOLD 0x7c +#define DW_IC_TX_ABRT_SOURCE 0x80 +#define DW_IC_ENABLE_STATUS 0x9c +#define 
DW_IC_CLR_RESTART_DET 0xa8 +#define DW_IC_COMP_PARAM_1 0xf4 +#define DW_IC_COMP_VERSION 0xf8 +#define DW_IC_SDA_HOLD_MIN_VERS 0x3131312A /* "111*" == v1.11* */ +#define DW_IC_COMP_TYPE 0xfc +#define DW_IC_COMP_TYPE_VALUE 0x44570140 /* "DW" + 0x0140 */ + +#define DW_IC_INTR_RX_UNDER BIT(0) +#define DW_IC_INTR_RX_OVER BIT(1) +#define DW_IC_INTR_RX_FULL BIT(2) +#define DW_IC_INTR_TX_OVER BIT(3) +#define DW_IC_INTR_TX_EMPTY BIT(4) +#define DW_IC_INTR_RD_REQ BIT(5) +#define DW_IC_INTR_TX_ABRT BIT(6) +#define DW_IC_INTR_RX_DONE BIT(7) +#define DW_IC_INTR_ACTIVITY BIT(8) +#define DW_IC_INTR_STOP_DET BIT(9) +#define DW_IC_INTR_START_DET BIT(10) +#define DW_IC_INTR_GEN_CALL BIT(11) +#define DW_IC_INTR_RESTART_DET BIT(12) + +#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \ + DW_IC_INTR_TX_ABRT | \ + DW_IC_INTR_STOP_DET) +#define DW_IC_INTR_MASTER_MASK (DW_IC_INTR_DEFAULT_MASK | \ + DW_IC_INTR_TX_EMPTY) +#define DW_IC_INTR_SLAVE_MASK (DW_IC_INTR_DEFAULT_MASK | \ + DW_IC_INTR_RX_UNDER | \ + DW_IC_INTR_RD_REQ) + +#define DW_IC_STATUS_ACTIVITY BIT(0) +#define DW_IC_STATUS_TFE BIT(2) +#define DW_IC_STATUS_RFNE BIT(3) +#define DW_IC_STATUS_MASTER_ACTIVITY BIT(5) +#define DW_IC_STATUS_SLAVE_ACTIVITY BIT(6) + +#define DW_IC_SDA_HOLD_RX_SHIFT 16 +#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, 16) + +#define DW_IC_ERR_TX_ABRT 0x1 + +#define DW_IC_TAR_10BITADDR_MASTER BIT(12) #define DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH (BIT(2) | BIT(3)) #define DW_IC_COMP_PARAM_1_SPEED_MODE_MASK GENMASK(3, 2) /* - * status codes + * Software status flags */ -#define STATUS_IDLE 0x0 -#define STATUS_ACTIVE 0x1 -#define STATUS_WRITE_IN_PROGRESS 0x2 -#define STATUS_READ_IN_PROGRESS 0x4 +#define STATUS_ACTIVE BIT(0) +#define STATUS_WRITE_IN_PROGRESS BIT(1) +#define STATUS_READ_IN_PROGRESS BIT(2) +#define STATUS_MASK GENMASK(2, 0) /* * operation modes */ -#define DW_IC_MASTER 0 -#define DW_IC_SLAVE 1 +#define DW_IC_MASTER 0 +#define DW_IC_SLAVE 1 /* * Hardware abort codes from the DW_IC_TX_ABRT_SOURCE register @@ -142,20 +142,20 @@ * Only expected abort codes are listed here * refer to the datasheet for the full list */ -#define ABRT_7B_ADDR_NOACK 0 -#define ABRT_10ADDR1_NOACK 1 -#define ABRT_10ADDR2_NOACK 2 -#define ABRT_TXDATA_NOACK 3 -#define ABRT_GCALL_NOACK 4 -#define ABRT_GCALL_READ 5 -#define ABRT_SBYTE_ACKDET 7 -#define ABRT_SBYTE_NORSTRT 9 -#define ABRT_10B_RD_NORSTRT 10 -#define ABRT_MASTER_DIS 11 -#define ARB_LOST 12 -#define ABRT_SLAVE_FLUSH_TXFIFO 13 -#define ABRT_SLAVE_ARBLOST 14 -#define ABRT_SLAVE_RD_INTX 15 +#define ABRT_7B_ADDR_NOACK 0 +#define ABRT_10ADDR1_NOACK 1 +#define ABRT_10ADDR2_NOACK 2 +#define ABRT_TXDATA_NOACK 3 +#define ABRT_GCALL_NOACK 4 +#define ABRT_GCALL_READ 5 +#define ABRT_SBYTE_ACKDET 7 +#define ABRT_SBYTE_NORSTRT 9 +#define ABRT_10B_RD_NORSTRT 10 +#define ABRT_MASTER_DIS 11 +#define ARB_LOST 12 +#define ABRT_SLAVE_FLUSH_TXFIFO 13 +#define ABRT_SLAVE_ARBLOST 14 +#define ABRT_SLAVE_RD_INTX 15 #define DW_IC_TX_ABRT_7B_ADDR_NOACK BIT(ABRT_7B_ADDR_NOACK) #define DW_IC_TX_ABRT_10ADDR1_NOACK BIT(ABRT_10ADDR1_NOACK) @@ -172,11 +172,11 @@ #define DW_IC_RX_ABRT_SLAVE_ARBLOST BIT(ABRT_SLAVE_ARBLOST) #define DW_IC_RX_ABRT_SLAVE_FLUSH_TXFIFO BIT(ABRT_SLAVE_FLUSH_TXFIFO) -#define DW_IC_TX_ABRT_NOACK (DW_IC_TX_ABRT_7B_ADDR_NOACK | \ - DW_IC_TX_ABRT_10ADDR1_NOACK | \ - DW_IC_TX_ABRT_10ADDR2_NOACK | \ - DW_IC_TX_ABRT_TXDATA_NOACK | \ - DW_IC_TX_ABRT_GCALL_NOACK) +#define DW_IC_TX_ABRT_NOACK (DW_IC_TX_ABRT_7B_ADDR_NOACK | \ + DW_IC_TX_ABRT_10ADDR1_NOACK | \ + DW_IC_TX_ABRT_10ADDR2_NOACK | \ + 
DW_IC_TX_ABRT_TXDATA_NOACK | \ + DW_IC_TX_ABRT_GCALL_NOACK) struct clk; struct device; @@ -232,7 +232,6 @@ struct reset_control; * -1 if there is no semaphore. * @shared_with_punit: true if this bus is shared with the SoCs PUNIT * @disable: function to disable the controller - * @disable_int: function to disable all interrupts * @init: function to initialize the I2C hardware * @set_sda_hold_time: callback to retrieve IP specific SDA hold timing * @mode: operation mode - DW_IC_MASTER or DW_IC_SLAVE @@ -290,28 +289,27 @@ struct dw_i2c_dev { int semaphore_idx; bool shared_with_punit; void (*disable)(struct dw_i2c_dev *dev); - void (*disable_int)(struct dw_i2c_dev *dev); int (*init)(struct dw_i2c_dev *dev); int (*set_sda_hold_time)(struct dw_i2c_dev *dev); int mode; struct i2c_bus_recovery_info rinfo; }; -#define ACCESS_INTR_MASK BIT(0) -#define ACCESS_NO_IRQ_SUSPEND BIT(1) -#define ARBITRATION_SEMAPHORE BIT(2) +#define ACCESS_INTR_MASK BIT(0) +#define ACCESS_NO_IRQ_SUSPEND BIT(1) +#define ARBITRATION_SEMAPHORE BIT(2) -#define MODEL_MSCC_OCELOT BIT(8) -#define MODEL_BAIKAL_BT1 BIT(9) -#define MODEL_AMD_NAVI_GPU BIT(10) -#define MODEL_MASK GENMASK(11, 8) +#define MODEL_MSCC_OCELOT BIT(8) +#define MODEL_BAIKAL_BT1 BIT(9) +#define MODEL_AMD_NAVI_GPU BIT(10) +#define MODEL_MASK GENMASK(11, 8) /* * Enable UCSI interrupt by writing 0xd at register * offset 0x474 specified in hardware specification. */ -#define AMD_UCSI_INTR_REG 0x474 -#define AMD_UCSI_INTR_EN 0xd +#define AMD_UCSI_INTR_REG 0x474 +#define AMD_UCSI_INTR_EN 0xd struct i2c_dw_semaphore_callbacks { int (*probe)(struct dw_i2c_dev *dev); @@ -331,7 +329,6 @@ int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev); int i2c_dw_set_fifo_size(struct dw_i2c_dev *dev); u32 i2c_dw_func(struct i2c_adapter *adap); void i2c_dw_disable(struct dw_i2c_dev *dev); -void i2c_dw_disable_int(struct dw_i2c_dev *dev); static inline void __i2c_dw_enable(struct dw_i2c_dev *dev) { diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index dc3c5a15a95b9574e168932a9fa398779acdc1e0..45f569155bfe1477f681808e0e741646c5d94690 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -239,7 +239,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) msgs[dev->msg_write_idx].addr | ic_tar); /* Enforce disabled interrupts (due to HW issues) */ - i2c_dw_disable_int(dev); + regmap_write(dev->map, DW_IC_INTR_MASK, 0); /* Enable the adapter */ __i2c_dw_enable(dev); @@ -299,7 +299,7 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, dev->msgs = msgs; dev->msgs_num = num_msgs; i2c_dw_xfer_init(dev); - i2c_dw_disable_int(dev); + regmap_write(dev->map, DW_IC_INTR_MASK, 0); /* Initiate messages read/write transaction */ for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) { @@ -574,7 +574,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) dev->msg_write_idx = 0; dev->msg_read_idx = 0; dev->msg_err = 0; - dev->status = STATUS_IDLE; + dev->status = 0; dev->abort_source = 0; dev->rx_outstanding = 0; @@ -711,9 +711,18 @@ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev) * Interrupt service routine. This gets called whenever an I2C master interrupt * occurs. 
*/ -static int i2c_dw_irq_handler_master(struct dw_i2c_dev *dev) +static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) { - u32 stat; + struct dw_i2c_dev *dev = dev_id; + u32 stat, enabled; + + regmap_read(dev->map, DW_IC_ENABLE, &enabled); + regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat); + if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY)) + return IRQ_NONE; + if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0)) + return IRQ_NONE; + dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat); stat = i2c_dw_read_clear_intrbits(dev); @@ -726,12 +735,12 @@ static int i2c_dw_irq_handler_master(struct dw_i2c_dev *dev) * the HW active). */ regmap_write(dev->map, DW_IC_INTR_MASK, 0); - return 0; + return IRQ_HANDLED; } if (stat & DW_IC_INTR_TX_ABRT) { dev->cmd_err |= DW_IC_ERR_TX_ABRT; - dev->status = STATUS_IDLE; + dev->status &= ~STATUS_MASK; dev->rx_outstanding = 0; /* @@ -761,26 +770,10 @@ tx_aborted: else if (unlikely(dev->flags & ACCESS_INTR_MASK)) { /* Workaround to trigger pending interrupt */ regmap_read(dev->map, DW_IC_INTR_MASK, &stat); - i2c_dw_disable_int(dev); + regmap_write(dev->map, DW_IC_INTR_MASK, 0); regmap_write(dev->map, DW_IC_INTR_MASK, stat); } - return 0; -} - -static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) -{ - struct dw_i2c_dev *dev = dev_id; - u32 stat, enabled; - - regmap_read(dev->map, DW_IC_ENABLE, &enabled); - regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat); - dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat); - if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY)) - return IRQ_NONE; - - i2c_dw_irq_handler_master(dev); - return IRQ_HANDLED; } @@ -878,7 +871,6 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev) dev->init = i2c_dw_init_master; dev->disable = i2c_dw_disable; - dev->disable_int = i2c_dw_disable_int; ret = i2c_dw_init_regmap(dev); if (ret) @@ -917,7 +909,7 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev) if (ret) return ret; - i2c_dw_disable_int(dev); + regmap_write(dev->map, DW_IC_INTR_MASK, 0); i2c_dw_release_lock(dev); ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags, diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index 0d15f4c1e9f7e3ae17a03f7b39465b47f8cf5abe..c6d2e4c2ac23c9021171ec4f2c3ca6036f6e6c45 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -78,13 +78,7 @@ static int i2c_dw_reg_slave(struct i2c_client *slave) __i2c_dw_enable(dev); - dev->cmd_err = 0; - dev->msg_write_idx = 0; - dev->msg_read_idx = 0; - dev->msg_err = 0; - dev->status = STATUS_IDLE; - dev->abort_source = 0; - dev->rx_outstanding = 0; + dev->status = 0; return 0; } @@ -93,7 +87,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave) { struct dw_i2c_dev *dev = i2c_get_adapdata(slave->adapter); - dev->disable_int(dev); + regmap_write(dev->map, DW_IC_INTR_MASK, 0); dev->disable(dev); synchronize_irq(dev->irq); dev->slave = NULL; @@ -153,9 +147,9 @@ static u32 i2c_dw_read_clear_intrbits_slave(struct dw_i2c_dev *dev) * Interrupt service routine. This gets called whenever an I2C slave interrupt * occurs. 
*/ - -static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) +static irqreturn_t i2c_dw_isr_slave(int this_irq, void *dev_id) { + struct dw_i2c_dev *dev = dev_id; u32 raw_stat, stat, enabled, tmp; u8 val = 0, slave_activity; @@ -165,7 +159,7 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) slave_activity = ((tmp & DW_IC_STATUS_SLAVE_ACTIVITY) >> 6); if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave) - return 0; + return IRQ_NONE; stat = i2c_dw_read_clear_intrbits_slave(dev); dev_dbg(dev->dev, @@ -173,55 +167,45 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) enabled, slave_activity, raw_stat, stat); if (stat & DW_IC_INTR_RX_FULL) { - if (dev->status != STATUS_WRITE_IN_PROGRESS) { - dev->status = STATUS_WRITE_IN_PROGRESS; + if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) { + dev->status |= STATUS_WRITE_IN_PROGRESS; + dev->status &= ~STATUS_READ_IN_PROGRESS; i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &val); } - regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); - val = tmp; - if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, - &val)) - dev_vdbg(dev->dev, "Byte %X acked!", val); + do { + regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); + val = tmp; + i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, + &val); + regmap_read(dev->map, DW_IC_STATUS, &tmp); + } while (tmp & DW_IC_STATUS_RFNE); } if (stat & DW_IC_INTR_RD_REQ) { if (slave_activity) { regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp); - dev->status = STATUS_READ_IN_PROGRESS; - if (!i2c_slave_event(dev->slave, - I2C_SLAVE_READ_REQUESTED, - &val)) - regmap_write(dev->map, DW_IC_DATA_CMD, val); + if (!(dev->status & STATUS_READ_IN_PROGRESS)) { + i2c_slave_event(dev->slave, + I2C_SLAVE_READ_REQUESTED, + &val); + dev->status |= STATUS_READ_IN_PROGRESS; + dev->status &= ~STATUS_WRITE_IN_PROGRESS; + } else { + i2c_slave_event(dev->slave, + I2C_SLAVE_READ_PROCESSED, + &val); + } + regmap_write(dev->map, DW_IC_DATA_CMD, val); } } - if (stat & DW_IC_INTR_RX_DONE) { - if (!i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED, - &val)) - regmap_read(dev->map, DW_IC_CLR_RX_DONE, &tmp); - } - - if (stat & DW_IC_INTR_STOP_DET) { - dev->status = STATUS_IDLE; + if (stat & DW_IC_INTR_STOP_DET) i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val); - } - return 1; -} - -static irqreturn_t i2c_dw_isr_slave(int this_irq, void *dev_id) -{ - struct dw_i2c_dev *dev = dev_id; - int ret; - - ret = i2c_dw_irq_handler_slave(dev); - if (ret > 0) - complete(&dev->cmd_complete); - - return IRQ_RETVAL(ret); + return IRQ_HANDLED; } static const struct i2c_algorithm i2c_dw_algo = { @@ -246,11 +230,8 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev) struct i2c_adapter *adap = &dev->adapter; int ret; - init_completion(&dev->cmd_complete); - dev->init = i2c_dw_init_slave; dev->disable = i2c_dw_disable; - dev->disable_int = i2c_dw_disable_int; ret = i2c_dw_init_regmap(dev); if (ret) diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c index b1985c1667e163bcb810c9c0786620bd3ebf4d04..0e4385a9bcf71c902345163298cf6f4734f3d5e6 100644 --- a/drivers/i2c/busses/i2c-gpio.c +++ b/drivers/i2c/busses/i2c-gpio.c @@ -482,19 +482,17 @@ static int i2c_gpio_remove(struct platform_device *pdev) return 0; } -#if defined(CONFIG_OF) static const struct of_device_id i2c_gpio_dt_ids[] = { { .compatible = "i2c-gpio", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, i2c_gpio_dt_ids); -#endif static struct platform_driver i2c_gpio_driver = { .driver = { .name = "i2c-gpio", - .of_match_table = 
of_match_ptr(i2c_gpio_dt_ids), + .of_match_table = i2c_gpio_dt_ids, }, .probe = i2c_gpio_probe, .remove = i2c_gpio_remove, diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c index 76c3d8f6fc3c6e4ed24b9bc4619905bfab08b41d..8c6c7075c765c502e194d222934b294ba966de89 100644 --- a/drivers/i2c/busses/i2c-hisi.c +++ b/drivers/i2c/busses/i2c-hisi.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -88,6 +89,7 @@ struct hisi_i2c_controller { struct i2c_adapter adapter; void __iomem *iobase; struct device *dev; + struct clk *clk; int irq; /* Intermediates for recording the transfer process */ @@ -454,10 +456,15 @@ static int hisi_i2c_probe(struct platform_device *pdev) return ret; } - ret = device_property_read_u64(dev, "clk_rate", &clk_rate_hz); - if (ret) { - dev_err(dev, "failed to get clock frequency, ret = %d\n", ret); - return ret; + ctlr->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL); + if (IS_ERR_OR_NULL(ctlr->clk)) { + ret = device_property_read_u64(dev, "clk_rate", &clk_rate_hz); + if (ret) { + dev_err(dev, "failed to get clock frequency, ret = %d\n", ret); + return ret; + } + } else { + clk_rate_hz = clk_get_rate(ctlr->clk); } ctlr->clk_rate_khz = DIV_ROUND_UP_ULL(clk_rate_hz, HZ_PER_KHZ); @@ -489,11 +496,18 @@ static const struct acpi_device_id hisi_i2c_acpi_ids[] = { }; MODULE_DEVICE_TABLE(acpi, hisi_i2c_acpi_ids); +static const struct of_device_id hisi_i2c_dts_ids[] = { + { .compatible = "hisilicon,ascend910-i2c", }, + { } +}; +MODULE_DEVICE_TABLE(of, hisi_i2c_dts_ids); + static struct platform_driver hisi_i2c_driver = { .probe = hisi_i2c_probe, .driver = { .name = "hisi-i2c", .acpi_match_table = hisi_i2c_acpi_ids, + .of_match_table = hisi_i2c_dts_ids, }, }; module_platform_driver(hisi_i2c_driver); diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index fc70920c4ddabab247eae7de1434d48e3d83d593..cf5bacf3a4884430047d71c181896d63a17c4f4f 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -1451,8 +1451,7 @@ static int i2c_imx_probe(struct platform_device *pdev) if (irq < 0) return irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index fe2349590f75e2c0bb87f70420ba08558644519b..c74985d77b0ecf34726099b3eded94a2f326d072 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -509,6 +509,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, if (read_write == I2C_SMBUS_WRITE) { /* Block Write */ dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: WRITE\n"); + if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) + return -EINVAL; + dma_size = data->block[0] + 1; dma_direction = DMA_TO_DEVICE; desc->wr_len_cmd = dma_size; diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index fc7bfd98156ba730ff781aea280f87e4e07962e7..d80e59340d97a1e6f7790880b4f937e05cf5650e 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -431,6 +431,19 @@ static const struct mtk_i2c_compatible mt8168_compat = { .max_dma_support = 33, }; +static const struct mtk_i2c_compatible mt7986_compat = { + .quirks = &mt7622_i2c_quirks, + .regs = mt_i2c_regs_v1, + .pmic_i2c = 0, + .dcm = 1, + .auto_restart = 1, + .aux_len_reg = 1, + .timing_adjust = 0, + .dma_sync = 1, + .ltiming_adjust = 0, + 
.max_dma_support = 32, +}; + static const struct mtk_i2c_compatible mt8173_compat = { .regs = mt_i2c_regs_v1, .pmic_i2c = 0, @@ -503,6 +516,7 @@ static const struct of_device_id mtk_i2c_of_match[] = { { .compatible = "mediatek,mt6577-i2c", .data = &mt6577_compat }, { .compatible = "mediatek,mt6589-i2c", .data = &mt6589_compat }, { .compatible = "mediatek,mt7622-i2c", .data = &mt7622_compat }, + { .compatible = "mediatek,mt7986-i2c", .data = &mt7986_compat }, { .compatible = "mediatek,mt8168-i2c", .data = &mt8168_compat }, { .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat }, { .compatible = "mediatek,mt8183-i2c", .data = &mt8183_compat }, diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index 83457359ec450ed80e243b8f2feb7b1f931d93a1..38d5864d0cb5bd8d9b4baeef7f3c87693b44e2f1 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -106,7 +106,7 @@ enum i2c_addr { #define NPCM_I2CCST3 0x19 #define I2C_VER 0x1F -/*BANK0 regs*/ +/* BANK 0 regs */ #define NPCM_I2CADDR3 0x10 #define NPCM_I2CADDR7 0x11 #define NPCM_I2CADDR4 0x12 @@ -115,6 +115,20 @@ enum i2c_addr { #define NPCM_I2CADDR9 0x15 #define NPCM_I2CADDR6 0x16 #define NPCM_I2CADDR10 0x17 +#define NPCM_I2CCTL4 0x1A +#define NPCM_I2CCTL5 0x1B +#define NPCM_I2CSCLLT 0x1C /* SCL Low Time */ +#define NPCM_I2CFIF_CTL 0x1D /* FIFO Control */ +#define NPCM_I2CSCLHT 0x1E /* SCL High Time */ + +/* BANK 1 regs */ +#define NPCM_I2CFIF_CTS 0x10 /* Both FIFOs Control and Status */ +#define NPCM_I2CTXF_CTL 0x12 /* Tx-FIFO Control */ +#define NPCM_I2CT_OUT 0x14 /* Bus T.O. */ +#define NPCM_I2CPEC 0x16 /* PEC Data */ +#define NPCM_I2CTXF_STS 0x1A /* Tx-FIFO Status */ +#define NPCM_I2CRXF_STS 0x1C /* Rx-FIFO Status */ +#define NPCM_I2CRXF_CTL 0x1E /* Rx-FIFO Control */ #if IS_ENABLED(CONFIG_I2C_SLAVE) /* @@ -131,66 +145,51 @@ static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = { }; #endif -#define NPCM_I2CCTL4 0x1A -#define NPCM_I2CCTL5 0x1B -#define NPCM_I2CSCLLT 0x1C /* SCL Low Time */ -#define NPCM_I2CFIF_CTL 0x1D /* FIFO Control */ -#define NPCM_I2CSCLHT 0x1E /* SCL High Time */ - -/* BANK 1 regs */ -#define NPCM_I2CFIF_CTS 0x10 /* Both FIFOs Control and Status */ -#define NPCM_I2CTXF_CTL 0x12 /* Tx-FIFO Control */ -#define NPCM_I2CT_OUT 0x14 /* Bus T.O. 
*/ -#define NPCM_I2CPEC 0x16 /* PEC Data */ -#define NPCM_I2CTXF_STS 0x1A /* Tx-FIFO Status */ -#define NPCM_I2CRXF_STS 0x1C /* Rx-FIFO Status */ -#define NPCM_I2CRXF_CTL 0x1E /* Rx-FIFO Control */ - /* NPCM_I2CST reg fields */ -#define NPCM_I2CST_XMIT BIT(0) -#define NPCM_I2CST_MASTER BIT(1) -#define NPCM_I2CST_NMATCH BIT(2) -#define NPCM_I2CST_STASTR BIT(3) -#define NPCM_I2CST_NEGACK BIT(4) -#define NPCM_I2CST_BER BIT(5) -#define NPCM_I2CST_SDAST BIT(6) -#define NPCM_I2CST_SLVSTP BIT(7) +#define NPCM_I2CST_XMIT BIT(0) /* Transmit mode */ +#define NPCM_I2CST_MASTER BIT(1) /* Master mode */ +#define NPCM_I2CST_NMATCH BIT(2) /* New match */ +#define NPCM_I2CST_STASTR BIT(3) /* Stall after start */ +#define NPCM_I2CST_NEGACK BIT(4) /* Negative ACK */ +#define NPCM_I2CST_BER BIT(5) /* Bus error */ +#define NPCM_I2CST_SDAST BIT(6) /* SDA status */ +#define NPCM_I2CST_SLVSTP BIT(7) /* Slave stop */ /* NPCM_I2CCST reg fields */ -#define NPCM_I2CCST_BUSY BIT(0) -#define NPCM_I2CCST_BB BIT(1) -#define NPCM_I2CCST_MATCH BIT(2) -#define NPCM_I2CCST_GCMATCH BIT(3) -#define NPCM_I2CCST_TSDA BIT(4) -#define NPCM_I2CCST_TGSCL BIT(5) -#define NPCM_I2CCST_MATCHAF BIT(6) -#define NPCM_I2CCST_ARPMATCH BIT(7) +#define NPCM_I2CCST_BUSY BIT(0) /* Busy */ +#define NPCM_I2CCST_BB BIT(1) /* Bus busy */ +#define NPCM_I2CCST_MATCH BIT(2) /* Address match */ +#define NPCM_I2CCST_GCMATCH BIT(3) /* Global call match */ +#define NPCM_I2CCST_TSDA BIT(4) /* Test SDA line */ +#define NPCM_I2CCST_TGSCL BIT(5) /* Toggle SCL line */ +#define NPCM_I2CCST_MATCHAF BIT(6) /* Match address field */ +#define NPCM_I2CCST_ARPMATCH BIT(7) /* ARP address match */ /* NPCM_I2CCTL1 reg fields */ -#define NPCM_I2CCTL1_START BIT(0) -#define NPCM_I2CCTL1_STOP BIT(1) -#define NPCM_I2CCTL1_INTEN BIT(2) +#define NPCM_I2CCTL1_START BIT(0) /* Generate start condition */ +#define NPCM_I2CCTL1_STOP BIT(1) /* Generate stop condition */ +#define NPCM_I2CCTL1_INTEN BIT(2) /* Interrupt enable */ #define NPCM_I2CCTL1_EOBINTE BIT(3) #define NPCM_I2CCTL1_ACK BIT(4) -#define NPCM_I2CCTL1_GCMEN BIT(5) -#define NPCM_I2CCTL1_NMINTE BIT(6) -#define NPCM_I2CCTL1_STASTRE BIT(7) +#define NPCM_I2CCTL1_GCMEN BIT(5) /* Global call match enable */ +#define NPCM_I2CCTL1_NMINTE BIT(6) /* New match interrupt enable */ +#define NPCM_I2CCTL1_STASTRE BIT(7) /* Stall after start enable */ /* RW1S fields (inside a RW reg): */ #define NPCM_I2CCTL1_RWS \ (NPCM_I2CCTL1_START | NPCM_I2CCTL1_STOP | NPCM_I2CCTL1_ACK) /* npcm_i2caddr reg fields */ -#define NPCM_I2CADDR_A GENMASK(6, 0) -#define NPCM_I2CADDR_SAEN BIT(7) +#define NPCM_I2CADDR_A GENMASK(6, 0) /* Address */ +#define NPCM_I2CADDR_SAEN BIT(7) /* Slave address enable */ /* NPCM_I2CCTL2 reg fields */ -#define I2CCTL2_ENABLE BIT(0) -#define I2CCTL2_SCLFRQ6_0 GENMASK(7, 1) +#define I2CCTL2_ENABLE BIT(0) /* Module enable */ +#define I2CCTL2_SCLFRQ6_0 GENMASK(7, 1) /* Bits 0:6 of frequency divisor */ /* NPCM_I2CCTL3 reg fields */ -#define I2CCTL3_SCLFRQ8_7 GENMASK(1, 0) -#define I2CCTL3_ARPMEN BIT(2) +#define I2CCTL3_SCLFRQ8_7 GENMASK(1, 0) /* Bits 7:8 of frequency divisor */ +#define I2CCTL3_ARPMEN BIT(2) /* ARP match enable */ #define I2CCTL3_IDL_START BIT(3) #define I2CCTL3_400K_MODE BIT(4) #define I2CCTL3_BNK_SEL BIT(5) diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c index 9028ffb58cc079697f6acaed7714cba5cf0bbf27..7d54a9f34c74b5a3b074a469dca674eb286dd50d 100644 --- a/drivers/i2c/busses/i2c-pasemi-core.c +++ b/drivers/i2c/busses/i2c-pasemi-core.c @@ -21,6 +21,7 @@ #define REG_MTXFIFO 
0x00 #define REG_MRXFIFO 0x04 #define REG_SMSTA 0x14 +#define REG_IMASK 0x18 #define REG_CTL 0x1c #define REG_REV 0x28 @@ -66,6 +67,7 @@ static void pasemi_reset(struct pasemi_smbus *smbus) val |= CTL_EN; reg_write(smbus, REG_CTL, val); + reinit_completion(&smbus->irq_completion); } static void pasemi_smb_clear(struct pasemi_smbus *smbus) @@ -78,14 +80,21 @@ static void pasemi_smb_clear(struct pasemi_smbus *smbus) static int pasemi_smb_waitready(struct pasemi_smbus *smbus) { - int timeout = 10; + int timeout = 100; unsigned int status; - status = reg_read(smbus, REG_SMSTA); - - while (!(status & SMSTA_XEN) && timeout--) { - msleep(1); + if (smbus->use_irq) { + reinit_completion(&smbus->irq_completion); + reg_write(smbus, REG_IMASK, SMSTA_XEN | SMSTA_MTN); + wait_for_completion_timeout(&smbus->irq_completion, msecs_to_jiffies(100)); + reg_write(smbus, REG_IMASK, 0); status = reg_read(smbus, REG_SMSTA); + } else { + status = reg_read(smbus, REG_SMSTA); + while (!(status & SMSTA_XEN) && timeout--) { + msleep(1); + status = reg_read(smbus, REG_SMSTA); + } } /* Got NACK? */ @@ -344,10 +353,14 @@ int pasemi_i2c_common_probe(struct pasemi_smbus *smbus) /* set up the sysfs linkage to our parent device */ smbus->adapter.dev.parent = smbus->dev; + smbus->use_irq = 0; + init_completion(&smbus->irq_completion); if (smbus->hw_rev != PASEMI_HW_REV_PCI) smbus->hw_rev = reg_read(smbus, REG_REV); + reg_write(smbus, REG_IMASK, 0); + pasemi_reset(smbus); error = devm_i2c_add_adapter(smbus->dev, &smbus->adapter); @@ -356,3 +369,12 @@ int pasemi_i2c_common_probe(struct pasemi_smbus *smbus) return 0; } + +irqreturn_t pasemi_irq_handler(int irq, void *dev_id) +{ + struct pasemi_smbus *smbus = dev_id; + + reg_write(smbus, REG_IMASK, 0); + complete(&smbus->irq_completion); + return IRQ_HANDLED; +} diff --git a/drivers/i2c/busses/i2c-pasemi-core.h b/drivers/i2c/busses/i2c-pasemi-core.h index 4655124a37f3a61bd7d0074c6d6f859317651de9..88821f4e8a9f8ac183b3d736d9b58de795bdc058 100644 --- a/drivers/i2c/busses/i2c-pasemi-core.h +++ b/drivers/i2c/busses/i2c-pasemi-core.h @@ -7,6 +7,7 @@ #include #include #include +#include #define PASEMI_HW_REV_PCI -1 @@ -16,6 +17,10 @@ struct pasemi_smbus { void __iomem *ioaddr; unsigned int clk_div; int hw_rev; + int use_irq; + struct completion irq_completion; }; int pasemi_i2c_common_probe(struct pasemi_smbus *smbus); + +irqreturn_t pasemi_irq_handler(int irq, void *dev_id); diff --git a/drivers/i2c/busses/i2c-pasemi-platform.c b/drivers/i2c/busses/i2c-pasemi-platform.c index 88a54aaf7e3c324a0595e2ff1c2080e6f353ce66..e35945a91dbef40b5c58301ca0da7d694fc4afa6 100644 --- a/drivers/i2c/busses/i2c-pasemi-platform.c +++ b/drivers/i2c/busses/i2c-pasemi-platform.c @@ -49,6 +49,7 @@ static int pasemi_platform_i2c_probe(struct platform_device *pdev) struct pasemi_smbus *smbus; u32 frequency; int error; + int irq_num; data = devm_kzalloc(dev, sizeof(struct pasemi_platform_i2c_data), GFP_KERNEL); @@ -82,6 +83,11 @@ static int pasemi_platform_i2c_probe(struct platform_device *pdev) if (error) goto out_clk_disable; + irq_num = platform_get_irq(pdev, 0); + error = devm_request_irq(smbus->dev, irq_num, pasemi_irq_handler, 0, "pasemi_apple_i2c", (void *)smbus); + + if (!error) + smbus->use_irq = 1; platform_set_drvdata(pdev, data); return 0; diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c index f614cade432bb40ef7bfe8a8aaf27c52e7c67e75..30e38bc8b6db81fb56f4ee33e18662d8993adb7a 100644 --- a/drivers/i2c/busses/i2c-pxa-pci.c +++ b/drivers/i2c/busses/i2c-pxa-pci.c @@ -105,7 
+105,7 @@ static int ce4100_i2c_probe(struct pci_dev *dev, int i; struct ce4100_devices *sds; - ret = pci_enable_device_mem(dev); + ret = pcim_enable_device(dev); if (ret) return ret; @@ -114,10 +114,8 @@ static int ce4100_i2c_probe(struct pci_dev *dev, return -EINVAL; } sds = kzalloc(sizeof(*sds), GFP_KERNEL); - if (!sds) { - ret = -ENOMEM; - goto err_mem; - } + if (!sds) + return -ENOMEM; for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) { sds->pdev[i] = add_i2c_device(dev, i); @@ -133,8 +131,6 @@ static int ce4100_i2c_probe(struct pci_dev *dev, err_dev_add: kfree(sds); -err_mem: - pci_disable_device(dev); return ret; } diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 8fce98bb77ff974a23bebfc51e9189e7d11d592e..fd70794bfceecf0879cf8e39c0e569ba3e420894 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -88,6 +88,7 @@ struct geni_i2c_dev { int cur_wr; int cur_rd; spinlock_t lock; + struct clk *core_clk; u32 clk_freq_out; const struct geni_i2c_clk_fld *clk_fld; int suspended; @@ -100,6 +101,13 @@ struct geni_i2c_dev { bool abort_done; }; +struct geni_i2c_desc { + bool has_core_clk; + char *icc_ddr; + bool no_dma_support; + unsigned int tx_fifo_depth; +}; + struct geni_i2c_err_log { int err; const char *msg; @@ -763,6 +771,7 @@ static int geni_i2c_probe(struct platform_device *pdev) u32 proto, tx_depth, fifo_disable; int ret; struct device *dev = &pdev->dev; + const struct geni_i2c_desc *desc = NULL; gi2c = devm_kzalloc(dev, sizeof(*gi2c), GFP_KERNEL); if (!gi2c) @@ -775,6 +784,14 @@ static int geni_i2c_probe(struct platform_device *pdev) if (IS_ERR(gi2c->se.base)) return PTR_ERR(gi2c->se.base); + desc = device_get_match_data(&pdev->dev); + + if (desc && desc->has_core_clk) { + gi2c->core_clk = devm_clk_get(dev, "core"); + if (IS_ERR(gi2c->core_clk)) + return PTR_ERR(gi2c->core_clk); + } + gi2c->se.clk = devm_clk_get(dev, "se"); if (IS_ERR(gi2c->se.clk) && !has_acpi_companion(dev)) return PTR_ERR(gi2c->se.clk); @@ -818,7 +835,7 @@ static int geni_i2c_probe(struct platform_device *pdev) gi2c->adap.dev.of_node = dev->of_node; strscpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name)); - ret = geni_icc_get(&gi2c->se, "qup-memory"); + ret = geni_icc_get(&gi2c->se, desc ? 
desc->icc_ddr : "qup-memory"); if (ret) return ret; /* @@ -828,12 +845,17 @@ static int geni_i2c_probe(struct platform_device *pdev) */ gi2c->se.icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW; gi2c->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW; - gi2c->se.icc_paths[GENI_TO_DDR].avg_bw = Bps_to_icc(gi2c->clk_freq_out); + if (!desc || desc->icc_ddr) + gi2c->se.icc_paths[GENI_TO_DDR].avg_bw = Bps_to_icc(gi2c->clk_freq_out); ret = geni_icc_set_bw(&gi2c->se); if (ret) return ret; + ret = clk_prepare_enable(gi2c->core_clk); + if (ret) + return ret; + ret = geni_se_resources_on(&gi2c->se); if (ret) { dev_err(dev, "Error turning on resources %d\n", ret); @@ -843,10 +865,15 @@ static int geni_i2c_probe(struct platform_device *pdev) if (proto != GENI_SE_I2C) { dev_err(dev, "Invalid proto %d\n", proto); geni_se_resources_off(&gi2c->se); + clk_disable_unprepare(gi2c->core_clk); return -ENXIO; } - fifo_disable = readl_relaxed(gi2c->se.base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE; + if (desc && desc->no_dma_support) + fifo_disable = false; + else + fifo_disable = readl_relaxed(gi2c->se.base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE; + if (fifo_disable) { /* FIFO is disabled, so we can only use GPI DMA */ gi2c->gpi_mode = true; @@ -858,6 +885,16 @@ static int geni_i2c_probe(struct platform_device *pdev) } else { gi2c->gpi_mode = false; tx_depth = geni_se_get_tx_fifo_depth(&gi2c->se); + + /* I2C Master Hub Serial Elements don't have the HW_PARAM_0 register */ + if (!tx_depth && desc) + tx_depth = desc->tx_fifo_depth; + + if (!tx_depth) { + dev_err(dev, "Invalid TX FIFO depth\n"); + return -EINVAL; + } + gi2c->tx_wm = tx_depth - 1; geni_se_init(&gi2c->se, gi2c->tx_wm, tx_depth); geni_se_config_packing(&gi2c->se, BITS_PER_BYTE, @@ -866,6 +903,7 @@ static int geni_i2c_probe(struct platform_device *pdev) dev_dbg(dev, "i2c fifo/se-dma mode. 
fifo depth:%d\n", tx_depth); } + clk_disable_unprepare(gi2c->core_clk); ret = geni_se_resources_off(&gi2c->se); if (ret) { dev_err(dev, "Error turning off resources %d\n", ret); @@ -931,6 +969,8 @@ static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev) gi2c->suspended = 1; } + clk_disable_unprepare(gi2c->core_clk); + return geni_icc_disable(&gi2c->se); } @@ -943,6 +983,10 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev) if (ret) return ret; + ret = clk_prepare_enable(gi2c->core_clk); + if (ret) + return ret; + ret = geni_se_resources_on(&gi2c->se); if (ret) return ret; @@ -981,8 +1025,16 @@ static const struct dev_pm_ops geni_i2c_pm_ops = { NULL) }; +const struct geni_i2c_desc i2c_master_hub = { + .has_core_clk = true, + .icc_ddr = NULL, + .no_dma_support = true, + .tx_fifo_depth = 16, +}; + static const struct of_device_id geni_i2c_dt_match[] = { { .compatible = "qcom,geni-i2c" }, + { .compatible = "qcom,geni-i2c-master-hub", .data = &i2c_master_hub }, {} }; MODULE_DEVICE_TABLE(of, geni_i2c_dt_match); diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 3869c258a52965b8e71013e96fae23991cda4551..6aab84c8d22b49368d77cd2068c472490a45cf7c 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1830,6 +1830,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) i2c_dev->adapter.class = I2C_CLASS_DEPRECATED; i2c_dev->adapter.algo = &tegra_i2c_algo; i2c_dev->adapter.nr = pdev->id; + ACPI_COMPANION_SET(&i2c_dev->adapter.dev, ACPI_COMPANION(&pdev->dev)); if (i2c_dev->hw->supports_bus_clear) i2c_dev->adapter.bus_recovery_info = &tegra_i2c_recovery_info; diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 277a02455cddd86ce71a85279fcf94218f53094a..bee5a2ef1f229d280d2e55d67267604145f030c6 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -858,11 +858,14 @@ static int xiic_i2c_remove(struct platform_device *pdev) /* remove adapter & data */ i2c_del_adapter(&i2c->adap); - ret = pm_runtime_resume_and_get(i2c->dev); + ret = pm_runtime_get_sync(i2c->dev); + if (ret < 0) - return ret; + dev_warn(&pdev->dev, "Failed to activate device for removal (%pe)\n", + ERR_PTR(ret)); + else + xiic_deinit(i2c); - xiic_deinit(i2c); pm_runtime_put_sync(i2c->dev); clk_disable_unprepare(i2c->clk); pm_runtime_disable(&pdev->dev); diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 13fafb74bab8d947c08c84e6ff823972cf3ebc6c..087e480b624cb2dde420183fb80a8651c27d8101 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -1018,15 +1018,14 @@ static const struct i2c_device_id dummy_id[] = { { }, }; -static int dummy_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int dummy_probe(struct i2c_client *client) { return 0; } static struct i2c_driver dummy_driver = { .driver.name = "dummy", - .probe = dummy_probe, + .probe_new = dummy_probe, .id_table = dummy_id, }; diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c index 4abc2d9198815498ede3775eb8789847ccdcbae1..5f25f23c4ff8cb3d3573c39f5f52118bb84e4d05 100644 --- a/drivers/i2c/i2c-slave-eeprom.c +++ b/drivers/i2c/i2c-slave-eeprom.c @@ -140,8 +140,9 @@ static int i2c_slave_init_eeprom_data(struct eeprom_data *eeprom, struct i2c_cli return 0; } -static int i2c_slave_eeprom_probe(struct i2c_client *client, const struct i2c_device_id *id) +static int i2c_slave_eeprom_probe(struct i2c_client *client) { + const struct 
i2c_device_id *id = i2c_client_get_device_id(client); struct eeprom_data *eeprom; int ret; unsigned int size = FIELD_GET(I2C_SLAVE_BYTELEN, id->driver_data) + 1; @@ -206,7 +207,7 @@ static struct i2c_driver i2c_slave_eeprom_driver = { .driver = { .name = "i2c-slave-eeprom", }, - .probe = i2c_slave_eeprom_probe, + .probe_new = i2c_slave_eeprom_probe, .remove = i2c_slave_eeprom_remove, .id_table = i2c_slave_eeprom_id, }; diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c index 07c92c8495a3c895551507448e11661d8a4ee691..cd19546d31fcb0f9735f432ee9f15c86d2998b01 100644 --- a/drivers/i2c/i2c-smbus.c +++ b/drivers/i2c/i2c-smbus.c @@ -112,8 +112,7 @@ static void smbalert_work(struct work_struct *work) } /* Setup SMBALERT# infrastructure */ -static int smbalert_probe(struct i2c_client *ara, - const struct i2c_device_id *id) +static int smbalert_probe(struct i2c_client *ara) { struct i2c_smbus_alert_setup *setup = dev_get_platdata(&ara->dev); struct i2c_smbus_alert *alert; @@ -170,7 +169,7 @@ static struct i2c_driver smbalert_driver = { .driver = { .name = "smbus_alert", }, - .probe = smbalert_probe, + .probe_new = smbalert_probe, .remove = smbalert_remove, .id_table = smbalert_ids, }; @@ -361,9 +360,15 @@ void i2c_register_spd(struct i2c_adapter *adap) return; } + /* + * Memory types could be found at section 7.18.2 (Memory Device — Type), table 78 + * https://www.dmtf.org/sites/default/files/standards/documents/DSP0134_3.6.0.pdf + */ switch (common_mem_type) { + case 0x12: /* DDR */ case 0x13: /* DDR2 */ case 0x18: /* DDR3 */ + case 0x1B: /* LPDDR */ case 0x1C: /* LPDDR2 */ case 0x1D: /* LPDDR3 */ name = "spd"; diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c index ea83de78f52db626f2c273ddf8baa29874943e29..09d1d9e67e31f39e5b863ced5e9a43e40de23312 100644 --- a/drivers/i2c/muxes/i2c-mux-pca9541.c +++ b/drivers/i2c/muxes/i2c-mux-pca9541.c @@ -283,8 +283,7 @@ static int pca9541_release_chan(struct i2c_mux_core *muxc, u32 chan) /* * I2C init/probing/exit functions */ -static int pca9541_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int pca9541_probe(struct i2c_client *client) { struct i2c_adapter *adap = client->adapter; struct i2c_mux_core *muxc; @@ -337,7 +336,7 @@ static struct i2c_driver pca9541_driver = { .name = "pca9541", .of_match_table = of_match_ptr(pca9541_of_match), }, - .probe = pca9541_probe, + .probe_new = pca9541_probe, .remove = pca9541_remove, .id_table = pca9541_id, }; diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index a5f458b635df6c3e45df5c971875b3bd50f9c2e5..3639e6d7304cd2a13ce83420ab64cc474bf1a044 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -411,9 +411,9 @@ static int pca954x_init(struct i2c_client *client, struct pca954x *data) /* * I2C init/probing/exit functions */ -static int pca954x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int pca954x_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct i2c_adapter *adap = client->adapter; struct device *dev = &client->dev; struct gpio_desc *gpio; @@ -554,7 +554,7 @@ static struct i2c_driver pca954x_driver = { .pm = &pca954x_pm, .of_match_table = pca954x_of_match, }, - .probe = pca954x_probe, + .probe_new = pca954x_probe, .remove = pca954x_remove, .id_table = pca954x_id, }; diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c index 
0e0679f65cf77e75b9857d7a20d602c0e70da2aa..30a6de1694e0791ea6d70b75b4c35df2ff7cc0d0 100644 --- a/drivers/i2c/muxes/i2c-mux-reg.c +++ b/drivers/i2c/muxes/i2c-mux-reg.c @@ -183,13 +183,12 @@ static int i2c_mux_reg_probe(struct platform_device *pdev) if (!mux->data.reg) { dev_info(&pdev->dev, "Register not set, using platform resource\n"); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mux->data.reg_size = resource_size(res); - mux->data.reg = devm_ioremap_resource(&pdev->dev, res); + mux->data.reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(mux->data.reg)) { ret = PTR_ERR(mux->data.reg); goto err_put_parent; } + mux->data.reg_size = resource_size(res); } if (mux->data.reg_size != 4 && mux->data.reg_size != 2 && diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c index e92d3e9a52bd55dedc1cd4b3f83d6721a1c97025..9762630b917ea55f86a4241d35974c23892bd77a 100644 --- a/drivers/i3c/device.c +++ b/drivers/i3c/device.c @@ -50,6 +50,26 @@ int i3c_device_do_priv_xfers(struct i3c_device *dev, } EXPORT_SYMBOL_GPL(i3c_device_do_priv_xfers); +/** + * i3c_device_do_setdasa() - do I3C dynamic address assignment with + * static address + * + * @dev: device with which the DAA should be done + * + * Return: 0 in case of success, a negative error code otherwise. + */ +int i3c_device_do_setdasa(struct i3c_device *dev) +{ + int ret; + + i3c_bus_normaluse_lock(dev->bus); + ret = i3c_dev_setdasa_locked(dev->desc); + i3c_bus_normaluse_unlock(dev->bus); + + return ret; +} +EXPORT_SYMBOL_GPL(i3c_device_do_setdasa); + /** * i3c_device_get_info() - get I3C device information * diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h index 86b7b44cfca28be75fdc028f0c5a7460dd552d25..908a807badaf9c0266bc33756d66091b1ee8368c 100644 --- a/drivers/i3c/internals.h +++ b/drivers/i3c/internals.h @@ -15,6 +15,7 @@ extern struct bus_type i3c_bus_type; void i3c_bus_normaluse_lock(struct i3c_bus *bus); void i3c_bus_normaluse_unlock(struct i3c_bus *bus); +int i3c_dev_setdasa_locked(struct i3c_dev_desc *dev); int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev, struct i3c_priv_xfer *xfers, int nxfers); diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 351c81a929a6c914b21d153d7587fe40dc695c0b..d7e6f6c99aea558f7781de8c34f354aeb0add48b 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -2708,6 +2708,25 @@ int i3c_master_unregister(struct i3c_master_controller *master) } EXPORT_SYMBOL_GPL(i3c_master_unregister); +int i3c_dev_setdasa_locked(struct i3c_dev_desc *dev) +{ + struct i3c_master_controller *master; + + if (!dev) + return -ENOENT; + + master = i3c_dev_get_master(dev); + if (!master) + return -EINVAL; + + if (!dev->boardinfo || !dev->boardinfo->init_dyn_addr || + !dev->boardinfo->static_addr) + return -EINVAL; + + return i3c_master_setdasa_locked(master, dev->info.static_addr, + dev->boardinfo->init_dyn_addr); +} + int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev, struct i3c_priv_xfer *xfers, int nxfers) diff --git a/drivers/iio/TODO b/drivers/iio/TODO index 7d7326b7085a2a64ef181d4749648eb1efd181ce..2ace27d1ac62adb38953ff2ea0f26128d0a122c7 100644 --- a/drivers/iio/TODO +++ b/drivers/iio/TODO @@ -7,9 +7,6 @@ tree - ABI Documentation - Audit driviers/iio/staging/Documentation -- Replace iio_dev->mlock by either a local lock or use -iio_claim_direct.(Requires analysis of the purpose of the lock.) - - Converting drivers from device tree centric to more generic property handlers. 
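
The i3c hunks above export i3c_device_do_setdasa() so that an I3C device driver can ask the core to run SETDASA, assigning the device's dynamic address from the static address and init_dyn_addr provided in its boardinfo. A hedged usage sketch follows; the probe function around the call is hypothetical, only i3c_device_do_setdasa() itself comes from this series.

#include <linux/i3c/device.h>

/*
 * Illustrative probe for some I3C peripheral driver. Per the helper's
 * implementation above, the call fails with -EINVAL unless boardinfo
 * supplies both a static address and an init_dyn_addr.
 */
static int my_i3c_probe(struct i3c_device *i3cdev)
{
	int ret;

	ret = i3c_device_do_setdasa(i3cdev);
	if (ret)
		return ret;

	/*
	 * The device now answers at its dynamic address; normal setup
	 * and i3c_device_do_priv_xfers() can follow.
	 */
	return 0;
}
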
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig index ffac66db7ac92901d74aeec22d43f303c268d8d7..03ac410c162e1d9ad6f119939b1d7f71558eff8c 100644 --- a/drivers/iio/accel/Kconfig +++ b/drivers/iio/accel/Kconfig @@ -409,6 +409,27 @@ config IIO_ST_ACCEL_SPI_3AXIS To compile this driver as a module, choose M here. The module will be called st_accel_spi. +config IIO_KX022A + tristate + +config IIO_KX022A_SPI + tristate "Kionix KX022A tri-axis digital accelerometer SPI interface" + depends on SPI + select IIO_KX022A + select REGMAP_SPI + help + Enable support for the Kionix KX022A digital tri-axis + accelerometer connected to SPI interface. + +config IIO_KX022A_I2C + tristate "Kionix KX022A tri-axis digital accelerometer I2C interface" + depends on I2C + select IIO_KX022A + select REGMAP_I2C + help + Enable support for the Kionix KX022A digital tri-axis + accelerometer connected to I2C interface. + config KXSD9 tristate "Kionix KXSD9 Accelerometer Driver" select IIO_BUFFER diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile index 5e45b5fa5ab50f3288611ce9eb4b5e6ff6ffa8d8..311ead9c3ef18afa683cafc7214bae7e3475e57e 100644 --- a/drivers/iio/accel/Makefile +++ b/drivers/iio/accel/Makefile @@ -40,6 +40,9 @@ obj-$(CONFIG_FXLS8962AF) += fxls8962af-core.o obj-$(CONFIG_FXLS8962AF_I2C) += fxls8962af-i2c.o obj-$(CONFIG_FXLS8962AF_SPI) += fxls8962af-spi.o obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o +obj-$(CONFIG_IIO_KX022A) += kionix-kx022a.o +obj-$(CONFIG_IIO_KX022A_I2C) += kionix-kx022a-i2c.o +obj-$(CONFIG_IIO_KX022A_SPI) += kionix-kx022a-spi.o obj-$(CONFIG_KXCJK1013) += kxcjk-1013.o obj-$(CONFIG_KXSD9) += kxsd9.o obj-$(CONFIG_KXSD9_SPI) += kxsd9-spi.o diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c index dfb8e2e5bdf58dc089b4e5e37653e8d17fe17eb6..d054721859b3b5f0c5840b17679b6fb1884305da 100644 --- a/drivers/iio/accel/adis16201.c +++ b/drivers/iio/accel/adis16201.c @@ -281,7 +281,7 @@ static int adis16201_probe(struct spi_device *spi) if (ret) return ret; - ret = adis_initial_startup(st); + ret = __adis_initial_startup(st); if (ret) return ret; diff --git a/drivers/iio/accel/adis16209.c b/drivers/iio/accel/adis16209.c index 5a9c6e2296f1db5eb5153cb95d246210414eda07..0035e4f4db63ff46ad6e096fdc0c5edd757a9827 100644 --- a/drivers/iio/accel/adis16209.c +++ b/drivers/iio/accel/adis16209.c @@ -291,7 +291,7 @@ static int adis16209_probe(struct spi_device *spi) if (ret) return ret; - ret = adis_initial_startup(st); + ret = __adis_initial_startup(st); if (ret) return ret; diff --git a/drivers/iio/accel/adxl355.h b/drivers/iio/accel/adxl355.h index 6dd49b13e4fd076c40c388c03fb4c1bee1c4e3bf..061e66dc7057bb37534f19f31cc3458b91433a70 100644 --- a/drivers/iio/accel/adxl355.h +++ b/drivers/iio/accel/adxl355.h @@ -10,12 +10,30 @@ #include +enum adxl355_device_type { + ADXL355, + ADXL359, +}; + +struct adxl355_fractional_type { + int integer; + int decimal; +}; + struct device; +struct adxl355_chip_info { + const char *name; + u8 part_id; + struct adxl355_fractional_type accel_scale; + struct adxl355_fractional_type temp_offset; +}; + extern const struct regmap_access_table adxl355_readable_regs_tbl; extern const struct regmap_access_table adxl355_writeable_regs_tbl; +extern const struct adxl355_chip_info adxl35x_chip_info[]; int adxl355_core_probe(struct device *dev, struct regmap *regmap, - const char *name); + const struct adxl355_chip_info *chip_info); #endif /* _ADXL355_H_ */ diff --git a/drivers/iio/accel/adxl355_core.c 
b/drivers/iio/accel/adxl355_core.c index 4bc648eac8b29afbeb651c3c80dfd25b31d35fe6..0c9225d18fb29bf176e3a100a1684850c9462a78 100644 --- a/drivers/iio/accel/adxl355_core.c +++ b/drivers/iio/accel/adxl355_core.c @@ -60,6 +60,7 @@ #define ADXL355_DEVID_AD_VAL 0xAD #define ADXL355_DEVID_MST_VAL 0x1D #define ADXL355_PARTID_VAL 0xED +#define ADXL359_PARTID_VAL 0xE9 #define ADXL355_RESET_CODE 0x52 static const struct regmap_range adxl355_read_reg_range[] = { @@ -83,6 +84,60 @@ const struct regmap_access_table adxl355_writeable_regs_tbl = { }; EXPORT_SYMBOL_NS_GPL(adxl355_writeable_regs_tbl, IIO_ADXL355); +const struct adxl355_chip_info adxl35x_chip_info[] = { + [ADXL355] = { + .name = "adxl355", + .part_id = ADXL355_PARTID_VAL, + /* + * At +/- 2g with 20-bit resolution, scale is given in datasheet + * as 3.9ug/LSB = 0.0000039 * 9.80665 = 0.00003824593 m/s^2. + */ + .accel_scale = { + .integer = 0, + .decimal = 38245, + }, + /* + * The datasheet defines an intercept of 1885 LSB at 25 degC + * and a slope of -9.05 LSB/C. The following formula can be used + * to find the temperature: + * Temp = ((RAW - 1885)/(-9.05)) + 25 but this doesn't follow + * the format of the IIO which is Temp = (RAW + OFFSET) * SCALE. + * Hence using some rearranging we get the scale as -110.497238 + * and offset as -2111.25. + */ + .temp_offset = { + .integer = -2111, + .decimal = 250000, + }, + }, + [ADXL359] = { + .name = "adxl359", + .part_id = ADXL359_PARTID_VAL, + /* + * At +/- 10g with 20-bit resolution, scale is given in datasheet + * as 19.5ug/LSB = 0.0000195 * 9.80665 = 0.00019122967 m/s^2. + */ + .accel_scale = { + .integer = 0, + .decimal = 191229, + }, + /* + * The datasheet defines an intercept of 1852 LSB at 25 degC + * and a slope of -9.05 LSB/C. The following formula can be used + * to find the temperature: + * Temp = ((RAW - 1852)/(-9.05)) + 25 but this doesn't follow + * the format of the IIO which is Temp = (RAW + OFFSET) * SCALE. + * Hence using some rearranging we get the scale as -110.497238 + * and offset as -2079.25. + */ + .temp_offset = { + .integer = -2079, + .decimal = 250000, + }, + }, +}; +EXPORT_SYMBOL_NS_GPL(adxl35x_chip_info, IIO_ADXL355); + enum adxl355_op_mode { ADXL355_MEASUREMENT, ADXL355_STANDBY, @@ -162,6 +217,7 @@ static const struct adxl355_chan_info adxl355_chans[] = { }; struct adxl355_data { + const struct adxl355_chip_info *chip_info; struct regmap *regmap; struct device *dev; struct mutex lock; /* lock to protect op_mode */ @@ -262,10 +318,8 @@ static int adxl355_setup(struct adxl355_data *data) if (ret) return ret; - if (regval != ADXL355_PARTID_VAL) { - dev_err(data->dev, "Invalid DEV ID 0x%02x\n", regval); - return -ENODEV; - } + if (regval != data->chip_info->part_id) + dev_warn(data->dev, "Invalid DEV ID 0x%02x\n", regval); /* * Perform a software reset to make sure the device is in a consistent @@ -458,33 +512,25 @@ static int adxl355_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_SCALE: switch (chan->type) { - /* - * The datasheet defines an intercept of 1885 LSB at 25 degC - * and a slope of -9.05 LSB/C. The following formula can be used - * to find the temperature: - * Temp = ((RAW - 1885)/(-9.05)) + 25 but this doesn't follow - * the format of the IIO which is Temp = (RAW + OFFSET) * SCALE. - * Hence using some rearranging we get the scale as -110.497238 - * and offset as -2111.25. - */ case IIO_TEMP: + /* + * Temperature scale is -110.497238. + * See the detailed explanation in adxl35x_chip_info + * definition above. 
+ */ *val = -110; *val2 = 497238; return IIO_VAL_INT_PLUS_MICRO; - /* - * At +/- 2g with 20-bit resolution, scale is given in datasheet - * as 3.9ug/LSB = 0.0000039 * 9.80665 = 0.00003824593 m/s^2. - */ case IIO_ACCEL: - *val = 0; - *val2 = 38245; + *val = data->chip_info->accel_scale.integer; + *val2 = data->chip_info->accel_scale.decimal; return IIO_VAL_INT_PLUS_NANO; default: return -EINVAL; } case IIO_CHAN_INFO_OFFSET: - *val = -2111; - *val2 = 250000; + *val = data->chip_info->temp_offset.integer; + *val2 = data->chip_info->temp_offset.decimal; return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_CALIBBIAS: *val = sign_extend32(data->calibbias[chan->address], 15); @@ -707,7 +753,7 @@ static int adxl355_probe_trigger(struct iio_dev *indio_dev, int irq) } int adxl355_core_probe(struct device *dev, struct regmap *regmap, - const char *name) + const struct adxl355_chip_info *chip_info) { struct adxl355_data *data; struct iio_dev *indio_dev; @@ -722,9 +768,10 @@ int adxl355_core_probe(struct device *dev, struct regmap *regmap, data->regmap = regmap; data->dev = dev; data->op_mode = ADXL355_STANDBY; + data->chip_info = chip_info; mutex_init(&data->lock); - indio_dev->name = name; + indio_dev->name = chip_info->name; indio_dev->info = &adxl355_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = adxl355_channels; diff --git a/drivers/iio/accel/adxl355_i2c.c b/drivers/iio/accel/adxl355_i2c.c index f67d57921c81bc6fceda775a5c7e1fef0d073f29..6cde5ccac06b8597366032731f5424db5b0106ce 100644 --- a/drivers/iio/accel/adxl355_i2c.c +++ b/drivers/iio/accel/adxl355_i2c.c @@ -23,6 +23,20 @@ static const struct regmap_config adxl355_i2c_regmap_config = { static int adxl355_i2c_probe(struct i2c_client *client) { struct regmap *regmap; + const struct adxl355_chip_info *chip_data; + const struct i2c_device_id *adxl355; + + chip_data = device_get_match_data(&client->dev); + if (!chip_data) { + adxl355 = to_i2c_driver(client->dev.driver)->id_table; + if (!adxl355) + return -EINVAL; + + chip_data = (void *)i2c_match_id(adxl355, client)->driver_data; + + if (!chip_data) + return -EINVAL; + } regmap = devm_regmap_init_i2c(client, &adxl355_i2c_regmap_config); if (IS_ERR(regmap)) { @@ -32,17 +46,19 @@ static int adxl355_i2c_probe(struct i2c_client *client) return PTR_ERR(regmap); } - return adxl355_core_probe(&client->dev, regmap, client->name); + return adxl355_core_probe(&client->dev, regmap, chip_data); } static const struct i2c_device_id adxl355_i2c_id[] = { - { "adxl355", 0 }, + { "adxl355", (kernel_ulong_t)&adxl35x_chip_info[ADXL355] }, + { "adxl359", (kernel_ulong_t)&adxl35x_chip_info[ADXL359] }, { } }; MODULE_DEVICE_TABLE(i2c, adxl355_i2c_id); static const struct of_device_id adxl355_of_match[] = { - { .compatible = "adi,adxl355" }, + { .compatible = "adi,adxl355", .data = &adxl35x_chip_info[ADXL355] }, + { .compatible = "adi,adxl359", .data = &adxl35x_chip_info[ADXL359] }, { } }; MODULE_DEVICE_TABLE(of, adxl355_of_match); diff --git a/drivers/iio/accel/adxl355_spi.c b/drivers/iio/accel/adxl355_spi.c index 5fe986ae03f63d11e1153b8f5081e8e2da96c075..fc99534d91ffcc3976e101ec910617108ec5912e 100644 --- a/drivers/iio/accel/adxl355_spi.c +++ b/drivers/iio/accel/adxl355_spi.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "adxl355.h" @@ -24,9 +25,17 @@ static const struct regmap_config adxl355_spi_regmap_config = { static int adxl355_spi_probe(struct spi_device *spi) { - const struct spi_device_id *id = spi_get_device_id(spi); + const struct adxl355_chip_info *chip_data; struct regmap 
*regmap; + chip_data = device_get_match_data(&spi->dev); + if (!chip_data) { + chip_data = (void *)spi_get_device_id(spi)->driver_data; + + if (!chip_data) + return -EINVAL; + } + regmap = devm_regmap_init_spi(spi, &adxl355_spi_regmap_config); if (IS_ERR(regmap)) { dev_err(&spi->dev, "Error initializing spi regmap: %ld\n", @@ -35,17 +44,19 @@ static int adxl355_spi_probe(struct spi_device *spi) return PTR_ERR(regmap); } - return adxl355_core_probe(&spi->dev, regmap, id->name); + return adxl355_core_probe(&spi->dev, regmap, chip_data); } static const struct spi_device_id adxl355_spi_id[] = { - { "adxl355", 0 }, + { "adxl355", (kernel_ulong_t)&adxl35x_chip_info[ADXL355] }, + { "adxl359", (kernel_ulong_t)&adxl35x_chip_info[ADXL359] }, { } }; MODULE_DEVICE_TABLE(spi, adxl355_spi_id); static const struct of_device_id adxl355_of_match[] = { - { .compatible = "adi,adxl355" }, + { .compatible = "adi,adxl355", .data = &adxl35x_chip_info[ADXL355] }, + { .compatible = "adi,adxl359", .data = &adxl35x_chip_info[ADXL359] }, { } }; MODULE_DEVICE_TABLE(of, adxl355_of_match); diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c index 7c7d780407937a2a7cb1897685b30db7b4f72d25..90b7ae6d42b7700c9cb0a328b093352a7cec419e 100644 --- a/drivers/iio/accel/adxl367.c +++ b/drivers/iio/accel/adxl367.c @@ -160,8 +160,6 @@ struct adxl367_state { struct device *dev; struct regmap *regmap; - struct regulator_bulk_data regulators[2]; - /* * Synchronize access to members of driver state, and ensure atomicity * of consecutive regmap operations. @@ -1185,32 +1183,19 @@ static ssize_t adxl367_get_fifo_watermark(struct device *dev, return sysfs_emit(buf, "%d\n", fifo_watermark); } -static ssize_t hwfifo_watermark_min_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%s\n", "1"); -} - -static ssize_t hwfifo_watermark_max_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%s\n", __stringify(ADXL367_FIFO_MAX_WATERMARK)); -} - -static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0); -static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0); +IIO_STATIC_CONST_DEVICE_ATTR(hwfifo_watermark_min, "1"); +IIO_STATIC_CONST_DEVICE_ATTR(hwfifo_watermark_max, + __stringify(ADXL367_FIFO_MAX_WATERMARK)); static IIO_DEVICE_ATTR(hwfifo_watermark, 0444, adxl367_get_fifo_watermark, NULL, 0); static IIO_DEVICE_ATTR(hwfifo_enabled, 0444, adxl367_get_fifo_enabled, NULL, 0); -static const struct attribute *adxl367_fifo_attributes[] = { - &iio_dev_attr_hwfifo_watermark_min.dev_attr.attr, - &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr, - &iio_dev_attr_hwfifo_watermark.dev_attr.attr, - &iio_dev_attr_hwfifo_enabled.dev_attr.attr, +static const struct iio_dev_attr *adxl367_fifo_attributes[] = { + &iio_dev_attr_hwfifo_watermark_min, + &iio_dev_attr_hwfifo_watermark_max, + &iio_dev_attr_hwfifo_watermark, + &iio_dev_attr_hwfifo_enabled, NULL, }; @@ -1487,16 +1472,10 @@ static int adxl367_setup(struct adxl367_state *st) return adxl367_set_measure_en(st, true); } -static void adxl367_disable_regulators(void *data) -{ - struct adxl367_state *st = data; - - regulator_bulk_disable(ARRAY_SIZE(st->regulators), st->regulators); -} - int adxl367_probe(struct device *dev, const struct adxl367_ops *ops, void *context, struct regmap *regmap, int irq) { + static const char * const regulator_names[] = { "vdd", "vddio" }; struct iio_dev *indio_dev; struct adxl367_state *st; int ret; @@ -1520,25 +1499,13 @@ int adxl367_probe(struct device *dev, const struct 
adxl367_ops *ops, indio_dev->info = &adxl367_info; indio_dev->modes = INDIO_DIRECT_MODE; - st->regulators[0].supply = "vdd"; - st->regulators[1].supply = "vddio"; - - ret = devm_regulator_bulk_get(st->dev, ARRAY_SIZE(st->regulators), - st->regulators); + ret = devm_regulator_bulk_get_enable(st->dev, + ARRAY_SIZE(regulator_names), + regulator_names); if (ret) return dev_err_probe(st->dev, ret, "Failed to get regulators\n"); - ret = regulator_bulk_enable(ARRAY_SIZE(st->regulators), st->regulators); - if (ret) - return dev_err_probe(st->dev, ret, - "Failed to enable regulators\n"); - - ret = devm_add_action_or_reset(st->dev, adxl367_disable_regulators, st); - if (ret) - return dev_err_probe(st->dev, ret, - "Failed to add regulators disable action\n"); - ret = regmap_write(st->regmap, ADXL367_REG_RESET, ADXL367_RESET_CODE); if (ret) return ret; diff --git a/drivers/iio/accel/adxl367_i2c.c b/drivers/iio/accel/adxl367_i2c.c index 3606efa25835eac4d0daf2f61d518ba140975716..070aad724abd41ea25a665efc3e48fc3b2891e62 100644 --- a/drivers/iio/accel/adxl367_i2c.c +++ b/drivers/iio/accel/adxl367_i2c.c @@ -41,8 +41,7 @@ static const struct adxl367_ops adxl367_i2c_ops = { .read_fifo = adxl367_i2c_read_fifo, }; -static int adxl367_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int adxl367_i2c_probe(struct i2c_client *client) { struct adxl367_i2c_state *st; struct regmap *regmap; @@ -78,7 +77,7 @@ static struct i2c_driver adxl367_i2c_driver = { .name = "adxl367_i2c", .of_match_table = adxl367_of_match, }, - .probe = adxl367_i2c_probe, + .probe_new = adxl367_i2c_probe, .id_table = adxl367_i2c_id, }; diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c index bc53af809d5de13c3b72b6c90b62c0b868e3b74f..c4193286eb0538e51c6741f32612807c9d8068fc 100644 --- a/drivers/iio/accel/adxl372.c +++ b/drivers/iio/accel/adxl372.c @@ -998,32 +998,19 @@ static ssize_t adxl372_get_fifo_watermark(struct device *dev, return sprintf(buf, "%d\n", st->watermark); } -static ssize_t hwfifo_watermark_min_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%s\n", "1"); -} - -static ssize_t hwfifo_watermark_max_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%s\n", __stringify(ADXL372_FIFO_SIZE)); -} - -static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0); -static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0); +IIO_STATIC_CONST_DEVICE_ATTR(hwfifo_watermark_min, "1"); +IIO_STATIC_CONST_DEVICE_ATTR(hwfifo_watermark_max, + __stringify(ADXL372_FIFO_SIZE)); static IIO_DEVICE_ATTR(hwfifo_watermark, 0444, adxl372_get_fifo_watermark, NULL, 0); static IIO_DEVICE_ATTR(hwfifo_enabled, 0444, adxl372_get_fifo_enabled, NULL, 0); -static const struct attribute *adxl372_fifo_attributes[] = { - &iio_dev_attr_hwfifo_watermark_min.dev_attr.attr, - &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr, - &iio_dev_attr_hwfifo_watermark.dev_attr.attr, - &iio_dev_attr_hwfifo_enabled.dev_attr.attr, +static const struct iio_dev_attr *adxl372_fifo_attributes[] = { + &iio_dev_attr_hwfifo_watermark_min, + &iio_dev_attr_hwfifo_watermark_max, + &iio_dev_attr_hwfifo_watermark, + &iio_dev_attr_hwfifo_enabled, NULL, }; diff --git a/drivers/iio/accel/adxl372_i2c.c b/drivers/iio/accel/adxl372_i2c.c index 4efb70a5fe40546a406c08d47276ca3bc30391c5..e5f310ea65fff3565eae9caa90364b2521983515 100644 --- a/drivers/iio/accel/adxl372_i2c.c +++ b/drivers/iio/accel/adxl372_i2c.c @@ -18,9 +18,9 @@ static const struct regmap_config 
adxl372_regmap_config = { .readable_noinc_reg = adxl372_readable_noinc_reg, }; -static int adxl372_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int adxl372_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct regmap *regmap; unsigned int regval; int ret; @@ -58,7 +58,7 @@ static struct i2c_driver adxl372_i2c_driver = { .name = "adxl372_i2c", .of_match_table = adxl372_of_match, }, - .probe = adxl372_i2c_probe, + .probe_new = adxl372_i2c_probe, .id_table = adxl372_i2c_id, }; diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c index d03fc3400f94e41b9aee585bb6c7389bb38d3ccd..eb697eeb4301ba8dba1ddd48d09c0364483c7371 100644 --- a/drivers/iio/accel/bma180.c +++ b/drivers/iio/accel/bma180.c @@ -921,9 +921,9 @@ static const struct iio_trigger_ops bma180_trigger_ops = { .reenable = bma180_trig_reen, }; -static int bma180_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bma180_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; struct bma180_data *data; struct iio_dev *indio_dev; @@ -1134,7 +1134,7 @@ static struct i2c_driver bma180_driver = { .pm = pm_sleep_ptr(&bma180_pm_ops), .of_match_table = bma180_of_match, }, - .probe = bma180_probe, + .probe_new = bma180_probe, .remove = bma180_remove, .id_table = bma180_ids, }; diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c index 6e4d10a7cd32210de606f70b42e845a4806e93ad..b612d0146d4dbb1892338025a75994df667424e8 100644 --- a/drivers/iio/accel/bma400_core.c +++ b/drivers/iio/accel/bma400_core.c @@ -876,14 +876,10 @@ static int bma400_init(struct bma400_data *data) ret = devm_regulator_bulk_get(data->dev, ARRAY_SIZE(data->regulators), data->regulators); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(data->dev, - "Failed to get regulators: %d\n", - ret); + if (ret) + return dev_err_probe(data->dev, ret, "Failed to get regulators: %d\n", + ret); - return ret; - } ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators), data->regulators); if (ret) { diff --git a/drivers/iio/accel/bma400_i2c.c b/drivers/iio/accel/bma400_i2c.c index 1ba2a982ea736f565ca65a19356cf85ab0e75feb..688b06dae669b1ab12f18f84467f0629f3acf7dc 100644 --- a/drivers/iio/accel/bma400_i2c.c +++ b/drivers/iio/accel/bma400_i2c.c @@ -13,9 +13,9 @@ #include "bma400.h" -static int bma400_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bma400_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct regmap *regmap; regmap = devm_regmap_init_i2c(client, &bma400_regmap_config); @@ -44,7 +44,7 @@ static struct i2c_driver bma400_i2c_driver = { .name = "bma400", .of_match_table = bma400_of_i2c_match, }, - .probe = bma400_i2c_probe, + .probe_new = bma400_i2c_probe, .id_table = bma400_i2c_ids, }; diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 92f8b139acce9e98a2a86f5355ed03d6f0978740..110591804b4c4fd5b3407e8b83d9b35ddf10b652 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -925,32 +925,19 @@ static const struct iio_chan_spec_ext_info bmc150_accel_ext_info[] = { { } }; -static ssize_t hwfifo_watermark_min_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%s\n", "1"); -} - -static ssize_t 
hwfifo_watermark_max_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%s\n", __stringify(BMC150_ACCEL_FIFO_LENGTH)); -} - -static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0); -static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0); +IIO_STATIC_CONST_DEVICE_ATTR(hwfifo_watermark_min, "1"); +IIO_STATIC_CONST_DEVICE_ATTR(hwfifo_watermark_max, + __stringify(BMC150_ACCEL_FIFO_LENGTH)); static IIO_DEVICE_ATTR(hwfifo_enabled, S_IRUGO, bmc150_accel_get_fifo_state, NULL, 0); static IIO_DEVICE_ATTR(hwfifo_watermark, S_IRUGO, bmc150_accel_get_fifo_watermark, NULL, 0); -static const struct attribute *bmc150_accel_fifo_attributes[] = { - &iio_dev_attr_hwfifo_watermark_min.dev_attr.attr, - &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr, - &iio_dev_attr_hwfifo_watermark.dev_attr.attr, - &iio_dev_attr_hwfifo_enabled.dev_attr.attr, +static const struct iio_dev_attr *bmc150_accel_fifo_attributes[] = { + &iio_dev_attr_hwfifo_watermark_min, + &iio_dev_attr_hwfifo_watermark_max, + &iio_dev_attr_hwfifo_watermark, + &iio_dev_attr_hwfifo_enabled, NULL, }; @@ -1678,7 +1665,7 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq, enum bmc150_type type, const char *name, bool block_supported) { - const struct attribute **fifo_attrs; + const struct iio_dev_attr **fifo_attrs; struct bmc150_accel_data *data; struct iio_dev *indio_dev; int ret; diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c index be8cc598b88e3071c7d09c7b5e2afb8ab45eeef5..509cab2bd6947249ca95d30ec2c00529fe75c6a2 100644 --- a/drivers/iio/accel/bmc150-accel-i2c.c +++ b/drivers/iio/accel/bmc150-accel-i2c.c @@ -171,9 +171,9 @@ static void bmc150_acpi_dual_accel_probe(struct i2c_client *client) {} static void bmc150_acpi_dual_accel_remove(struct i2c_client *client) {} #endif -static int bmc150_accel_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bmc150_accel_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct regmap *regmap; const char *name = NULL; enum bmc150_type type = BOSCH_UNKNOWN; @@ -269,7 +269,7 @@ static struct i2c_driver bmc150_accel_driver = { .acpi_match_table = ACPI_PTR(bmc150_accel_acpi_match), .pm = &bmc150_accel_pm_ops, }, - .probe = bmc150_accel_probe, + .probe_new = bmc150_accel_probe, .remove = bmc150_accel_remove, .id_table = bmc150_accel_id, }; diff --git a/drivers/iio/accel/da280.c b/drivers/iio/accel/da280.c index 04e9c567896474bc89f95f95e442c54526695ea0..38a7d811610e786496ee9d704a6d3efa3c1c619a 100644 --- a/drivers/iio/accel/da280.c +++ b/drivers/iio/accel/da280.c @@ -105,9 +105,9 @@ static void da280_disable(void *client) da280_enable(client, false); } -static int da280_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int da280_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); int ret; struct iio_dev *indio_dev; struct da280_data *data; @@ -184,7 +184,7 @@ static struct i2c_driver da280_driver = { .acpi_match_table = ACPI_PTR(da280_acpi_match), .pm = pm_sleep_ptr(&da280_pm_ops), }, - .probe = da280_probe, + .probe_new = da280_probe, .id_table = da280_i2c_id, }; diff --git a/drivers/iio/accel/da311.c b/drivers/iio/accel/da311.c index ec4e29d260f7ec61b6cd1b9ae473239cf5ee6bed..080335fa2ad655ccc339b226a3cd4d94afa8f55e 100644 --- a/drivers/iio/accel/da311.c +++ b/drivers/iio/accel/da311.c @@ -217,8 +217,7 @@ static void da311_disable(void *client) 
da311_enable(client, false); } -static int da311_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int da311_probe(struct i2c_client *client) { int ret; struct iio_dev *indio_dev; @@ -279,7 +278,7 @@ static struct i2c_driver da311_driver = { .name = "da311", .pm = pm_sleep_ptr(&da311_pm_ops), }, - .probe = da311_probe, + .probe_new = da311_probe, .id_table = da311_i2c_id, }; diff --git a/drivers/iio/accel/dmard06.c b/drivers/iio/accel/dmard06.c index 4b69c8530f5e615559cc0205069e27467a7c9aa6..7390509aaac0531308c9a917902df13b0498ef3b 100644 --- a/drivers/iio/accel/dmard06.c +++ b/drivers/iio/accel/dmard06.c @@ -125,8 +125,7 @@ static const struct iio_info dmard06_info = { .read_raw = dmard06_read_raw, }; -static int dmard06_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int dmard06_probe(struct i2c_client *client) { int ret; struct iio_dev *indio_dev; @@ -218,7 +217,7 @@ static const struct of_device_id dmard06_of_match[] = { MODULE_DEVICE_TABLE(of, dmard06_of_match); static struct i2c_driver dmard06_driver = { - .probe = dmard06_probe, + .probe_new = dmard06_probe, .id_table = dmard06_id, .driver = { .name = DMARD06_DRV_NAME, diff --git a/drivers/iio/accel/dmard09.c b/drivers/iio/accel/dmard09.c index cb0246ca72f323e38a12025a17251b4fef9c8121..4b7a537f617d51f69387ae21fbdf4ee90fd9fe1f 100644 --- a/drivers/iio/accel/dmard09.c +++ b/drivers/iio/accel/dmard09.c @@ -88,8 +88,7 @@ static const struct iio_info dmard09_info = { .read_raw = dmard09_read_raw, }; -static int dmard09_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int dmard09_probe(struct i2c_client *client) { int ret; struct iio_dev *indio_dev; @@ -136,7 +135,7 @@ static struct i2c_driver dmard09_driver = { .driver = { .name = DMARD09_DRV_NAME }, - .probe = dmard09_probe, + .probe_new = dmard09_probe, .id_table = dmard09_id, }; diff --git a/drivers/iio/accel/dmard10.c b/drivers/iio/accel/dmard10.c index 8ac62ec0a04a435996ddba0195ad19be6657b6b1..07e68aed8a13c7cb2a360743e6f71bdde7f41baa 100644 --- a/drivers/iio/accel/dmard10.c +++ b/drivers/iio/accel/dmard10.c @@ -175,8 +175,7 @@ static void dmard10_shutdown_cleanup(void *client) dmard10_shutdown(client); } -static int dmard10_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int dmard10_probe(struct i2c_client *client) { int ret; struct iio_dev *indio_dev; @@ -242,7 +241,7 @@ static struct i2c_driver dmard10_driver = { .name = "dmard10", .pm = pm_sleep_ptr(&dmard10_pm_ops), }, - .probe = dmard10_probe, + .probe_new = dmard10_probe, .id_table = dmard10_i2c_id, }; diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c index 8874d6d617253c04233abf0c4429e102849f6c4f..0d672b1469e8d1798bfe739f81394432a2ad1ef2 100644 --- a/drivers/iio/accel/fxls8962af-core.c +++ b/drivers/iio/accel/fxls8962af-core.c @@ -159,7 +159,6 @@ struct fxls8962af_chip_info { struct fxls8962af_data { struct regmap *regmap; const struct fxls8962af_chip_info *chip_info; - struct regulator *vdd_reg; struct { __le16 channels[3]; s64 ts __aligned(8); @@ -1051,13 +1050,6 @@ static irqreturn_t fxls8962af_interrupt(int irq, void *p) return IRQ_NONE; } -static void fxls8962af_regulator_disable(void *data_ptr) -{ - struct fxls8962af_data *data = data_ptr; - - regulator_disable(data->vdd_reg); -} - static void fxls8962af_pm_disable(void *dev_ptr) { struct device *dev = dev_ptr; @@ -1171,20 +1163,10 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq) if (ret) return 
ret; - data->vdd_reg = devm_regulator_get(dev, "vdd"); - if (IS_ERR(data->vdd_reg)) - return dev_err_probe(dev, PTR_ERR(data->vdd_reg), - "Failed to get vdd regulator\n"); - - ret = regulator_enable(data->vdd_reg); - if (ret) { - dev_err(dev, "Failed to enable vdd regulator: %d\n", ret); - return ret; - } - - ret = devm_add_action_or_reset(dev, fxls8962af_regulator_disable, data); + ret = devm_regulator_get_enable(dev, "vdd"); if (ret) - return ret; + return dev_err_probe(dev, ret, + "Failed to get vdd regulator\n"); ret = regmap_read(data->regmap, FXLS8962AF_WHO_AM_I, ®); if (ret) @@ -1241,7 +1223,7 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq) } EXPORT_SYMBOL_NS_GPL(fxls8962af_core_probe, IIO_FXLS8962AF); -static int __maybe_unused fxls8962af_runtime_suspend(struct device *dev) +static int fxls8962af_runtime_suspend(struct device *dev) { struct fxls8962af_data *data = iio_priv(dev_get_drvdata(dev)); int ret; @@ -1255,14 +1237,14 @@ static int __maybe_unused fxls8962af_runtime_suspend(struct device *dev) return 0; } -static int __maybe_unused fxls8962af_runtime_resume(struct device *dev) +static int fxls8962af_runtime_resume(struct device *dev) { struct fxls8962af_data *data = iio_priv(dev_get_drvdata(dev)); return fxls8962af_active(data); } -static int __maybe_unused fxls8962af_suspend(struct device *dev) +static int fxls8962af_suspend(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct fxls8962af_data *data = iio_priv(indio_dev); @@ -1283,7 +1265,7 @@ static int __maybe_unused fxls8962af_suspend(struct device *dev) return 0; } -static int __maybe_unused fxls8962af_resume(struct device *dev) +static int fxls8962af_resume(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct fxls8962af_data *data = iio_priv(indio_dev); @@ -1300,12 +1282,10 @@ static int __maybe_unused fxls8962af_resume(struct device *dev) return 0; } -const struct dev_pm_ops fxls8962af_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(fxls8962af_suspend, fxls8962af_resume) - SET_RUNTIME_PM_OPS(fxls8962af_runtime_suspend, - fxls8962af_runtime_resume, NULL) +EXPORT_NS_GPL_DEV_PM_OPS(fxls8962af_pm_ops, IIO_FXLS8962AF) = { + SYSTEM_SLEEP_PM_OPS(fxls8962af_suspend, fxls8962af_resume) + RUNTIME_PM_OPS(fxls8962af_runtime_suspend, fxls8962af_runtime_resume, NULL) }; -EXPORT_SYMBOL_NS_GPL(fxls8962af_pm_ops, IIO_FXLS8962AF); MODULE_AUTHOR("Sean Nyekjaer "); MODULE_DESCRIPTION("NXP FXLS8962AF/FXLS8964AF accelerometer driver"); diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c index 8fbadfea1620ba6ad8a5a8b9bb918cc89f1f7591..22640eaebac739ad53fac7f98151d44227a775f5 100644 --- a/drivers/iio/accel/fxls8962af-i2c.c +++ b/drivers/iio/accel/fxls8962af-i2c.c @@ -45,7 +45,7 @@ static struct i2c_driver fxls8962af_driver = { .driver = { .name = "fxls8962af_i2c", .of_match_table = fxls8962af_of_match, - .pm = &fxls8962af_pm_ops, + .pm = pm_ptr(&fxls8962af_pm_ops), }, .probe_new = fxls8962af_probe, .id_table = fxls8962af_id, diff --git a/drivers/iio/accel/fxls8962af-spi.c b/drivers/iio/accel/fxls8962af-spi.c index 885b3ab7fcb58f2794df26631c2e58d637d774e9..a0d192211839f8e0638bbdfec74942db6cbc7fee 100644 --- a/drivers/iio/accel/fxls8962af-spi.c +++ b/drivers/iio/accel/fxls8962af-spi.c @@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(spi, fxls8962af_spi_id_table); static struct spi_driver fxls8962af_driver = { .driver = { .name = "fxls8962af_spi", - .pm = &fxls8962af_pm_ops, + .pm = pm_ptr(&fxls8962af_pm_ops), .of_match_table = fxls8962af_spi_of_match, 
}, .probe = fxls8962af_probe, diff --git a/drivers/iio/accel/kionix-kx022a-i2c.c b/drivers/iio/accel/kionix-kx022a-i2c.c new file mode 100644 index 0000000000000000000000000000000000000000..e6fd02d931b60f3b3a6aa6c1fee56f9ae055a7cb --- /dev/null +++ b/drivers/iio/accel/kionix-kx022a-i2c.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 ROHM Semiconductors + * + * ROHM/KIONIX KX022A accelerometer driver + */ + +#include +#include +#include +#include + +#include "kionix-kx022a.h" + +static int kx022a_i2c_probe(struct i2c_client *i2c) +{ + struct device *dev = &i2c->dev; + struct regmap *regmap; + + if (!i2c->irq) { + dev_err(dev, "No IRQ configured\n"); + return -EINVAL; + } + + regmap = devm_regmap_init_i2c(i2c, &kx022a_regmap); + if (IS_ERR(regmap)) + return dev_err_probe(dev, PTR_ERR(regmap), + "Failed to initialize Regmap\n"); + + return kx022a_probe_internal(dev); +} + +static const struct of_device_id kx022a_of_match[] = { + { .compatible = "kionix,kx022a", }, + { } +}; +MODULE_DEVICE_TABLE(of, kx022a_of_match); + +static struct i2c_driver kx022a_i2c_driver = { + .driver = { + .name = "kx022a-i2c", + .of_match_table = kx022a_of_match, + }, + .probe_new = kx022a_i2c_probe, +}; +module_i2c_driver(kx022a_i2c_driver); + +MODULE_DESCRIPTION("ROHM/Kionix KX022A accelerometer driver"); +MODULE_AUTHOR("Matti Vaittinen "); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_KX022A); diff --git a/drivers/iio/accel/kionix-kx022a-spi.c b/drivers/iio/accel/kionix-kx022a-spi.c new file mode 100644 index 0000000000000000000000000000000000000000..9cd047f7b34678a6e754232e86501688d30b31d9 --- /dev/null +++ b/drivers/iio/accel/kionix-kx022a-spi.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 ROHM Semiconductors + * + * ROHM/KIONIX KX022A accelerometer driver + */ + +#include +#include +#include +#include + +#include "kionix-kx022a.h" + +static int kx022a_spi_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct regmap *regmap; + + if (!spi->irq) { + dev_err(dev, "No IRQ configured\n"); + return -EINVAL; + } + + regmap = devm_regmap_init_spi(spi, &kx022a_regmap); + if (IS_ERR(regmap)) + return dev_err_probe(dev, PTR_ERR(regmap), + "Failed to initialize Regmap\n"); + + return kx022a_probe_internal(dev); +} + +static const struct spi_device_id kx022a_id[] = { + { "kx022a" }, + { } +}; +MODULE_DEVICE_TABLE(spi, kx022a_id); + +static const struct of_device_id kx022a_of_match[] = { + { .compatible = "kionix,kx022a", }, + { } +}; +MODULE_DEVICE_TABLE(of, kx022a_of_match); + +static struct spi_driver kx022a_spi_driver = { + .driver = { + .name = "kx022a-spi", + .of_match_table = kx022a_of_match, + }, + .probe = kx022a_spi_probe, + .id_table = kx022a_id, +}; +module_spi_driver(kx022a_spi_driver); + +MODULE_DESCRIPTION("ROHM/Kionix kx022A accelerometer driver"); +MODULE_AUTHOR("Matti Vaittinen "); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_KX022A); diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c new file mode 100644 index 0000000000000000000000000000000000000000..f866859855cddc9433a421829b023952808013c8 --- /dev/null +++ b/drivers/iio/accel/kionix-kx022a.c @@ -0,0 +1,1142 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 ROHM Semiconductors + * + * ROHM/KIONIX KX022A accelerometer driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include 
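Nearly every I2C driver touched in this section gets the same mechanical conversion: the probe callback moves from .probe to .probe_new, which drops the i2c_device_id argument, and drivers that still need the matched id recover it with i2c_client_get_device_id(). A minimal sketch of the resulting shape, using a hypothetical "example" driver that is not part of this series:

	#include <linux/i2c.h>

	static int example_probe(struct i2c_client *client)
	{
		/* NULL if no entry in the driver's id_table matched this client */
		const struct i2c_device_id *id = i2c_client_get_device_id(client);

		if (id)
			dev_info(&client->dev, "matched id \"%s\"\n", id->name);

		return 0;
	}

	static struct i2c_driver example_driver = {
		.driver = { .name = "example" },
		.probe_new = example_probe,
	};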
+ +#include "kionix-kx022a.h" + +/* + * The KX022A has FIFO which can store 43 samples of HiRes data from 2 + * channels. This equals to 43 (samples) * 3 (channels) * 2 (bytes/sample) to + * 258 bytes of sample data. The quirk to know is that the amount of bytes in + * the FIFO is advertised via 8 bit register (max value 255). The thing to note + * is that full 258 bytes of data is indicated using the max value 255. + */ +#define KX022A_FIFO_LENGTH 43 +#define KX022A_FIFO_FULL_VALUE 255 +#define KX022A_SOFT_RESET_WAIT_TIME_US (5 * USEC_PER_MSEC) +#define KX022A_SOFT_RESET_TOTAL_WAIT_TIME_US (500 * USEC_PER_MSEC) + +/* 3 axis, 2 bytes of data for each of the axis */ +#define KX022A_FIFO_SAMPLES_SIZE_BYTES 6 +#define KX022A_FIFO_MAX_BYTES \ + (KX022A_FIFO_LENGTH * KX022A_FIFO_SAMPLES_SIZE_BYTES) + +enum { + KX022A_STATE_SAMPLE, + KX022A_STATE_FIFO, +}; + +/* Regmap configs */ +static const struct regmap_range kx022a_volatile_ranges[] = { + { + .range_min = KX022A_REG_XHP_L, + .range_max = KX022A_REG_COTR, + }, { + .range_min = KX022A_REG_TSCP, + .range_max = KX022A_REG_INT_REL, + }, { + /* The reset bit will be cleared by sensor */ + .range_min = KX022A_REG_CNTL2, + .range_max = KX022A_REG_CNTL2, + }, { + .range_min = KX022A_REG_BUF_STATUS_1, + .range_max = KX022A_REG_BUF_READ, + }, +}; + +static const struct regmap_access_table kx022a_volatile_regs = { + .yes_ranges = &kx022a_volatile_ranges[0], + .n_yes_ranges = ARRAY_SIZE(kx022a_volatile_ranges), +}; + +static const struct regmap_range kx022a_precious_ranges[] = { + { + .range_min = KX022A_REG_INT_REL, + .range_max = KX022A_REG_INT_REL, + }, +}; + +static const struct regmap_access_table kx022a_precious_regs = { + .yes_ranges = &kx022a_precious_ranges[0], + .n_yes_ranges = ARRAY_SIZE(kx022a_precious_ranges), +}; + +/* + * The HW does not set WHO_AM_I reg as read-only but we don't want to write it + * so we still include it in the read-only ranges. 
+ */ +static const struct regmap_range kx022a_read_only_ranges[] = { + { + .range_min = KX022A_REG_XHP_L, + .range_max = KX022A_REG_INT_REL, + }, { + .range_min = KX022A_REG_BUF_STATUS_1, + .range_max = KX022A_REG_BUF_STATUS_2, + }, { + .range_min = KX022A_REG_BUF_READ, + .range_max = KX022A_REG_BUF_READ, + }, +}; + +static const struct regmap_access_table kx022a_ro_regs = { + .no_ranges = &kx022a_read_only_ranges[0], + .n_no_ranges = ARRAY_SIZE(kx022a_read_only_ranges), +}; + +static const struct regmap_range kx022a_write_only_ranges[] = { + { + .range_min = KX022A_REG_BTS_WUF_TH, + .range_max = KX022A_REG_BTS_WUF_TH, + }, { + .range_min = KX022A_REG_MAN_WAKE, + .range_max = KX022A_REG_MAN_WAKE, + }, { + .range_min = KX022A_REG_SELF_TEST, + .range_max = KX022A_REG_SELF_TEST, + }, { + .range_min = KX022A_REG_BUF_CLEAR, + .range_max = KX022A_REG_BUF_CLEAR, + }, +}; + +static const struct regmap_access_table kx022a_wo_regs = { + .no_ranges = &kx022a_write_only_ranges[0], + .n_no_ranges = ARRAY_SIZE(kx022a_write_only_ranges), +}; + +static const struct regmap_range kx022a_noinc_read_ranges[] = { + { + .range_min = KX022A_REG_BUF_READ, + .range_max = KX022A_REG_BUF_READ, + }, +}; + +static const struct regmap_access_table kx022a_nir_regs = { + .yes_ranges = &kx022a_noinc_read_ranges[0], + .n_yes_ranges = ARRAY_SIZE(kx022a_noinc_read_ranges), +}; + +const struct regmap_config kx022a_regmap = { + .reg_bits = 8, + .val_bits = 8, + .volatile_table = &kx022a_volatile_regs, + .rd_table = &kx022a_wo_regs, + .wr_table = &kx022a_ro_regs, + .rd_noinc_table = &kx022a_nir_regs, + .precious_table = &kx022a_precious_regs, + .max_register = KX022A_MAX_REGISTER, + .cache_type = REGCACHE_RBTREE, +}; +EXPORT_SYMBOL_NS_GPL(kx022a_regmap, IIO_KX022A); + +struct kx022a_data { + struct regmap *regmap; + struct iio_trigger *trig; + struct device *dev; + struct iio_mount_matrix orientation; + int64_t timestamp, old_timestamp; + + int irq; + int inc_reg; + int ien_reg; + + unsigned int g_range; + unsigned int state; + unsigned int odr_ns; + + bool trigger_enabled; + /* + * Prevent toggling the sensor stby/active state (PC1 bit) in the + * middle of a configuration, or when the fifo is enabled. Also, + * protect the data stored/retrieved from this structure from + * concurrent accesses. 
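+ *
+ * A typical configuration sequence under this locking scheme looks
+ * roughly like the following (a sketch only, using the
+ * kx022a_turn_off_lock() / kx022a_turn_on_unlock() helpers defined
+ * further below; the register bit picked here is arbitrary):
+ *
+ *	ret = kx022a_turn_off_lock(data);	<- PC1 cleared, mutex held
+ *	if (ret)
+ *		return ret;
+ *	err = regmap_set_bits(data->regmap, KX022A_REG_CNTL, KX022A_MASK_RES);
+ *	ret = kx022a_turn_on_unlock(data);	<- PC1 set, mutex dropped
+ *	return err ?: ret;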
+ */ + struct mutex mutex; + u8 watermark; + + /* 3 x 16bit accel data + timestamp */ + __le16 buffer[8] __aligned(IIO_DMA_MINALIGN); + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; +}; + +static const struct iio_mount_matrix * +kx022a_get_mount_matrix(const struct iio_dev *idev, + const struct iio_chan_spec *chan) +{ + struct kx022a_data *data = iio_priv(idev); + + return &data->orientation; +} + +enum { + AXIS_X, + AXIS_Y, + AXIS_Z, + AXIS_MAX +}; + +static const unsigned long kx022a_scan_masks[] = { + BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z), 0 +}; + +static const struct iio_chan_spec_ext_info kx022a_ext_info[] = { + IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, kx022a_get_mount_matrix), + { } +}; + +#define KX022A_ACCEL_CHAN(axis, index) \ +{ \ + .type = IIO_ACCEL, \ + .modified = 1, \ + .channel2 = IIO_MOD_##axis, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .info_mask_shared_by_type_available = \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .ext_info = kx022a_ext_info, \ + .address = KX022A_REG_##axis##OUT_L, \ + .scan_index = index, \ + .scan_type = { \ + .sign = 's', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_LE, \ + }, \ +} + +static const struct iio_chan_spec kx022a_channels[] = { + KX022A_ACCEL_CHAN(X, 0), + KX022A_ACCEL_CHAN(Y, 1), + KX022A_ACCEL_CHAN(Z, 2), + IIO_CHAN_SOFT_TIMESTAMP(3), +}; + +/* + * The sensor HW can support ODR up to 1600 Hz, which is beyond what most of the + * Linux CPUs can handle without dropping samples. Also, the low power mode is + * not available for higher sample rates. Thus, the driver only supports 200 Hz + * and slower ODRs. The slowest is 0.78 Hz. + */ +static const int kx022a_accel_samp_freq_table[][2] = { + { 0, 780000 }, + { 1, 563000 }, + { 3, 125000 }, + { 6, 250000 }, + { 12, 500000 }, + { 25, 0 }, + { 50, 0 }, + { 100, 0 }, + { 200, 0 }, +}; + +static const unsigned int kx022a_odrs[] = { + 1282051282, + 639795266, + 320 * MEGA, + 160 * MEGA, + 80 * MEGA, + 40 * MEGA, + 20 * MEGA, + 10 * MEGA, + 5 * MEGA, +}; + +/* + * range is typically +-2G/4G/8G/16G, distributed over the amount of bits. + * The scale table can be calculated using + * (range / 2^bits) * g = (range / 2^bits) * 9.80665 m/s^2 + * => KX022A uses 16 bit (HiRes mode - assume the low 8 bits are zeroed + * in low-power mode(?) 
) + * => +/-2G => 4 / 2^16 * 9.80665 * 10^6 (to scale to micro) + * => +/-2G - 598.550415 + * +/-4G - 1197.10083 + * +/-8G - 2394.20166 + * +/-16G - 4788.40332 + */ +static const int kx022a_scale_table[][2] = { + { 598, 550415 }, + { 1197, 100830 }, + { 2394, 201660 }, + { 4788, 403320 }, +}; + +static int kx022a_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_SAMP_FREQ: + *vals = (const int *)kx022a_accel_samp_freq_table; + *length = ARRAY_SIZE(kx022a_accel_samp_freq_table) * + ARRAY_SIZE(kx022a_accel_samp_freq_table[0]); + *type = IIO_VAL_INT_PLUS_MICRO; + return IIO_AVAIL_LIST; + case IIO_CHAN_INFO_SCALE: + *vals = (const int *)kx022a_scale_table; + *length = ARRAY_SIZE(kx022a_scale_table) * + ARRAY_SIZE(kx022a_scale_table[0]); + *type = IIO_VAL_INT_PLUS_MICRO; + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + +#define KX022A_DEFAULT_PERIOD_NS (20 * NSEC_PER_MSEC) + +static void kx022a_reg2freq(unsigned int val, int *val1, int *val2) +{ + *val1 = kx022a_accel_samp_freq_table[val & KX022A_MASK_ODR][0]; + *val2 = kx022a_accel_samp_freq_table[val & KX022A_MASK_ODR][1]; +} + +static void kx022a_reg2scale(unsigned int val, unsigned int *val1, + unsigned int *val2) +{ + val &= KX022A_MASK_GSEL; + val >>= KX022A_GSEL_SHIFT; + + *val1 = kx022a_scale_table[val][0]; + *val2 = kx022a_scale_table[val][1]; +} + +static int kx022a_turn_on_off_unlocked(struct kx022a_data *data, bool on) +{ + int ret; + + if (on) + ret = regmap_set_bits(data->regmap, KX022A_REG_CNTL, + KX022A_MASK_PC1); + else + ret = regmap_clear_bits(data->regmap, KX022A_REG_CNTL, + KX022A_MASK_PC1); + if (ret) + dev_err(data->dev, "Failed to turn %s: %d\n", str_on_off(on), ret); + + return ret; +} + +static int kx022a_turn_off_lock(struct kx022a_data *data) +{ + int ret; + + mutex_lock(&data->mutex); + ret = kx022a_turn_on_off_unlocked(data, false); + if (ret) + mutex_unlock(&data->mutex); + + return ret; +} + +static int kx022a_turn_on_unlock(struct kx022a_data *data) +{ + int ret; + + ret = kx022a_turn_on_off_unlocked(data, true); + mutex_unlock(&data->mutex); + + return ret; +} + +static int kx022a_write_raw(struct iio_dev *idev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct kx022a_data *data = iio_priv(idev); + int ret, n; + + /* + * We should not allow changing the scale or frequency when the FIFO is + * running, as that would mix up the timestamp/scale of samples already + * in the buffer. If this turns out to be an issue we can later change + * the logic to internally flush the FIFO before reconfiguring, so the + * samples in the FIFO keep matching the freq/scale settings. (Such a + * setup could cause issues if users trust the watermark to be reached + * within a known time limit.)
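+ *
+ * What enforces this is the iio_device_claim_direct_mode() call right
+ * below: it fails with -EBUSY for as long as the buffer/FIFO is
+ * enabled, so a scale or frequency change can never race a running
+ * FIFO in the first place.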
+ */ + ret = iio_device_claim_direct_mode(idev); + if (ret) + return ret; + + switch (mask) { + case IIO_CHAN_INFO_SAMP_FREQ: + n = ARRAY_SIZE(kx022a_accel_samp_freq_table); + + while (n--) + if (val == kx022a_accel_samp_freq_table[n][0] && + val2 == kx022a_accel_samp_freq_table[n][1]) + break; + if (n < 0) { + ret = -EINVAL; + goto unlock_out; + } + ret = kx022a_turn_off_lock(data); + if (ret) + break; + + ret = regmap_update_bits(data->regmap, + KX022A_REG_ODCNTL, + KX022A_MASK_ODR, n); + data->odr_ns = kx022a_odrs[n]; + kx022a_turn_on_unlock(data); + break; + case IIO_CHAN_INFO_SCALE: + n = ARRAY_SIZE(kx022a_scale_table); + + while (n-- > 0) + if (val == kx022a_scale_table[n][0] && + val2 == kx022a_scale_table[n][1]) + break; + if (n < 0) { + ret = -EINVAL; + goto unlock_out; + } + + ret = kx022a_turn_off_lock(data); + if (ret) + break; + + ret = regmap_update_bits(data->regmap, KX022A_REG_CNTL, + KX022A_MASK_GSEL, + n << KX022A_GSEL_SHIFT); + kx022a_turn_on_unlock(data); + break; + default: + ret = -EINVAL; + break; + } + +unlock_out: + iio_device_release_direct_mode(idev); + + return ret; +} + +static int kx022a_fifo_set_wmi(struct kx022a_data *data) +{ + u8 threshold; + + threshold = data->watermark; + + return regmap_update_bits(data->regmap, KX022A_REG_BUF_CNTL1, + KX022A_MASK_WM_TH, threshold); +} + +static int kx022a_get_axis(struct kx022a_data *data, + struct iio_chan_spec const *chan, + int *val) +{ + int ret; + + ret = regmap_bulk_read(data->regmap, chan->address, &data->buffer[0], + sizeof(__le16)); + if (ret) + return ret; + + *val = le16_to_cpu(data->buffer[0]); + + return IIO_VAL_INT; +} + +static int kx022a_read_raw(struct iio_dev *idev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct kx022a_data *data = iio_priv(idev); + unsigned int regval; + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = iio_device_claim_direct_mode(idev); + if (ret) + return ret; + + mutex_lock(&data->mutex); + ret = kx022a_get_axis(data, chan, val); + mutex_unlock(&data->mutex); + + iio_device_release_direct_mode(idev); + + return ret; + + case IIO_CHAN_INFO_SAMP_FREQ: + ret = regmap_read(data->regmap, KX022A_REG_ODCNTL, ®val); + if (ret) + return ret; + + if ((regval & KX022A_MASK_ODR) > + ARRAY_SIZE(kx022a_accel_samp_freq_table)) { + dev_err(data->dev, "Invalid ODR\n"); + return -EINVAL; + } + + kx022a_reg2freq(regval, val, val2); + + return IIO_VAL_INT_PLUS_MICRO; + + case IIO_CHAN_INFO_SCALE: + ret = regmap_read(data->regmap, KX022A_REG_CNTL, ®val); + if (ret < 0) + return ret; + + kx022a_reg2scale(regval, val, val2); + + return IIO_VAL_INT_PLUS_MICRO; + } + + return -EINVAL; +}; + +static int kx022a_validate_trigger(struct iio_dev *idev, + struct iio_trigger *trig) +{ + struct kx022a_data *data = iio_priv(idev); + + if (data->trig != trig) + return -EINVAL; + + return 0; +} + +static int kx022a_set_watermark(struct iio_dev *idev, unsigned int val) +{ + struct kx022a_data *data = iio_priv(idev); + + if (val > KX022A_FIFO_LENGTH) + val = KX022A_FIFO_LENGTH; + + mutex_lock(&data->mutex); + data->watermark = val; + mutex_unlock(&data->mutex); + + return 0; +} + +static ssize_t hwfifo_enabled_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *idev = dev_to_iio_dev(dev); + struct kx022a_data *data = iio_priv(idev); + bool state; + + mutex_lock(&data->mutex); + state = data->state; + mutex_unlock(&data->mutex); + + return sysfs_emit(buf, "%d\n", state); +} + +static ssize_t hwfifo_watermark_show(struct device 
*dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *idev = dev_to_iio_dev(dev); + struct kx022a_data *data = iio_priv(idev); + int wm; + + mutex_lock(&data->mutex); + wm = data->watermark; + mutex_unlock(&data->mutex); + + return sysfs_emit(buf, "%d\n", wm); +} + +static IIO_DEVICE_ATTR_RO(hwfifo_enabled, 0); +static IIO_DEVICE_ATTR_RO(hwfifo_watermark, 0); + +static const struct iio_dev_attr *kx022a_fifo_attributes[] = { + &iio_dev_attr_hwfifo_watermark, + &iio_dev_attr_hwfifo_enabled, + NULL +}; + +static int kx022a_drop_fifo_contents(struct kx022a_data *data) +{ + /* + * We must clear the old timestamp to avoid computing the timestamps + * based on samples acquired when the buffer was last enabled. + * + * We don't need to protect the timestamp as long as we are only + * called from fifo-disable, where we can guarantee the sensor is not + * triggering interrupts and where the mutex is locked to prevent + * user-space access. + */ + data->timestamp = 0; + + return regmap_write(data->regmap, KX022A_REG_BUF_CLEAR, 0x0); +} + +static int __kx022a_fifo_flush(struct iio_dev *idev, unsigned int samples, + bool irq) +{ + struct kx022a_data *data = iio_priv(idev); + struct device *dev = regmap_get_device(data->regmap); + __le16 buffer[KX022A_FIFO_LENGTH * 3]; + uint64_t sample_period; + int count, fifo_bytes; + bool renable = false; + int64_t tstamp; + int ret, i; + + ret = regmap_read(data->regmap, KX022A_REG_BUF_STATUS_1, &fifo_bytes); + if (ret) { + dev_err(dev, "Error reading buffer status\n"); + return ret; + } + + /* Let's not overflow if we for some reason get a bogus value from I2C */ + if (fifo_bytes == KX022A_FIFO_FULL_VALUE) + fifo_bytes = KX022A_FIFO_MAX_BYTES; + + if (fifo_bytes % KX022A_FIFO_SAMPLES_SIZE_BYTES) + dev_warn(data->dev, "Bad FIFO alignment. Data may be corrupt\n"); + + count = fifo_bytes / KX022A_FIFO_SAMPLES_SIZE_BYTES; + if (!count) + return 0; + + /* + * If we are being called from the IRQ handler, we know the stored + * timestamp is fairly accurate for the last stored sample. Otherwise, + * if we are called as a result of a read operation from userspace and + * hence before the watermark interrupt was triggered, take a timestamp + * now. We can fall anywhere in between two samples so the error in this + * case is at most one sample period. + */ + if (!irq) { + /* + * We need to have the IRQ disabled or we risk messing up + * the timestamps. If we are run from the IRQ thread, then + * IRQF_ONESHOT has us covered - but if we are run by a + * user-space read we need to disable the IRQ to be on the + * safe side. We do this using a synchronous disable so that + * if the IRQ thread is running on another CPU we wait for it + * to finish. + */ + disable_irq(data->irq); + renable = true; + + data->old_timestamp = data->timestamp; + data->timestamp = iio_get_time_ns(idev); + } + + /* + * Approximate timestamps for each of the samples based on the sampling + * frequency, the timestamp of the last sample and the number of samples. + * + * We'd better not use the current bandwidth settings to compute the + * sample period. The real sample rate varies with the device and + * small variations add up when we store a large number of samples. + * + * To avoid this issue we compute the actual sample period ourselves + * based on the timestamp delta between the last two flush operations.
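+ *
+ * Worked example with hypothetical numbers: if the previous flush was
+ * stamped at 1.000 s, this one at 1.250 s, and count is 5, then
+ *
+ *	sample_period = (1.250 s - 1.000 s) / 5 = 50 ms
+ *	tstamp (first sample) = 1.250 s - (5 - 1) * 50 ms = 1.050 s
+ *
+ * and the five samples get stamped 1.050 s, 1.100 s, ..., 1.250 s,
+ * which is exactly what the code below computes.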
+ */ + if (data->old_timestamp) { + sample_period = data->timestamp - data->old_timestamp; + do_div(sample_period, count); + } else { + sample_period = data->odr_ns; + } + tstamp = data->timestamp - (count - 1) * sample_period; + + if (samples && count > samples) { + /* + * Here we leave some old samples to the buffer. We need to + * adjust the timestamp to match the first sample in the buffer + * or we will miscalculate the sample_period at next round. + */ + data->timestamp -= (count - samples) * sample_period; + count = samples; + } + + fifo_bytes = count * KX022A_FIFO_SAMPLES_SIZE_BYTES; + ret = regmap_noinc_read(data->regmap, KX022A_REG_BUF_READ, + &buffer[0], fifo_bytes); + if (ret) + goto renable_out; + + for (i = 0; i < count; i++) { + __le16 *sam = &buffer[i * 3]; + __le16 *chs; + int bit; + + chs = &data->scan.channels[0]; + for_each_set_bit(bit, idev->active_scan_mask, AXIS_MAX) + chs[bit] = sam[bit]; + + iio_push_to_buffers_with_timestamp(idev, &data->scan, tstamp); + + tstamp += sample_period; + } + + ret = count; + +renable_out: + if (renable) + enable_irq(data->irq); + + return ret; +} + +static int kx022a_fifo_flush(struct iio_dev *idev, unsigned int samples) +{ + struct kx022a_data *data = iio_priv(idev); + int ret; + + mutex_lock(&data->mutex); + ret = __kx022a_fifo_flush(idev, samples, false); + mutex_unlock(&data->mutex); + + return ret; +} + +static const struct iio_info kx022a_info = { + .read_raw = &kx022a_read_raw, + .write_raw = &kx022a_write_raw, + .read_avail = &kx022a_read_avail, + + .validate_trigger = kx022a_validate_trigger, + .hwfifo_set_watermark = kx022a_set_watermark, + .hwfifo_flush_to_buffer = kx022a_fifo_flush, +}; + +static int kx022a_set_drdy_irq(struct kx022a_data *data, bool en) +{ + if (en) + return regmap_set_bits(data->regmap, KX022A_REG_CNTL, + KX022A_MASK_DRDY); + + return regmap_clear_bits(data->regmap, KX022A_REG_CNTL, + KX022A_MASK_DRDY); +} + +static int kx022a_prepare_irq_pin(struct kx022a_data *data) +{ + /* Enable IRQ1 pin. 
Set polarity to active low */ + int mask = KX022A_MASK_IEN | KX022A_MASK_IPOL | + KX022A_MASK_ITYP; + int val = KX022A_MASK_IEN | KX022A_IPOL_LOW | + KX022A_ITYP_LEVEL; + int ret; + + ret = regmap_update_bits(data->regmap, data->inc_reg, mask, val); + if (ret) + return ret; + + /* We enable WMI to IRQ pin only at buffer_enable */ + mask = KX022A_MASK_INS2_DRDY; + + return regmap_set_bits(data->regmap, data->ien_reg, mask); +} + +static int kx022a_fifo_disable(struct kx022a_data *data) +{ + int ret = 0; + + ret = kx022a_turn_off_lock(data); + if (ret) + return ret; + + ret = regmap_clear_bits(data->regmap, data->ien_reg, KX022A_MASK_WMI); + if (ret) + goto unlock_out; + + ret = regmap_clear_bits(data->regmap, KX022A_REG_BUF_CNTL2, + KX022A_MASK_BUF_EN); + if (ret) + goto unlock_out; + + data->state &= ~KX022A_STATE_FIFO; + + kx022a_drop_fifo_contents(data); + + return kx022a_turn_on_unlock(data); + +unlock_out: + mutex_unlock(&data->mutex); + + return ret; +} + +static int kx022a_buffer_predisable(struct iio_dev *idev) +{ + struct kx022a_data *data = iio_priv(idev); + + if (iio_device_get_current_mode(idev) == INDIO_BUFFER_TRIGGERED) + return 0; + + return kx022a_fifo_disable(data); +} + +static int kx022a_fifo_enable(struct kx022a_data *data) +{ + int ret; + + ret = kx022a_turn_off_lock(data); + if (ret) + return ret; + + /* Update watermark to HW */ + ret = kx022a_fifo_set_wmi(data); + if (ret) + goto unlock_out; + + /* Enable buffer */ + ret = regmap_set_bits(data->regmap, KX022A_REG_BUF_CNTL2, + KX022A_MASK_BUF_EN); + if (ret) + goto unlock_out; + + data->state |= KX022A_STATE_FIFO; + ret = regmap_set_bits(data->regmap, data->ien_reg, + KX022A_MASK_WMI); + if (ret) + goto unlock_out; + + return kx022a_turn_on_unlock(data); + +unlock_out: + mutex_unlock(&data->mutex); + + return ret; +} + +static int kx022a_buffer_postenable(struct iio_dev *idev) +{ + struct kx022a_data *data = iio_priv(idev); + + /* + * If we use data-ready trigger, then the IRQ masks should be handled by + * trigger enable and the hardware buffer is not used but we just update + * results to the IIO fifo when data-ready triggers. + */ + if (iio_device_get_current_mode(idev) == INDIO_BUFFER_TRIGGERED) + return 0; + + return kx022a_fifo_enable(data); +} + +static const struct iio_buffer_setup_ops kx022a_buffer_ops = { + .postenable = kx022a_buffer_postenable, + .predisable = kx022a_buffer_predisable, +}; + +static irqreturn_t kx022a_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *idev = pf->indio_dev; + struct kx022a_data *data = iio_priv(idev); + int ret; + + ret = regmap_bulk_read(data->regmap, KX022A_REG_XOUT_L, data->buffer, + KX022A_FIFO_SAMPLES_SIZE_BYTES); + if (ret < 0) + goto err_read; + + iio_push_to_buffers_with_timestamp(idev, data->buffer, pf->timestamp); +err_read: + iio_trigger_notify_done(idev->trig); + + return IRQ_HANDLED; +} + +/* Get timestamps and wake the thread if we need to read data */ +static irqreturn_t kx022a_irq_handler(int irq, void *private) +{ + struct iio_dev *idev = private; + struct kx022a_data *data = iio_priv(idev); + + data->old_timestamp = data->timestamp; + data->timestamp = iio_get_time_ns(idev); + + if (data->state & KX022A_STATE_FIFO || data->trigger_enabled) + return IRQ_WAKE_THREAD; + + return IRQ_NONE; +} + +/* + * WMI and data-ready IRQs are acked when results are read. If we add + * TILT/WAKE or other IRQs - then we may need to implement the acking + * (which is racy). 
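+ *
+ * For reference, such an explicit ack would be a dummy read of the
+ * INT_REL register, roughly:
+ *
+ *	unsigned int dummy;
+ *
+ *	regmap_read(data->regmap, KX022A_REG_INT_REL, &dummy);
+ *
+ * Reading INT_REL releases the latched interrupt line; this is also
+ * why INT_REL sits in the precious-register table above, so that
+ * regmap debugfs cannot ack an interrupt behind the driver's back.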
+ */ +static irqreturn_t kx022a_irq_thread_handler(int irq, void *private) +{ + struct iio_dev *idev = private; + struct kx022a_data *data = iio_priv(idev); + irqreturn_t ret = IRQ_NONE; + + mutex_lock(&data->mutex); + + if (data->trigger_enabled) { + iio_trigger_poll_chained(data->trig); + ret = IRQ_HANDLED; + } + + if (data->state & KX022A_STATE_FIFO) { + int ok; + + ok = __kx022a_fifo_flush(idev, KX022A_FIFO_LENGTH, true); + if (ok > 0) + ret = IRQ_HANDLED; + } + + mutex_unlock(&data->mutex); + + return ret; +} + +static int kx022a_trigger_set_state(struct iio_trigger *trig, + bool state) +{ + struct kx022a_data *data = iio_trigger_get_drvdata(trig); + int ret = 0; + + mutex_lock(&data->mutex); + + if (data->trigger_enabled == state) + goto unlock_out; + + if (data->state & KX022A_STATE_FIFO) { + dev_warn(data->dev, "Can't set trigger when FIFO enabled\n"); + ret = -EBUSY; + goto unlock_out; + } + + ret = kx022a_turn_on_off_unlocked(data, false); + if (ret) + goto unlock_out; + + data->trigger_enabled = state; + ret = kx022a_set_drdy_irq(data, state); + if (ret) + goto unlock_out; + + ret = kx022a_turn_on_off_unlocked(data, true); + +unlock_out: + mutex_unlock(&data->mutex); + + return ret; +} + +static const struct iio_trigger_ops kx022a_trigger_ops = { + .set_trigger_state = kx022a_trigger_set_state, +}; + +static int kx022a_chip_init(struct kx022a_data *data) +{ + int ret, val; + + /* Reset the sensor */ + ret = regmap_write(data->regmap, KX022A_REG_CNTL2, KX022A_MASK_SRST); + if (ret) + return ret; + + /* + * I've seen I2C read failures if we poll too fast after the sensor + * reset. A slight delay gives the I2C block time to recover. + */ + msleep(1); + + ret = regmap_read_poll_timeout(data->regmap, KX022A_REG_CNTL2, val, + !(val & KX022A_MASK_SRST), + KX022A_SOFT_RESET_WAIT_TIME_US, + KX022A_SOFT_RESET_TOTAL_WAIT_TIME_US); + if (ret) { + dev_err(data->dev, "Sensor reset %s\n", + val & KX022A_MASK_SRST ? "timeout" : "fail"); + return ret; + } + + ret = regmap_reinit_cache(data->regmap, &kx022a_regmap); + if (ret) { + dev_err(data->dev, "Failed to reinit reg cache\n"); + return ret; + } + + /* Set data resolution to 16 bits */ + ret = regmap_set_bits(data->regmap, KX022A_REG_BUF_CNTL2, + KX022A_MASK_BRES16); + if (ret) { + dev_err(data->dev, "Failed to set data resolution\n"); + return ret; + } + + return kx022a_prepare_irq_pin(data); +} + +int kx022a_probe_internal(struct device *dev) +{ + static const char * const regulator_names[] = {"io-vdd", "vdd"}; + struct iio_trigger *indio_trig; + struct fwnode_handle *fwnode; + struct kx022a_data *data; + struct regmap *regmap; + unsigned int chip_id; + struct iio_dev *idev; + int ret, irq; + char *name; + + regmap = dev_get_regmap(dev, NULL); + if (!regmap) { + dev_err(dev, "no regmap\n"); + return -EINVAL; + } + + fwnode = dev_fwnode(dev); + if (!fwnode) + return -ENODEV; + + idev = devm_iio_device_alloc(dev, sizeof(*data)); + if (!idev) + return -ENOMEM; + + data = iio_priv(idev); + + /* + * VDD is the analog and digital domain voltage supply and + * IO_VDD is the digital I/O voltage supply.
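+ *
+ * As elsewhere in this series (adxl367, fxls8962af, kxcjk1013), the
+ * single devm_regulator_bulk_get_enable() call below replaces what
+ * used to be three steps plus a driver-local callback:
+ *
+ *	devm_regulator_bulk_get(dev, num, regulators);
+ *	regulator_bulk_enable(num, regulators);
+ *	devm_add_action_or_reset(dev, <driver disable callback>, data);
+ *
+ * The supplies are enabled for the lifetime of the device and released
+ * automatically on detach; -ENODEV is tolerated below so the supplies
+ * stay optional.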
+ */ + ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names), + regulator_names); + if (ret && ret != -ENODEV) + return dev_err_probe(dev, ret, "failed to enable regulator\n"); + + ret = regmap_read(regmap, KX022A_REG_WHO, &chip_id); + if (ret) + return dev_err_probe(dev, ret, "Failed to access sensor\n"); + + if (chip_id != KX022A_ID) { + dev_err(dev, "unsupported device 0x%x\n", chip_id); + return -EINVAL; + } + + irq = fwnode_irq_get_byname(fwnode, "INT1"); + if (irq > 0) { + data->inc_reg = KX022A_REG_INC1; + data->ien_reg = KX022A_REG_INC4; + } else { + irq = fwnode_irq_get_byname(fwnode, "INT2"); + if (irq <= 0) + return dev_err_probe(dev, irq, "No suitable IRQ\n"); + + data->inc_reg = KX022A_REG_INC5; + data->ien_reg = KX022A_REG_INC6; + } + + data->regmap = regmap; + data->dev = dev; + data->irq = irq; + data->odr_ns = KX022A_DEFAULT_PERIOD_NS; + mutex_init(&data->mutex); + + idev->channels = kx022a_channels; + idev->num_channels = ARRAY_SIZE(kx022a_channels); + idev->name = "kx022-accel"; + idev->info = &kx022a_info; + idev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE; + idev->available_scan_masks = kx022a_scan_masks; + + /* Read the mounting matrix, if present */ + ret = iio_read_mount_matrix(dev, &data->orientation); + if (ret) + return ret; + + /* The sensor must be turned off for configuration */ + ret = kx022a_turn_off_lock(data); + if (ret) + return ret; + + ret = kx022a_chip_init(data); + if (ret) { + mutex_unlock(&data->mutex); + return ret; + } + + ret = kx022a_turn_on_unlock(data); + if (ret) + return ret; + + ret = devm_iio_triggered_buffer_setup_ext(dev, idev, + &iio_pollfunc_store_time, + kx022a_trigger_handler, + IIO_BUFFER_DIRECTION_IN, + &kx022a_buffer_ops, + kx022a_fifo_attributes); + + if (ret) + return dev_err_probe(data->dev, ret, + "iio_triggered_buffer_setup_ext FAIL\n"); + indio_trig = devm_iio_trigger_alloc(dev, "%sdata-rdy-dev%d", idev->name, + iio_device_id(idev)); + if (!indio_trig) + return -ENOMEM; + + data->trig = indio_trig; + + indio_trig->ops = &kx022a_trigger_ops; + iio_trigger_set_drvdata(indio_trig, data); + + /* + * No need to check for NULL. request_threaded_irq() defaults to + * dev_name() should the alloc fail. 
+ */ + name = devm_kasprintf(data->dev, GFP_KERNEL, "%s-kx022a", + dev_name(data->dev)); + + ret = devm_request_threaded_irq(data->dev, irq, kx022a_irq_handler, + &kx022a_irq_thread_handler, + IRQF_ONESHOT, name, idev); + if (ret) + return dev_err_probe(data->dev, ret, "Could not request IRQ\n"); + + + ret = devm_iio_trigger_register(dev, indio_trig); + if (ret) + return dev_err_probe(data->dev, ret, + "Trigger registration failed\n"); + + ret = devm_iio_device_register(data->dev, idev); + if (ret < 0) + return dev_err_probe(dev, ret, + "Unable to register iio device\n"); + + return ret; +} +EXPORT_SYMBOL_NS_GPL(kx022a_probe_internal, IIO_KX022A); + +MODULE_DESCRIPTION("ROHM/Kionix KX022A accelerometer driver"); +MODULE_AUTHOR("Matti Vaittinen "); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/accel/kionix-kx022a.h b/drivers/iio/accel/kionix-kx022a.h new file mode 100644 index 0000000000000000000000000000000000000000..12424649d438b29b0fc7ef7a3c7120e029bcc33b --- /dev/null +++ b/drivers/iio/accel/kionix-kx022a.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2022 ROHM Semiconductors + * + * ROHM/KIONIX KX022A accelerometer driver + */ + +#ifndef _KX022A_H_ +#define _KX022A_H_ + +#include +#include + +#define KX022A_REG_WHO 0x0f +#define KX022A_ID 0xc8 + +#define KX022A_REG_CNTL2 0x19 +#define KX022A_MASK_SRST BIT(7) +#define KX022A_REG_CNTL 0x18 +#define KX022A_MASK_PC1 BIT(7) +#define KX022A_MASK_RES BIT(6) +#define KX022A_MASK_DRDY BIT(5) +#define KX022A_MASK_GSEL GENMASK(4, 3) +#define KX022A_GSEL_SHIFT 3 +#define KX022A_GSEL_2 0x0 +#define KX022A_GSEL_4 BIT(3) +#define KX022A_GSEL_8 BIT(4) +#define KX022A_GSEL_16 GENMASK(4, 3) + +#define KX022A_REG_INS2 0x13 +#define KX022A_MASK_INS2_DRDY BIT(4) +#define KX122_MASK_INS2_WMI BIT(5) + +#define KX022A_REG_XHP_L 0x0 +#define KX022A_REG_XOUT_L 0x06 +#define KX022A_REG_YOUT_L 0x08 +#define KX022A_REG_ZOUT_L 0x0a +#define KX022A_REG_COTR 0x0c +#define KX022A_REG_TSCP 0x10 +#define KX022A_REG_INT_REL 0x17 + +#define KX022A_REG_ODCNTL 0x1b + +#define KX022A_REG_BTS_WUF_TH 0x31 +#define KX022A_REG_MAN_WAKE 0x2c + +#define KX022A_REG_BUF_CNTL1 0x3a +#define KX022A_MASK_WM_TH GENMASK(6, 0) +#define KX022A_REG_BUF_CNTL2 0x3b +#define KX022A_MASK_BUF_EN BIT(7) +#define KX022A_MASK_BRES16 BIT(6) +#define KX022A_REG_BUF_STATUS_1 0x3c +#define KX022A_REG_BUF_STATUS_2 0x3d +#define KX022A_REG_BUF_CLEAR 0x3e +#define KX022A_REG_BUF_READ 0x3f +#define KX022A_MASK_ODR GENMASK(3, 0) +#define KX022A_ODR_SHIFT 3 +#define KX022A_FIFO_MAX_WMI_TH 41 + +#define KX022A_REG_INC1 0x1c +#define KX022A_REG_INC5 0x20 +#define KX022A_REG_INC6 0x21 +#define KX022A_MASK_IEN BIT(5) +#define KX022A_MASK_IPOL BIT(4) +#define KX022A_IPOL_LOW 0 +#define KX022A_IPOL_HIGH KX022A_MASK_IPOL1 +#define KX022A_MASK_ITYP BIT(3) +#define KX022A_ITYP_PULSE KX022A_MASK_ITYP +#define KX022A_ITYP_LEVEL 0 + +#define KX022A_REG_INC4 0x1f +#define KX022A_MASK_WMI BIT(5) + +#define KX022A_REG_SELF_TEST 0x60 +#define KX022A_MAX_REGISTER 0x60 + +struct device; + +int kx022a_probe_internal(struct device *dev); +extern const struct regmap_config kx022a_regmap; + +#endif diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index adc66b3615c0b4d14de88f24aa57fa4392e6442a..98da4bda22dfe0761d3d456147d354dd2b1921f8 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -241,7 +241,6 @@ enum kxcjk1013_axis { }; struct kxcjk1013_data { - struct regulator_bulk_data regulators[2]; struct i2c_client *client; struct 
iio_trigger *dready_trig; struct iio_trigger *motion_trig; @@ -1425,16 +1424,10 @@ static const char *kxcjk1013_match_acpi_device(struct device *dev, return dev_name(dev); } -static void kxcjk1013_disable_regulators(void *d) -{ - struct kxcjk1013_data *data = d; - - regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators); -} - -static int kxcjk1013_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int kxcjk1013_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); + static const char * const regulator_names[] = { "vdd", "vddio" }; struct kxcjk1013_data *data; struct iio_dev *indio_dev; struct kxcjk_1013_platform_data *pdata; @@ -1461,22 +1454,12 @@ static int kxcjk1013_probe(struct i2c_client *client, return ret; } - data->regulators[0].supply = "vdd"; - data->regulators[1].supply = "vddio"; - ret = devm_regulator_bulk_get(&client->dev, ARRAY_SIZE(data->regulators), - data->regulators); + ret = devm_regulator_bulk_get_enable(&client->dev, + ARRAY_SIZE(regulator_names), + regulator_names); if (ret) return dev_err_probe(&client->dev, ret, "Failed to get regulators\n"); - ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators), - data->regulators); - if (ret) - return ret; - - ret = devm_add_action_or_reset(&client->dev, kxcjk1013_disable_regulators, data); - if (ret) - return ret; - /* * A typical delay of 10ms is required for powering up * according to the data sheets of supported chips. @@ -1749,7 +1732,7 @@ static struct i2c_driver kxcjk1013_driver = { .of_match_table = kxcjk1013_of_match, .pm = &kxcjk1013_pm_ops, }, - .probe = kxcjk1013_probe, + .probe_new = kxcjk1013_probe, .remove = kxcjk1013_remove, .id_table = kxcjk1013_id, }; diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c index 61346ea8ef194d448001745af422004809104f13..6b3683ddce36d92dd2d09cd03fd22b3e91949bfd 100644 --- a/drivers/iio/accel/kxsd9-i2c.c +++ b/drivers/iio/accel/kxsd9-i2c.c @@ -10,8 +10,7 @@ #include "kxsd9.h" -static int kxsd9_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int kxsd9_i2c_probe(struct i2c_client *i2c) { static const struct regmap_config config = { .reg_bits = 8, @@ -55,7 +54,7 @@ static struct i2c_driver kxsd9_i2c_driver = { .of_match_table = kxsd9_of_match, .pm = pm_ptr(&kxsd9_dev_pm_ops), }, - .probe = kxsd9_i2c_probe, + .probe_new = kxsd9_i2c_probe, .remove = kxsd9_i2c_remove, .id_table = kxsd9_i2c_id, }; diff --git a/drivers/iio/accel/mc3230.c b/drivers/iio/accel/mc3230.c index 2462000e05198455eae1f170da516f753637df91..efc21871de42b7852526151675825071311408f6 100644 --- a/drivers/iio/accel/mc3230.c +++ b/drivers/iio/accel/mc3230.c @@ -106,8 +106,7 @@ static const struct iio_info mc3230_info = { .read_raw = mc3230_read_raw, }; -static int mc3230_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mc3230_probe(struct i2c_client *client) { int ret; struct iio_dev *indio_dev; @@ -191,7 +190,7 @@ static struct i2c_driver mc3230_driver = { .name = "mc3230", .pm = pm_sleep_ptr(&mc3230_pm_ops), }, - .probe = mc3230_probe, + .probe_new = mc3230_probe, .remove = mc3230_remove, .id_table = mc3230_i2c_id, }; diff --git a/drivers/iio/accel/mma7455_i2c.c b/drivers/iio/accel/mma7455_i2c.c index c63b321b01cda596391ecab4dfe77cb623dc8549..a3864dbe27613c20686c664db94891d003b94fe1 100644 --- a/drivers/iio/accel/mma7455_i2c.c +++ b/drivers/iio/accel/mma7455_i2c.c @@ -10,9 +10,9 @@ #include "mma7455.h" -static int mma7455_i2c_probe(struct i2c_client 
*i2c, - const struct i2c_device_id *id) +static int mma7455_i2c_probe(struct i2c_client *i2c) { + const struct i2c_device_id *id = i2c_client_get_device_id(i2c); struct regmap *regmap; const char *name = NULL; @@ -46,7 +46,7 @@ static const struct of_device_id mma7455_of_match[] = { MODULE_DEVICE_TABLE(of, mma7455_of_match); static struct i2c_driver mma7455_i2c_driver = { - .probe = mma7455_i2c_probe, + .probe_new = mma7455_i2c_probe, .remove = mma7455_i2c_remove, .id_table = mma7455_i2c_ids, .driver = { diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c index 85829990bbadad92a708f54cae3a4733ad7c5ad5..b279ca4dcdc05637d2e7d74ef608bfc805c25b95 100644 --- a/drivers/iio/accel/mma7660.c +++ b/drivers/iio/accel/mma7660.c @@ -169,8 +169,7 @@ static const struct iio_info mma7660_info = { .attrs = &mma7660_attribute_group, }; -static int mma7660_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mma7660_probe(struct i2c_client *client) { int ret; struct iio_dev *indio_dev; @@ -267,7 +266,7 @@ static struct i2c_driver mma7660_driver = { .of_match_table = mma7660_of_match, .acpi_match_table = mma7660_acpi_id, }, - .probe = mma7660_probe, + .probe_new = mma7660_probe, .remove = mma7660_remove, .id_table = mma7660_i2c_id, }; diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 3ba28c2ff68a368306ce6f42a4baec4181cc41b5..f97fb68e3a711dd803dfac484b88394038c80bc6 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1545,9 +1545,9 @@ static const struct of_device_id mma8452_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, mma8452_dt_ids); -static int mma8452_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mma8452_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct mma8452_data *data; struct iio_dev *indio_dev; int ret; @@ -1846,7 +1846,7 @@ static struct i2c_driver mma8452_driver = { .of_match_table = mma8452_dt_ids, .pm = &mma8452_pm_ops, }, - .probe = mma8452_probe, + .probe_new = mma8452_probe, .remove = mma8452_remove, .id_table = mma8452_id, }; diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c index f7a793f4a8e342dcdf111fa62746eee611df583e..aa4f5842859ef863ea5769966b76642a76ef1402 100644 --- a/drivers/iio/accel/mma9551.c +++ b/drivers/iio/accel/mma9551.c @@ -446,9 +446,9 @@ static const char *mma9551_match_acpi_device(struct device *dev) return dev_name(dev); } -static int mma9551_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mma9551_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct mma9551_data *data; struct iio_dev *indio_dev; const char *name = NULL; @@ -607,7 +607,7 @@ static struct i2c_driver mma9551_driver = { .acpi_match_table = ACPI_PTR(mma9551_acpi_match), .pm = pm_ptr(&mma9551_pm_ops), }, - .probe = mma9551_probe, + .probe_new = mma9551_probe, .remove = mma9551_remove, .id_table = mma9551_id, }; diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c index 2da0e005b13eb73d21224683733c277a0942ad57..0af578ef9d3d0e5e218beef8586c4b58dc417b29 100644 --- a/drivers/iio/accel/mma9553.c +++ b/drivers/iio/accel/mma9553.c @@ -1073,9 +1073,9 @@ static const char *mma9553_match_acpi_device(struct device *dev) return dev_name(dev); } -static int mma9553_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mma9553_probe(struct i2c_client *client) { + const struct i2c_device_id 
*id = i2c_client_get_device_id(client); struct mma9553_data *data; struct iio_dev *indio_dev; const char *name = NULL; @@ -1246,7 +1246,7 @@ static struct i2c_driver mma9553_driver = { .acpi_match_table = ACPI_PTR(mma9553_acpi_match), .pm = pm_ptr(&mma9553_pm_ops), }, - .probe = mma9553_probe, + .probe_new = mma9553_probe, .remove = mma9553_remove, .id_table = mma9553_id, }; diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c index 2fded37591717b7090dd996e5a8c85b9bdd7b8fa..af94d3adf6d81608557256873735bfb12114e080 100644 --- a/drivers/iio/accel/msa311.c +++ b/drivers/iio/accel/msa311.c @@ -351,7 +351,6 @@ static const struct regmap_config msa311_regmap_config = { * @chip_name: Chip name in the format "msa311-%02x" % partid * @new_data_trig: Optional NEW_DATA interrupt driven trigger used * to notify external consumers a new sample is ready - * @vdd: Optional external voltage regulator for the device power supply */ struct msa311_priv { struct regmap *regs; @@ -362,7 +361,6 @@ struct msa311_priv { char *chip_name; struct iio_trigger *new_data_trig; - struct regulator *vdd; }; enum msa311_si { @@ -1146,11 +1144,6 @@ static void msa311_powerdown(void *msa311) msa311_set_pwr_mode(msa311, MSA311_PWR_MODE_SUSPEND); } -static void msa311_vdd_disable(void *vdd) -{ - regulator_disable(vdd); -} - static int msa311_probe(struct i2c_client *i2c) { struct device *dev = &i2c->dev; @@ -1173,19 +1166,9 @@ static int msa311_probe(struct i2c_client *i2c) mutex_init(&msa311->lock); - msa311->vdd = devm_regulator_get(dev, "vdd"); - if (IS_ERR(msa311->vdd)) - return dev_err_probe(dev, PTR_ERR(msa311->vdd), - "can't get vdd supply\n"); - - err = regulator_enable(msa311->vdd); + err = devm_regulator_get_enable(dev, "vdd"); if (err) - return dev_err_probe(dev, err, "can't enable vdd supply\n"); - - err = devm_add_action_or_reset(dev, msa311_vdd_disable, msa311->vdd); - if (err) - return dev_err_probe(dev, err, - "can't add vdd disable action\n"); + return dev_err_probe(dev, err, "can't get vdd supply\n"); err = msa311_check_partid(msa311); if (err) diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c index df600d2917c0ad56250922ef4bc431989bf28796..b146fc82738f6734eb45d8a18b939574094eeb7c 100644 --- a/drivers/iio/accel/mxc4005.c +++ b/drivers/iio/accel/mxc4005.c @@ -385,8 +385,7 @@ static int mxc4005_chip_init(struct mxc4005_data *data) return 0; } -static int mxc4005_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mxc4005_probe(struct i2c_client *client) { struct mxc4005_data *data; struct iio_dev *indio_dev; @@ -489,7 +488,7 @@ static struct i2c_driver mxc4005_driver = { .name = MXC4005_DRV_NAME, .acpi_match_table = ACPI_PTR(mxc4005_acpi_match), }, - .probe = mxc4005_probe, + .probe_new = mxc4005_probe, .id_table = mxc4005_id, }; diff --git a/drivers/iio/accel/mxc6255.c b/drivers/iio/accel/mxc6255.c index 9aeeadc420d310c13b9ab27af0fd2dd5e252f4f2..aa2e660545f841bd8bae0a29c0a9e9dc8a4d8d73 100644 --- a/drivers/iio/accel/mxc6255.c +++ b/drivers/iio/accel/mxc6255.c @@ -113,8 +113,7 @@ static const struct regmap_config mxc6255_regmap_config = { .readable_reg = mxc6255_is_readable_reg, }; -static int mxc6255_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mxc6255_probe(struct i2c_client *client) { struct mxc6255_data *data; struct iio_dev *indio_dev; @@ -184,7 +183,7 @@ static struct i2c_driver mxc6255_driver = { .name = MXC6255_DRV_NAME, .acpi_match_table = ACPI_PTR(mxc6255_acpi_match), }, - .probe = mxc6255_probe, + 
.probe_new = mxc6255_probe,
 	.id_table = mxc6255_id,
 };
 
diff --git a/drivers/iio/accel/sca3300.c b/drivers/iio/accel/sca3300.c
index eaa0c9cfda44c852a2230e6a26cf8a9729c4c5a0..306482b70fad7e9b1b62a7253662c1cbfd00192c 100644
--- a/drivers/iio/accel/sca3300.c
+++ b/drivers/iio/accel/sca3300.c
@@ -679,12 +679,20 @@ static const struct of_device_id sca3300_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, sca3300_dt_ids);
 
+static const struct spi_device_id sca3300_ids[] = {
+	{ "sca3300" },
+	{ "scl3300" },
+	{}
+};
+MODULE_DEVICE_TABLE(spi, sca3300_ids);
+
 static struct spi_driver sca3300_driver = {
-	.driver = {
+	.driver = {
 		.name = SCA3300_ALIAS,
 		.of_match_table = sca3300_dt_ids,
 	},
-	.probe = sca3300_probe,
+	.probe = sca3300_probe,
+	.id_table = sca3300_ids,
 };
 module_spi_driver(sca3300_driver);
 
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index 7b1d6fb692b371e08e56e90682bf38ce2339eb74..68f680db750513cac3ae4677a84e9bf8650b76c6 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -498,8 +498,7 @@ static const struct iio_buffer_setup_ops stk8312_buffer_setup_ops = {
 	.postdisable = stk8312_buffer_postdisable,
 };
 
-static int stk8312_probe(struct i2c_client *client,
-			 const struct i2c_device_id *id)
+static int stk8312_probe(struct i2c_client *client)
 {
 	int ret;
 	struct iio_dev *indio_dev;
@@ -645,7 +644,7 @@ static struct i2c_driver stk8312_driver = {
 		.name = STK8312_DRIVER_NAME,
 		.pm = pm_sleep_ptr(&stk8312_pm_ops),
 	},
-	.probe = stk8312_probe,
+	.probe_new = stk8312_probe,
 	.remove = stk8312_remove,
 	.id_table = stk8312_i2c_id,
 };
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 2f5e4ab2a6e7d190eae12da5e4176c2acdf5a211..44f6e0fbdfcc6441394185837dcbf9a64786f7c5 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -379,8 +379,7 @@ static const struct iio_buffer_setup_ops stk8ba50_buffer_setup_ops = {
 	.postdisable = stk8ba50_buffer_postdisable,
 };
 
-static int stk8ba50_probe(struct i2c_client *client,
-			  const struct i2c_device_id *id)
+static int stk8ba50_probe(struct i2c_client *client)
 {
 	int ret;
 	struct iio_dev *indio_dev;
@@ -544,7 +543,7 @@ static struct i2c_driver stk8ba50_driver = {
 		.pm = pm_sleep_ptr(&stk8ba50_pm_ops),
 		.acpi_match_table = ACPI_PTR(stk8ba50_acpi_id),
 	},
-	.probe = stk8ba50_probe,
+	.probe_new = stk8ba50_probe,
 	.remove = stk8ba50_remove,
 	.id_table = stk8ba50_i2c_id,
 };
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 791612ca601207b58baf709068741845aee46132..63f80d747cbd215920d913681c688a5c9f421768 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -21,6 +21,21 @@ config AD_SIGMA_DELTA
 	select IIO_BUFFER
 	select IIO_TRIGGERED_BUFFER
 
+config AD4130
+	tristate "Analog Devices AD4130 ADC Driver"
+	depends on SPI
+	depends on GPIOLIB
+	select IIO_BUFFER
+	select IIO_KFIFO_BUF
+	select REGMAP_SPI
+	depends on COMMON_CLK
+	help
+	  Say yes here to build support for Analog Devices AD4130-8 SPI analog
+	  to digital converters (ADC).
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called ad4130.
+
 config AD7091R5
 	tristate "Analog Devices AD7091R5 ADC Driver"
 	depends on I2C
@@ -667,6 +682,19 @@ config MAX11205
 
 	  To compile this driver as a module, choose M here: the module will be
 	  called max11205.
 
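The accelerometer hunks above all apply the same i2c conversion: the probe callback drops its const struct i2c_device_id * parameter, drivers that still consult the matched id fetch it with i2c_client_get_device_id(), and the callback is registered through .probe_new. A minimal sketch of the converted shape (hypothetical mydrv names, not part of this series):

	static const struct i2c_device_id mydrv_id[] = {
		{ "mydrv" },
		{ }
	};
	MODULE_DEVICE_TABLE(i2c, mydrv_id);

	static int mydrv_probe(struct i2c_client *client)
	{
		/* Only needed when the matched id entry is still consulted. */
		const struct i2c_device_id *id = i2c_client_get_device_id(client);

		dev_info(&client->dev, "probed as %s\n", id ? id->name : "unknown");
		return 0;
	}

	static struct i2c_driver mydrv_driver = {
		.driver = {
			.name = "mydrv",
		},
		.probe_new = mydrv_probe,	/* was: .probe with (client, id) */
		.id_table = mydrv_id,
	};
	module_i2c_driver(mydrv_driver);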
+config MAX11410 + tristate "Analog Devices MAX11410 ADC driver" + depends on SPI + select REGMAP_SPI + select IIO_BUFFER + select IIO_TRIGGER + select IIO_TRIGGERED_BUFFER + help + Say yes here to build support for Analog Devices MAX11410 ADCs. + + To compile this driver as a module, choose M here: the module will be + called max11410. + config MAX1241 tristate "Maxim max1241 ADC driver" depends on SPI_MASTER @@ -752,6 +780,18 @@ config MEDIATEK_MT6360_ADC is used in smartphones and tablets and supports a 11 channel general purpose ADC. +config MEDIATEK_MT6370_ADC + tristate "MediaTek MT6370 ADC driver" + depends on MFD_MT6370 + help + Say yes here to enable MediaTek MT6370 ADC support. + + This ADC driver provides 9 channels for system monitoring (charger + current, voltage, and temperature). + + This driver can also be built as a module. If so, the module + will be called "mt6370-adc". + config MEDIATEK_MT6577_AUXADC tristate "MediaTek AUXADC driver" depends on ARCH_MEDIATEK || COMPILE_TEST diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile index 46caba7a010c9e1974032dee2dbdd534562bf9ff..4ef41a7dfac6ea9f2bc19cd013f6cb4e2dafab75 100644 --- a/drivers/iio/adc/Makefile +++ b/drivers/iio/adc/Makefile @@ -6,6 +6,7 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o +obj-$(CONFIG_AD4130) += ad4130.o obj-$(CONFIG_AD7091R5) += ad7091r5.o ad7091r-base.o obj-$(CONFIG_AD7124) += ad7124.o obj-$(CONFIG_AD7192) += ad7192.o @@ -62,6 +63,7 @@ obj-$(CONFIG_MAX1027) += max1027.o obj-$(CONFIG_MAX11100) += max11100.o obj-$(CONFIG_MAX1118) += max1118.o obj-$(CONFIG_MAX11205) += max11205.o +obj-$(CONFIG_MAX11410) += max11410.o obj-$(CONFIG_MAX1241) += max1241.o obj-$(CONFIG_MAX1363) += max1363.o obj-$(CONFIG_MAX9611) += max9611.o @@ -69,6 +71,7 @@ obj-$(CONFIG_MCP320X) += mcp320x.o obj-$(CONFIG_MCP3422) += mcp3422.o obj-$(CONFIG_MCP3911) += mcp3911.o obj-$(CONFIG_MEDIATEK_MT6360_ADC) += mt6360-adc.o +obj-$(CONFIG_MEDIATEK_MT6370_ADC) += mt6370-adc.o obj-$(CONFIG_MEDIATEK_MT6577_AUXADC) += mt6577_auxadc.o obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o obj-$(CONFIG_MESON_SARADC) += meson_saradc.o diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c new file mode 100644 index 0000000000000000000000000000000000000000..38394341fd6e7401a94662fd5524eb512ea402b4 --- /dev/null +++ b/drivers/iio/adc/ad4130.c @@ -0,0 +1,2100 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2022 Analog Devices, Inc. 
+ * Author: Cosmin Tanislav
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+
+#include <asm/div64.h>
+#include <asm/unaligned.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/sysfs.h>
+
+#define AD4130_NAME	"ad4130"
+
+#define AD4130_COMMS_READ_MASK	BIT(6)
+
+#define AD4130_STATUS_REG	0x00
+
+#define AD4130_ADC_CONTROL_REG	0x01
+#define AD4130_ADC_CONTROL_BIPOLAR_MASK	BIT(14)
+#define AD4130_ADC_CONTROL_INT_REF_VAL_MASK	BIT(13)
+#define AD4130_INT_REF_2_5V	2500000
+#define AD4130_INT_REF_1_25V	1250000
+#define AD4130_ADC_CONTROL_CSB_EN_MASK	BIT(9)
+#define AD4130_ADC_CONTROL_INT_REF_EN_MASK	BIT(8)
+#define AD4130_ADC_CONTROL_MODE_MASK	GENMASK(5, 2)
+#define AD4130_ADC_CONTROL_MCLK_SEL_MASK	GENMASK(1, 0)
+#define AD4130_MCLK_FREQ_76_8KHZ	76800
+#define AD4130_MCLK_FREQ_153_6KHZ	153600
+
+#define AD4130_DATA_REG	0x02
+
+#define AD4130_IO_CONTROL_REG	0x03
+#define AD4130_IO_CONTROL_INT_PIN_SEL_MASK	GENMASK(9, 8)
+#define AD4130_IO_CONTROL_GPIO_DATA_MASK	GENMASK(7, 4)
+#define AD4130_IO_CONTROL_GPIO_CTRL_MASK	GENMASK(3, 0)
+
+#define AD4130_VBIAS_REG	0x04
+
+#define AD4130_ID_REG	0x05
+
+#define AD4130_ERROR_REG	0x06
+
+#define AD4130_ERROR_EN_REG	0x07
+
+#define AD4130_MCLK_COUNT_REG	0x08
+
+#define AD4130_CHANNEL_X_REG(x)	(0x09 + (x))
+#define AD4130_CHANNEL_EN_MASK	BIT(23)
+#define AD4130_CHANNEL_SETUP_MASK	GENMASK(22, 20)
+#define AD4130_CHANNEL_AINP_MASK	GENMASK(17, 13)
+#define AD4130_CHANNEL_AINM_MASK	GENMASK(12, 8)
+#define AD4130_CHANNEL_IOUT1_MASK	GENMASK(7, 4)
+#define AD4130_CHANNEL_IOUT2_MASK	GENMASK(3, 0)
+
+#define AD4130_CONFIG_X_REG(x)	(0x19 + (x))
+#define AD4130_CONFIG_IOUT1_VAL_MASK	GENMASK(15, 13)
+#define AD4130_CONFIG_IOUT2_VAL_MASK	GENMASK(12, 10)
+#define AD4130_CONFIG_BURNOUT_MASK	GENMASK(9, 8)
+#define AD4130_CONFIG_REF_BUFP_MASK	BIT(7)
+#define AD4130_CONFIG_REF_BUFM_MASK	BIT(6)
+#define AD4130_CONFIG_REF_SEL_MASK	GENMASK(5, 4)
+#define AD4130_CONFIG_PGA_MASK	GENMASK(3, 1)
+
+#define AD4130_FILTER_X_REG(x)	(0x21 + (x))
+#define AD4130_FILTER_MODE_MASK	GENMASK(15, 12)
+#define AD4130_FILTER_SELECT_MASK	GENMASK(10, 0)
+#define AD4130_FILTER_SELECT_MIN	1
+
+#define AD4130_OFFSET_X_REG(x)	(0x29 + (x))
+
+#define AD4130_GAIN_X_REG(x)	(0x31 + (x))
+
+#define AD4130_MISC_REG	0x39
+
+#define AD4130_FIFO_CONTROL_REG	0x3a
+#define AD4130_FIFO_CONTROL_HEADER_MASK	BIT(18)
+#define AD4130_FIFO_CONTROL_MODE_MASK	GENMASK(17, 16)
+#define AD4130_FIFO_CONTROL_WM_INT_EN_MASK	BIT(9)
+#define AD4130_FIFO_CONTROL_WM_MASK	GENMASK(7, 0)
+#define AD4130_WATERMARK_256	0
+
+#define AD4130_FIFO_STATUS_REG	0x3b
+
+#define AD4130_FIFO_THRESHOLD_REG	0x3c
+
+#define AD4130_FIFO_DATA_REG	0x3d
+#define AD4130_FIFO_SIZE	256
+#define AD4130_FIFO_MAX_SAMPLE_SIZE	3
+
+#define AD4130_MAX_ANALOG_PINS	16
+#define AD4130_MAX_CHANNELS	16
+#define AD4130_MAX_DIFF_INPUTS	30
+#define AD4130_MAX_GPIOS	4
+#define AD4130_MAX_ODR	2400
+#define AD4130_MAX_PGA	8
+#define AD4130_MAX_SETUPS	8
+
+#define AD4130_AIN2_P1	0x2
+#define AD4130_AIN3_P2	0x3
+
+#define AD4130_RESET_BUF_SIZE	8
+#define AD4130_RESET_SLEEP_US	(160 * MICRO / AD4130_MCLK_FREQ_76_8KHZ)
+
+#define AD4130_INVALID_SLOT	-1
+
+static const unsigned int ad4130_reg_size[] = {
+	[AD4130_STATUS_REG] = 1,
+	[AD4130_ADC_CONTROL_REG] = 2,
+	[AD4130_DATA_REG] = 3,
+	[AD4130_IO_CONTROL_REG] = 2,
+	[AD4130_VBIAS_REG] = 2,
+	[AD4130_ID_REG] = 1,
+	[AD4130_ERROR_REG] = 2,
+	[AD4130_ERROR_EN_REG] = 2,
+	[AD4130_MCLK_COUNT_REG] = 1,
+	[AD4130_CHANNEL_X_REG(0) ... AD4130_CHANNEL_X_REG(AD4130_MAX_CHANNELS - 1)] = 3,
+	[AD4130_CONFIG_X_REG(0) ... AD4130_CONFIG_X_REG(AD4130_MAX_SETUPS - 1)] = 2,
+	[AD4130_FILTER_X_REG(0) ... AD4130_FILTER_X_REG(AD4130_MAX_SETUPS - 1)] = 3,
+	[AD4130_OFFSET_X_REG(0) ... AD4130_OFFSET_X_REG(AD4130_MAX_SETUPS - 1)] = 3,
+	[AD4130_GAIN_X_REG(0) ... AD4130_GAIN_X_REG(AD4130_MAX_SETUPS - 1)] = 3,
+	[AD4130_MISC_REG] = 2,
+	[AD4130_FIFO_CONTROL_REG] = 3,
+	[AD4130_FIFO_STATUS_REG] = 1,
+	[AD4130_FIFO_THRESHOLD_REG] = 3,
+	[AD4130_FIFO_DATA_REG] = 3,
+};
+
+enum ad4130_int_ref_val {
+	AD4130_INT_REF_VAL_2_5V,
+	AD4130_INT_REF_VAL_1_25V,
+};
+
+enum ad4130_mclk_sel {
+	AD4130_MCLK_76_8KHZ,
+	AD4130_MCLK_76_8KHZ_OUT,
+	AD4130_MCLK_76_8KHZ_EXT,
+	AD4130_MCLK_153_6KHZ_EXT,
+};
+
+enum ad4130_int_pin_sel {
+	AD4130_INT_PIN_INT,
+	AD4130_INT_PIN_CLK,
+	AD4130_INT_PIN_P2,
+	AD4130_INT_PIN_DOUT,
+};
+
+enum ad4130_iout {
+	AD4130_IOUT_OFF,
+	AD4130_IOUT_10000NA,
+	AD4130_IOUT_20000NA,
+	AD4130_IOUT_50000NA,
+	AD4130_IOUT_100000NA,
+	AD4130_IOUT_150000NA,
+	AD4130_IOUT_200000NA,
+	AD4130_IOUT_100NA,
+	AD4130_IOUT_MAX
+};
+
+enum ad4130_burnout {
+	AD4130_BURNOUT_OFF,
+	AD4130_BURNOUT_500NA,
+	AD4130_BURNOUT_2000NA,
+	AD4130_BURNOUT_4000NA,
+	AD4130_BURNOUT_MAX
+};
+
+enum ad4130_ref_sel {
+	AD4130_REF_REFIN1,
+	AD4130_REF_REFIN2,
+	AD4130_REF_REFOUT_AVSS,
+	AD4130_REF_AVDD_AVSS,
+	AD4130_REF_SEL_MAX
+};
+
+enum ad4130_fifo_mode {
+	AD4130_FIFO_MODE_DISABLED = 0b00,
+	AD4130_FIFO_MODE_WM = 0b01,
+};
+
+enum ad4130_mode {
+	AD4130_MODE_CONTINUOUS = 0b0000,
+	AD4130_MODE_IDLE = 0b0100,
+};
+
+enum ad4130_filter_mode {
+	AD4130_FILTER_SINC4,
+	AD4130_FILTER_SINC4_SINC1,
+	AD4130_FILTER_SINC3,
+	AD4130_FILTER_SINC3_REJ60,
+	AD4130_FILTER_SINC3_SINC1,
+	AD4130_FILTER_SINC3_PF1,
+	AD4130_FILTER_SINC3_PF2,
+	AD4130_FILTER_SINC3_PF3,
+	AD4130_FILTER_SINC3_PF4,
+};
+
+enum ad4130_pin_function {
+	AD4130_PIN_FN_NONE,
+	AD4130_PIN_FN_SPECIAL = BIT(0),
+	AD4130_PIN_FN_DIFF = BIT(1),
+	AD4130_PIN_FN_EXCITATION = BIT(2),
+	AD4130_PIN_FN_VBIAS = BIT(3),
+};
+
+struct ad4130_setup_info {
+	unsigned int iout0_val;
+	unsigned int iout1_val;
+	unsigned int burnout;
+	unsigned int pga;
+	unsigned int fs;
+	u32 ref_sel;
+	enum ad4130_filter_mode filter_mode;
+	bool ref_bufp;
+	bool ref_bufm;
+};
+
+struct ad4130_slot_info {
+	struct ad4130_setup_info setup;
+	unsigned int enabled_channels;
+	unsigned int channels;
+};
+
+struct ad4130_chan_info {
+	struct ad4130_setup_info setup;
+	u32 iout0;
+	u32 iout1;
+	int slot;
+	bool enabled;
+	bool initialized;
+};
+
+struct ad4130_filter_config {
+	enum ad4130_filter_mode filter_mode;
+	unsigned int odr_div;
+	unsigned int fs_max;
+	enum iio_available_type samp_freq_avail_type;
+	int samp_freq_avail_len;
+	int samp_freq_avail[3][2];
+};
+
+struct ad4130_state {
+	struct regmap *regmap;
+	struct spi_device *spi;
+	struct clk *mclk;
+	struct regulator_bulk_data regulators[4];
+	u32 irq_trigger;
+	u32 inv_irq_trigger;
+
+	/*
+	 * Synchronize access to members of the driver state, and ensure
+	 * atomicity of consecutive regmap operations.
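+	 * (Helpers that update several of these fields together, e.g.
+	 * ad4130_set_filter_mode() below, hold this mutex around the
+	 * whole read-modify-write sequence.)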
+ */ + struct mutex lock; + struct completion completion; + + struct iio_chan_spec chans[AD4130_MAX_CHANNELS]; + struct ad4130_chan_info chans_info[AD4130_MAX_CHANNELS]; + struct ad4130_slot_info slots_info[AD4130_MAX_SETUPS]; + enum ad4130_pin_function pins_fn[AD4130_MAX_ANALOG_PINS]; + u32 vbias_pins[AD4130_MAX_ANALOG_PINS]; + u32 num_vbias_pins; + int scale_tbls[AD4130_REF_SEL_MAX][AD4130_MAX_PGA][2]; + struct gpio_chip gc; + struct clk_hw int_clk_hw; + + u32 int_pin_sel; + u32 int_ref_uv; + u32 mclk_sel; + bool int_ref_en; + bool bipolar; + + unsigned int num_enabled_channels; + unsigned int effective_watermark; + unsigned int watermark; + + struct spi_message fifo_msg; + struct spi_transfer fifo_xfer[2]; + + /* + * DMA (thus cache coherency maintenance) requires any transfer + * buffers to live in their own cache lines. As the use of these + * buffers is synchronous, all of the buffers used for DMA in this + * driver may share a cache line. + */ + u8 reset_buf[AD4130_RESET_BUF_SIZE] __aligned(IIO_DMA_MINALIGN); + u8 reg_write_tx_buf[4]; + u8 reg_read_tx_buf[1]; + u8 reg_read_rx_buf[3]; + u8 fifo_tx_buf[2]; + u8 fifo_rx_buf[AD4130_FIFO_SIZE * + AD4130_FIFO_MAX_SAMPLE_SIZE]; +}; + +static const char * const ad4130_int_pin_names[] = { + [AD4130_INT_PIN_INT] = "int", + [AD4130_INT_PIN_CLK] = "clk", + [AD4130_INT_PIN_P2] = "p2", + [AD4130_INT_PIN_DOUT] = "dout", +}; + +static const unsigned int ad4130_iout_current_na_tbl[AD4130_IOUT_MAX] = { + [AD4130_IOUT_OFF] = 0, + [AD4130_IOUT_100NA] = 100, + [AD4130_IOUT_10000NA] = 10000, + [AD4130_IOUT_20000NA] = 20000, + [AD4130_IOUT_50000NA] = 50000, + [AD4130_IOUT_100000NA] = 100000, + [AD4130_IOUT_150000NA] = 150000, + [AD4130_IOUT_200000NA] = 200000, +}; + +static const unsigned int ad4130_burnout_current_na_tbl[AD4130_BURNOUT_MAX] = { + [AD4130_BURNOUT_OFF] = 0, + [AD4130_BURNOUT_500NA] = 500, + [AD4130_BURNOUT_2000NA] = 2000, + [AD4130_BURNOUT_4000NA] = 4000, +}; + +#define AD4130_VARIABLE_ODR_CONFIG(_filter_mode, _odr_div, _fs_max) \ +{ \ + .filter_mode = (_filter_mode), \ + .odr_div = (_odr_div), \ + .fs_max = (_fs_max), \ + .samp_freq_avail_type = IIO_AVAIL_RANGE, \ + .samp_freq_avail = { \ + { AD4130_MAX_ODR, (_odr_div) * (_fs_max) }, \ + { AD4130_MAX_ODR, (_odr_div) * (_fs_max) }, \ + { AD4130_MAX_ODR, (_odr_div) }, \ + }, \ +} + +#define AD4130_FIXED_ODR_CONFIG(_filter_mode, _odr_div) \ +{ \ + .filter_mode = (_filter_mode), \ + .odr_div = (_odr_div), \ + .fs_max = AD4130_FILTER_SELECT_MIN, \ + .samp_freq_avail_type = IIO_AVAIL_LIST, \ + .samp_freq_avail_len = 1, \ + .samp_freq_avail = { \ + { AD4130_MAX_ODR, (_odr_div) }, \ + }, \ +} + +static const struct ad4130_filter_config ad4130_filter_configs[] = { + AD4130_VARIABLE_ODR_CONFIG(AD4130_FILTER_SINC4, 1, 10), + AD4130_VARIABLE_ODR_CONFIG(AD4130_FILTER_SINC4_SINC1, 11, 10), + AD4130_VARIABLE_ODR_CONFIG(AD4130_FILTER_SINC3, 1, 2047), + AD4130_VARIABLE_ODR_CONFIG(AD4130_FILTER_SINC3_REJ60, 1, 2047), + AD4130_VARIABLE_ODR_CONFIG(AD4130_FILTER_SINC3_SINC1, 10, 2047), + AD4130_FIXED_ODR_CONFIG(AD4130_FILTER_SINC3_PF1, 92), + AD4130_FIXED_ODR_CONFIG(AD4130_FILTER_SINC3_PF2, 100), + AD4130_FIXED_ODR_CONFIG(AD4130_FILTER_SINC3_PF3, 124), + AD4130_FIXED_ODR_CONFIG(AD4130_FILTER_SINC3_PF4, 148), +}; + +static const char * const ad4130_filter_modes_str[] = { + [AD4130_FILTER_SINC4] = "sinc4", + [AD4130_FILTER_SINC4_SINC1] = "sinc4+sinc1", + [AD4130_FILTER_SINC3] = "sinc3", + [AD4130_FILTER_SINC3_REJ60] = "sinc3+rej60", + [AD4130_FILTER_SINC3_SINC1] = "sinc3+sinc1", + [AD4130_FILTER_SINC3_PF1] = 
"sinc3+pf1", + [AD4130_FILTER_SINC3_PF2] = "sinc3+pf2", + [AD4130_FILTER_SINC3_PF3] = "sinc3+pf3", + [AD4130_FILTER_SINC3_PF4] = "sinc3+pf4", +}; + +static int ad4130_get_reg_size(struct ad4130_state *st, unsigned int reg, + unsigned int *size) +{ + if (reg >= ARRAY_SIZE(ad4130_reg_size)) + return -EINVAL; + + *size = ad4130_reg_size[reg]; + + return 0; +} + +static unsigned int ad4130_data_reg_size(struct ad4130_state *st) +{ + unsigned int data_reg_size; + int ret; + + ret = ad4130_get_reg_size(st, AD4130_DATA_REG, &data_reg_size); + if (ret) + return 0; + + return data_reg_size; +} + +static unsigned int ad4130_resolution(struct ad4130_state *st) +{ + return ad4130_data_reg_size(st) * BITS_PER_BYTE; +} + +static int ad4130_reg_write(void *context, unsigned int reg, unsigned int val) +{ + struct ad4130_state *st = context; + unsigned int size; + int ret; + + ret = ad4130_get_reg_size(st, reg, &size); + if (ret) + return ret; + + st->reg_write_tx_buf[0] = reg; + + switch (size) { + case 3: + put_unaligned_be24(val, &st->reg_write_tx_buf[1]); + break; + case 2: + put_unaligned_be16(val, &st->reg_write_tx_buf[1]); + break; + case 1: + st->reg_write_tx_buf[1] = val; + break; + default: + return -EINVAL; + } + + return spi_write(st->spi, st->reg_write_tx_buf, size + 1); +} + +static int ad4130_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + struct ad4130_state *st = context; + struct spi_transfer t[] = { + { + .tx_buf = st->reg_read_tx_buf, + .len = sizeof(st->reg_read_tx_buf), + }, + { + .rx_buf = st->reg_read_rx_buf, + }, + }; + unsigned int size; + int ret; + + ret = ad4130_get_reg_size(st, reg, &size); + if (ret) + return ret; + + st->reg_read_tx_buf[0] = AD4130_COMMS_READ_MASK | reg; + t[1].len = size; + + ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t)); + if (ret) + return ret; + + switch (size) { + case 3: + *val = get_unaligned_be24(st->reg_read_rx_buf); + break; + case 2: + *val = get_unaligned_be16(st->reg_read_rx_buf); + break; + case 1: + *val = st->reg_read_rx_buf[0]; + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct regmap_config ad4130_regmap_config = { + .reg_read = ad4130_reg_read, + .reg_write = ad4130_reg_write, +}; + +static int ad4130_gpio_init_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, + unsigned int ngpios) +{ + struct ad4130_state *st = gpiochip_get_data(gc); + unsigned int i; + + /* + * Output-only GPIO functionality is available on pins AIN2 through + * AIN5. If these pins are used for anything else, do not expose them. + */ + for (i = 0; i < ngpios; i++) { + unsigned int pin = i + AD4130_AIN2_P1; + bool valid = st->pins_fn[pin] == AD4130_PIN_FN_NONE; + + __assign_bit(i, valid_mask, valid); + } + + return 0; +} + +static int ad4130_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) +{ + return GPIO_LINE_DIRECTION_OUT; +} + +static void ad4130_gpio_set(struct gpio_chip *gc, unsigned int offset, + int value) +{ + struct ad4130_state *st = gpiochip_get_data(gc); + unsigned int mask = FIELD_PREP(AD4130_IO_CONTROL_GPIO_DATA_MASK, + BIT(offset)); + + regmap_update_bits(st->regmap, AD4130_IO_CONTROL_REG, mask, + value ? 
mask : 0); +} + +static int ad4130_set_mode(struct ad4130_state *st, enum ad4130_mode mode) +{ + return regmap_update_bits(st->regmap, AD4130_ADC_CONTROL_REG, + AD4130_ADC_CONTROL_MODE_MASK, + FIELD_PREP(AD4130_ADC_CONTROL_MODE_MASK, mode)); +} + +static int ad4130_set_watermark_interrupt_en(struct ad4130_state *st, bool en) +{ + return regmap_update_bits(st->regmap, AD4130_FIFO_CONTROL_REG, + AD4130_FIFO_CONTROL_WM_INT_EN_MASK, + FIELD_PREP(AD4130_FIFO_CONTROL_WM_INT_EN_MASK, en)); +} + +static unsigned int ad4130_watermark_reg_val(unsigned int val) +{ + if (val == AD4130_FIFO_SIZE) + val = AD4130_WATERMARK_256; + + return val; +} + +static int ad4130_set_fifo_mode(struct ad4130_state *st, + enum ad4130_fifo_mode mode) +{ + return regmap_update_bits(st->regmap, AD4130_FIFO_CONTROL_REG, + AD4130_FIFO_CONTROL_MODE_MASK, + FIELD_PREP(AD4130_FIFO_CONTROL_MODE_MASK, mode)); +} + +static void ad4130_push_fifo_data(struct iio_dev *indio_dev) +{ + struct ad4130_state *st = iio_priv(indio_dev); + unsigned int data_reg_size = ad4130_data_reg_size(st); + unsigned int transfer_len = st->effective_watermark * data_reg_size; + unsigned int set_size = st->num_enabled_channels * data_reg_size; + unsigned int i; + int ret; + + st->fifo_tx_buf[1] = ad4130_watermark_reg_val(st->effective_watermark); + st->fifo_xfer[1].len = transfer_len; + + ret = spi_sync(st->spi, &st->fifo_msg); + if (ret) + return; + + for (i = 0; i < transfer_len; i += set_size) + iio_push_to_buffers(indio_dev, &st->fifo_rx_buf[i]); +} + +static irqreturn_t ad4130_irq_handler(int irq, void *private) +{ + struct iio_dev *indio_dev = private; + struct ad4130_state *st = iio_priv(indio_dev); + + if (iio_buffer_enabled(indio_dev)) + ad4130_push_fifo_data(indio_dev); + else + complete(&st->completion); + + return IRQ_HANDLED; +} + +static int ad4130_find_slot(struct ad4130_state *st, + struct ad4130_setup_info *target_setup_info, + unsigned int *slot, bool *overwrite) +{ + unsigned int i; + + *slot = AD4130_INVALID_SLOT; + *overwrite = false; + + for (i = 0; i < AD4130_MAX_SETUPS; i++) { + struct ad4130_slot_info *slot_info = &st->slots_info[i]; + + /* Immediately accept a matching setup info. */ + if (!memcmp(target_setup_info, &slot_info->setup, + sizeof(*target_setup_info))) { + *slot = i; + return 0; + } + + /* Ignore all setups which are used by enabled channels. */ + if (slot_info->enabled_channels) + continue; + + /* Find the least used slot. 
*/
+		if (*slot == AD4130_INVALID_SLOT ||
+		    slot_info->channels < st->slots_info[*slot].channels)
+			*slot = i;
+	}
+
+	if (*slot == AD4130_INVALID_SLOT)
+		return -EINVAL;
+
+	*overwrite = true;
+
+	return 0;
+}
+
+static void ad4130_unlink_channel(struct ad4130_state *st, unsigned int channel)
+{
+	struct ad4130_chan_info *chan_info = &st->chans_info[channel];
+	struct ad4130_slot_info *slot_info = &st->slots_info[chan_info->slot];
+
+	chan_info->slot = AD4130_INVALID_SLOT;
+	slot_info->channels--;
+}
+
+static int ad4130_unlink_slot(struct ad4130_state *st, unsigned int slot)
+{
+	unsigned int i;
+
+	for (i = 0; i < AD4130_MAX_CHANNELS; i++) {
+		struct ad4130_chan_info *chan_info = &st->chans_info[i];
+
+		if (!chan_info->initialized || chan_info->slot != slot)
+			continue;
+
+		ad4130_unlink_channel(st, i);
+	}
+
+	return 0;
+}
+
+static int ad4130_link_channel_slot(struct ad4130_state *st,
+				    unsigned int channel, unsigned int slot)
+{
+	struct ad4130_slot_info *slot_info = &st->slots_info[slot];
+	struct ad4130_chan_info *chan_info = &st->chans_info[channel];
+	int ret;
+
+	ret = regmap_update_bits(st->regmap, AD4130_CHANNEL_X_REG(channel),
+				 AD4130_CHANNEL_SETUP_MASK,
+				 FIELD_PREP(AD4130_CHANNEL_SETUP_MASK, slot));
+	if (ret)
+		return ret;
+
+	chan_info->slot = slot;
+	slot_info->channels++;
+
+	return 0;
+}
+
+static int ad4130_write_slot_setup(struct ad4130_state *st,
+				   unsigned int slot,
+				   struct ad4130_setup_info *setup_info)
+{
+	unsigned int val;
+	int ret;
+
+	val = FIELD_PREP(AD4130_CONFIG_IOUT1_VAL_MASK, setup_info->iout0_val) |
+	      FIELD_PREP(AD4130_CONFIG_IOUT2_VAL_MASK, setup_info->iout1_val) |
+	      FIELD_PREP(AD4130_CONFIG_BURNOUT_MASK, setup_info->burnout) |
+	      FIELD_PREP(AD4130_CONFIG_REF_BUFP_MASK, setup_info->ref_bufp) |
+	      FIELD_PREP(AD4130_CONFIG_REF_BUFM_MASK, setup_info->ref_bufm) |
+	      FIELD_PREP(AD4130_CONFIG_REF_SEL_MASK, setup_info->ref_sel) |
+	      FIELD_PREP(AD4130_CONFIG_PGA_MASK, setup_info->pga);
+
+	ret = regmap_write(st->regmap, AD4130_CONFIG_X_REG(slot), val);
+	if (ret)
+		return ret;
+
+	val = FIELD_PREP(AD4130_FILTER_MODE_MASK, setup_info->filter_mode) |
+	      FIELD_PREP(AD4130_FILTER_SELECT_MASK, setup_info->fs);
+
+	ret = regmap_write(st->regmap, AD4130_FILTER_X_REG(slot), val);
+	if (ret)
+		return ret;
+
+	memcpy(&st->slots_info[slot].setup, setup_info, sizeof(*setup_info));
+
+	return 0;
+}
+
+static int ad4130_write_channel_setup(struct ad4130_state *st,
+				      unsigned int channel, bool on_enable)
+{
+	struct ad4130_chan_info *chan_info = &st->chans_info[channel];
+	struct ad4130_setup_info *setup_info = &chan_info->setup;
+	bool overwrite;
+	int slot;
+	int ret;
+
+	/*
+	 * The following cases need to be handled.
+	 *
+	 * 1. Enabled and linked channel with setup changes:
+	 *    - Find a slot. If not possible, return error.
+	 *    - Unlink channel from current slot.
+	 *    - If the slot has channels linked to it, unlink all channels, and
+	 *      write the new setup to it.
+	 *    - Link channel to new slot.
+	 *
+	 * 2. Soon to be enabled and unlinked channel:
+	 *    - Find a slot. If not possible, return error.
+	 *    - If the slot has channels linked to it, unlink all channels, and
+	 *      write the new setup to it.
+	 *    - Link channel to the slot.
+	 *
+	 * 3. Disabled and linked channel with setup changes:
+	 *    - Unlink channel from current slot.
+	 *
+	 * 4. Soon to be enabled and linked channel:
+	 * 5. Disabled and unlinked channel with setup changes:
+	 *    - Do nothing.
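+	 *
+	 * For example (hypothetical configuration), two channels given
+	 * byte-identical setups end up linked to the same slot, and only
+	 * slots with no enabled channels linked to them may be overwritten
+	 * by a new setup.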
+	 */
+
+	/* Case 4 */
+	if (on_enable && chan_info->slot != AD4130_INVALID_SLOT)
+		return 0;
+
+	if (!on_enable && !chan_info->enabled) {
+		if (chan_info->slot != AD4130_INVALID_SLOT)
+			/* Case 3 */
+			ad4130_unlink_channel(st, channel);
+
+		/* Cases 3 & 5 */
+		return 0;
+	}
+
+	/* Cases 1 & 2 */
+	ret = ad4130_find_slot(st, setup_info, &slot, &overwrite);
+	if (ret)
+		return ret;
+
+	if (chan_info->slot != AD4130_INVALID_SLOT)
+		/* Case 1 */
+		ad4130_unlink_channel(st, channel);
+
+	if (overwrite) {
+		ret = ad4130_unlink_slot(st, slot);
+		if (ret)
+			return ret;
+
+		ret = ad4130_write_slot_setup(st, slot, setup_info);
+		if (ret)
+			return ret;
+	}
+
+	return ad4130_link_channel_slot(st, channel, slot);
+}
+
+static int ad4130_set_channel_enable(struct ad4130_state *st,
+				     unsigned int channel, bool status)
+{
+	struct ad4130_chan_info *chan_info = &st->chans_info[channel];
+	struct ad4130_slot_info *slot_info;
+	int ret;
+
+	if (chan_info->enabled == status)
+		return 0;
+
+	if (status) {
+		ret = ad4130_write_channel_setup(st, channel, true);
+		if (ret)
+			return ret;
+	}
+
+	slot_info = &st->slots_info[chan_info->slot];
+
+	ret = regmap_update_bits(st->regmap, AD4130_CHANNEL_X_REG(channel),
+				 AD4130_CHANNEL_EN_MASK,
+				 FIELD_PREP(AD4130_CHANNEL_EN_MASK, status));
+	if (ret)
+		return ret;
+
+	slot_info->enabled_channels += status ? 1 : -1;
+	chan_info->enabled = status;
+
+	return 0;
+}
+
+/*
+ * Table 58. FILTER_MODE_n bits and Filter Types of the datasheet describes
+ * the relation between filter mode, ODR and FS.
+ *
+ * Notice that the max ODR of each filter mode is not necessarily the
+ * absolute max ODR supported by the chip.
+ *
+ * The ODR divider is not explicitly specified, but it can be deduced based
+ * on the ODR range of each filter mode.
+ *
+ * For example, for Sinc4+Sinc1, max ODR is 218.18. That means that the
+ * absolute max ODR is divided by 11 to achieve the max ODR of this filter
+ * mode.
+ *
+ * The formulas for converting between ODR and FS for a specific filter
+ * mode can be deduced from the same table.
+ *
+ * Notice that FS = 1 actually means max ODR, and that ODR decreases by
+ * (maximum ODR / maximum FS) for each increment of FS.
+ *
+ * odr = MAX_ODR / odr_div * (1 - (fs - 1) / fs_max) <=>
+ * odr = MAX_ODR * (1 - (fs - 1) / fs_max) / odr_div <=>
+ * odr = MAX_ODR * (fs_max - fs + 1) / (fs_max * odr_div)
+ * (used in ad4130_fs_to_freq)
+ *
+ * For the opposite formula, FS can be extracted from the last one.
+ *
+ * MAX_ODR * (fs_max - fs + 1) = fs_max * odr_div * odr <=>
+ * fs_max - fs + 1 = fs_max * odr_div * odr / MAX_ODR <=>
+ * fs = 1 + fs_max - fs_max * odr_div * odr / MAX_ODR
+ * (used in ad4130_freq_to_fs)
+ */
+
+static void ad4130_freq_to_fs(enum ad4130_filter_mode filter_mode,
+			      int val, int val2, unsigned int *fs)
+{
+	const struct ad4130_filter_config *filter_config =
+		&ad4130_filter_configs[filter_mode];
+	u64 dividend, divisor;
+	int temp;
+
+	dividend = filter_config->fs_max * filter_config->odr_div *
+		   ((u64)val * NANO + val2);
+	divisor = (u64)AD4130_MAX_ODR * NANO;
+
+	temp = AD4130_FILTER_SELECT_MIN + filter_config->fs_max -
+	       DIV64_U64_ROUND_CLOSEST(dividend, divisor);
+
+	if (temp < AD4130_FILTER_SELECT_MIN)
+		temp = AD4130_FILTER_SELECT_MIN;
+	else if (temp > filter_config->fs_max)
+		temp = filter_config->fs_max;
+
+	*fs = temp;
+}
+
+static void ad4130_fs_to_freq(enum ad4130_filter_mode filter_mode,
+			      unsigned int fs, int *val, int *val2)
+{
+	const struct ad4130_filter_config *filter_config =
+		&ad4130_filter_configs[filter_mode];
+	unsigned int dividend, divisor;
+	u64 temp;
+
+	dividend = (filter_config->fs_max - fs + AD4130_FILTER_SELECT_MIN) *
+		   AD4130_MAX_ODR;
+	divisor = filter_config->fs_max * filter_config->odr_div;
+
+	temp = div_u64((u64)dividend * NANO, divisor);
+	*val = div_u64_rem(temp, NANO, val2);
+}
+
+static int ad4130_set_filter_mode(struct iio_dev *indio_dev,
+				  const struct iio_chan_spec *chan,
+				  unsigned int val)
+{
+	struct ad4130_state *st = iio_priv(indio_dev);
+	unsigned int channel = chan->scan_index;
+	struct ad4130_chan_info *chan_info = &st->chans_info[channel];
+	struct ad4130_setup_info *setup_info = &chan_info->setup;
+	enum ad4130_filter_mode old_filter_mode;
+	int freq_val, freq_val2;
+	unsigned int old_fs;
+	int ret = 0;
+
+	mutex_lock(&st->lock);
+	if (setup_info->filter_mode == val)
+		goto out;
+
+	old_fs = setup_info->fs;
+	old_filter_mode = setup_info->filter_mode;
+
+	/*
+	 * When switching between filter modes, try to match the ODR as
+	 * close as possible. To do this, convert the current FS into ODR
+	 * using the old filter mode, then convert it back into FS using
+	 * the new filter mode.
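+	 *
+	 * For example, with sinc4+sinc1 (odr_div = 11, fs_max = 10),
+	 * FS = 1 maps to 2400 * 10 / (10 * 11) = 218.18 Hz and FS = 10
+	 * to 21.81 Hz; a channel switched into this mode is given the FS
+	 * whose ODR is nearest its previous rate.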
+ */ + ad4130_fs_to_freq(setup_info->filter_mode, setup_info->fs, + &freq_val, &freq_val2); + + ad4130_freq_to_fs(val, freq_val, freq_val2, &setup_info->fs); + + setup_info->filter_mode = val; + + ret = ad4130_write_channel_setup(st, channel, false); + if (ret) { + setup_info->fs = old_fs; + setup_info->filter_mode = old_filter_mode; + } + + out: + mutex_unlock(&st->lock); + + return ret; +} + +static int ad4130_get_filter_mode(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan) +{ + struct ad4130_state *st = iio_priv(indio_dev); + unsigned int channel = chan->scan_index; + struct ad4130_setup_info *setup_info = &st->chans_info[channel].setup; + enum ad4130_filter_mode filter_mode; + + mutex_lock(&st->lock); + filter_mode = setup_info->filter_mode; + mutex_unlock(&st->lock); + + return filter_mode; +} + +static const struct iio_enum ad4130_filter_mode_enum = { + .items = ad4130_filter_modes_str, + .num_items = ARRAY_SIZE(ad4130_filter_modes_str), + .set = ad4130_set_filter_mode, + .get = ad4130_get_filter_mode, +}; + +static const struct iio_chan_spec_ext_info ad4130_filter_mode_ext_info[] = { + IIO_ENUM("filter_mode", IIO_SEPARATE, &ad4130_filter_mode_enum), + IIO_ENUM_AVAILABLE("filter_mode", IIO_SHARED_BY_TYPE, + &ad4130_filter_mode_enum), + { } +}; + +static const struct iio_chan_spec ad4130_channel_template = { + .type = IIO_VOLTAGE, + .indexed = 1, + .differential = 1, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OFFSET) | + BIT(IIO_CHAN_INFO_SAMP_FREQ), + .info_mask_separate_available = BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_SAMP_FREQ), + .ext_info = ad4130_filter_mode_ext_info, + .scan_type = { + .sign = 'u', + .endianness = IIO_BE, + }, +}; + +static int ad4130_set_channel_pga(struct ad4130_state *st, unsigned int channel, + int val, int val2) +{ + struct ad4130_chan_info *chan_info = &st->chans_info[channel]; + struct ad4130_setup_info *setup_info = &chan_info->setup; + unsigned int pga, old_pga; + int ret = 0; + + for (pga = 0; pga < AD4130_MAX_PGA; pga++) + if (val == st->scale_tbls[setup_info->ref_sel][pga][0] && + val2 == st->scale_tbls[setup_info->ref_sel][pga][1]) + break; + + if (pga == AD4130_MAX_PGA) + return -EINVAL; + + mutex_lock(&st->lock); + if (pga == setup_info->pga) + goto out; + + old_pga = setup_info->pga; + setup_info->pga = pga; + + ret = ad4130_write_channel_setup(st, channel, false); + if (ret) + setup_info->pga = old_pga; + +out: + mutex_unlock(&st->lock); + + return ret; +} + +static int ad4130_set_channel_freq(struct ad4130_state *st, + unsigned int channel, int val, int val2) +{ + struct ad4130_chan_info *chan_info = &st->chans_info[channel]; + struct ad4130_setup_info *setup_info = &chan_info->setup; + unsigned int fs, old_fs; + int ret = 0; + + mutex_lock(&st->lock); + old_fs = setup_info->fs; + + ad4130_freq_to_fs(setup_info->filter_mode, val, val2, &fs); + + if (fs == setup_info->fs) + goto out; + + setup_info->fs = fs; + + ret = ad4130_write_channel_setup(st, channel, false); + if (ret) + setup_info->fs = old_fs; + +out: + mutex_unlock(&st->lock); + + return ret; +} + +static int _ad4130_read_sample(struct iio_dev *indio_dev, unsigned int channel, + int *val) +{ + struct ad4130_state *st = iio_priv(indio_dev); + int ret; + + ret = ad4130_set_channel_enable(st, channel, true); + if (ret) + return ret; + + reinit_completion(&st->completion); + + ret = ad4130_set_mode(st, AD4130_MODE_CONTINUOUS); + if (ret) + return ret; + + ret = wait_for_completion_timeout(&st->completion, + 
msecs_to_jiffies(1000)); + if (!ret) + return -ETIMEDOUT; + + ret = ad4130_set_mode(st, AD4130_MODE_IDLE); + if (ret) + return ret; + + ret = regmap_read(st->regmap, AD4130_DATA_REG, val); + if (ret) + return ret; + + ret = ad4130_set_channel_enable(st, channel, false); + if (ret) + return ret; + + return IIO_VAL_INT; +} + +static int ad4130_read_sample(struct iio_dev *indio_dev, unsigned int channel, + int *val) +{ + struct ad4130_state *st = iio_priv(indio_dev); + int ret; + + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + + mutex_lock(&st->lock); + ret = _ad4130_read_sample(indio_dev, channel, val); + mutex_unlock(&st->lock); + + iio_device_release_direct_mode(indio_dev); + + return ret; +} + +static int ad4130_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long info) +{ + struct ad4130_state *st = iio_priv(indio_dev); + unsigned int channel = chan->scan_index; + struct ad4130_setup_info *setup_info = &st->chans_info[channel].setup; + + switch (info) { + case IIO_CHAN_INFO_RAW: + return ad4130_read_sample(indio_dev, channel, val); + case IIO_CHAN_INFO_SCALE: + mutex_lock(&st->lock); + *val = st->scale_tbls[setup_info->ref_sel][setup_info->pga][0]; + *val2 = st->scale_tbls[setup_info->ref_sel][setup_info->pga][1]; + mutex_unlock(&st->lock); + + return IIO_VAL_INT_PLUS_NANO; + case IIO_CHAN_INFO_OFFSET: + *val = st->bipolar ? -BIT(chan->scan_type.realbits - 1) : 0; + + return IIO_VAL_INT; + case IIO_CHAN_INFO_SAMP_FREQ: + mutex_lock(&st->lock); + ad4130_fs_to_freq(setup_info->filter_mode, setup_info->fs, + val, val2); + mutex_unlock(&st->lock); + + return IIO_VAL_INT_PLUS_NANO; + default: + return -EINVAL; + } +} + +static int ad4130_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long info) +{ + struct ad4130_state *st = iio_priv(indio_dev); + unsigned int channel = chan->scan_index; + struct ad4130_setup_info *setup_info = &st->chans_info[channel].setup; + const struct ad4130_filter_config *filter_config; + + switch (info) { + case IIO_CHAN_INFO_SCALE: + *vals = (int *)st->scale_tbls[setup_info->ref_sel]; + *length = ARRAY_SIZE(st->scale_tbls[setup_info->ref_sel]) * 2; + + *type = IIO_VAL_INT_PLUS_NANO; + + return IIO_AVAIL_LIST; + case IIO_CHAN_INFO_SAMP_FREQ: + mutex_lock(&st->lock); + filter_config = &ad4130_filter_configs[setup_info->filter_mode]; + mutex_unlock(&st->lock); + + *vals = (int *)filter_config->samp_freq_avail; + *length = filter_config->samp_freq_avail_len * 2; + *type = IIO_VAL_FRACTIONAL; + + return filter_config->samp_freq_avail_type; + default: + return -EINVAL; + } +} + +static int ad4130_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + long info) +{ + switch (info) { + case IIO_CHAN_INFO_SCALE: + case IIO_CHAN_INFO_SAMP_FREQ: + return IIO_VAL_INT_PLUS_NANO; + default: + return -EINVAL; + } +} + +static int ad4130_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long info) +{ + struct ad4130_state *st = iio_priv(indio_dev); + unsigned int channel = chan->scan_index; + + switch (info) { + case IIO_CHAN_INFO_SCALE: + return ad4130_set_channel_pga(st, channel, val, val2); + case IIO_CHAN_INFO_SAMP_FREQ: + return ad4130_set_channel_freq(st, channel, val, val2); + default: + return -EINVAL; + } +} + +static int ad4130_reg_access(struct iio_dev *indio_dev, unsigned int reg, + unsigned int writeval, unsigned int *readval) +{ + struct ad4130_state 
*st = iio_priv(indio_dev); + + if (readval) + return regmap_read(st->regmap, reg, readval); + + return regmap_write(st->regmap, reg, writeval); +} + +static int ad4130_update_scan_mode(struct iio_dev *indio_dev, + const unsigned long *scan_mask) +{ + struct ad4130_state *st = iio_priv(indio_dev); + unsigned int channel; + unsigned int val = 0; + int ret; + + mutex_lock(&st->lock); + + for_each_set_bit(channel, scan_mask, indio_dev->num_channels) { + ret = ad4130_set_channel_enable(st, channel, true); + if (ret) + goto out; + + val++; + } + + st->num_enabled_channels = val; + +out: + mutex_unlock(&st->lock); + + return 0; +} + +static int ad4130_set_fifo_watermark(struct iio_dev *indio_dev, unsigned int val) +{ + struct ad4130_state *st = iio_priv(indio_dev); + unsigned int eff; + int ret; + + if (val > AD4130_FIFO_SIZE) + return -EINVAL; + + eff = val * st->num_enabled_channels; + if (eff > AD4130_FIFO_SIZE) + /* + * Always set watermark to a multiple of the number of + * enabled channels to avoid making the FIFO unaligned. + */ + eff = rounddown(AD4130_FIFO_SIZE, st->num_enabled_channels); + + mutex_lock(&st->lock); + + ret = regmap_update_bits(st->regmap, AD4130_FIFO_CONTROL_REG, + AD4130_FIFO_CONTROL_WM_MASK, + FIELD_PREP(AD4130_FIFO_CONTROL_WM_MASK, + ad4130_watermark_reg_val(eff))); + if (ret) + goto out; + + st->effective_watermark = eff; + st->watermark = val; + +out: + mutex_unlock(&st->lock); + + return ret; +} + +static const struct iio_info ad4130_info = { + .read_raw = ad4130_read_raw, + .read_avail = ad4130_read_avail, + .write_raw_get_fmt = ad4130_write_raw_get_fmt, + .write_raw = ad4130_write_raw, + .update_scan_mode = ad4130_update_scan_mode, + .hwfifo_set_watermark = ad4130_set_fifo_watermark, + .debugfs_reg_access = ad4130_reg_access, +}; + +static int ad4130_buffer_postenable(struct iio_dev *indio_dev) +{ + struct ad4130_state *st = iio_priv(indio_dev); + int ret; + + mutex_lock(&st->lock); + + ret = ad4130_set_watermark_interrupt_en(st, true); + if (ret) + goto out; + + ret = irq_set_irq_type(st->spi->irq, st->inv_irq_trigger); + if (ret) + goto out; + + ret = ad4130_set_fifo_mode(st, AD4130_FIFO_MODE_WM); + if (ret) + goto out; + + ret = ad4130_set_mode(st, AD4130_MODE_CONTINUOUS); + +out: + mutex_unlock(&st->lock); + + return ret; +} + +static int ad4130_buffer_predisable(struct iio_dev *indio_dev) +{ + struct ad4130_state *st = iio_priv(indio_dev); + unsigned int i; + int ret; + + mutex_lock(&st->lock); + + ret = ad4130_set_mode(st, AD4130_MODE_IDLE); + if (ret) + goto out; + + ret = irq_set_irq_type(st->spi->irq, st->irq_trigger); + if (ret) + goto out; + + ret = ad4130_set_fifo_mode(st, AD4130_FIFO_MODE_DISABLED); + if (ret) + goto out; + + ret = ad4130_set_watermark_interrupt_en(st, false); + if (ret) + goto out; + + /* + * update_scan_mode() is not called in the disable path, disable all + * channels here. 
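+	 * (ad4130_update_scan_mode() only enables the channels named in the
+	 * new scan mask, so the matching disable has to happen in this
+	 * predisable callback.)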
+ */ + for (i = 0; i < indio_dev->num_channels; i++) { + ret = ad4130_set_channel_enable(st, i, false); + if (ret) + goto out; + } + +out: + mutex_unlock(&st->lock); + + return ret; +} + +static const struct iio_buffer_setup_ops ad4130_buffer_ops = { + .postenable = ad4130_buffer_postenable, + .predisable = ad4130_buffer_predisable, +}; + +static ssize_t hwfifo_watermark_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ad4130_state *st = iio_priv(dev_to_iio_dev(dev)); + unsigned int val; + + mutex_lock(&st->lock); + val = st->watermark; + mutex_unlock(&st->lock); + + return sysfs_emit(buf, "%d\n", val); +} + +static ssize_t hwfifo_enabled_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ad4130_state *st = iio_priv(dev_to_iio_dev(dev)); + unsigned int val; + int ret; + + ret = regmap_read(st->regmap, AD4130_FIFO_CONTROL_REG, &val); + if (ret) + return ret; + + val = FIELD_GET(AD4130_FIFO_CONTROL_MODE_MASK, val); + + return sysfs_emit(buf, "%d\n", val != AD4130_FIFO_MODE_DISABLED); +} + +static ssize_t hwfifo_watermark_min_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", "1"); +} + +static ssize_t hwfifo_watermark_max_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", __stringify(AD4130_FIFO_SIZE)); +} + +static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0); +static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0); +static IIO_DEVICE_ATTR_RO(hwfifo_watermark, 0); +static IIO_DEVICE_ATTR_RO(hwfifo_enabled, 0); + +static const struct iio_dev_attr *ad4130_fifo_attributes[] = { + &iio_dev_attr_hwfifo_watermark_min, + &iio_dev_attr_hwfifo_watermark_max, + &iio_dev_attr_hwfifo_watermark, + &iio_dev_attr_hwfifo_enabled, + NULL +}; + +static int _ad4130_find_table_index(const unsigned int *tbl, size_t len, + unsigned int val) +{ + unsigned int i; + + for (i = 0; i < len; i++) + if (tbl[i] == val) + return i; + + return -EINVAL; +} + +#define ad4130_find_table_index(table, val) \ + _ad4130_find_table_index(table, ARRAY_SIZE(table), val) + +static int ad4130_get_ref_voltage(struct ad4130_state *st, + enum ad4130_ref_sel ref_sel) +{ + switch (ref_sel) { + case AD4130_REF_REFIN1: + return regulator_get_voltage(st->regulators[2].consumer); + case AD4130_REF_REFIN2: + return regulator_get_voltage(st->regulators[3].consumer); + case AD4130_REF_AVDD_AVSS: + return regulator_get_voltage(st->regulators[0].consumer); + case AD4130_REF_REFOUT_AVSS: + return st->int_ref_uv; + default: + return -EINVAL; + } +} + +static int ad4130_parse_fw_setup(struct ad4130_state *st, + struct fwnode_handle *child, + struct ad4130_setup_info *setup_info) +{ + struct device *dev = &st->spi->dev; + u32 tmp; + int ret; + + tmp = 0; + fwnode_property_read_u32(child, "adi,excitation-current-0-nanoamp", &tmp); + ret = ad4130_find_table_index(ad4130_iout_current_na_tbl, tmp); + if (ret < 0) + return dev_err_probe(dev, ret, + "Invalid excitation current %unA\n", tmp); + setup_info->iout0_val = ret; + + tmp = 0; + fwnode_property_read_u32(child, "adi,excitation-current-1-nanoamp", &tmp); + ret = ad4130_find_table_index(ad4130_iout_current_na_tbl, tmp); + if (ret < 0) + return dev_err_probe(dev, ret, + "Invalid excitation current %unA\n", tmp); + setup_info->iout1_val = ret; + + tmp = 0; + fwnode_property_read_u32(child, "adi,burnout-current-nanoamp", &tmp); + ret = ad4130_find_table_index(ad4130_burnout_current_na_tbl, tmp); + if (ret < 0) + return dev_err_probe(dev, 
ret, + "Invalid burnout current %unA\n", tmp); + setup_info->burnout = ret; + + setup_info->ref_bufp = fwnode_property_read_bool(child, "adi,buffered-positive"); + setup_info->ref_bufm = fwnode_property_read_bool(child, "adi,buffered-negative"); + + setup_info->ref_sel = AD4130_REF_REFIN1; + fwnode_property_read_u32(child, "adi,reference-select", + &setup_info->ref_sel); + if (setup_info->ref_sel >= AD4130_REF_SEL_MAX) + return dev_err_probe(dev, -EINVAL, + "Invalid reference selected %u\n", + setup_info->ref_sel); + + if (setup_info->ref_sel == AD4130_REF_REFOUT_AVSS) + st->int_ref_en = true; + + ret = ad4130_get_ref_voltage(st, setup_info->ref_sel); + if (ret < 0) + return dev_err_probe(dev, ret, "Cannot use reference %u\n", + setup_info->ref_sel); + + return 0; +} + +static int ad4130_validate_diff_channel(struct ad4130_state *st, u32 pin) +{ + struct device *dev = &st->spi->dev; + + if (pin >= AD4130_MAX_DIFF_INPUTS) + return dev_err_probe(dev, -EINVAL, + "Invalid differential channel %u\n", pin); + + if (pin >= AD4130_MAX_ANALOG_PINS) + return 0; + + if (st->pins_fn[pin] == AD4130_PIN_FN_SPECIAL) + return dev_err_probe(dev, -EINVAL, + "Pin %u already used with fn %u\n", pin, + st->pins_fn[pin]); + + st->pins_fn[pin] |= AD4130_PIN_FN_DIFF; + + return 0; +} + +static int ad4130_validate_diff_channels(struct ad4130_state *st, + u32 *pins, unsigned int len) +{ + unsigned int i; + int ret; + + for (i = 0; i < len; i++) { + ret = ad4130_validate_diff_channel(st, pins[i]); + if (ret) + return ret; + } + + return 0; +} + +static int ad4130_validate_excitation_pin(struct ad4130_state *st, u32 pin) +{ + struct device *dev = &st->spi->dev; + + if (pin >= AD4130_MAX_ANALOG_PINS) + return dev_err_probe(dev, -EINVAL, + "Invalid excitation pin %u\n", pin); + + if (st->pins_fn[pin] == AD4130_PIN_FN_SPECIAL) + return dev_err_probe(dev, -EINVAL, + "Pin %u already used with fn %u\n", pin, + st->pins_fn[pin]); + + st->pins_fn[pin] |= AD4130_PIN_FN_EXCITATION; + + return 0; +} + +static int ad4130_validate_vbias_pin(struct ad4130_state *st, u32 pin) +{ + struct device *dev = &st->spi->dev; + + if (pin >= AD4130_MAX_ANALOG_PINS) + return dev_err_probe(dev, -EINVAL, "Invalid vbias pin %u\n", + pin); + + if (st->pins_fn[pin] == AD4130_PIN_FN_SPECIAL) + return dev_err_probe(dev, -EINVAL, + "Pin %u already used with fn %u\n", pin, + st->pins_fn[pin]); + + st->pins_fn[pin] |= AD4130_PIN_FN_VBIAS; + + return 0; +} + +static int ad4130_validate_vbias_pins(struct ad4130_state *st, + u32 *pins, unsigned int len) +{ + unsigned int i; + int ret; + + for (i = 0; i < st->num_vbias_pins; i++) { + ret = ad4130_validate_vbias_pin(st, pins[i]); + if (ret) + return ret; + } + + return 0; +} + +static int ad4130_parse_fw_channel(struct iio_dev *indio_dev, + struct fwnode_handle *child) +{ + struct ad4130_state *st = iio_priv(indio_dev); + unsigned int resolution = ad4130_resolution(st); + unsigned int index = indio_dev->num_channels++; + struct device *dev = &st->spi->dev; + struct ad4130_chan_info *chan_info; + struct iio_chan_spec *chan; + u32 pins[2]; + int ret; + + if (index >= AD4130_MAX_CHANNELS) + return dev_err_probe(dev, -EINVAL, "Too many channels\n"); + + chan = &st->chans[index]; + chan_info = &st->chans_info[index]; + + *chan = ad4130_channel_template; + chan->scan_type.realbits = resolution; + chan->scan_type.storagebits = resolution; + chan->scan_index = index; + + chan_info->slot = AD4130_INVALID_SLOT; + chan_info->setup.fs = AD4130_FILTER_SELECT_MIN; + chan_info->initialized = true; + + ret = 
fwnode_property_read_u32_array(child, "diff-channels", pins, + ARRAY_SIZE(pins)); + if (ret) + return ret; + + ret = ad4130_validate_diff_channels(st, pins, ARRAY_SIZE(pins)); + if (ret) + return ret; + + chan->channel = pins[0]; + chan->channel2 = pins[1]; + + ret = ad4130_parse_fw_setup(st, child, &chan_info->setup); + if (ret) + return ret; + + fwnode_property_read_u32(child, "adi,excitation-pin-0", + &chan_info->iout0); + if (chan_info->setup.iout0_val != AD4130_IOUT_OFF) { + ret = ad4130_validate_excitation_pin(st, chan_info->iout0); + if (ret) + return ret; + } + + fwnode_property_read_u32(child, "adi,excitation-pin-1", + &chan_info->iout1); + if (chan_info->setup.iout1_val != AD4130_IOUT_OFF) { + ret = ad4130_validate_excitation_pin(st, chan_info->iout1); + if (ret) + return ret; + } + + return 0; +} + +static int ad4130_parse_fw_children(struct iio_dev *indio_dev) +{ + struct ad4130_state *st = iio_priv(indio_dev); + struct device *dev = &st->spi->dev; + struct fwnode_handle *child; + int ret; + + indio_dev->channels = st->chans; + + device_for_each_child_node(dev, child) { + ret = ad4130_parse_fw_channel(indio_dev, child); + if (ret) { + fwnode_handle_put(child); + return ret; + } + } + + return 0; +} + +static int ad4310_parse_fw(struct iio_dev *indio_dev) +{ + struct ad4130_state *st = iio_priv(indio_dev); + struct device *dev = &st->spi->dev; + u32 ext_clk_freq = AD4130_MCLK_FREQ_76_8KHZ; + unsigned int i; + int avdd_uv; + int irq; + int ret; + + st->mclk = devm_clk_get_optional(dev, "mclk"); + if (IS_ERR(st->mclk)) + return dev_err_probe(dev, PTR_ERR(st->mclk), + "Failed to get mclk\n"); + + st->int_pin_sel = AD4130_INT_PIN_INT; + + for (i = 0; i < ARRAY_SIZE(ad4130_int_pin_names); i++) { + irq = fwnode_irq_get_byname(dev_fwnode(dev), + ad4130_int_pin_names[i]); + if (irq > 0) { + st->int_pin_sel = i; + break; + } + } + + if (st->int_pin_sel == AD4130_INT_PIN_DOUT) + return dev_err_probe(dev, -EINVAL, + "Cannot use DOUT as interrupt pin\n"); + + if (st->int_pin_sel == AD4130_INT_PIN_P2) + st->pins_fn[AD4130_AIN3_P2] = AD4130_PIN_FN_SPECIAL; + + device_property_read_u32(dev, "adi,ext-clk-freq-hz", &ext_clk_freq); + if (ext_clk_freq != AD4130_MCLK_FREQ_153_6KHZ && + ext_clk_freq != AD4130_MCLK_FREQ_76_8KHZ) + return dev_err_probe(dev, -EINVAL, + "Invalid external clock frequency %u\n", + ext_clk_freq); + + if (st->mclk && ext_clk_freq == AD4130_MCLK_FREQ_153_6KHZ) + st->mclk_sel = AD4130_MCLK_153_6KHZ_EXT; + else if (st->mclk) + st->mclk_sel = AD4130_MCLK_76_8KHZ_EXT; + else + st->mclk_sel = AD4130_MCLK_76_8KHZ; + + if (st->int_pin_sel == AD4130_INT_PIN_CLK && + st->mclk_sel != AD4130_MCLK_76_8KHZ) + return dev_err_probe(dev, -EINVAL, + "Invalid clock %u for interrupt pin %u\n", + st->mclk_sel, st->int_pin_sel); + + st->int_ref_uv = AD4130_INT_REF_2_5V; + + /* + * When the AVDD supply is set to below 2.5V the internal reference of + * 1.25V should be selected. + * See datasheet page 37, section ADC REFERENCE. 
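+	 * For example, a 1.8 V AVDD supply selects the 1.25 V internal
+	 * reference below, while a 3.3 V supply keeps the 2.5 V default.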
+ */ + avdd_uv = regulator_get_voltage(st->regulators[0].consumer); + if (avdd_uv > 0 && avdd_uv < AD4130_INT_REF_2_5V) + st->int_ref_uv = AD4130_INT_REF_1_25V; + + st->bipolar = device_property_read_bool(dev, "adi,bipolar"); + + ret = device_property_count_u32(dev, "adi,vbias-pins"); + if (ret > 0) { + if (ret > AD4130_MAX_ANALOG_PINS) + return dev_err_probe(dev, -EINVAL, + "Too many vbias pins %u\n", ret); + + st->num_vbias_pins = ret; + + ret = device_property_read_u32_array(dev, "adi,vbias-pins", + st->vbias_pins, + st->num_vbias_pins); + if (ret) + return dev_err_probe(dev, ret, + "Failed to read vbias pins\n"); + + ret = ad4130_validate_vbias_pins(st, st->vbias_pins, + st->num_vbias_pins); + if (ret) + return ret; + } + + ret = ad4130_parse_fw_children(indio_dev); + if (ret) + return ret; + + return 0; +} + +static void ad4130_fill_scale_tbls(struct ad4130_state *st) +{ + unsigned int pow = ad4130_resolution(st) - st->bipolar; + unsigned int i, j; + + for (i = 0; i < AD4130_REF_SEL_MAX; i++) { + int ret; + u64 nv; + + ret = ad4130_get_ref_voltage(st, i); + if (ret < 0) + continue; + + nv = (u64)ret * NANO; + + for (j = 0; j < AD4130_MAX_PGA; j++) + st->scale_tbls[i][j][1] = div_u64(nv >> (pow + j), MILLI); + } +} + +static void ad4130_clk_disable_unprepare(void *clk) +{ + clk_disable_unprepare(clk); +} + +static int ad4130_set_mclk_sel(struct ad4130_state *st, + enum ad4130_mclk_sel mclk_sel) +{ + return regmap_update_bits(st->regmap, AD4130_ADC_CONTROL_REG, + AD4130_ADC_CONTROL_MCLK_SEL_MASK, + FIELD_PREP(AD4130_ADC_CONTROL_MCLK_SEL_MASK, + mclk_sel)); +} + +static unsigned long ad4130_int_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return AD4130_MCLK_FREQ_76_8KHZ; +} + +static int ad4130_int_clk_is_enabled(struct clk_hw *hw) +{ + struct ad4130_state *st = container_of(hw, struct ad4130_state, int_clk_hw); + + return st->mclk_sel == AD4130_MCLK_76_8KHZ_OUT; +} + +static int ad4130_int_clk_prepare(struct clk_hw *hw) +{ + struct ad4130_state *st = container_of(hw, struct ad4130_state, int_clk_hw); + int ret; + + ret = ad4130_set_mclk_sel(st, AD4130_MCLK_76_8KHZ_OUT); + if (ret) + return ret; + + st->mclk_sel = AD4130_MCLK_76_8KHZ_OUT; + + return 0; +} + +static void ad4130_int_clk_unprepare(struct clk_hw *hw) +{ + struct ad4130_state *st = container_of(hw, struct ad4130_state, int_clk_hw); + int ret; + + ret = ad4130_set_mclk_sel(st, AD4130_MCLK_76_8KHZ); + if (ret) + return; + + st->mclk_sel = AD4130_MCLK_76_8KHZ; +} + +static const struct clk_ops ad4130_int_clk_ops = { + .recalc_rate = ad4130_int_clk_recalc_rate, + .is_enabled = ad4130_int_clk_is_enabled, + .prepare = ad4130_int_clk_prepare, + .unprepare = ad4130_int_clk_unprepare, +}; + +static int ad4130_setup_int_clk(struct ad4130_state *st) +{ + struct device *dev = &st->spi->dev; + struct device_node *of_node = dev_of_node(dev); + struct clk_init_data init; + const char *clk_name; + struct clk *clk; + + if (st->int_pin_sel == AD4130_INT_PIN_CLK || + st->mclk_sel != AD4130_MCLK_76_8KHZ) + return 0; + + if (!of_node) + return 0; + + clk_name = of_node->name; + of_property_read_string(of_node, "clock-output-names", &clk_name); + + init.name = clk_name; + init.ops = &ad4130_int_clk_ops; + + st->int_clk_hw.init = &init; + clk = devm_clk_register(dev, &st->int_clk_hw); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + return of_clk_add_provider(of_node, of_clk_src_simple_get, clk); +} + +static int ad4130_setup(struct iio_dev *indio_dev) +{ + struct ad4130_state *st = iio_priv(indio_dev); + struct device *dev = 
&st->spi->dev; + unsigned int int_ref_val; + unsigned long rate = AD4130_MCLK_FREQ_76_8KHZ; + unsigned int val; + unsigned int i; + int ret; + + if (st->mclk_sel == AD4130_MCLK_153_6KHZ_EXT) + rate = AD4130_MCLK_FREQ_153_6KHZ; + + ret = clk_set_rate(st->mclk, rate); + if (ret) + return ret; + + ret = clk_prepare_enable(st->mclk); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, ad4130_clk_disable_unprepare, + st->mclk); + if (ret) + return ret; + + if (st->int_ref_uv == AD4130_INT_REF_2_5V) + int_ref_val = AD4130_INT_REF_VAL_2_5V; + else + int_ref_val = AD4130_INT_REF_VAL_1_25V; + + /* Switch to SPI 4-wire mode. */ + val = FIELD_PREP(AD4130_ADC_CONTROL_CSB_EN_MASK, 1); + val |= FIELD_PREP(AD4130_ADC_CONTROL_BIPOLAR_MASK, st->bipolar); + val |= FIELD_PREP(AD4130_ADC_CONTROL_INT_REF_EN_MASK, st->int_ref_en); + val |= FIELD_PREP(AD4130_ADC_CONTROL_MODE_MASK, AD4130_MODE_IDLE); + val |= FIELD_PREP(AD4130_ADC_CONTROL_MCLK_SEL_MASK, st->mclk_sel); + val |= FIELD_PREP(AD4130_ADC_CONTROL_INT_REF_VAL_MASK, int_ref_val); + + ret = regmap_write(st->regmap, AD4130_ADC_CONTROL_REG, val); + if (ret) + return ret; + + /* + * Configure all GPIOs for output. If configured, the interrupt function + * of P2 takes priority over the GPIO out function. + */ + val = AD4130_IO_CONTROL_GPIO_CTRL_MASK; + val |= FIELD_PREP(AD4130_IO_CONTROL_INT_PIN_SEL_MASK, st->int_pin_sel); + + ret = regmap_write(st->regmap, AD4130_IO_CONTROL_REG, val); + if (ret) + return ret; + + val = 0; + for (i = 0; i < st->num_vbias_pins; i++) + val |= BIT(st->vbias_pins[i]); + + ret = regmap_write(st->regmap, AD4130_VBIAS_REG, val); + if (ret) + return ret; + + ret = regmap_update_bits(st->regmap, AD4130_FIFO_CONTROL_REG, + AD4130_FIFO_CONTROL_HEADER_MASK, 0); + if (ret) + return ret; + + /* FIFO watermark interrupt starts out as enabled, disable it. */ + ret = ad4130_set_watermark_interrupt_en(st, false); + if (ret) + return ret; + + /* Setup channels. */ + for (i = 0; i < indio_dev->num_channels; i++) { + struct ad4130_chan_info *chan_info = &st->chans_info[i]; + struct iio_chan_spec *chan = &st->chans[i]; + unsigned int val; + + val = FIELD_PREP(AD4130_CHANNEL_AINP_MASK, chan->channel) | + FIELD_PREP(AD4130_CHANNEL_AINM_MASK, chan->channel2) | + FIELD_PREP(AD4130_CHANNEL_IOUT1_MASK, chan_info->iout0) | + FIELD_PREP(AD4130_CHANNEL_IOUT2_MASK, chan_info->iout1); + + ret = regmap_write(st->regmap, AD4130_CHANNEL_X_REG(i), val); + if (ret) + return ret; + } + + return 0; +} + +static int ad4130_soft_reset(struct ad4130_state *st) +{ + int ret; + + ret = spi_write(st->spi, st->reset_buf, sizeof(st->reset_buf)); + if (ret) + return ret; + + fsleep(AD4130_RESET_SLEEP_US); + + return 0; +} + +static void ad4130_disable_regulators(void *data) +{ + struct ad4130_state *st = data; + + regulator_bulk_disable(ARRAY_SIZE(st->regulators), st->regulators); +} + +static int ad4130_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct iio_dev *indio_dev; + struct ad4130_state *st; + int ret; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + st = iio_priv(indio_dev); + + memset(st->reset_buf, 0xff, sizeof(st->reset_buf)); + init_completion(&st->completion); + mutex_init(&st->lock); + st->spi = spi; + + /* + * Xfer: [ XFR1 ] [ XFR2 ] + * Master: 0x7D N ...................... + * Slave: ...... DATA1 DATA2 ... 
DATAN + */ + st->fifo_tx_buf[0] = AD4130_COMMS_READ_MASK | AD4130_FIFO_DATA_REG; + st->fifo_xfer[0].tx_buf = st->fifo_tx_buf; + st->fifo_xfer[0].len = sizeof(st->fifo_tx_buf); + st->fifo_xfer[1].rx_buf = st->fifo_rx_buf; + spi_message_init_with_transfers(&st->fifo_msg, st->fifo_xfer, + ARRAY_SIZE(st->fifo_xfer)); + + indio_dev->name = AD4130_NAME; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->info = &ad4130_info; + + st->regmap = devm_regmap_init(dev, NULL, st, &ad4130_regmap_config); + if (IS_ERR(st->regmap)) + return PTR_ERR(st->regmap); + + st->regulators[0].supply = "avdd"; + st->regulators[1].supply = "iovdd"; + st->regulators[2].supply = "refin1"; + st->regulators[3].supply = "refin2"; + + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(st->regulators), + st->regulators); + if (ret) + return dev_err_probe(dev, ret, "Failed to get regulators\n"); + + ret = regulator_bulk_enable(ARRAY_SIZE(st->regulators), st->regulators); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable regulators\n"); + + ret = devm_add_action_or_reset(dev, ad4130_disable_regulators, st); + if (ret) + return dev_err_probe(dev, ret, + "Failed to add regulators disable action\n"); + + ret = ad4130_soft_reset(st); + if (ret) + return ret; + + ret = ad4130_parse_fw(indio_dev); + if (ret) + return ret; + + ret = ad4130_setup(indio_dev); + if (ret) + return ret; + + ret = ad4130_setup_int_clk(st); + if (ret) + return ret; + + ad4130_fill_scale_tbls(st); + + st->gc.owner = THIS_MODULE; + st->gc.label = AD4130_NAME; + st->gc.base = -1; + st->gc.ngpio = AD4130_MAX_GPIOS; + st->gc.parent = dev; + st->gc.can_sleep = true; + st->gc.init_valid_mask = ad4130_gpio_init_valid_mask; + st->gc.get_direction = ad4130_gpio_get_direction; + st->gc.set = ad4130_gpio_set; + + ret = devm_gpiochip_add_data(dev, &st->gc, st); + if (ret) + return ret; + + ret = devm_iio_kfifo_buffer_setup_ext(dev, indio_dev, + &ad4130_buffer_ops, + ad4130_fifo_attributes); + if (ret) + return ret; + + ret = devm_request_threaded_irq(dev, spi->irq, NULL, + ad4130_irq_handler, IRQF_ONESHOT, + indio_dev->name, indio_dev); + if (ret) + return dev_err_probe(dev, ret, "Failed to request irq\n"); + + /* + * When the chip enters FIFO mode, IRQ polarity is inverted. + * When the chip exits FIFO mode, IRQ polarity returns to normal. + * See datasheet pages: 65, FIFO Watermark Interrupt section, + * and 71, Bit Descriptions for STATUS Register, RDYB. + * Cache the normal and inverted IRQ triggers to set them when + * entering and exiting FIFO mode. 
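+ * (Editor's illustration, not part of the original patch: an IRQ line
+ * requested as IRQF_TRIGGER_RISING is re-armed as IRQF_TRIGGER_FALLING
+ * while FIFO mode is active and restored afterwards, using the cached
+ * st->irq_trigger and st->inv_irq_trigger values set below.)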
+ */ + st->irq_trigger = irq_get_trigger_type(spi->irq); + if (st->irq_trigger & IRQF_TRIGGER_RISING) + st->inv_irq_trigger = IRQF_TRIGGER_FALLING; + else if (st->irq_trigger & IRQF_TRIGGER_FALLING) + st->inv_irq_trigger = IRQF_TRIGGER_RISING; + else + return dev_err_probe(dev, -EINVAL, "Invalid irq flags: %u\n", + st->irq_trigger); + + return devm_iio_device_register(dev, indio_dev); +} + +static const struct of_device_id ad4130_of_match[] = { + { + .compatible = "adi,ad4130", + }, + { } +}; +MODULE_DEVICE_TABLE(of, ad4130_of_match); + +static struct spi_driver ad4130_driver = { + .driver = { + .name = AD4130_NAME, + .of_match_table = ad4130_of_match, + }, + .probe = ad4130_probe, +}; +module_spi_driver(ad4130_driver); + +MODULE_AUTHOR("Cosmin Tanislav "); +MODULE_DESCRIPTION("Analog Devices AD4130 SPI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/adc/ad7091r5.c b/drivers/iio/adc/ad7091r5.c index 47f5763023a403c727bb5c512b60576a07ca3443..7d6709da1005fe2cbc6021318e79bc769af1f417 100644 --- a/drivers/iio/adc/ad7091r5.c +++ b/drivers/iio/adc/ad7091r5.c @@ -69,9 +69,9 @@ static const struct ad7091r_chip_info ad7091r5_chip_info_noirq = { .vref_mV = 2500, }; -static int ad7091r5_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int ad7091r5_i2c_probe(struct i2c_client *i2c) { + const struct i2c_device_id *id = i2c_client_get_device_id(i2c); const struct ad7091r_chip_info *chip_info; struct regmap *map = devm_regmap_init_i2c(i2c, &ad7091r_regmap_config); @@ -103,7 +103,7 @@ static struct i2c_driver ad7091r5_driver = { .name = "ad7091r5", .of_match_table = ad7091r5_dt_ids, }, - .probe = ad7091r5_i2c_probe, + .probe_new = ad7091r5_i2c_probe, .id_table = ad7091r5_i2c_ids, }; module_i2c_driver(ad7091r5_driver); diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 4088786e1026d5288d34e42f852292248ede1491..050a2fbf5c493cca3f0dc80a335b7108546ef944 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -944,6 +944,8 @@ static int ad7124_probe(struct spi_device *spi) int i, ret; info = of_device_get_match_data(&spi->dev); + if (!info) + info = (void *)spi_get_device_id(spi)->driver_data; if (!info) return -ENODEV; @@ -1021,12 +1023,20 @@ static const struct of_device_id ad7124_of_match[] = { }; MODULE_DEVICE_TABLE(of, ad7124_of_match); +static const struct spi_device_id ad71124_ids[] = { + { "ad7124-4", (kernel_ulong_t)&ad7124_chip_info_tbl[ID_AD7124_4] }, + { "ad7124-8", (kernel_ulong_t)&ad7124_chip_info_tbl[ID_AD7124_8] }, + {} +}; +MODULE_DEVICE_TABLE(spi, ad71124_ids); + static struct spi_driver ad71124_driver = { .driver = { .name = "ad7124", .of_match_table = ad7124_of_match, }, .probe = ad7124_probe, + .id_table = ad71124_ids, }; module_spi_driver(ad71124_driver); diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c index d71977be7d228c9b38c84b0385437ce1111c7a27..55a6ab5910160725415b004ab523f453c97b5337 100644 --- a/drivers/iio/adc/ad7192.c +++ b/drivers/iio/adc/ad7192.c @@ -177,7 +177,6 @@ struct ad7192_chip_info { struct ad7192_state { const struct ad7192_chip_info *chip_info; struct regulator *avdd; - struct regulator *dvdd; struct clk *mclk; u16 int_vref_mv; u32 fclk; @@ -1015,19 +1014,9 @@ static int ad7192_probe(struct spi_device *spi) if (ret) return ret; - st->dvdd = devm_regulator_get(&spi->dev, "dvdd"); - if (IS_ERR(st->dvdd)) - return PTR_ERR(st->dvdd); - - ret = regulator_enable(st->dvdd); - if (ret) { - dev_err(&spi->dev, "Failed to enable specified DVdd supply\n"); - return ret; - } - - ret = 
devm_add_action_or_reset(&spi->dev, ad7192_reg_disable, st->dvdd); + ret = devm_regulator_get_enable(&spi->dev, "dvdd"); if (ret) - return ret; + return dev_err_probe(&spi->dev, ret, "Failed to enable specified DVdd supply\n"); ret = regulator_get_voltage(st->avdd); if (ret < 0) { @@ -1037,6 +1026,8 @@ static int ad7192_probe(struct spi_device *spi) st->int_vref_mv = ret / 1000; st->chip_info = of_device_get_match_data(&spi->dev); + if (!st->chip_info) + st->chip_info = (void *)spi_get_device_id(spi)->driver_data; indio_dev->name = st->chip_info->name; indio_dev->modes = INDIO_DIRECT_MODE; @@ -1098,12 +1089,22 @@ static const struct of_device_id ad7192_of_match[] = { }; MODULE_DEVICE_TABLE(of, ad7192_of_match); +static const struct spi_device_id ad7192_ids[] = { + { "ad7190", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7190] }, + { "ad7192", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7192] }, + { "ad7193", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7193] }, + { "ad7195", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7195] }, + {} +}; +MODULE_DEVICE_TABLE(spi, ad7192_ids); + static struct spi_driver ad7192_driver = { .driver = { .name = "ad7192", .of_match_table = ad7192_of_match, }, .probe = ad7192_probe, + .id_table = ad7192_ids, }; module_spi_driver(ad7192_driver); diff --git a/drivers/iio/adc/ad7291.c b/drivers/iio/adc/ad7291.c index e9129dac762fead96a149364f1b76478c22bb0a1..3dd0105f63d7166d36f56e6ef209bdcae9b0bba1 100644 --- a/drivers/iio/adc/ad7291.c +++ b/drivers/iio/adc/ad7291.c @@ -465,9 +465,9 @@ static void ad7291_reg_disable(void *reg) regulator_disable(reg); } -static int ad7291_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ad7291_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct ad7291_chip_info *chip; struct iio_dev *indio_dev; int ret; @@ -553,7 +553,7 @@ static struct i2c_driver ad7291_driver = { .name = KBUILD_MODNAME, .of_match_table = ad7291_of_match, }, - .probe = ad7291_probe, + .probe_new = ad7291_probe, .id_table = ad7291_id, }; module_i2c_driver(ad7291_driver); diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c index 94776f696290772d0fe02950a3ca6e373d173ded..80aebed47d1fa607ccc8ba140007bdaa0b5cf1bb 100644 --- a/drivers/iio/adc/ad7476.c +++ b/drivers/iio/adc/ad7476.c @@ -368,16 +368,7 @@ static int ad7476_probe(struct spi_device *spi) } if (st->chip_info->has_vdrive) { - reg = devm_regulator_get(&spi->dev, "vdrive"); - if (IS_ERR(reg)) - return PTR_ERR(reg); - - ret = regulator_enable(reg); - if (ret) - return ret; - - ret = devm_add_action_or_reset(&spi->dev, ad7476_reg_disable, - reg); + ret = devm_regulator_get_enable(&spi->dev, "vdrive"); if (ret) return ret; } diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c index ba24f99523e01e71744ab9d6d62f6f5facaad394..dd6b603f65eadbed7c9d0df6159672e0058cdf22 100644 --- a/drivers/iio/adc/ad7606.c +++ b/drivers/iio/adc/ad7606.c @@ -557,13 +557,6 @@ static const struct iio_trigger_ops ad7606_trigger_ops = { .validate_device = iio_trigger_validate_own_device, }; -static void ad7606_regulator_disable(void *data) -{ - struct ad7606_state *st = data; - - regulator_disable(st->reg); -} - int ad7606_probe(struct device *dev, int irq, void __iomem *base_address, const char *name, unsigned int id, const struct ad7606_bus_ops *bops) @@ -589,19 +582,10 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address, st->scale_avail = ad7606_scale_avail; st->num_scales = ARRAY_SIZE(ad7606_scale_avail); - 
st->reg = devm_regulator_get(dev, "avcc"); - if (IS_ERR(st->reg)) - return PTR_ERR(st->reg); - - ret = regulator_enable(st->reg); - if (ret) { - dev_err(dev, "Failed to enable specified AVcc supply\n"); - return ret; - } - - ret = devm_add_action_or_reset(dev, ad7606_regulator_disable, st); + ret = devm_regulator_get_enable(dev, "avcc"); if (ret) - return ret; + return dev_err_probe(dev, ret, + "Failed to enable specified AVcc supply\n"); st->chip_info = &ad7606_chip_info_tbl[id]; diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h index 2dc4f599f9df9b18ec23c41e716cb46cce11548d..0c6a88cc469585fd8b491b5d93643f8acde8fd46 100644 --- a/drivers/iio/adc/ad7606.h +++ b/drivers/iio/adc/ad7606.h @@ -62,7 +62,6 @@ struct ad7606_chip_info { * struct ad7606_state - driver instance specific data * @dev pointer to kernel device * @chip_info entry in the table of chips that describes this device - * @reg regulator info for the power supply of the device * @bops bus operations (SPI or parallel) * @range voltage range selection, selects which scale to apply * @oversampling oversampling selection @@ -92,7 +91,6 @@ struct ad7606_chip_info { struct ad7606_state { struct device *dev; const struct ad7606_chip_info *chip_info; - struct regulator *reg; const struct ad7606_bus_ops *bops; unsigned int range[16]; unsigned int oversampling; diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c index b912b4df9b5638f820728cd9d8f90e888fadc464..d8408052262e4d4c25d8c8a7cea06cf39752afdc 100644 --- a/drivers/iio/adc/ad7606_par.c +++ b/drivers/iio/adc/ad7606_par.c @@ -57,8 +57,7 @@ static int ad7606_par_probe(struct platform_device *pdev) if (irq < 0) return irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - addr = devm_ioremap_resource(&pdev->dev, res); + addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(addr)) return PTR_ERR(addr); diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c index 6dbe9d5e08a2b263d8b01b285dabcfa44878df19..8f0a3a35e727e84934855c783e4a1d22e6f0c98f 100644 --- a/drivers/iio/adc/ad799x.c +++ b/drivers/iio/adc/ad799x.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -125,6 +126,8 @@ struct ad799x_state { const struct ad799x_chip_config *chip_config; struct regulator *reg; struct regulator *vref; + /* lock to protect against multiple access to the device */ + struct mutex lock; unsigned id; u16 config; @@ -290,7 +293,9 @@ static int ad799x_read_raw(struct iio_dev *indio_dev, ret = iio_device_claim_direct_mode(indio_dev); if (ret) return ret; + mutex_lock(&st->lock); ret = ad799x_scan_direct(st, chan->scan_index); + mutex_unlock(&st->lock); iio_device_release_direct_mode(indio_dev); if (ret < 0) @@ -351,7 +356,8 @@ static ssize_t ad799x_write_frequency(struct device *dev, if (ret) return ret; - mutex_lock(&indio_dev->mlock); + mutex_lock(&st->lock); + ret = i2c_smbus_read_byte_data(st->client, AD7998_CYCLE_TMR_REG); if (ret < 0) goto error_ret_mutex; @@ -373,7 +379,7 @@ static ssize_t ad799x_write_frequency(struct device *dev, ret = len; error_ret_mutex: - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&st->lock); return ret; } @@ -407,6 +413,8 @@ static int ad799x_write_event_config(struct iio_dev *indio_dev, if (ret) return ret; + mutex_lock(&st->lock); + if (state) st->config |= BIT(chan->scan_index) << AD799X_CHANNEL_SHIFT; else @@ -418,6 +426,7 @@ static int ad799x_write_event_config(struct iio_dev *indio_dev, st->config &= ~AD7998_ALERT_EN; ret = ad799x_write_config(st, st->config); + 
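/* Editor's note: the new st->lock (replacing mlock) serializes the cached st->config with the register write above. */ +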
mutex_unlock(&st->lock); iio_device_release_direct_mode(indio_dev); return ret; } @@ -454,11 +463,9 @@ static int ad799x_write_event_value(struct iio_dev *indio_dev, if (val < 0 || val > GENMASK(chan->scan_type.realbits - 1, 0)) return -EINVAL; - mutex_lock(&indio_dev->mlock); ret = i2c_smbus_write_word_swapped(st->client, ad799x_threshold_reg(chan, dir, info), val << chan->scan_type.shift); - mutex_unlock(&indio_dev->mlock); return ret; } @@ -473,10 +480,8 @@ static int ad799x_read_event_value(struct iio_dev *indio_dev, int ret; struct ad799x_state *st = iio_priv(indio_dev); - mutex_lock(&indio_dev->mlock); ret = i2c_smbus_read_word_swapped(st->client, ad799x_threshold_reg(chan, dir, info)); - mutex_unlock(&indio_dev->mlock); if (ret < 0) return ret; *val = (ret >> chan->scan_type.shift) & @@ -770,9 +775,9 @@ static const struct ad799x_chip_info ad799x_chip_info_tbl[] = { }, }; -static int ad799x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ad799x_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); int ret; int extra_config = 0; struct ad799x_state *st; @@ -863,6 +868,9 @@ static int ad799x_probe(struct i2c_client *client, if (ret) goto error_cleanup_ring; } + + mutex_init(&st->lock); + ret = iio_device_register(indio_dev); if (ret) goto error_cleanup_ring; @@ -960,7 +968,7 @@ static struct i2c_driver ad799x_driver = { .name = "ad799x", .pm = pm_sleep_ptr(&ad799x_pm_ops), }, - .probe = ad799x_probe, + .probe_new = ad799x_probe, .remove = ad799x_remove, .id_table = ad799x_id, }; diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c index 7534572f74757f5540811e1cdd7aab85cf537558..0621cf59d614658046a08079ab2a2db530dca387 100644 --- a/drivers/iio/adc/ad9467.c +++ b/drivers/iio/adc/ad9467.c @@ -387,6 +387,8 @@ static int ad9467_probe(struct spi_device *spi) int ret; info = of_device_get_match_data(&spi->dev); + if (!info) + info = (void *)spi_get_device_id(spi)->driver_data; if (!info) return -ENODEV; @@ -447,12 +449,21 @@ static const struct of_device_id ad9467_of_match[] = { }; MODULE_DEVICE_TABLE(of, ad9467_of_match); +static const struct spi_device_id ad9467_ids[] = { + { "ad9265", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9265] }, + { "ad9434", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9434] }, + { "ad9467", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9467] }, + {} +}; +MODULE_DEVICE_TABLE(spi, ad9467_ids); + static struct spi_driver ad9467_driver = { .driver = { .name = "ad9467", .of_match_table = ad9467_of_match, }, .probe = ad9467_probe, + .id_table = ad9467_ids, }; module_spi_driver(ad9467_driver); diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index 261a9a6b45e15d8c3a2d8a1d78ea348d5a7f8e86..d8570f620785af8d8b6dfb1a11ee3b63ef579526 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -281,10 +281,10 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, unsigned int data_reg; int ret = 0; - if (iio_buffer_enabled(indio_dev)) - return -EBUSY; + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; - mutex_lock(&indio_dev->mlock); ad_sigma_delta_set_channel(sigma_delta, chan->address); spi_bus_lock(sigma_delta->spi->master); @@ -323,7 +323,7 @@ out: ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE); sigma_delta->bus_locked = false; spi_bus_unlock(sigma_delta->spi->master); - mutex_unlock(&indio_dev->mlock); + iio_device_release_direct_mode(indio_dev); if (ret) return ret; diff --git 
a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index 870f4cb609237d85be6a2de25fa45a2fbf28595b..ed4f8501bda841e0c4de22ce9c843cee5d355775 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -2193,32 +2193,19 @@ static ssize_t at91_adc_get_watermark(struct device *dev, return scnprintf(buf, PAGE_SIZE, "%d\n", st->dma_st.watermark); } -static ssize_t hwfifo_watermark_min_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%s\n", "2"); -} - -static ssize_t hwfifo_watermark_max_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%s\n", AT91_HWFIFO_MAX_SIZE_STR); -} - static IIO_DEVICE_ATTR(hwfifo_enabled, 0444, at91_adc_get_fifo_state, NULL, 0); static IIO_DEVICE_ATTR(hwfifo_watermark, 0444, at91_adc_get_watermark, NULL, 0); -static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0); -static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0); - -static const struct attribute *at91_adc_fifo_attributes[] = { - &iio_dev_attr_hwfifo_watermark_min.dev_attr.attr, - &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr, - &iio_dev_attr_hwfifo_watermark.dev_attr.attr, - &iio_dev_attr_hwfifo_enabled.dev_attr.attr, + +IIO_STATIC_CONST_DEVICE_ATTR(hwfifo_watermark_min, "2"); +IIO_STATIC_CONST_DEVICE_ATTR(hwfifo_watermark_max, AT91_HWFIFO_MAX_SIZE_STR); + +static const struct iio_dev_attr *at91_adc_fifo_attributes[] = { + &iio_dev_attr_hwfifo_watermark_min, + &iio_dev_attr_hwfifo_watermark_max, + &iio_dev_attr_hwfifo_watermark, + &iio_dev_attr_hwfifo_enabled, NULL, }; @@ -2235,7 +2222,7 @@ static int at91_adc_buffer_and_trigger_init(struct device *dev, struct iio_dev *indio) { struct at91_adc_state *st = iio_priv(indio); - const struct attribute **fifo_attrs; + const struct iio_dev_attr **fifo_attrs; int ret; if (st->selected_trig->hw_trig) diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index 580361bd98492a07654aa86b19eae18d3cfbf5ba..49fff1cabd0dc56e6d4d71d3cbb8fb7cb34c9e5b 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -50,6 +51,8 @@ enum axp288_adc_id { struct axp288_adc_info { int irq; struct regmap *regmap; + /* lock to protect against multiple access to the device */ + struct mutex lock; bool ts_enabled; }; @@ -161,7 +164,7 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, int ret; struct axp288_adc_info *info = iio_priv(indio_dev); - mutex_lock(&indio_dev->mlock); + mutex_lock(&info->lock); switch (mask) { case IIO_CHAN_INFO_RAW: if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND, @@ -178,7 +181,7 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, default: ret = -EINVAL; } - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&info->lock); return ret; } @@ -289,6 +292,8 @@ static int axp288_adc_probe(struct platform_device *pdev) if (ret < 0) return ret; + mutex_init(&info->lock); + return devm_iio_device_register(&pdev->dev, indio_dev); } diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c index e16ac935693b64af354a79971e2b92dc432f2e0f..2cde4b44fc6e482afcf8700a1422b3d194c070ea 100644 --- a/drivers/iio/adc/cc10001_adc.c +++ b/drivers/iio/adc/cc10001_adc.c @@ -305,16 +305,27 @@ static int cc10001_adc_channel_init(struct iio_dev *indio_dev, return 0; } +static void cc10001_reg_disable(void *priv) +{ + regulator_disable(priv); +} + +static void cc10001_pd_cb(void *priv) +{ + 
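/* Editor's note: thin devm callback so devm_add_action_or_reset() can power the ADC back down automatically on unbind. */ +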
cc10001_adc_power_down(priv); +} + static int cc10001_adc_probe(struct platform_device *pdev) { - struct device_node *node = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; struct cc10001_adc_device *adc_dev; unsigned long adc_clk_rate; struct iio_dev *indio_dev; unsigned long channel_map; int ret; - indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev)); + indio_dev = devm_iio_device_alloc(dev, sizeof(*adc_dev)); if (indio_dev == NULL) return -ENOMEM; @@ -326,7 +337,7 @@ static int cc10001_adc_probe(struct platform_device *pdev) channel_map &= ~ret; } - adc_dev->reg = devm_regulator_get(&pdev->dev, "vref"); + adc_dev->reg = devm_regulator_get(dev, "vref"); if (IS_ERR(adc_dev->reg)) return PTR_ERR(adc_dev->reg); @@ -334,34 +345,28 @@ static int cc10001_adc_probe(struct platform_device *pdev) if (ret) return ret; - indio_dev->name = dev_name(&pdev->dev); + ret = devm_add_action_or_reset(dev, cc10001_reg_disable, adc_dev->reg); + if (ret) + return ret; + + indio_dev->name = dev_name(dev); indio_dev->info = &cc10001_adc_info; indio_dev->modes = INDIO_DIRECT_MODE; adc_dev->reg_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(adc_dev->reg_base)) { - ret = PTR_ERR(adc_dev->reg_base); - goto err_disable_reg; - } + if (IS_ERR(adc_dev->reg_base)) + return PTR_ERR(adc_dev->reg_base); - adc_dev->adc_clk = devm_clk_get(&pdev->dev, "adc"); + adc_dev->adc_clk = devm_clk_get_enabled(dev, "adc"); if (IS_ERR(adc_dev->adc_clk)) { - dev_err(&pdev->dev, "failed to get the clock\n"); - ret = PTR_ERR(adc_dev->adc_clk); - goto err_disable_reg; - } - - ret = clk_prepare_enable(adc_dev->adc_clk); - if (ret) { - dev_err(&pdev->dev, "failed to enable the clock\n"); - goto err_disable_reg; + dev_err(dev, "failed to get/enable the clock\n"); + return PTR_ERR(adc_dev->adc_clk); } adc_clk_rate = clk_get_rate(adc_dev->adc_clk); if (!adc_clk_rate) { - ret = -EINVAL; - dev_err(&pdev->dev, "null clock rate!\n"); - goto err_disable_clk; + dev_err(dev, "null clock rate!\n"); + return -EINVAL; } adc_dev->eoc_delay_ns = NSEC_PER_SEC / adc_clk_rate; @@ -375,47 +380,22 @@ static int cc10001_adc_probe(struct platform_device *pdev) if (adc_dev->shared) cc10001_adc_power_up(adc_dev); + ret = devm_add_action_or_reset(dev, cc10001_pd_cb, adc_dev); + if (ret) + return ret; /* Setup the ADC channels available on the device */ ret = cc10001_adc_channel_init(indio_dev, channel_map); if (ret < 0) - goto err_disable_clk; + return ret; mutex_init(&adc_dev->lock); - ret = iio_triggered_buffer_setup(indio_dev, NULL, - &cc10001_adc_trigger_h, NULL); - if (ret < 0) - goto err_disable_clk; - - ret = iio_device_register(indio_dev); + ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, + &cc10001_adc_trigger_h, NULL); if (ret < 0) - goto err_cleanup_buffer; - - platform_set_drvdata(pdev, indio_dev); - - return 0; - -err_cleanup_buffer: - iio_triggered_buffer_cleanup(indio_dev); -err_disable_clk: - clk_disable_unprepare(adc_dev->adc_clk); -err_disable_reg: - regulator_disable(adc_dev->reg); - return ret; -} - -static int cc10001_adc_remove(struct platform_device *pdev) -{ - struct iio_dev *indio_dev = platform_get_drvdata(pdev); - struct cc10001_adc_device *adc_dev = iio_priv(indio_dev); - - cc10001_adc_power_down(adc_dev); - iio_device_unregister(indio_dev); - iio_triggered_buffer_cleanup(indio_dev); - clk_disable_unprepare(adc_dev->adc_clk); - regulator_disable(adc_dev->reg); + return ret; - return 0; + return devm_iio_device_register(dev, indio_dev); } static const 
struct of_device_id cc10001_adc_dt_ids[] = { @@ -430,7 +410,6 @@ static struct platform_driver cc10001_adc_driver = { .of_match_table = cc10001_adc_dt_ids, }, .probe = cc10001_adc_probe, - .remove = cc10001_adc_remove, }; module_platform_driver(cc10001_adc_driver); diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c index 86caff1d006b93e1b4c1c606df46c2d1df9e7597..22da81bac97f16a207bad3cae09868a2e24e953d 100644 --- a/drivers/iio/adc/imx7d_adc.c +++ b/drivers/iio/adc/imx7d_adc.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -108,7 +109,8 @@ struct imx7d_adc { struct device *dev; void __iomem *regs; struct clk *clk; - + /* lock to protect against multiple access to the device */ + struct mutex lock; u32 vref_uv; u32 value; u32 channel; @@ -293,7 +295,7 @@ static int imx7d_adc_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: - mutex_lock(&indio_dev->mlock); + mutex_lock(&info->lock); reinit_completion(&info->completion); channel = chan->channel & 0x03; @@ -303,16 +305,16 @@ static int imx7d_adc_read_raw(struct iio_dev *indio_dev, ret = wait_for_completion_interruptible_timeout (&info->completion, IMX7D_ADC_TIMEOUT); if (ret == 0) { - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&info->lock); return -ETIMEDOUT; } if (ret < 0) { - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&info->lock); return ret; } *val = info->value; - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&info->lock); return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: @@ -531,6 +533,8 @@ static int imx7d_adc_probe(struct platform_device *pdev) if (ret) return ret; + mutex_init(&info->lock); + ret = devm_iio_device_register(dev, indio_dev); if (ret) { dev_err(&pdev->dev, "Couldn't register the device.\n"); diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index 910e7e965fc486415c1f65770bc05e3fa72af678..38d9d7b2313ea1932b9897f75e23192f937eed08 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -946,9 +946,9 @@ static int ina2xx_init(struct ina2xx_chip_info *chip, unsigned int config) return ina2xx_set_calibration(chip); } -static int ina2xx_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ina2xx_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct ina2xx_chip_info *chip; struct iio_dev *indio_dev; unsigned int val; @@ -1090,7 +1090,7 @@ static struct i2c_driver ina2xx_driver = { .name = KBUILD_MODNAME, .of_match_table = ina2xx_of_match, }, - .probe = ina2xx_probe, + .probe_new = ina2xx_probe, .remove = ina2xx_remove, .id_table = ina2xx_id, }; diff --git a/drivers/iio/adc/lpc32xx_adc.c b/drivers/iio/adc/lpc32xx_adc.c index b56ce15255cfb5c92f3f6e2f34c0f8b2ef41f593..732c924a976dd91f9ef902230a4c0af63bdd07ca 100644 --- a/drivers/iio/adc/lpc32xx_adc.c +++ b/drivers/iio/adc/lpc32xx_adc.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -49,6 +50,8 @@ struct lpc32xx_adc_state { struct clk *clk; struct completion completion; struct regulator *vref; + /* lock to protect against multiple access to the device */ + struct mutex lock; u32 value; }; @@ -64,10 +67,10 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: - mutex_lock(&indio_dev->mlock); + mutex_lock(&st->lock); ret = clk_prepare_enable(st->clk); if (ret) { - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&st->lock); return ret; } /* Measurement setup */ @@ -80,7 +83,7 @@ static int 
lpc32xx_read_raw(struct iio_dev *indio_dev, wait_for_completion(&st->completion); /* set by ISR */ clk_disable_unprepare(st->clk); *val = st->value; - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&st->lock); return IIO_VAL_INT; @@ -201,6 +204,8 @@ static int lpc32xx_adc_probe(struct platform_device *pdev) iodev->modes = INDIO_DIRECT_MODE; iodev->num_channels = ARRAY_SIZE(lpc32xx_adc_iio_channels); + mutex_init(&st->lock); + retval = devm_iio_device_register(&pdev->dev, iodev); if (retval) return retval; diff --git a/drivers/iio/adc/ltc2471.c b/drivers/iio/adc/ltc2471.c index 0e0fe881a8e69cff0a665e9f33e28a97d8e3e852..eeb2945829eb27207d436198fa817df761480893 100644 --- a/drivers/iio/adc/ltc2471.c +++ b/drivers/iio/adc/ltc2471.c @@ -99,9 +99,9 @@ static const struct iio_info ltc2471_info = { .read_raw = ltc2471_read_raw, }; -static int ltc2471_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ltc2471_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct iio_dev *indio_dev; struct ltc2471_data *data; int ret; @@ -146,7 +146,7 @@ static struct i2c_driver ltc2471_i2c_driver = { .driver = { .name = "ltc2471", }, - .probe = ltc2471_i2c_probe, + .probe_new = ltc2471_i2c_probe, .id_table = ltc2471_i2c_id, }; diff --git a/drivers/iio/adc/ltc2485.c b/drivers/iio/adc/ltc2485.c index 37c762f8218c98fdbbaa988d50089195467fbb0f..6a23427344ecd176515ccbd65125138271ecc073 100644 --- a/drivers/iio/adc/ltc2485.c +++ b/drivers/iio/adc/ltc2485.c @@ -89,9 +89,9 @@ static const struct iio_info ltc2485_info = { .read_raw = ltc2485_read_raw, }; -static int ltc2485_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ltc2485_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct iio_dev *indio_dev; struct ltc2485_data *data; int ret; @@ -133,7 +133,7 @@ static struct i2c_driver ltc2485_driver = { .driver = { .name = "ltc2485", }, - .probe = ltc2485_probe, + .probe_new = ltc2485_probe, .id_table = ltc2485_id, }; module_i2c_driver(ltc2485_driver); diff --git a/drivers/iio/adc/ltc2497-core.c b/drivers/iio/adc/ltc2497-core.c index f52d37af4d1f19d61afd85124ecffbb8ca62c60a..996f6cbbed3ce161c7ad6acda6289607197caae8 100644 --- a/drivers/iio/adc/ltc2497-core.c +++ b/drivers/iio/adc/ltc2497-core.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include "ltc2497.h" @@ -81,9 +82,9 @@ static int ltc2497core_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: - mutex_lock(&indio_dev->mlock); + mutex_lock(&ddata->lock); ret = ltc2497core_read(ddata, chan->address, val); - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&ddata->lock); if (ret < 0) return ret; @@ -214,6 +215,8 @@ int ltc2497core_probe(struct device *dev, struct iio_dev *indio_dev) ddata->addr_prev = LTC2497_CONFIG_DEFAULT; ddata->time_prev = ktime_get(); + mutex_init(&ddata->lock); + ret = iio_device_register(indio_dev); if (ret < 0) goto err_array_unregister; diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c index 556f10dfb5023f7da9f2d75247f820ba1774e6c7..17370c5eb6fe32d7e164358d0d0a6a405b06d26c 100644 --- a/drivers/iio/adc/ltc2497.c +++ b/drivers/iio/adc/ltc2497.c @@ -94,9 +94,9 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata, return ret; } -static int ltc2497_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ltc2497_probe(struct i2c_client *client) { + const struct 
i2c_device_id *id = i2c_client_get_device_id(client); const struct ltc2497_chip_info *chip_info; struct iio_dev *indio_dev; struct ltc2497_driverdata *st; @@ -165,7 +165,7 @@ static struct i2c_driver ltc2497_driver = { .name = "ltc2497", .of_match_table = ltc2497_of_match, }, - .probe = ltc2497_probe, + .probe_new = ltc2497_probe, .remove = ltc2497_remove, .id_table = ltc2497_id, }; diff --git a/drivers/iio/adc/ltc2497.h b/drivers/iio/adc/ltc2497.h index e023de0d88c46ac4701927e412d1306c035db8a4..781519b52475623fe3bd6f7e1a4e9275d77fb0e0 100644 --- a/drivers/iio/adc/ltc2497.h +++ b/drivers/iio/adc/ltc2497.h @@ -12,6 +12,8 @@ struct ltc2497_chip_info { struct ltc2497core_driverdata { struct regulator *ref; ktime_t time_prev; + /* lock to protect against multiple access to the device */ + struct mutex lock; const struct ltc2497_chip_info *chip_info; u8 addr_prev; int (*result_and_measure)(struct ltc2497core_driverdata *ddata, diff --git a/drivers/iio/adc/max11410.c b/drivers/iio/adc/max11410.c new file mode 100644 index 0000000000000000000000000000000000000000..fdc9f03135b54e65f26d4135920bbd04d239637a --- /dev/null +++ b/drivers/iio/adc/max11410.c @@ -0,0 +1,1050 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * MAX11410 SPI ADC driver + * + * Copyright 2022 Analog Devices Inc. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define MAX11410_REG_CONV_START 0x01 +#define MAX11410_CONV_TYPE_SINGLE 0x00 +#define MAX11410_CONV_TYPE_CONTINUOUS 0x01 +#define MAX11410_REG_CAL_START 0x03 +#define MAX11410_CAL_START_SELF 0x00 +#define MAX11410_CAL_START_PGA 0x01 +#define MAX11410_REG_GPIO_CTRL(ch) ((ch) ? 0x05 : 0x04) +#define MAX11410_GPIO_INTRB 0xC1 +#define MAX11410_REG_FILTER 0x08 +#define MAX11410_FILTER_RATE_MASK GENMASK(3, 0) +#define MAX11410_FILTER_RATE_MAX 0x0F +#define MAX11410_FILTER_LINEF_MASK GENMASK(5, 4) +#define MAX11410_FILTER_50HZ BIT(5) +#define MAX11410_FILTER_60HZ BIT(4) +#define MAX11410_REG_CTRL 0x09 +#define MAX11410_CTRL_REFSEL_MASK GENMASK(2, 0) +#define MAX11410_CTRL_VREFN_BUF_BIT BIT(3) +#define MAX11410_CTRL_VREFP_BUF_BIT BIT(4) +#define MAX11410_CTRL_FORMAT_BIT BIT(5) +#define MAX11410_CTRL_UNIPOLAR_BIT BIT(6) +#define MAX11410_REG_MUX_CTRL0 0x0B +#define MAX11410_REG_PGA 0x0E +#define MAX11410_PGA_GAIN_MASK GENMASK(2, 0) +#define MAX11410_PGA_SIG_PATH_MASK GENMASK(5, 4) +#define MAX11410_PGA_SIG_PATH_BUFFERED 0x00 +#define MAX11410_PGA_SIG_PATH_BYPASS 0x01 +#define MAX11410_PGA_SIG_PATH_PGA 0x02 +#define MAX11410_REG_DATA0 0x30 +#define MAX11410_REG_STATUS 0x38 +#define MAX11410_STATUS_CONV_READY_BIT BIT(0) +#define MAX11410_STATUS_CAL_READY_BIT BIT(2) + +#define MAX11410_REFSEL_AVDD_AGND 0x03 +#define MAX11410_REFSEL_MAX 0x06 +#define MAX11410_SIG_PATH_MAX 0x02 +#define MAX11410_CHANNEL_INDEX_MAX 0x0A +#define MAX11410_AINP_AVDD 0x0A +#define MAX11410_AINN_GND 0x0A + +#define MAX11410_CONVERSION_TIMEOUT_MS 2000 +#define MAX11410_CALIB_TIMEOUT_MS 2000 + +#define MAX11410_SCALE_AVAIL_SIZE 8 + +enum max11410_filter { + MAX11410_FILTER_FIR5060, + MAX11410_FILTER_FIR50, + MAX11410_FILTER_FIR60, + MAX11410_FILTER_SINC4, +}; + +static const u8 max11410_sampling_len[] = { + [MAX11410_FILTER_FIR5060] = 5, + [MAX11410_FILTER_FIR50] = 6, + [MAX11410_FILTER_FIR60] = 6, + [MAX11410_FILTER_SINC4] = 10, +}; + +static const int max11410_sampling_rates[4][10][2] = { + [MAX11410_FILTER_FIR5060] = { + { 1, 100000 }, + { 2, 100000 }, + { 4, 200000 }, + { 8, 400000 }, 
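+ /* Editor's note: {integer, micro} pairs in samples/s, matching the IIO_VAL_INT_PLUS_MICRO encoding used by read_raw(), e.g. { 16, 800000 } reads back as 16.8 SPS. */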
+ { 16, 800000 } + }, + [MAX11410_FILTER_FIR50] = { + { 1, 300000 }, + { 2, 700000 }, + { 5, 300000 }, + { 10, 700000 }, + { 21, 300000 }, + { 40 } + }, + [MAX11410_FILTER_FIR60] = { + { 1, 300000 }, + { 2, 700000 }, + { 5, 300000 }, + { 10, 700000 }, + { 21, 300000 }, + { 40 } + }, + [MAX11410_FILTER_SINC4] = { + { 4 }, + { 10 }, + { 20 }, + { 40 }, + { 60 }, + { 120 }, + { 240 }, + { 480 }, + { 960 }, + { 1920 } + } +}; + +struct max11410_channel_config { + u32 settling_time_us; + u32 *scale_avail; + u8 refsel; + u8 sig_path; + u8 gain; + bool bipolar; + bool buffered_vrefp; + bool buffered_vrefn; +}; + +struct max11410_state { + struct spi_device *spi_dev; + struct iio_trigger *trig; + struct completion completion; + struct mutex lock; /* Prevent changing channel config during sampling */ + struct regmap *regmap; + struct regulator *avdd; + struct regulator *vrefp[3]; + struct regulator *vrefn[3]; + struct max11410_channel_config *channels; + int irq; + struct { + u32 data __aligned(IIO_DMA_MINALIGN); + s64 ts __aligned(8); + } scan; +}; + +static const struct iio_chan_spec chanspec_template = { + .type = IIO_VOLTAGE, + .indexed = 1, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OFFSET), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), + .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), + .scan_type = { + .sign = 's', + .realbits = 24, + .storagebits = 32, + .endianness = IIO_LE, + }, +}; + +static unsigned int max11410_reg_size(unsigned int reg) +{ + /* Registers from 0x00 to 0x10 are 1 byte, the rest are 3 bytes long. */ + return reg <= 0x10 ? 1 : 3; +} + +static int max11410_write_reg(struct max11410_state *st, unsigned int reg, + unsigned int val) +{ + /* This driver only needs to write 8-bit registers */ + if (max11410_reg_size(reg) != 1) + return -EINVAL; + + return regmap_write(st->regmap, reg, val); +} + +static int max11410_read_reg(struct max11410_state *st, unsigned int reg, + int *val) +{ + int ret; + + if (max11410_reg_size(reg) == 3) { + ret = regmap_bulk_read(st->regmap, reg, &st->scan.data, 3); + if (ret) + return ret; + + *val = get_unaligned_be24(&st->scan.data); + return 0; + } + + return regmap_read(st->regmap, reg, val); +} + +static struct regulator *max11410_get_vrefp(struct max11410_state *st, + u8 refsel) +{ + refsel = refsel % 4; + if (refsel == 3) + return st->avdd; + + return st->vrefp[refsel]; +} + +static struct regulator *max11410_get_vrefn(struct max11410_state *st, + u8 refsel) +{ + if (refsel > 2) + return NULL; + + return st->vrefn[refsel]; +} + +static const struct regmap_config regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x39, +}; + +static ssize_t max11410_notch_en_show(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct max11410_state *state = iio_priv(indio_dev); + struct iio_dev_attr *iio_attr = to_iio_dev_attr(devattr); + unsigned int val; + int ret; + + ret = max11410_read_reg(state, MAX11410_REG_FILTER, &val); + if (ret) + return ret; + + switch (iio_attr->address) { + case 0: + val = !FIELD_GET(MAX11410_FILTER_50HZ, val); + break; + case 1: + val = !FIELD_GET(MAX11410_FILTER_60HZ, val); + break; + case 2: + val = FIELD_GET(MAX11410_FILTER_LINEF_MASK, val) == 3; + break; + default: + return -EINVAL; + } + + return sysfs_emit(buf, "%d\n", val); +} + +static ssize_t max11410_notch_en_store(struct device *dev, + struct device_attribute *devattr, + const char *buf, 
size_t count) +{ + struct iio_dev_attr *iio_attr = to_iio_dev_attr(devattr); + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct max11410_state *state = iio_priv(indio_dev); + unsigned int filter_bits; + bool enable; + int ret; + + ret = kstrtobool(buf, &enable); + if (ret) + return ret; + + switch (iio_attr->address) { + case 0: + filter_bits = MAX11410_FILTER_50HZ; + break; + case 1: + filter_bits = MAX11410_FILTER_60HZ; + break; + case 2: + default: + filter_bits = MAX11410_FILTER_50HZ | MAX11410_FILTER_60HZ; + enable = !enable; + break; + } + + if (enable) + ret = regmap_clear_bits(state->regmap, MAX11410_REG_FILTER, + filter_bits); + else + ret = regmap_set_bits(state->regmap, MAX11410_REG_FILTER, + filter_bits); + + if (ret) + return ret; + + return count; +} + +static ssize_t in_voltage_filter2_notch_center_show(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct max11410_state *state = iio_priv(indio_dev); + int ret, reg, rate, filter; + + ret = regmap_read(state->regmap, MAX11410_REG_FILTER, ®); + if (ret) + return ret; + + rate = FIELD_GET(MAX11410_FILTER_RATE_MASK, reg); + rate = clamp_val(rate, 0, + max11410_sampling_len[MAX11410_FILTER_SINC4] - 1); + filter = max11410_sampling_rates[MAX11410_FILTER_SINC4][rate][0]; + + return sysfs_emit(buf, "%d\n", filter); +} + +static IIO_CONST_ATTR(in_voltage_filter0_notch_center, "50"); +static IIO_CONST_ATTR(in_voltage_filter1_notch_center, "60"); +static IIO_DEVICE_ATTR_RO(in_voltage_filter2_notch_center, 2); + +static IIO_DEVICE_ATTR(in_voltage_filter0_notch_en, 0644, + max11410_notch_en_show, max11410_notch_en_store, 0); +static IIO_DEVICE_ATTR(in_voltage_filter1_notch_en, 0644, + max11410_notch_en_show, max11410_notch_en_store, 1); +static IIO_DEVICE_ATTR(in_voltage_filter2_notch_en, 0644, + max11410_notch_en_show, max11410_notch_en_store, 2); + +static struct attribute *max11410_attributes[] = { + &iio_const_attr_in_voltage_filter0_notch_center.dev_attr.attr, + &iio_const_attr_in_voltage_filter1_notch_center.dev_attr.attr, + &iio_dev_attr_in_voltage_filter2_notch_center.dev_attr.attr, + &iio_dev_attr_in_voltage_filter0_notch_en.dev_attr.attr, + &iio_dev_attr_in_voltage_filter1_notch_en.dev_attr.attr, + &iio_dev_attr_in_voltage_filter2_notch_en.dev_attr.attr, + NULL +}; + +static const struct attribute_group max11410_attribute_group = { + .attrs = max11410_attributes, +}; + +static int max11410_set_input_mux(struct max11410_state *st, u8 ainp, u8 ainn) +{ + if (ainp > MAX11410_CHANNEL_INDEX_MAX || + ainn > MAX11410_CHANNEL_INDEX_MAX) + return -EINVAL; + + return max11410_write_reg(st, MAX11410_REG_MUX_CTRL0, + (ainp << 4) | ainn); +} + +static int max11410_configure_channel(struct max11410_state *st, + struct iio_chan_spec const *chan) +{ + struct max11410_channel_config cfg = st->channels[chan->address]; + unsigned int regval; + int ret; + + if (chan->differential) + ret = max11410_set_input_mux(st, chan->channel, chan->channel2); + else + ret = max11410_set_input_mux(st, chan->channel, + MAX11410_AINN_GND); + + if (ret) + return ret; + + regval = FIELD_PREP(MAX11410_CTRL_VREFP_BUF_BIT, cfg.buffered_vrefp) | + FIELD_PREP(MAX11410_CTRL_VREFN_BUF_BIT, cfg.buffered_vrefn) | + FIELD_PREP(MAX11410_CTRL_REFSEL_MASK, cfg.refsel) | + FIELD_PREP(MAX11410_CTRL_UNIPOLAR_BIT, cfg.bipolar ? 
0 : 1); + ret = regmap_update_bits(st->regmap, MAX11410_REG_CTRL, + MAX11410_CTRL_REFSEL_MASK | + MAX11410_CTRL_VREFP_BUF_BIT | + MAX11410_CTRL_VREFN_BUF_BIT | + MAX11410_CTRL_UNIPOLAR_BIT, regval); + if (ret) + return ret; + + regval = FIELD_PREP(MAX11410_PGA_SIG_PATH_MASK, cfg.sig_path) | + FIELD_PREP(MAX11410_PGA_GAIN_MASK, cfg.gain); + ret = regmap_write(st->regmap, MAX11410_REG_PGA, regval); + if (ret) + return ret; + + if (cfg.settling_time_us) + fsleep(cfg.settling_time_us); + + return 0; +} + +static int max11410_sample(struct max11410_state *st, int *sample_raw, + struct iio_chan_spec const *chan) +{ + int val, ret; + + ret = max11410_configure_channel(st, chan); + if (ret) + return ret; + + if (st->irq > 0) + reinit_completion(&st->completion); + + /* Start Conversion */ + ret = max11410_write_reg(st, MAX11410_REG_CONV_START, + MAX11410_CONV_TYPE_SINGLE); + if (ret) + return ret; + + if (st->irq > 0) { + /* Wait for an interrupt. */ + ret = wait_for_completion_timeout(&st->completion, + msecs_to_jiffies(MAX11410_CONVERSION_TIMEOUT_MS)); + if (!ret) + return -ETIMEDOUT; + } else { + /* Wait for status register Conversion Ready flag */ + ret = read_poll_timeout(max11410_read_reg, ret, + ret || (val & MAX11410_STATUS_CONV_READY_BIT), + 5000, MAX11410_CONVERSION_TIMEOUT_MS * 1000, + true, st, MAX11410_REG_STATUS, &val); + if (ret) + return ret; + } + + /* Read ADC Data */ + return max11410_read_reg(st, MAX11410_REG_DATA0, sample_raw); +} + +static int max11410_get_scale(struct max11410_state *state, + struct max11410_channel_config cfg) +{ + struct regulator *vrefp, *vrefn; + int scale; + + vrefp = max11410_get_vrefp(state, cfg.refsel); + + scale = regulator_get_voltage(vrefp) / 1000; + vrefn = max11410_get_vrefn(state, cfg.refsel); + if (vrefn) + scale -= regulator_get_voltage(vrefn) / 1000; + + if (cfg.bipolar) + scale *= 2; + + return scale >> cfg.gain; +} + +static int max11410_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long info) +{ + struct max11410_state *state = iio_priv(indio_dev); + struct max11410_channel_config cfg = state->channels[chan->address]; + int ret, reg_val, filter, rate; + + switch (info) { + case IIO_CHAN_INFO_SCALE: + *val = max11410_get_scale(state, cfg); + *val2 = chan->scan_type.realbits; + return IIO_VAL_FRACTIONAL_LOG2; + case IIO_CHAN_INFO_OFFSET: + if (cfg.bipolar) + *val = -BIT(chan->scan_type.realbits - 1); + else + *val = 0; + + return IIO_VAL_INT; + case IIO_CHAN_INFO_RAW: + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + + mutex_lock(&state->lock); + + ret = max11410_sample(state, ®_val, chan); + + mutex_unlock(&state->lock); + + iio_device_release_direct_mode(indio_dev); + + if (ret) + return ret; + + *val = reg_val; + + return IIO_VAL_INT; + case IIO_CHAN_INFO_SAMP_FREQ: + ret = regmap_read(state->regmap, MAX11410_REG_FILTER, ®_val); + if (ret) + return ret; + + filter = FIELD_GET(MAX11410_FILTER_LINEF_MASK, reg_val); + rate = reg_val & MAX11410_FILTER_RATE_MASK; + if (rate >= max11410_sampling_len[filter]) + rate = max11410_sampling_len[filter] - 1; + + *val = max11410_sampling_rates[filter][rate][0]; + *val2 = max11410_sampling_rates[filter][rate][1]; + + return IIO_VAL_INT_PLUS_MICRO; + } + return -EINVAL; +} + +static int max11410_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct max11410_state *st = iio_priv(indio_dev); + int i, ret, reg_val, filter, gain; + u32 *scale_avail; + + switch (mask) { + 
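/* Editor's note: only SCALE (PGA gain) and SAMP_FREQ (filter rate) are writable here; both branches claim direct mode before updating anything. */ +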
case IIO_CHAN_INFO_SCALE: + scale_avail = st->channels[chan->address].scale_avail; + if (!scale_avail) + return -EOPNOTSUPP; + + /* Accept values in range 0.000001 <= scale < 1.000000 */ + if (val != 0 || val2 == 0) + return -EINVAL; + + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + + /* Convert from INT_PLUS_MICRO to FRACTIONAL_LOG2 */ + val2 = val2 * DIV_ROUND_CLOSEST(BIT(24), 1000000); + val2 = DIV_ROUND_CLOSEST(scale_avail[0], val2); + gain = order_base_2(val2); + + st->channels[chan->address].gain = clamp_val(gain, 0, 7); + + iio_device_release_direct_mode(indio_dev); + + return 0; + case IIO_CHAN_INFO_SAMP_FREQ: + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + + mutex_lock(&st->lock); + + ret = regmap_read(st->regmap, MAX11410_REG_FILTER, ®_val); + if (ret) + goto out; + + filter = FIELD_GET(MAX11410_FILTER_LINEF_MASK, reg_val); + + for (i = 0; i < max11410_sampling_len[filter]; ++i) { + if (val == max11410_sampling_rates[filter][i][0] && + val2 == max11410_sampling_rates[filter][i][1]) + break; + } + if (i == max11410_sampling_len[filter]) { + ret = -EINVAL; + goto out; + } + + ret = regmap_write_bits(st->regmap, MAX11410_REG_FILTER, + MAX11410_FILTER_RATE_MASK, i); + +out: + mutex_unlock(&st->lock); + iio_device_release_direct_mode(indio_dev); + + return ret; + default: + return -EINVAL; + } +} + +static int max11410_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long info) +{ + struct max11410_state *st = iio_priv(indio_dev); + struct max11410_channel_config cfg; + int ret, reg_val, filter; + + switch (info) { + case IIO_CHAN_INFO_SAMP_FREQ: + ret = regmap_read(st->regmap, MAX11410_REG_FILTER, ®_val); + if (ret) + return ret; + + filter = FIELD_GET(MAX11410_FILTER_LINEF_MASK, reg_val); + + *vals = (const int *)max11410_sampling_rates[filter]; + *length = max11410_sampling_len[filter] * 2; + *type = IIO_VAL_INT_PLUS_MICRO; + + return IIO_AVAIL_LIST; + case IIO_CHAN_INFO_SCALE: + cfg = st->channels[chan->address]; + + if (!cfg.scale_avail) + return -EINVAL; + + *vals = cfg.scale_avail; + *length = MAX11410_SCALE_AVAIL_SIZE * 2; + *type = IIO_VAL_FRACTIONAL_LOG2; + + return IIO_AVAIL_LIST; + } + return -EINVAL; +} + +static const struct iio_info max11410_info = { + .read_raw = max11410_read_raw, + .write_raw = max11410_write_raw, + .read_avail = max11410_read_avail, + .attrs = &max11410_attribute_group, +}; + +static irqreturn_t max11410_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct max11410_state *st = iio_priv(indio_dev); + int ret; + + ret = max11410_read_reg(st, MAX11410_REG_DATA0, &st->scan.data); + if (ret) { + dev_err(&indio_dev->dev, "cannot read data\n"); + goto out; + } + + iio_push_to_buffers_with_timestamp(indio_dev, &st->scan, + iio_get_time_ns(indio_dev)); + +out: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static int max11410_buffer_postenable(struct iio_dev *indio_dev) +{ + struct max11410_state *st = iio_priv(indio_dev); + int scan_ch, ret; + + scan_ch = ffs(*indio_dev->active_scan_mask) - 1; + + ret = max11410_configure_channel(st, &indio_dev->channels[scan_ch]); + if (ret) + return ret; + + /* Start continuous conversion. 
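Editor's note: the scan mask is validated one-hot (see max11410_buffer_ops below), so exactly one channel streams at a time.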
*/ + return max11410_write_reg(st, MAX11410_REG_CONV_START, + MAX11410_CONV_TYPE_CONTINUOUS); +} + +static int max11410_buffer_predisable(struct iio_dev *indio_dev) +{ + struct max11410_state *st = iio_priv(indio_dev); + + /* Stop continuous conversion. */ + return max11410_write_reg(st, MAX11410_REG_CONV_START, + MAX11410_CONV_TYPE_SINGLE); +} + +static const struct iio_buffer_setup_ops max11410_buffer_ops = { + .postenable = &max11410_buffer_postenable, + .predisable = &max11410_buffer_predisable, + .validate_scan_mask = &iio_validate_scan_mask_onehot, +}; + +static const struct iio_trigger_ops max11410_trigger_ops = { + .validate_device = iio_trigger_validate_own_device, +}; + +static irqreturn_t max11410_interrupt(int irq, void *dev_id) +{ + struct iio_dev *indio_dev = dev_id; + struct max11410_state *st = iio_priv(indio_dev); + + if (iio_buffer_enabled(indio_dev)) + iio_trigger_poll_chained(st->trig); + else + complete(&st->completion); + + return IRQ_HANDLED; +}; + +static int max11410_parse_channels(struct max11410_state *st, + struct iio_dev *indio_dev) +{ + struct iio_chan_spec chanspec = chanspec_template; + struct device *dev = &st->spi_dev->dev; + struct max11410_channel_config *cfg; + struct iio_chan_spec *channels; + struct fwnode_handle *child; + u32 reference, sig_path; + const char *node_name; + u32 inputs[2], scale; + unsigned int num_ch; + int chan_idx = 0; + int ret, i; + + num_ch = device_get_child_node_count(dev); + if (num_ch == 0) + return dev_err_probe(&indio_dev->dev, -ENODEV, + "FW has no channels defined\n"); + + /* Reserve space for soft timestamp channel */ + num_ch++; + channels = devm_kcalloc(dev, num_ch, sizeof(*channels), GFP_KERNEL); + if (!channels) + return -ENOMEM; + + st->channels = devm_kcalloc(dev, num_ch, sizeof(*st->channels), + GFP_KERNEL); + if (!st->channels) + return -ENOMEM; + + device_for_each_child_node(dev, child) { + node_name = fwnode_get_name(child); + if (fwnode_property_present(child, "diff-channels")) { + ret = fwnode_property_read_u32_array(child, + "diff-channels", + inputs, + ARRAY_SIZE(inputs)); + + chanspec.differential = 1; + } else { + ret = fwnode_property_read_u32(child, "reg", &inputs[0]); + + inputs[1] = 0; + chanspec.differential = 0; + } + if (ret) { + fwnode_handle_put(child); + return ret; + } + + if (inputs[0] > MAX11410_CHANNEL_INDEX_MAX || + inputs[1] > MAX11410_CHANNEL_INDEX_MAX) { + fwnode_handle_put(child); + return dev_err_probe(&indio_dev->dev, -EINVAL, + "Invalid channel index for %s, should be less than %d\n", + node_name, + MAX11410_CHANNEL_INDEX_MAX + 1); + } + + cfg = &st->channels[chan_idx]; + + reference = MAX11410_REFSEL_AVDD_AGND; + fwnode_property_read_u32(child, "adi,reference", &reference); + if (reference > MAX11410_REFSEL_MAX) { + fwnode_handle_put(child); + return dev_err_probe(&indio_dev->dev, -EINVAL, + "Invalid adi,reference value for %s, should be less than %d.\n", + node_name, MAX11410_REFSEL_MAX + 1); + } + + if (!max11410_get_vrefp(st, reference) || + (!max11410_get_vrefn(st, reference) && reference <= 2)) { + fwnode_handle_put(child); + return dev_err_probe(&indio_dev->dev, -EINVAL, + "Invalid VREF configuration for %s, either specify corresponding VREF regulators or change adi,reference property.\n", + node_name); + } + + sig_path = MAX11410_PGA_SIG_PATH_BUFFERED; + fwnode_property_read_u32(child, "adi,input-mode", &sig_path); + if (sig_path > MAX11410_SIG_PATH_MAX) { + fwnode_handle_put(child); + return dev_err_probe(&indio_dev->dev, -EINVAL, + "Invalid adi,input-mode value for %s, 
should be less than %d.\n", + node_name, MAX11410_SIG_PATH_MAX + 1); + } + + fwnode_property_read_u32(child, "settling-time-us", + &cfg->settling_time_us); + cfg->bipolar = fwnode_property_read_bool(child, "bipolar"); + cfg->buffered_vrefp = fwnode_property_read_bool(child, "adi,buffered-vrefp"); + cfg->buffered_vrefn = fwnode_property_read_bool(child, "adi,buffered-vrefn"); + cfg->refsel = reference; + cfg->sig_path = sig_path; + cfg->gain = 0; + + /* Enable scale_available property if input mode is PGA */ + if (sig_path == MAX11410_PGA_SIG_PATH_PGA) { + __set_bit(IIO_CHAN_INFO_SCALE, + &chanspec.info_mask_separate_available); + cfg->scale_avail = devm_kcalloc(dev, MAX11410_SCALE_AVAIL_SIZE * 2, + sizeof(*cfg->scale_avail), + GFP_KERNEL); + if (!cfg->scale_avail) { + fwnode_handle_put(child); + return -ENOMEM; + } + + scale = max11410_get_scale(st, *cfg); + for (i = 0; i < MAX11410_SCALE_AVAIL_SIZE; i++) { + cfg->scale_avail[2 * i] = scale >> i; + cfg->scale_avail[2 * i + 1] = chanspec.scan_type.realbits; + } + } else { + __clear_bit(IIO_CHAN_INFO_SCALE, + &chanspec.info_mask_separate_available); + } + + chanspec.address = chan_idx; + chanspec.scan_index = chan_idx; + chanspec.channel = inputs[0]; + chanspec.channel2 = inputs[1]; + + channels[chan_idx] = chanspec; + chan_idx++; + } + + channels[chan_idx] = (struct iio_chan_spec)IIO_CHAN_SOFT_TIMESTAMP(chan_idx); + + indio_dev->num_channels = chan_idx + 1; + indio_dev->channels = channels; + + return 0; +} + +static void max11410_disable_reg(void *reg) +{ + regulator_disable(reg); +} + +static int max11410_init_vref(struct device *dev, + struct regulator **vref, + const char *id) +{ + struct regulator *reg; + int ret; + + reg = devm_regulator_get_optional(dev, id); + if (PTR_ERR(reg) == -ENODEV) { + *vref = NULL; + return 0; + } else if (IS_ERR(reg)) { + return PTR_ERR(reg); + } + ret = regulator_enable(reg); + if (ret) + return dev_err_probe(dev, ret, + "Failed to enable regulator %s\n", id); + + *vref = reg; + return devm_add_action_or_reset(dev, max11410_disable_reg, reg); +} + +static int max11410_calibrate(struct max11410_state *st, u32 cal_type) +{ + int ret, val; + + ret = max11410_write_reg(st, MAX11410_REG_CAL_START, cal_type); + if (ret) + return ret; + + /* Wait for status register Calibration Ready flag */ + return read_poll_timeout(max11410_read_reg, ret, + ret || (val & MAX11410_STATUS_CAL_READY_BIT), + 50000, MAX11410_CALIB_TIMEOUT_MS * 1000, true, + st, MAX11410_REG_STATUS, &val); +} + +static int max11410_self_calibrate(struct max11410_state *st) +{ + int ret, i; + + ret = regmap_write_bits(st->regmap, MAX11410_REG_FILTER, + MAX11410_FILTER_RATE_MASK, + FIELD_PREP(MAX11410_FILTER_RATE_MASK, + MAX11410_FILTER_RATE_MAX)); + if (ret) + return ret; + + ret = max11410_calibrate(st, MAX11410_CAL_START_SELF); + if (ret) + return ret; + + ret = regmap_write_bits(st->regmap, MAX11410_REG_PGA, + MAX11410_PGA_SIG_PATH_MASK, + FIELD_PREP(MAX11410_PGA_SIG_PATH_MASK, + MAX11410_PGA_SIG_PATH_PGA)); + if (ret) + return ret; + + /* PGA calibrations */ + for (i = 1; i < 8; ++i) { + ret = regmap_write_bits(st->regmap, MAX11410_REG_PGA, + MAX11410_PGA_GAIN_MASK, i); + if (ret) + return ret; + + ret = max11410_calibrate(st, MAX11410_CAL_START_PGA); + if (ret) + return ret; + } + + /* Cleanup */ + ret = regmap_write_bits(st->regmap, MAX11410_REG_PGA, + MAX11410_PGA_GAIN_MASK, 0); + if (ret) + return ret; + + ret = regmap_write_bits(st->regmap, MAX11410_REG_FILTER, + MAX11410_FILTER_RATE_MASK, 0); + if (ret) + return ret; + + return 
regmap_write_bits(st->regmap, MAX11410_REG_PGA, + MAX11410_PGA_SIG_PATH_MASK, + FIELD_PREP(MAX11410_PGA_SIG_PATH_MASK, + MAX11410_PGA_SIG_PATH_BUFFERED)); +} + +static int max11410_probe(struct spi_device *spi) +{ + const char *vrefp_regs[] = { "vref0p", "vref1p", "vref2p" }; + const char *vrefn_regs[] = { "vref0n", "vref1n", "vref2n" }; + struct device *dev = &spi->dev; + struct max11410_state *st; + struct iio_dev *indio_dev; + int ret, irqs[2]; + int i; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + st = iio_priv(indio_dev); + st->spi_dev = spi; + init_completion(&st->completion); + mutex_init(&st->lock); + + indio_dev->name = "max11410"; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->info = &max11410_info; + + st->regmap = devm_regmap_init_spi(spi, ®map_config); + if (IS_ERR(st->regmap)) + return dev_err_probe(dev, PTR_ERR(st->regmap), + "regmap initialization failed\n"); + + ret = max11410_init_vref(dev, &st->avdd, "avdd"); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(vrefp_regs); i++) { + ret = max11410_init_vref(dev, &st->vrefp[i], vrefp_regs[i]); + if (ret) + return ret; + + ret = max11410_init_vref(dev, &st->vrefn[i], vrefn_regs[i]); + if (ret) + return ret; + } + + /* + * Regulators must be configured before parsing channels for + * validating "adi,reference" property of each channel. + */ + ret = max11410_parse_channels(st, indio_dev); + if (ret) + return ret; + + irqs[0] = fwnode_irq_get_byname(dev_fwnode(dev), "gpio0"); + irqs[1] = fwnode_irq_get_byname(dev_fwnode(dev), "gpio1"); + + if (irqs[0] > 0) { + st->irq = irqs[0]; + ret = regmap_write(st->regmap, MAX11410_REG_GPIO_CTRL(0), + MAX11410_GPIO_INTRB); + } else if (irqs[1] > 0) { + st->irq = irqs[1]; + ret = regmap_write(st->regmap, MAX11410_REG_GPIO_CTRL(1), + MAX11410_GPIO_INTRB); + } else if (spi->irq > 0) { + return dev_err_probe(dev, -ENODEV, + "no interrupt name specified"); + } + + if (ret) + return ret; + + ret = regmap_set_bits(st->regmap, MAX11410_REG_CTRL, + MAX11410_CTRL_FORMAT_BIT); + if (ret) + return ret; + + ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, + &max11410_trigger_handler, + &max11410_buffer_ops); + if (ret) + return ret; + + if (st->irq > 0) { + st->trig = devm_iio_trigger_alloc(dev, "%s-dev%d", + indio_dev->name, + iio_device_id(indio_dev)); + if (!st->trig) + return -ENOMEM; + + st->trig->ops = &max11410_trigger_ops; + ret = devm_iio_trigger_register(dev, st->trig); + if (ret) + return ret; + + ret = devm_request_threaded_irq(dev, st->irq, NULL, + &max11410_interrupt, + IRQF_ONESHOT, "max11410", + indio_dev); + if (ret) + return ret; + } + + ret = max11410_self_calibrate(st); + if (ret) + return dev_err_probe(dev, ret, + "cannot perform device self calibration\n"); + + return devm_iio_device_register(dev, indio_dev); +} + +static const struct of_device_id max11410_spi_of_id[] = { + { .compatible = "adi,max11410" }, + { } +}; +MODULE_DEVICE_TABLE(of, max11410_spi_of_id); + +static const struct spi_device_id max11410_id[] = { + { "max11410" }, + { } +}; +MODULE_DEVICE_TABLE(spi, max11410_id); + +static struct spi_driver max11410_driver = { + .driver = { + .name = "max11410", + .of_match_table = max11410_spi_of_id, + }, + .probe = max11410_probe, + .id_table = max11410_id, +}; +module_spi_driver(max11410_driver); + +MODULE_AUTHOR("David Jung "); +MODULE_AUTHOR("Ibrahim Tilki "); +MODULE_DESCRIPTION("Analog Devices MAX11410 ADC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/adc/max1241.c b/drivers/iio/adc/max1241.c 
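max11410_calibrate() above waits for the Calibration Ready flag with the generic read_poll_timeout() because the access goes through the driver's own max11410_read_reg() helper. When the accessor is plain regmap, the dedicated regmap_read_poll_timeout() wrapper expresses the same loop and also propagates read errors by itself. A minimal sketch of that idiom, with hypothetical "demo" register names that belong to no driver in this series:

#include <linux/bits.h>
#include <linux/regmap.h>

#define DEMO_REG_STATUS		0x00
#define DEMO_STATUS_READY	BIT(2)

static int demo_wait_ready(struct regmap *map)
{
	unsigned int val;

	/*
	 * Read DEMO_REG_STATUS every 50 ms for up to 2 s. Returns 0 once
	 * DEMO_STATUS_READY is set, -ETIMEDOUT on expiry, or the error
	 * from the underlying regmap_read().
	 */
	return regmap_read_poll_timeout(map, DEMO_REG_STATUS, val,
					val & DEMO_STATUS_READY,
					50000, 2000000);
}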
index a815ad1f6913bab3c0077a3539b2bc24eb6c0d93..500bb09ab19bc98004fedb4164f8c636ee0483ed 100644 --- a/drivers/iio/adc/max1241.c +++ b/drivers/iio/adc/max1241.c @@ -22,7 +22,6 @@ enum max1241_id { struct max1241 { struct spi_device *spi; struct mutex lock; - struct regulator *vdd; struct regulator *vref; struct gpio_desc *shutdown; @@ -110,17 +109,6 @@ static const struct iio_info max1241_info = { .read_raw = max1241_read_raw, }; -static void max1241_disable_vdd_action(void *data) -{ - struct max1241 *adc = data; - struct device *dev = &adc->spi->dev; - int err; - - err = regulator_disable(adc->vdd); - if (err) - dev_err(dev, "could not disable vdd regulator.\n"); -} - static void max1241_disable_vref_action(void *data) { struct max1241 *adc = data; @@ -147,20 +135,10 @@ static int max1241_probe(struct spi_device *spi) adc->spi = spi; mutex_init(&adc->lock); - adc->vdd = devm_regulator_get(dev, "vdd"); - if (IS_ERR(adc->vdd)) - return dev_err_probe(dev, PTR_ERR(adc->vdd), - "failed to get vdd regulator\n"); - - ret = regulator_enable(adc->vdd); + ret = devm_regulator_get_enable(dev, "vdd"); if (ret) - return ret; - - ret = devm_add_action_or_reset(dev, max1241_disable_vdd_action, adc); - if (ret) { - dev_err(dev, "could not set up vdd regulator cleanup action\n"); - return ret; - } + return dev_err_probe(dev, ret, + "failed to get/enable vdd regulator\n"); adc->vref = devm_regulator_get(dev, "vref"); if (IS_ERR(adc->vref)) diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c index a28cf86cdce804672572da2e679914d98f204918..73b783b430d72def9217f61b1cad9d40128b917f 100644 --- a/drivers/iio/adc/max1363.c +++ b/drivers/iio/adc/max1363.c @@ -148,7 +148,6 @@ struct max1363_chip_info { * @chip_info: chip model specific constants, available modes, etc. 
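The max1241 conversion above (and the max1363 one that continues below) is the same mechanical cleanup: devm_regulator_get() + regulator_enable() + a devm cleanup action collapse into a single devm_regulator_get_enable() call, and the struct regulator pointer drops out of the driver state because nothing dereferences it afterwards. The shape of the new-style probe, as a hedged sketch with a hypothetical "demo" driver:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int demo_probe(struct device *dev)
{
	int ret;

	/*
	 * Enables the supply now and registers the matching disable as a
	 * devres action; no regulator handle is returned. This only fits
	 * supplies the driver never touches again, which is why max1241
	 * keeps plain devm_regulator_get() for its "vref" supply.
	 */
	ret = devm_regulator_get_enable(dev, "vdd");
	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to get/enable vdd regulator\n");

	return 0;
}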
* @current_mode: the scan mode of this chip * @requestedmask: a valid requested set of channels - * @reg: supply regulator * @lock: lock to ensure state is consistent * @monitor_on: whether monitor mode is enabled * @monitor_speed: parameter corresponding to device monitor speed setting @@ -168,7 +167,6 @@ struct max1363_state { const struct max1363_chip_info *chip_info; const struct max1363_mode *current_mode; u32 requestedmask; - struct regulator *reg; struct mutex lock; /* Using monitor modes and buffer at the same time is @@ -1581,9 +1579,9 @@ static void max1363_reg_disable(void *reg) regulator_disable(reg); } -static int max1363_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max1363_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); int ret; struct max1363_state *st; struct iio_dev *indio_dev; @@ -1597,15 +1595,7 @@ static int max1363_probe(struct i2c_client *client, st = iio_priv(indio_dev); mutex_init(&st->lock); - st->reg = devm_regulator_get(&client->dev, "vcc"); - if (IS_ERR(st->reg)) - return PTR_ERR(st->reg); - - ret = regulator_enable(st->reg); - if (ret) - return ret; - - ret = devm_add_action_or_reset(&client->dev, max1363_reg_disable, st->reg); + ret = devm_regulator_get_enable(&client->dev, "vcc"); if (ret) return ret; @@ -1728,7 +1718,7 @@ static struct i2c_driver max1363_driver = { .name = "max1363", .of_match_table = max1363_of_match, }, - .probe = max1363_probe, + .probe_new = max1363_probe, .id_table = max1363_id, }; module_i2c_driver(max1363_driver); diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c index f982f00303dc0fcae333396207a239a4d6cf66c2..cb7f4785423a2f2da8d6778b1c36fb7a7496bee1 100644 --- a/drivers/iio/adc/max9611.c +++ b/drivers/iio/adc/max9611.c @@ -510,8 +510,7 @@ static const struct of_device_id max9611_of_table[] = { }; MODULE_DEVICE_TABLE(of, max9611_of_table); -static int max9611_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max9611_probe(struct i2c_client *client) { const char * const shunt_res_prop = "shunt-resistor-micro-ohms"; struct max9611_dev *max9611; @@ -557,7 +556,7 @@ static struct i2c_driver max9611_driver = { .name = DRIVER_NAME, .of_match_table = max9611_of_table, }, - .probe = max9611_probe, + .probe_new = max9611_probe, }; module_i2c_driver(max9611_driver); diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c index da353dcb1e9d4988aeec45731f5a6b26acbf8d82..ada844c3f7eccd2c243e86c223f1fc0f65239e08 100644 --- a/drivers/iio/adc/mcp3422.c +++ b/drivers/iio/adc/mcp3422.c @@ -330,9 +330,9 @@ static const struct iio_info mcp3422_info = { .attrs = &mcp3422_attribute_group, }; -static int mcp3422_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mcp3422_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct iio_dev *indio_dev; struct mcp3422 *adc; int err; @@ -417,7 +417,7 @@ static struct i2c_driver mcp3422_driver = { .name = "mcp3422", .of_match_table = mcp3422_of_match, }, - .probe = mcp3422_probe, + .probe_new = mcp3422_probe, .id_table = mcp3422_id, }; module_i2c_driver(mcp3422_driver); diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c index 76b334f5ac616326f704aee880d1b4d4fd039024..974c5bd923a685f92534f629a591590f3728ba04 100644 --- a/drivers/iio/adc/mcp3911.c +++ b/drivers/iio/adc/mcp3911.c @@ -29,6 +29,8 @@ #define MCP3911_REG_MOD 0x06 #define MCP3911_REG_PHASE 0x07 #define 
MCP3911_REG_GAIN 0x09 +#define MCP3911_GAIN_MASK(ch) (GENMASK(2, 0) << 3 * ch) +#define MCP3911_GAIN_VAL(ch, val) ((val << 3 * ch) & MCP3911_GAIN_MASK(ch)) #define MCP3911_REG_STATUSCOM 0x0a #define MCP3911_STATUSCOM_DRHIZ BIT(12) @@ -60,8 +62,10 @@ #define MCP3911_REG_MASK GENMASK(4, 1) #define MCP3911_NUM_CHANNELS 2 +#define MCP3911_NUM_SCALES 6 static const int mcp3911_osr_table[] = { 32, 64, 128, 256, 512, 1024, 2048, 4096 }; +static u32 mcp3911_scale_table[MCP3911_NUM_SCALES][2]; struct mcp3911 { struct spi_device *spi; @@ -70,6 +74,7 @@ struct mcp3911 { struct clk *clki; u32 dev_addr; struct iio_trigger *trig; + u32 gain[MCP3911_NUM_CHANNELS]; struct { u32 channels[MCP3911_NUM_CHANNELS]; s64 ts __aligned(8); @@ -146,6 +151,11 @@ static int mcp3911_read_avail(struct iio_dev *indio_dev, *vals = mcp3911_osr_table; *length = ARRAY_SIZE(mcp3911_osr_table); return IIO_AVAIL_LIST; + case IIO_CHAN_INFO_SCALE: + *type = IIO_VAL_INT_PLUS_NANO; + *vals = (int *)mcp3911_scale_table; + *length = ARRAY_SIZE(mcp3911_scale_table) * 2; + return IIO_AVAIL_LIST; default: return -EINVAL; } @@ -190,29 +200,9 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev, break; case IIO_CHAN_INFO_SCALE: - if (adc->vref) { - ret = regulator_get_voltage(adc->vref); - if (ret < 0) { - dev_err(indio_dev->dev.parent, - "failed to get vref voltage: %d\n", - ret); - goto out; - } - - *val = ret / 1000; - } else { - *val = MCP3911_INT_VREF_MV; - } - - /* - * For 24bit Conversion - * Raw = ((Voltage)/(Vref) * 2^23 * Gain * 1.5 - * Voltage = Raw * (Vref)/(2^23 * Gain * 1.5) - */ - - /* val2 = (2^23 * 1.5) */ - *val2 = 12582912; - ret = IIO_VAL_FRACTIONAL; + *val = mcp3911_scale_table[ilog2(adc->gain[channel->channel])][0]; + *val2 = mcp3911_scale_table[ilog2(adc->gain[channel->channel])][1]; + ret = IIO_VAL_INT_PLUS_NANO; break; } @@ -230,6 +220,18 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev, mutex_lock(&adc->lock); switch (mask) { + case IIO_CHAN_INFO_SCALE: + for (int i = 0; i < MCP3911_NUM_SCALES; i++) { + if (val == mcp3911_scale_table[i][0] && + val2 == mcp3911_scale_table[i][1]) { + + adc->gain[channel->channel] = BIT(i); + ret = mcp3911_update(adc, MCP3911_REG_GAIN, + MCP3911_GAIN_MASK(channel->channel), + MCP3911_GAIN_VAL(channel->channel, i), 1); + } + } + break; case IIO_CHAN_INFO_OFFSET: if (val2 != 0) { ret = -EINVAL; @@ -265,6 +267,44 @@ out: return ret; } +static int mcp3911_calc_scale_table(struct mcp3911 *adc) +{ + u32 ref = MCP3911_INT_VREF_MV; + u32 div; + int ret; + u64 tmp; + + if (adc->vref) { + ret = regulator_get_voltage(adc->vref); + if (ret < 0) { + dev_err(&adc->spi->dev, + "failed to get vref voltage: %d\n", + ret); + return ret; + } + + ref = ret / 1000; + } + + /* + * For 24-bit Conversion + * Raw = (Voltage / Vref) * 2^23 * Gain * 1.5 + * Voltage = Raw * (Vref)/(2^23 * Gain * 1.5) + * + * ref = Reference voltage + * div = (2^23 * 1.5 * gain) = 12582912 * gain + */ + for (int i = 0; i < MCP3911_NUM_SCALES; i++) { + div = 12582912 * BIT(i); + tmp = div_s64((s64)ref * 1000000000LL, div); + + mcp3911_scale_table[i][0] = 0; + mcp3911_scale_table[i][1] = tmp; + } + + return 0; +} + #define MCP3911_CHAN(idx) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ @@ -274,8 +314,10 @@ out: .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_OFFSET) | \ BIT(IIO_CHAN_INFO_SCALE), \ - .info_mask_shared_by_type_available = \ + .info_mask_shared_by_type_available = \ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \ + .info_mask_separate_available = \ + BIT(IIO_CHAN_INFO_SCALE), \ .scan_type = {
\ .sign = 's', \ .realbits = 24, \ @@ -482,6 +524,20 @@ static int mcp3911_probe(struct spi_device *spi) if (ret) return ret; + ret = mcp3911_calc_scale_table(adc); + if (ret) + return ret; + + /* Set gain to 1 for all channels */ + for (int i = 0; i < MCP3911_NUM_CHANNELS; i++) { + adc->gain[i] = 1; + ret = mcp3911_update(adc, MCP3911_REG_GAIN, + MCP3911_GAIN_MASK(i), + MCP3911_GAIN_VAL(i, 0), 1); + if (ret) + return ret; + } + indio_dev->name = spi_get_device_id(spi)->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &mcp3911_info; diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index 1a68b099d323c1f57fe0da2586278a7ba110b5a9..85b6826cc10cf9a2b82a1666bd10ed9ef4c29dee 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -276,6 +277,8 @@ struct meson_sar_adc_priv { struct clk *adc_div_clk; struct clk_divider clk_div; struct completion done; + /* lock to protect against multiple access to the device */ + struct mutex lock; int calibbias; int calibscale; struct regmap *tsc_regmap; @@ -486,7 +489,7 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev) struct meson_sar_adc_priv *priv = iio_priv(indio_dev); int val, ret; - mutex_lock(&indio_dev->mlock); + mutex_lock(&priv->lock); if (priv->param->has_bl30_integration) { /* prevent BL30 from using the SAR ADC while we are using it */ @@ -504,7 +507,7 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev) !(val & MESON_SAR_ADC_DELAY_BL30_BUSY), 1, 10000); if (ret) { - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&priv->lock); return ret; } } @@ -521,7 +524,7 @@ static void meson_sar_adc_unlock(struct iio_dev *indio_dev) regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY, MESON_SAR_ADC_DELAY_KERNEL_BUSY, 0); - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&priv->lock); } static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev) @@ -1250,6 +1253,8 @@ static int meson_sar_adc_probe(struct platform_device *pdev) if (ret) goto err; + mutex_init(&priv->lock); + ret = meson_sar_adc_hw_enable(indio_dev); if (ret) goto err; diff --git a/drivers/iio/adc/mt6370-adc.c b/drivers/iio/adc/mt6370-adc.c new file mode 100644 index 0000000000000000000000000000000000000000..bc62e5a9d50d11d42e6791065f2a3266d4dc2b06 --- /dev/null +++ b/drivers/iio/adc/mt6370-adc.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 Richtek Technology Corp. 
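As a worked check of mcp3911_calc_scale_table() above, assume the internal reference is in use (MCP3911_INT_VREF_MV, 1200 mV in this driver):

	gain = 1: div = 12582912 * BIT(0) = 12582912
	          tmp = 1200 * 10^9 / 12582912 = 95367
	          table entry { 0, 95367 }, reported as 0.000095367 (IIO_VAL_INT_PLUS_NANO)
	gain = 2: div doubles to 25165824, so tmp halves to 47683;
	          each further PGA step halves the scale again

Cross-checking against the Voltage = Raw * Vref / (2^23 * Gain * 1.5) relation in the comment: a full-scale raw value of 2^23 = 8388608 codes times 0.000095367 mV per code gives roughly 800 mV, which is exactly Vref / 1.5 = 1200 mV / 1.5.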
+ * + * Author: ChiaEn Wu + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define MT6370_REG_CHG_CTRL3 0x113 +#define MT6370_REG_CHG_CTRL7 0x117 +#define MT6370_REG_CHG_ADC 0x121 +#define MT6370_REG_ADC_DATA_H 0x14C + +#define MT6370_ADC_START_MASK BIT(0) +#define MT6370_ADC_IN_SEL_MASK GENMASK(7, 4) +#define MT6370_AICR_ICHG_MASK GENMASK(7, 2) + +#define MT6370_AICR_100_mA 0x0 +#define MT6370_AICR_150_mA 0x1 +#define MT6370_AICR_200_mA 0x2 +#define MT6370_AICR_250_mA 0x3 +#define MT6370_AICR_300_mA 0x4 +#define MT6370_AICR_350_mA 0x5 + +#define MT6370_ICHG_100_mA 0x0 +#define MT6370_ICHG_200_mA 0x1 +#define MT6370_ICHG_300_mA 0x2 +#define MT6370_ICHG_400_mA 0x3 +#define MT6370_ICHG_500_mA 0x4 +#define MT6370_ICHG_600_mA 0x5 +#define MT6370_ICHG_700_mA 0x6 +#define MT6370_ICHG_800_mA 0x7 + +#define ADC_CONV_TIME_MS 35 +#define ADC_CONV_POLLING_TIME_US 1000 + +struct mt6370_adc_data { + struct device *dev; + struct regmap *regmap; + /* + * This mutex lock is for preventing the different ADC channels + * from being read at the same time. + */ + struct mutex adc_lock; +}; + +static int mt6370_adc_read_channel(struct mt6370_adc_data *priv, int chan, + unsigned long addr, int *val) +{ + unsigned int reg_val; + __be16 be_val; + int ret; + + mutex_lock(&priv->adc_lock); + + reg_val = MT6370_ADC_START_MASK | + FIELD_PREP(MT6370_ADC_IN_SEL_MASK, addr); + ret = regmap_write(priv->regmap, MT6370_REG_CHG_ADC, reg_val); + if (ret) + goto adc_unlock; + + msleep(ADC_CONV_TIME_MS); + + ret = regmap_read_poll_timeout(priv->regmap, + MT6370_REG_CHG_ADC, reg_val, + !(reg_val & MT6370_ADC_START_MASK), + ADC_CONV_POLLING_TIME_US, + ADC_CONV_TIME_MS * MILLI * 3); + if (ret) { + dev_err(priv->dev, "Failed to read ADC register (%d)\n", ret); + goto adc_unlock; + } + + ret = regmap_raw_read(priv->regmap, MT6370_REG_ADC_DATA_H, + &be_val, sizeof(be_val)); + if (ret) + goto adc_unlock; + + *val = be16_to_cpu(be_val); + ret = IIO_VAL_INT; + +adc_unlock: + mutex_unlock(&priv->adc_lock); + + return ret; +} + +static int mt6370_adc_read_scale(struct mt6370_adc_data *priv, + int chan, int *val1, int *val2) +{ + unsigned int reg_val; + int ret; + + switch (chan) { + case MT6370_CHAN_VBAT: + case MT6370_CHAN_VSYS: + case MT6370_CHAN_CHG_VDDP: + *val1 = 5; + return IIO_VAL_INT; + case MT6370_CHAN_IBUS: + ret = regmap_read(priv->regmap, MT6370_REG_CHG_CTRL3, ®_val); + if (ret) + return ret; + + reg_val = FIELD_GET(MT6370_AICR_ICHG_MASK, reg_val); + switch (reg_val) { + case MT6370_AICR_100_mA: + case MT6370_AICR_150_mA: + case MT6370_AICR_200_mA: + case MT6370_AICR_250_mA: + case MT6370_AICR_300_mA: + case MT6370_AICR_350_mA: + *val1 = 3350; + break; + default: + *val1 = 5000; + break; + } + + *val2 = 100; + + return IIO_VAL_FRACTIONAL; + case MT6370_CHAN_IBAT: + ret = regmap_read(priv->regmap, MT6370_REG_CHG_CTRL7, ®_val); + if (ret) + return ret; + + reg_val = FIELD_GET(MT6370_AICR_ICHG_MASK, reg_val); + switch (reg_val) { + case MT6370_ICHG_100_mA: + case MT6370_ICHG_200_mA: + case MT6370_ICHG_300_mA: + case MT6370_ICHG_400_mA: + *val1 = 2375; + break; + case MT6370_ICHG_500_mA: + case MT6370_ICHG_600_mA: + case MT6370_ICHG_700_mA: + case MT6370_ICHG_800_mA: + *val1 = 2680; + break; + default: + *val1 = 5000; + break; + } + + *val2 = 100; + + return IIO_VAL_FRACTIONAL; + case MT6370_CHAN_VBUSDIV5: + *val1 = 25; + return IIO_VAL_INT; + case MT6370_CHAN_VBUSDIV2: + *val1 = 10; + return IIO_VAL_INT; + case MT6370_CHAN_TS_BAT: + *val1 = 25; + 
*val2 = 10000; + return IIO_VAL_FRACTIONAL; + case MT6370_CHAN_TEMP_JC: + *val1 = 2000; + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static int mt6370_adc_read_offset(struct mt6370_adc_data *priv, + int chan, int *val) +{ + *val = -20; + + return IIO_VAL_INT; +} + +static int mt6370_adc_read_raw(struct iio_dev *iio_dev, + const struct iio_chan_spec *chan, + int *val, int *val2, long mask) +{ + struct mt6370_adc_data *priv = iio_priv(iio_dev); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + return mt6370_adc_read_channel(priv, chan->channel, + chan->address, val); + case IIO_CHAN_INFO_SCALE: + return mt6370_adc_read_scale(priv, chan->channel, val, val2); + case IIO_CHAN_INFO_OFFSET: + return mt6370_adc_read_offset(priv, chan->channel, val); + default: + return -EINVAL; + } +} + +static const char * const mt6370_channel_labels[MT6370_CHAN_MAX] = { + [MT6370_CHAN_VBUSDIV5] = "vbusdiv5", + [MT6370_CHAN_VBUSDIV2] = "vbusdiv2", + [MT6370_CHAN_VSYS] = "vsys", + [MT6370_CHAN_VBAT] = "vbat", + [MT6370_CHAN_TS_BAT] = "ts_bat", + [MT6370_CHAN_IBUS] = "ibus", + [MT6370_CHAN_IBAT] = "ibat", + [MT6370_CHAN_CHG_VDDP] = "chg_vddp", + [MT6370_CHAN_TEMP_JC] = "temp_jc", +}; + +static int mt6370_adc_read_label(struct iio_dev *iio_dev, + struct iio_chan_spec const *chan, char *label) +{ + return sysfs_emit(label, "%s\n", mt6370_channel_labels[chan->channel]); +} + +static const struct iio_info mt6370_adc_iio_info = { + .read_raw = mt6370_adc_read_raw, + .read_label = mt6370_adc_read_label, +}; + +#define MT6370_ADC_CHAN(_idx, _type, _addr, _extra_info) { \ + .type = _type, \ + .channel = MT6370_CHAN_##_idx, \ + .address = _addr, \ + .scan_index = MT6370_CHAN_##_idx, \ + .indexed = 1, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + _extra_info, \ +} + +static const struct iio_chan_spec mt6370_adc_channels[] = { + MT6370_ADC_CHAN(VBUSDIV5, IIO_VOLTAGE, 1, 0), + MT6370_ADC_CHAN(VBUSDIV2, IIO_VOLTAGE, 2, 0), + MT6370_ADC_CHAN(VSYS, IIO_VOLTAGE, 3, 0), + MT6370_ADC_CHAN(VBAT, IIO_VOLTAGE, 4, 0), + MT6370_ADC_CHAN(TS_BAT, IIO_VOLTAGE, 6, 0), + MT6370_ADC_CHAN(IBUS, IIO_CURRENT, 8, 0), + MT6370_ADC_CHAN(IBAT, IIO_CURRENT, 9, 0), + MT6370_ADC_CHAN(CHG_VDDP, IIO_VOLTAGE, 11, 0), + MT6370_ADC_CHAN(TEMP_JC, IIO_TEMP, 12, BIT(IIO_CHAN_INFO_OFFSET)), +}; + +static int mt6370_adc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mt6370_adc_data *priv; + struct iio_dev *indio_dev; + struct regmap *regmap; + int ret; + + regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!regmap) + return dev_err_probe(dev, -ENODEV, "Failed to get regmap\n"); + + indio_dev = devm_iio_device_alloc(dev, sizeof(*priv)); + if (!indio_dev) + return -ENOMEM; + + priv = iio_priv(indio_dev); + priv->dev = dev; + priv->regmap = regmap; + mutex_init(&priv->adc_lock); + + ret = regmap_write(priv->regmap, MT6370_REG_CHG_ADC, 0); + if (ret) + return dev_err_probe(dev, ret, "Failed to reset ADC\n"); + + indio_dev->name = "mt6370-adc"; + indio_dev->info = &mt6370_adc_iio_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = mt6370_adc_channels; + indio_dev->num_channels = ARRAY_SIZE(mt6370_adc_channels); + + return devm_iio_device_register(dev, indio_dev); +} + +static const struct of_device_id mt6370_adc_of_id[] = { + { .compatible = "mediatek,mt6370-adc", }, + {} +}; +MODULE_DEVICE_TABLE(of, mt6370_adc_of_id); + +static struct platform_driver mt6370_adc_driver = { + .driver = { + .name = "mt6370-adc", + .of_match_table = mt6370_adc_of_id, + }, + 
.probe = mt6370_adc_probe, +}; +module_platform_driver(mt6370_adc_driver); + +MODULE_AUTHOR("ChiaEn Wu "); +MODULE_DESCRIPTION("MT6370 ADC Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c index b87ea7148b5869692a01158325022cb3dfebb360..79448c5ffc2ae2d4306026dd879e978a4f709214 100644 --- a/drivers/iio/adc/rockchip_saradc.c +++ b/drivers/iio/adc/rockchip_saradc.c @@ -5,6 +5,7 @@ */ #include +#include #include #include #include @@ -49,6 +50,8 @@ struct rockchip_saradc { struct clk *clk; struct completion completion; struct regulator *vref; + /* lock to protect against multiple access to the device */ + struct mutex lock; int uv_vref; struct reset_control *reset; const struct rockchip_saradc_data *data; @@ -94,17 +97,17 @@ static int rockchip_saradc_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: - mutex_lock(&indio_dev->mlock); + mutex_lock(&info->lock); ret = rockchip_saradc_conversion(info, chan); if (ret) { rockchip_saradc_power_down(info); - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&info->lock); return ret; } *val = info->last_val; - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&info->lock); return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: *val = info->uv_vref / 1000; @@ -270,7 +273,7 @@ static irqreturn_t rockchip_saradc_trigger_handler(int irq, void *p) int ret; int i, j = 0; - mutex_lock(&i_dev->mlock); + mutex_lock(&info->lock); for_each_set_bit(i, i_dev->active_scan_mask, i_dev->masklength) { const struct iio_chan_spec *chan = &i_dev->channels[i]; @@ -287,7 +290,7 @@ static irqreturn_t rockchip_saradc_trigger_handler(int irq, void *p) iio_push_to_buffers_with_timestamp(i_dev, &data, iio_get_time_ns(i_dev)); out: - mutex_unlock(&i_dev->mlock); + mutex_unlock(&info->lock); iio_trigger_notify_done(i_dev->trig); @@ -478,6 +481,8 @@ static int rockchip_saradc_probe(struct platform_device *pdev) if (ret) return ret; + mutex_init(&info->lock); + return devm_iio_device_register(&pdev->dev, indio_dev); } diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c index f8421cbba8fa57ffa63b2ed1b63d0ddc17864f08..ff1fc329bb9b7d6bf9711a2fd6cf0f40d8d7118e 100644 --- a/drivers/iio/adc/sc27xx_adc.c +++ b/drivers/iio/adc/sc27xx_adc.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -83,6 +84,8 @@ struct sc27xx_adc_data { struct device *dev; struct regulator *volref; struct regmap *regmap; + /* lock to protect against multiple access to the device */ + struct mutex lock; /* * One hardware spinlock to synchronize between the multiple * subsystems which will access the unique ADC controller. 
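The meson_saradc and rockchip_saradc hunks above and the sc27xx and vf610 hunks below all make one locking change: stop borrowing indio_dev->mlock, which is IIO-core internal state, and serialize hardware access with a mutex owned by the driver itself. Reduced to a hypothetical "demo" driver (a sketch of the pattern, not any of these files):

#include <linux/iio/iio.h>
#include <linux/mutex.h>

struct demo_adc_state {
	/* lock to protect against multiple access to the device */
	struct mutex lock;
	int last_val;
};

static int demo_read_raw(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 int *val, int *val2, long mask)
{
	struct demo_adc_state *st = iio_priv(indio_dev);

	if (mask != IIO_CHAN_INFO_RAW)
		return -EINVAL;

	mutex_lock(&st->lock);
	*val = st->last_val;	/* stand-in for a real conversion */
	mutex_unlock(&st->lock);

	return IIO_VAL_INT;
}

/* In probe, before registering the device: mutex_init(&st->lock); */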
@@ -664,9 +667,9 @@ static int sc27xx_adc_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: - mutex_lock(&indio_dev->mlock); + mutex_lock(&data->lock); ret = sc27xx_adc_read(data, chan->channel, scale, &tmp); - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&data->lock); if (ret) return ret; @@ -675,10 +678,10 @@ static int sc27xx_adc_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT; case IIO_CHAN_INFO_PROCESSED: - mutex_lock(&indio_dev->mlock); + mutex_lock(&data->lock); ret = sc27xx_adc_read_processed(data, chan->channel, scale, &tmp); - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&data->lock); if (ret) return ret; @@ -934,6 +937,9 @@ static int sc27xx_adc_probe(struct platform_device *pdev) indio_dev->info = &sc27xx_info; indio_dev->channels = sc27xx_channels; indio_dev->num_channels = ARRAY_SIZE(sc27xx_channels); + + mutex_init(&sc27xx_data->lock); + ret = devm_iio_device_register(dev, indio_dev); if (ret) dev_err(dev, "could not register iio (ADC)"); diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c index 81d5db91c67bf7896be5d3884d1a61bb789c214d..48f02dcc81c1b7ed2f0fd4bedeb51444b0b0c385 100644 --- a/drivers/iio/adc/stm32-adc-core.c +++ b/drivers/iio/adc/stm32-adc-core.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "stm32-adc-core.h" @@ -306,8 +307,8 @@ out: static const struct stm32_adc_common_regs stm32f4_adc_common_regs = { .csr = STM32F4_ADC_CSR, .ccr = STM32F4_ADC_CCR, - .eoc_msk = { STM32F4_EOC1, STM32F4_EOC2, STM32F4_EOC3}, - .ovr_msk = { STM32F4_OVR1, STM32F4_OVR2, STM32F4_OVR3}, + .eoc_msk = { STM32F4_EOC1, STM32F4_EOC2, STM32F4_EOC3 }, + .ovr_msk = { STM32F4_OVR1, STM32F4_OVR2, STM32F4_OVR3 }, .ier = STM32F4_ADC_CR1, .eocie_msk = STM32F4_EOCIE, }; @@ -316,8 +317,18 @@ static const struct stm32_adc_common_regs stm32f4_adc_common_regs = { static const struct stm32_adc_common_regs stm32h7_adc_common_regs = { .csr = STM32H7_ADC_CSR, .ccr = STM32H7_ADC_CCR, - .eoc_msk = { STM32H7_EOC_MST, STM32H7_EOC_SLV}, - .ovr_msk = { STM32H7_OVR_MST, STM32H7_OVR_SLV}, + .eoc_msk = { STM32H7_EOC_MST, STM32H7_EOC_SLV }, + .ovr_msk = { STM32H7_OVR_MST, STM32H7_OVR_SLV }, + .ier = STM32H7_ADC_IER, + .eocie_msk = STM32H7_EOCIE, +}; + +/* STM32MP13 common registers definitions */ +static const struct stm32_adc_common_regs stm32mp13_adc_common_regs = { + .csr = STM32H7_ADC_CSR, + .ccr = STM32H7_ADC_CCR, + .eoc_msk = { STM32H7_EOC_MST }, + .ovr_msk = { STM32H7_OVR_MST }, .ier = STM32H7_ADC_IER, .eocie_msk = STM32H7_EOCIE, }; @@ -868,6 +879,14 @@ static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = { .num_irqs = 2, }; +static const struct stm32_adc_priv_cfg stm32mp13_adc_priv_cfg = { + .regs = &stm32mp13_adc_common_regs, + .clk_sel = stm32h7_adc_clk_sel, + .max_clk_rate_hz = 75 * HZ_PER_MHZ, + .ipid = STM32MP13_IPIDR_NUMBER, + .num_irqs = 1, +}; + static const struct of_device_id stm32_adc_of_match[] = { { .compatible = "st,stm32f4-adc-core", @@ -878,6 +897,9 @@ static const struct of_device_id stm32_adc_of_match[] = { }, { .compatible = "st,stm32mp1-adc-core", .data = (void *)&stm32mp1_adc_priv_cfg + }, { + .compatible = "st,stm32mp13-adc-core", + .data = (void *)&stm32mp13_adc_priv_cfg }, { }, }; diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h index 2118ef63843d24b78b640b9ea0444c7d8d77242a..73b2c2e91c088c2e0fb2035ce63f24d05b3894d6 100644 --- a/drivers/iio/adc/stm32-adc-core.h +++ b/drivers/iio/adc/stm32-adc-core.h @@ -112,6 +112,11 @@ #define STM32MP1_ADC_IPDR 0x3F8 
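The STM32MP13 additions below define their register fields with GENMASK() so they can be packed and unpacked with the bitfield helpers. A self-contained illustration of that idiom, using demo masks shaped like the MP13 CALFACT fields (bits 22:16 and 6:0); these are not the driver's actual helpers:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_CALFACT_D_MASK	GENMASK(22, 16)
#define DEMO_CALFACT_S_MASK	GENMASK(6, 0)

static u32 demo_pack_calfact(u32 calfact_d, u32 calfact_s)
{
	/* FIELD_PREP() masks each value and shifts it into position */
	return FIELD_PREP(DEMO_CALFACT_D_MASK, calfact_d) |
	       FIELD_PREP(DEMO_CALFACT_S_MASK, calfact_s);
}

static u32 demo_unpack_calfact_d(u32 reg)
{
	/* FIELD_GET() extracts the field and right-aligns it */
	return FIELD_GET(DEMO_CALFACT_D_MASK, reg);
}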
#define STM32MP1_ADC_SIDR 0x3FC +/* STM32MP13 - Registers for each ADC instance */ +#define STM32MP13_ADC_DIFSEL 0xB0 +#define STM32MP13_ADC_CALFACT 0xB4 +#define STM32MP13_ADC2_OR 0xC8 + /* STM32H7 - common registers for all ADC instances */ #define STM32H7_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00) #define STM32H7_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x08) @@ -137,6 +142,7 @@ #define STM32H7_LINCALRDYW3 BIT(24) #define STM32H7_LINCALRDYW2 BIT(23) #define STM32H7_LINCALRDYW1 BIT(22) +#define STM32H7_LINCALRDYW_MASK GENMASK(27, 22) #define STM32H7_ADCALLIN BIT(16) #define STM32H7_BOOST BIT(8) #define STM32H7_ADSTP BIT(4) @@ -161,6 +167,9 @@ enum stm32h7_adc_dmngt { STM32H7_DMNGT_DMA_CIRC, /* DMA circular mode */ }; +/* STM32H7_ADC_DIFSEL - bit fields */ +#define STM32H7_DIFSEL_MASK GENMASK(19, 0) + /* STM32H7_ADC_CALFACT - bit fields */ #define STM32H7_CALFACT_D_SHIFT 16 #define STM32H7_CALFACT_D_MASK GENMASK(26, 16) @@ -210,7 +219,29 @@ enum stm32h7_adc_dmngt { /* STM32MP1_ADC_SIDR - bit fields */ #define STM32MP1_SIDR_MASK GENMASK(31, 0) +/* STM32MP13_ADC_CFGR specific bit fields */ +#define STM32MP13_DMAEN BIT(0) +#define STM32MP13_DMACFG BIT(1) +#define STM32MP13_DFSDMCFG BIT(2) +#define STM32MP13_RES_SHIFT 3 +#define STM32MP13_RES_MASK GENMASK(4, 3) + +/* STM32MP13_ADC_DIFSEL - bit fields */ +#define STM32MP13_DIFSEL_MASK GENMASK(18, 0) + +/* STM32MP13_ADC_CALFACT - bit fields */ +#define STM32MP13_CALFACT_D_SHIFT 16 +#define STM32MP13_CALFACT_D_MASK GENMASK(22, 16) +#define STM32MP13_CALFACT_S_SHIFT 0 +#define STM32MP13_CALFACT_S_MASK GENMASK(6, 0) + +/* STM32MP13_ADC2_OR - bit fields */ +#define STM32MP13_OP2 BIT(2) +#define STM32MP13_OP1 BIT(1) +#define STM32MP13_OP0 BIT(0) + #define STM32MP15_IPIDR_NUMBER 0x00110005 +#define STM32MP13_IPIDR_NUMBER 0x00110006 /** * struct stm32_adc_common - stm32 ADC driver common data (for all instances) diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 3cda529f081db43c6101583711ca914e89eb75d1..45d4e79f8e55ea1fab1dfa5810640b53c453d247 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -82,6 +83,8 @@ enum stm32_adc_extsel { enum stm32_adc_int_ch { STM32_ADC_INT_CH_NONE = -1, STM32_ADC_INT_CH_VDDCORE, + STM32_ADC_INT_CH_VDDCPU, + STM32_ADC_INT_CH_VDDQ_DDR, STM32_ADC_INT_CH_VREFINT, STM32_ADC_INT_CH_VBAT, STM32_ADC_INT_CH_NB, @@ -99,6 +102,8 @@ struct stm32_adc_ic { static const struct stm32_adc_ic stm32_adc_ic[STM32_ADC_INT_CH_NB] = { { "vddcore", STM32_ADC_INT_CH_VDDCORE }, + { "vddcpu", STM32_ADC_INT_CH_VDDCPU }, + { "vddq_ddr", STM32_ADC_INT_CH_VDDQ_DDR }, { "vrefint", STM32_ADC_INT_CH_VREFINT }, { "vbat", STM32_ADC_INT_CH_VBAT }, }; @@ -115,16 +120,12 @@ struct stm32_adc_trig_info { /** * struct stm32_adc_calib - optional adc calibration data - * @calfact_s: Calibration offset for single ended channels - * @calfact_d: Calibration offset in differential * @lincalfact: Linearity calibration factor - * @calibrated: Indicates calibration status + * @lincal_saved: Indicates that linear calibration factors are saved */ struct stm32_adc_calib { - u32 calfact_s; - u32 calfact_d; u32 lincalfact[STM32H7_LINCALFACT_NUM]; - bool calibrated; + bool lincal_saved; }; /** @@ -160,9 +161,12 @@ struct stm32_adc_vrefint { * @exten: trigger control register & bitfield * @extsel: trigger selection register & bitfield * @res: resolution selection register & bitfield + * @difsel: differential mode selection register & bitfield * @smpr: smpr1 & smpr2 
registers offset array * @smp_bits: smpr1 & smpr2 index and bitfields - * @or_vdd: option register & vddcore bitfield + * @or_vddcore: option register & vddcore bitfield + * @or_vddcpu: option register & vddcpu bitfield + * @or_vddq_ddr: option register & vddq_ddr bitfield * @ccr_vbat: common register & vbat bitfield * @ccr_vref: common register & vrefint bitfield */ @@ -176,9 +180,12 @@ struct stm32_adc_regspec { const struct stm32_adc_regs exten; const struct stm32_adc_regs extsel; const struct stm32_adc_regs res; + const struct stm32_adc_regs difsel; const u32 smpr[2]; const struct stm32_adc_regs *smp_bits; - const struct stm32_adc_regs or_vdd; + const struct stm32_adc_regs or_vddcore; + const struct stm32_adc_regs or_vddcpu; + const struct stm32_adc_regs or_vddq_ddr; const struct stm32_adc_regs ccr_vbat; const struct stm32_adc_regs ccr_vref; }; @@ -192,13 +199,16 @@ struct stm32_adc; * @trigs: external trigger sources * @clk_required: clock is required * @has_vregready: vregready status flag presence + * @has_boostmode: boost mode support flag + * @has_linearcal: linear calibration support flag + * @has_presel: channel preselection support flag * @prepare: optional prepare routine (power-up, enable) * @start_conv: routine to start conversions * @stop_conv: routine to stop conversions * @unprepare: optional unprepare routine (disable, power-down) * @irq_clear: routine to clear irqs * @smp_cycles: programmable sampling time (ADC clock cycles) - * @ts_vrefint_ns: vrefint minimum sampling time in ns + * @ts_int_ch: pointer to array of internal channels minimum sampling time in ns */ struct stm32_adc_cfg { const struct stm32_adc_regspec *regs; @@ -206,13 +216,16 @@ struct stm32_adc_cfg { struct stm32_adc_trig_info *trigs; bool clk_required; bool has_vregready; + bool has_boostmode; + bool has_linearcal; + bool has_presel; int (*prepare)(struct iio_dev *); void (*start_conv)(struct iio_dev *, bool dma); void (*stop_conv)(struct iio_dev *); void (*unprepare)(struct iio_dev *); void (*irq_clear)(struct iio_dev *indio_dev, u32 msk); const unsigned int *smp_cycles; - const unsigned int ts_vrefint_ns; + const unsigned int *ts_int_ch; }; /** @@ -312,6 +325,13 @@ static const struct stm32_adc_info stm32h7_adc_info = { .num_res = ARRAY_SIZE(stm32h7_adc_resolutions), }; +/* stm32mp13 can have up to 19 channels */ +static const struct stm32_adc_info stm32mp13_adc_info = { + .max_channels = 19, + .resolutions = stm32f4_adc_resolutions, + .num_res = ARRAY_SIZE(stm32f4_adc_resolutions), +}; + /* * stm32f4_sq - describe regular sequence registers * - L: sequence len (register & bit field) @@ -497,10 +517,37 @@ static const struct stm32_adc_regspec stm32h7_adc_regspec = { .extsel = { STM32H7_ADC_CFGR, STM32H7_EXTSEL_MASK, STM32H7_EXTSEL_SHIFT }, .res = { STM32H7_ADC_CFGR, STM32H7_RES_MASK, STM32H7_RES_SHIFT }, + .difsel = { STM32H7_ADC_DIFSEL, STM32H7_DIFSEL_MASK}, .smpr = { STM32H7_ADC_SMPR1, STM32H7_ADC_SMPR2 }, .smp_bits = stm32h7_smp_bits, }; +/* STM32MP13 programmable sampling time (ADC clock cycles, rounded down) */ +static const unsigned int stm32mp13_adc_smp_cycles[STM32_ADC_MAX_SMP + 1] = { + 2, 6, 12, 24, 47, 92, 247, 640, +}; + +static const struct stm32_adc_regspec stm32mp13_adc_regspec = { + .dr = STM32H7_ADC_DR, + .ier_eoc = { STM32H7_ADC_IER, STM32H7_EOCIE }, + .ier_ovr = { STM32H7_ADC_IER, STM32H7_OVRIE }, + .isr_eoc = { STM32H7_ADC_ISR, STM32H7_EOC }, + .isr_ovr = { STM32H7_ADC_ISR, STM32H7_OVR }, + .sqr = stm32h7_sq, + .exten = { STM32H7_ADC_CFGR, STM32H7_EXTEN_MASK, STM32H7_EXTEN_SHIFT 
}, + .extsel = { STM32H7_ADC_CFGR, STM32H7_EXTSEL_MASK, + STM32H7_EXTSEL_SHIFT }, + .res = { STM32H7_ADC_CFGR, STM32MP13_RES_MASK, STM32MP13_RES_SHIFT }, + .difsel = { STM32MP13_ADC_DIFSEL, STM32MP13_DIFSEL_MASK}, + .smpr = { STM32H7_ADC_SMPR1, STM32H7_ADC_SMPR2 }, + .smp_bits = stm32h7_smp_bits, + .or_vddcore = { STM32MP13_ADC2_OR, STM32MP13_OP0 }, + .or_vddcpu = { STM32MP13_ADC2_OR, STM32MP13_OP1 }, + .or_vddq_ddr = { STM32MP13_ADC2_OR, STM32MP13_OP2 }, + .ccr_vbat = { STM32H7_ADC_CCR, STM32H7_VBATEN }, + .ccr_vref = { STM32H7_ADC_CCR, STM32H7_VREFEN }, +}; + static const struct stm32_adc_regspec stm32mp1_adc_regspec = { .dr = STM32H7_ADC_DR, .ier_eoc = { STM32H7_ADC_IER, STM32H7_EOCIE }, @@ -512,9 +559,10 @@ static const struct stm32_adc_regspec stm32mp1_adc_regspec = { .extsel = { STM32H7_ADC_CFGR, STM32H7_EXTSEL_MASK, STM32H7_EXTSEL_SHIFT }, .res = { STM32H7_ADC_CFGR, STM32H7_RES_MASK, STM32H7_RES_SHIFT }, + .difsel = { STM32H7_ADC_DIFSEL, STM32H7_DIFSEL_MASK}, .smpr = { STM32H7_ADC_SMPR1, STM32H7_ADC_SMPR2 }, .smp_bits = stm32h7_smp_bits, - .or_vdd = { STM32MP1_ADC2_OR, STM32MP1_VDDCOREEN }, + .or_vddcore = { STM32MP1_ADC2_OR, STM32MP1_VDDCOREEN }, .ccr_vbat = { STM32H7_ADC_CCR, STM32H7_VBATEN }, .ccr_vref = { STM32H7_ADC_CCR, STM32H7_VREFEN }, }; @@ -675,8 +723,18 @@ static void stm32_adc_int_ch_enable(struct iio_dev *indio_dev) switch (i) { case STM32_ADC_INT_CH_VDDCORE: dev_dbg(&indio_dev->dev, "Enable VDDCore\n"); - stm32_adc_set_bits(adc, adc->cfg->regs->or_vdd.reg, - adc->cfg->regs->or_vdd.mask); + stm32_adc_set_bits(adc, adc->cfg->regs->or_vddcore.reg, + adc->cfg->regs->or_vddcore.mask); + break; + case STM32_ADC_INT_CH_VDDCPU: + dev_dbg(&indio_dev->dev, "Enable VDDCPU\n"); + stm32_adc_set_bits(adc, adc->cfg->regs->or_vddcpu.reg, + adc->cfg->regs->or_vddcpu.mask); + break; + case STM32_ADC_INT_CH_VDDQ_DDR: + dev_dbg(&indio_dev->dev, "Enable VDDQ_DDR\n"); + stm32_adc_set_bits(adc, adc->cfg->regs->or_vddq_ddr.reg, + adc->cfg->regs->or_vddq_ddr.mask); break; case STM32_ADC_INT_CH_VREFINT: dev_dbg(&indio_dev->dev, "Enable VREFInt\n"); @@ -702,8 +760,16 @@ static void stm32_adc_int_ch_disable(struct stm32_adc *adc) switch (i) { case STM32_ADC_INT_CH_VDDCORE: - stm32_adc_clr_bits(adc, adc->cfg->regs->or_vdd.reg, - adc->cfg->regs->or_vdd.mask); + stm32_adc_clr_bits(adc, adc->cfg->regs->or_vddcore.reg, + adc->cfg->regs->or_vddcore.mask); + break; + case STM32_ADC_INT_CH_VDDCPU: + stm32_adc_clr_bits(adc, adc->cfg->regs->or_vddcpu.reg, + adc->cfg->regs->or_vddcpu.mask); + break; + case STM32_ADC_INT_CH_VDDQ_DDR: + stm32_adc_clr_bits(adc, adc->cfg->regs->or_vddq_ddr.reg, + adc->cfg->regs->or_vddq_ddr.mask); break; case STM32_ADC_INT_CH_VREFINT: stm32_adc_clr_bits_common(adc, adc->cfg->regs->ccr_vref.reg, @@ -801,6 +867,7 @@ static void stm32h7_adc_stop_conv(struct iio_dev *indio_dev) if (ret) dev_warn(&indio_dev->dev, "stop failed\n"); + /* STM32H7_DMNGT_MASK covers STM32MP13_DMAEN & STM32MP13_DMACFG */ stm32_adc_clr_bits(adc, STM32H7_ADC_CFGR, STM32H7_DMNGT_MASK); } @@ -811,6 +878,17 @@ static void stm32h7_adc_irq_clear(struct iio_dev *indio_dev, u32 msk) stm32_adc_set_bits(adc, adc->cfg->regs->isr_eoc.reg, msk); } +static void stm32mp13_adc_start_conv(struct iio_dev *indio_dev, bool dma) +{ + struct stm32_adc *adc = iio_priv(indio_dev); + + if (dma) + stm32_adc_set_bits(adc, STM32H7_ADC_CFGR, + STM32MP13_DMAEN | STM32MP13_DMACFG); + + stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADSTART); +} + static int stm32h7_adc_exit_pwr_down(struct iio_dev *indio_dev) { struct stm32_adc *adc = 
iio_priv(indio_dev); @@ -821,7 +899,8 @@ static int stm32h7_adc_exit_pwr_down(struct iio_dev *indio_dev) stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_DEEPPWD); stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADVREGEN); - if (adc->common->rate > STM32H7_BOOST_CLKRATE) + if (adc->cfg->has_boostmode && + adc->common->rate > STM32H7_BOOST_CLKRATE) stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_BOOST); /* Wait for startup time */ @@ -843,7 +922,8 @@ static int stm32h7_adc_exit_pwr_down(struct iio_dev *indio_dev) static void stm32h7_adc_enter_pwr_down(struct stm32_adc *adc) { - stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_BOOST); + if (adc->cfg->has_boostmode) + stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_BOOST); /* Setting DEEPPWD disables ADC vreg and clears ADVREGEN */ stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_DEEPPWD); @@ -922,14 +1002,7 @@ static int stm32h7_adc_read_selfcalib(struct iio_dev *indio_dev) lincalrdyw_mask >>= 1; } - - /* Read offset calibration */ - val = stm32_adc_readl(adc, STM32H7_ADC_CALFACT); - adc->cal.calfact_s = (val & STM32H7_CALFACT_S_MASK); - adc->cal.calfact_s >>= STM32H7_CALFACT_S_SHIFT; - adc->cal.calfact_d = (val & STM32H7_CALFACT_D_MASK); - adc->cal.calfact_d >>= STM32H7_CALFACT_D_SHIFT; - adc->cal.calibrated = true; + adc->cal.lincal_saved = true; return 0; } @@ -945,10 +1018,6 @@ static int stm32h7_adc_restore_selfcalib(struct iio_dev *indio_dev) int i, ret; u32 lincalrdyw_mask, val; - val = (adc->cal.calfact_s << STM32H7_CALFACT_S_SHIFT) | - (adc->cal.calfact_d << STM32H7_CALFACT_D_SHIFT); - stm32_adc_writel(adc, STM32H7_ADC_CALFACT, val); - lincalrdyw_mask = STM32H7_LINCALRDYW6; for (i = STM32H7_LINCALFACT_NUM - 1; i >= 0; i--) { /* @@ -1010,17 +1079,21 @@ static int stm32h7_adc_restore_selfcalib(struct iio_dev *indio_dev) /** * stm32h7_adc_selfcalib() - Procedure to calibrate ADC * @indio_dev: IIO device instance + * @do_lincal: linear calibration request flag * Note: Must be called once ADC is out of power down. + * + * Run offset calibration unconditionally. + * Run linear calibration if requested & supported. */ -static int stm32h7_adc_selfcalib(struct iio_dev *indio_dev) +static int stm32h7_adc_selfcalib(struct iio_dev *indio_dev, int do_lincal) { struct stm32_adc *adc = iio_priv(indio_dev); int ret; + u32 msk = STM32H7_ADCALDIF; u32 val; - if (adc->cal.calibrated) - return true; - + if (adc->cfg->has_linearcal && do_lincal) + msk |= STM32H7_ADCALLIN; /* ADC must be disabled for calibration */ stm32h7_adc_disable(indio_dev); @@ -1029,8 +1102,7 @@ static int stm32h7_adc_selfcalib(struct iio_dev *indio_dev) * - Offset calibration for single ended inputs * - No linearity calibration (do it later, before reading it) */ - stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADCALDIF); - stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADCALLIN); + stm32_adc_clr_bits(adc, STM32H7_ADC_CR, msk); /* Start calibration, then wait for completion */ stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADCAL); @@ -1038,7 +1110,7 @@ static int stm32h7_adc_selfcalib(struct iio_dev *indio_dev) !(val & STM32H7_ADCAL), 100, STM32H7_ADC_CALIB_TIMEOUT_US); if (ret) { - dev_err(&indio_dev->dev, "calibration failed\n"); + dev_err(&indio_dev->dev, "calibration (single-ended) error %d\n", ret); goto out; } @@ -1048,24 +1120,50 @@ static int stm32h7_adc_selfcalib(struct iio_dev *indio_dev) * - Linearity calibration (needs to be done only once for single/diff) * will run simultaneously with offset calibration. 
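 *
 * In register terms the second pass below is: set ADCALDIF (plus
 * ADCALLIN when linear calibration was requested), kick off the
 * calibration by setting ADCAL, then poll STM32H7_ADC_CR until ADCAL
 * clears or STM32H7_ADC_CALIB_TIMEOUT_US expires.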
*/ - stm32_adc_set_bits(adc, STM32H7_ADC_CR, - STM32H7_ADCALDIF | STM32H7_ADCALLIN); + stm32_adc_set_bits(adc, STM32H7_ADC_CR, msk); stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADCAL); ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val, !(val & STM32H7_ADCAL), 100, STM32H7_ADC_CALIB_TIMEOUT_US); if (ret) { - dev_err(&indio_dev->dev, "calibration failed\n"); + dev_err(&indio_dev->dev, "calibration (diff%s) error %d\n", + (msk & STM32H7_ADCALLIN) ? "+linear" : "", ret); goto out; } out: - stm32_adc_clr_bits(adc, STM32H7_ADC_CR, - STM32H7_ADCALDIF | STM32H7_ADCALLIN); + stm32_adc_clr_bits(adc, STM32H7_ADC_CR, msk); return ret; } +/** + * stm32h7_adc_check_selfcalib() - Check linear calibration status + * @indio_dev: IIO device instance + * + * Used to check if linear calibration has been done. + * Return true if linear calibration factors are already saved in private data + * or if a linear calibration has been done at boot stage. + */ +static int stm32h7_adc_check_selfcalib(struct iio_dev *indio_dev) +{ + struct stm32_adc *adc = iio_priv(indio_dev); + u32 val; + + if (adc->cal.lincal_saved) + return true; + + /* + * Check if linear calibration factors are available in ADC registers, + * by checking that all LINCALRDYWx bits are set. + */ + val = stm32_adc_readl(adc, STM32H7_ADC_CR) & STM32H7_LINCALRDYW_MASK; + if (val == STM32H7_LINCALRDYW_MASK) + return true; + + return false; +} + /** * stm32h7_adc_prepare() - Leave power down mode to enable ADC. * @indio_dev: IIO device instance @@ -1080,34 +1178,41 @@ out: static int stm32h7_adc_prepare(struct iio_dev *indio_dev) { struct stm32_adc *adc = iio_priv(indio_dev); - int calib, ret; + int lincal_done = false; + int ret; ret = stm32h7_adc_exit_pwr_down(indio_dev); if (ret) return ret; - ret = stm32h7_adc_selfcalib(indio_dev); + if (adc->cfg->has_linearcal) + lincal_done = stm32h7_adc_check_selfcalib(indio_dev); + + /* Always run offset calibration. 
Run linear calibration only once */ + ret = stm32h7_adc_selfcalib(indio_dev, !lincal_done); if (ret < 0) goto pwr_dwn; - calib = ret; stm32_adc_int_ch_enable(indio_dev); - stm32_adc_writel(adc, STM32H7_ADC_DIFSEL, adc->difsel); + stm32_adc_writel(adc, adc->cfg->regs->difsel.reg, adc->difsel); ret = stm32h7_adc_enable(indio_dev); if (ret) goto ch_disable; - /* Either restore or read calibration result for future reference */ - if (calib) - ret = stm32h7_adc_restore_selfcalib(indio_dev); - else - ret = stm32h7_adc_read_selfcalib(indio_dev); - if (ret) - goto disable; + if (adc->cfg->has_linearcal) { + if (!adc->cal.lincal_saved) + ret = stm32h7_adc_read_selfcalib(indio_dev); + else + ret = stm32h7_adc_restore_selfcalib(indio_dev); + + if (ret) + goto disable; + } - stm32_adc_writel(adc, STM32H7_ADC_PCSEL, adc->pcsel); + if (adc->cfg->has_presel) + stm32_adc_writel(adc, STM32H7_ADC_PCSEL, adc->pcsel); return 0; @@ -1125,7 +1230,8 @@ static void stm32h7_adc_unprepare(struct iio_dev *indio_dev) { struct stm32_adc *adc = iio_priv(indio_dev); - stm32_adc_writel(adc, STM32H7_ADC_PCSEL, 0); + if (adc->cfg->has_presel) + stm32_adc_writel(adc, STM32H7_ADC_PCSEL, 0); stm32h7_adc_disable(indio_dev); stm32_adc_int_ch_disable(adc); stm32h7_adc_enter_pwr_down(adc); @@ -1774,6 +1880,23 @@ static const struct iio_chan_spec_ext_info stm32_adc_ext_info[] = { {}, }; +static void stm32_adc_debugfs_init(struct iio_dev *indio_dev) +{ + struct stm32_adc *adc = iio_priv(indio_dev); + struct dentry *d = iio_get_debugfs_dentry(indio_dev); + struct stm32_adc_calib *cal = &adc->cal; + char buf[16]; + unsigned int i; + + if (!adc->cfg->has_linearcal) + return; + + for (i = 0; i < STM32H7_LINCALFACT_NUM; i++) { + snprintf(buf, sizeof(buf), "lincalfact%d", i + 1); + debugfs_create_u32(buf, 0444, d, &cal->lincalfact[i]); + } +} + static int stm32_adc_fw_get_resolution(struct iio_dev *indio_dev) { struct device *dev = &indio_dev->dev; @@ -1802,14 +1925,15 @@ static void stm32_adc_smpr_init(struct stm32_adc *adc, int channel, u32 smp_ns) { const struct stm32_adc_regs *smpr = &adc->cfg->regs->smp_bits[channel]; u32 period_ns, shift = smpr->shift, mask = smpr->mask; - unsigned int smp, r = smpr->reg; + unsigned int i, smp, r = smpr->reg; /* - * For vrefint channel, ensure that the sampling time cannot + * For internal channels, ensure that the sampling time cannot * be lower than the one specified in the datasheet */ - if (channel == adc->int_ch[STM32_ADC_INT_CH_VREFINT]) - smp_ns = max(smp_ns, adc->cfg->ts_vrefint_ns); + for (i = 0; i < STM32_ADC_INT_CH_NB; i++) + if (channel == adc->int_ch[i] && adc->int_ch[i] != STM32_ADC_INT_CH_NONE) + smp_ns = max(smp_ns, adc->cfg->ts_int_ch[i]); /* Determine sampling time (ADC clock cycles) */ period_ns = NSEC_PER_SEC / adc->common->rate; @@ -1857,7 +1981,7 @@ static void stm32_adc_chan_init_one(struct iio_dev *indio_dev, adc->pcsel |= BIT(chan->channel); if (differential) { /* pre-build diff channels mask */ - adc->difsel |= BIT(chan->channel); + adc->difsel |= BIT(chan->channel) & adc->cfg->regs->difsel.mask; /* Also add negative input to pre-selected channels */ adc->pcsel |= BIT(chan->channel2); } @@ -1998,6 +2122,35 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n for (i = 0; i < STM32_ADC_INT_CH_NB; i++) { if (!strncmp(stm32_adc_ic[i].name, ch_name, STM32_ADC_CH_SZ)) { + /* Check internal channel availability */ + switch (i) { + case STM32_ADC_INT_CH_VDDCORE: + if (!adc->cfg->regs->or_vddcore.reg) + dev_warn(&indio_dev->dev, + "%s channel not 
available\n", ch_name); + break; + case STM32_ADC_INT_CH_VDDCPU: + if (!adc->cfg->regs->or_vddcpu.reg) + dev_warn(&indio_dev->dev, + "%s channel not available\n", ch_name); + break; + case STM32_ADC_INT_CH_VDDQ_DDR: + if (!adc->cfg->regs->or_vddq_ddr.reg) + dev_warn(&indio_dev->dev, + "%s channel not available\n", ch_name); + break; + case STM32_ADC_INT_CH_VREFINT: + if (!adc->cfg->regs->ccr_vref.reg) + dev_warn(&indio_dev->dev, + "%s channel not available\n", ch_name); + break; + case STM32_ADC_INT_CH_VBAT: + if (!adc->cfg->regs->ccr_vbat.reg) + dev_warn(&indio_dev->dev, + "%s channel not available\n", ch_name); + break; + } + if (stm32_adc_ic[i].idx != STM32_ADC_INT_CH_VREFINT) { adc->int_ch[i] = chan; break; @@ -2330,6 +2483,9 @@ static int stm32_adc_probe(struct platform_device *pdev) pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); + if (IS_ENABLED(CONFIG_DEBUG_FS)) + stm32_adc_debugfs_init(indio_dev); + return 0; err_hw_stop: @@ -2358,6 +2514,7 @@ static int stm32_adc_remove(struct platform_device *pdev) struct stm32_adc *adc = iio_priv(indio_dev); pm_runtime_get_sync(&pdev->dev); + /* iio_device_unregister() also removes debugfs entries */ iio_device_unregister(indio_dev); stm32_adc_hw_stop(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -2431,36 +2588,66 @@ static const struct stm32_adc_cfg stm32f4_adc_cfg = { .irq_clear = stm32f4_adc_irq_clear, }; +const unsigned int stm32_adc_min_ts_h7[] = { 0, 0, 0, 4300, 9000 }; +static_assert(ARRAY_SIZE(stm32_adc_min_ts_h7) == STM32_ADC_INT_CH_NB); + static const struct stm32_adc_cfg stm32h7_adc_cfg = { .regs = &stm32h7_adc_regspec, .adc_info = &stm32h7_adc_info, .trigs = stm32h7_adc_trigs, + .has_boostmode = true, + .has_linearcal = true, + .has_presel = true, .start_conv = stm32h7_adc_start_conv, .stop_conv = stm32h7_adc_stop_conv, .prepare = stm32h7_adc_prepare, .unprepare = stm32h7_adc_unprepare, .smp_cycles = stm32h7_adc_smp_cycles, .irq_clear = stm32h7_adc_irq_clear, + .ts_int_ch = stm32_adc_min_ts_h7, }; +const unsigned int stm32_adc_min_ts_mp1[] = { 100, 100, 100, 4300, 9800 }; +static_assert(ARRAY_SIZE(stm32_adc_min_ts_mp1) == STM32_ADC_INT_CH_NB); + static const struct stm32_adc_cfg stm32mp1_adc_cfg = { .regs = &stm32mp1_adc_regspec, .adc_info = &stm32h7_adc_info, .trigs = stm32h7_adc_trigs, .has_vregready = true, + .has_boostmode = true, + .has_linearcal = true, + .has_presel = true, .start_conv = stm32h7_adc_start_conv, .stop_conv = stm32h7_adc_stop_conv, .prepare = stm32h7_adc_prepare, .unprepare = stm32h7_adc_unprepare, .smp_cycles = stm32h7_adc_smp_cycles, .irq_clear = stm32h7_adc_irq_clear, - .ts_vrefint_ns = 4300, + .ts_int_ch = stm32_adc_min_ts_mp1, +}; + +const unsigned int stm32_adc_min_ts_mp13[] = { 100, 0, 0, 4300, 9800 }; +static_assert(ARRAY_SIZE(stm32_adc_min_ts_mp13) == STM32_ADC_INT_CH_NB); + +static const struct stm32_adc_cfg stm32mp13_adc_cfg = { + .regs = &stm32mp13_adc_regspec, + .adc_info = &stm32mp13_adc_info, + .trigs = stm32h7_adc_trigs, + .start_conv = stm32mp13_adc_start_conv, + .stop_conv = stm32h7_adc_stop_conv, + .prepare = stm32h7_adc_prepare, + .unprepare = stm32h7_adc_unprepare, + .smp_cycles = stm32mp13_adc_smp_cycles, + .irq_clear = stm32h7_adc_irq_clear, + .ts_int_ch = stm32_adc_min_ts_mp13, }; static const struct of_device_id stm32_adc_of_match[] = { { .compatible = "st,stm32f4-adc", .data = (void *)&stm32f4_adc_cfg }, { .compatible = "st,stm32h7-adc", .data = (void *)&stm32h7_adc_cfg }, { .compatible = "st,stm32mp1-adc", .data = (void *)&stm32mp1_adc_cfg }, + { .compatible = 
"st,stm32mp13-adc", .data = (void *)&stm32mp13_adc_cfg }, {}, }; MODULE_DEVICE_TABLE(of, stm32_adc_of_match); diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c index bd48b073e7200c279a0d2cf406e4f2c00b2570a0..c663dc59d45901b2c5cd55cf172b00c6ce6e0619 100644 --- a/drivers/iio/adc/ti-adc081c.c +++ b/drivers/iio/adc/ti-adc081c.c @@ -152,9 +152,9 @@ static void adc081c_reg_disable(void *reg) regulator_disable(reg); } -static int adc081c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int adc081c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct iio_dev *iio; struct adc081c *adc; const struct adcxx1c_model *model; @@ -235,7 +235,7 @@ static struct i2c_driver adc081c_driver = { .of_match_table = adc081c_of_match, .acpi_match_table = adc081c_acpi_match, }, - .probe = adc081c_probe, + .probe_new = adc081c_probe, .id_table = adc081c_id, }; module_i2c_driver(adc081c_driver); diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c index 622fd384983c727ad8c386e4c607eb7a73584df5..b3d5b9b7255bc3bfe50c5eb411c5c1519c75abe2 100644 --- a/drivers/iio/adc/ti-adc128s052.c +++ b/drivers/iio/adc/ti-adc128s052.c @@ -181,13 +181,13 @@ static int adc128_probe(struct spi_device *spi) } static const struct of_device_id adc128_of_match[] = { - { .compatible = "ti,adc128s052", }, - { .compatible = "ti,adc122s021", }, - { .compatible = "ti,adc122s051", }, - { .compatible = "ti,adc122s101", }, - { .compatible = "ti,adc124s021", }, - { .compatible = "ti,adc124s051", }, - { .compatible = "ti,adc124s101", }, + { .compatible = "ti,adc128s052", .data = (void*)0L, }, + { .compatible = "ti,adc122s021", .data = (void*)1L, }, + { .compatible = "ti,adc122s051", .data = (void*)1L, }, + { .compatible = "ti,adc122s101", .data = (void*)1L, }, + { .compatible = "ti,adc124s021", .data = (void*)2L, }, + { .compatible = "ti,adc124s051", .data = (void*)2L, }, + { .compatible = "ti,adc124s101", .data = (void*)2L, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, adc128_of_match); diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index 8bceba69402603719c8f25e1a3cb1034aaf5f869..56af5e148802252c785cf1c626a3e570a467016d 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -974,9 +974,9 @@ static int ads1015_set_conv_mode(struct ads1015_data *data, int mode) mode << ADS1015_CFG_MOD_SHIFT); } -static int ads1015_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ads1015_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); const struct ads1015_chip_data *chip; struct iio_dev *indio_dev; struct ads1015_data *data; @@ -1195,7 +1195,7 @@ static struct i2c_driver ads1015_driver = { .of_match_table = ads1015_of_match, .pm = &ads1015_pm_ops, }, - .probe = ads1015_probe, + .probe_new = ads1015_probe, .remove = ads1015_remove, .id_table = ads1015_id, }; diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c index 5235a93f28bc67c6eec258f518987dbc8be5623c..fcfc4625431332263273fb9a8395fafcd05b301a 100644 --- a/drivers/iio/adc/ti-ads131e08.c +++ b/drivers/iio/adc/ti-ads131e08.c @@ -807,6 +807,8 @@ static int ads131e08_probe(struct spi_device *spi) int ret; info = device_get_match_data(&spi->dev); + if (!info) + info = (void *)spi_get_device_id(spi)->driver_data; if (!info) { dev_err(&spi->dev, "failed to get match data\n"); return -ENODEV; @@ -926,12 +928,21 @@ static const struct 
of_device_id ads131e08_of_match[] = { }; MODULE_DEVICE_TABLE(of, ads131e08_of_match); +static const struct spi_device_id ads131e08_ids[] = { + { "ads131e04", (kernel_ulong_t)&ads131e08_info_tbl[ads131e04] }, + { "ads131e06", (kernel_ulong_t)&ads131e08_info_tbl[ads131e06] }, + { "ads131e08", (kernel_ulong_t)&ads131e08_info_tbl[ads131e08] }, + {} +}; +MODULE_DEVICE_TABLE(spi, ads131e08_ids); + static struct spi_driver ads131e08_driver = { .driver = { .name = "ads131e08", .of_match_table = ads131e08_of_match, }, .probe = ads131e08_probe, + .id_table = ads131e08_ids, }; module_spi_driver(ads131e08_driver); diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index c6b16cf6e36755725ab414fd50cbdc6dcda14b9f..ae31aafd26538a6c02535cb91b6dd7c9da7dfccf 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -156,6 +157,9 @@ struct vf610_adc { void __iomem *regs; struct clk *clk; + /* lock to protect against multiple access to the device */ + struct mutex lock; + u32 vref_uv; u32 value; struct regulator *vref; @@ -467,11 +471,11 @@ static int vf610_set_conversion_mode(struct iio_dev *indio_dev, { struct vf610_adc *info = iio_priv(indio_dev); - mutex_lock(&indio_dev->mlock); + mutex_lock(&info->lock); info->adc_feature.conv_mode = mode; vf610_adc_calculate_rates(info); vf610_adc_hw_init(info); - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&info->lock); return 0; } @@ -622,6 +626,58 @@ static const struct attribute_group vf610_attribute_group = { .attrs = vf610_attributes, }; +static int vf610_read_sample(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val) +{ + struct vf610_adc *info = iio_priv(indio_dev); + unsigned int hc_cfg; + int ret; + + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + + mutex_lock(&info->lock); + reinit_completion(&info->completion); + hc_cfg = VF610_ADC_ADCHC(chan->channel); + hc_cfg |= VF610_ADC_AIEN; + writel(hc_cfg, info->regs + VF610_REG_ADC_HC0); + ret = wait_for_completion_interruptible_timeout(&info->completion, + VF610_ADC_TIMEOUT); + if (ret == 0) { + ret = -ETIMEDOUT; + goto out_unlock; + } + + if (ret < 0) + goto out_unlock; + + switch (chan->type) { + case IIO_VOLTAGE: + *val = info->value; + break; + case IIO_TEMP: + /* + * Calculate in degree Celsius times 1000 + * Using the typical sensor slope of 1.84 mV/°C + * and VREFH_ADC at 3.3V, V at 25°C of 699 mV + */ + *val = 25000 - ((int)info->value - VF610_VTEMP25_3V3) * + 1000000 / VF610_TEMP_SLOPE_COEFF; + + break; + default: + ret = -EINVAL; + break; + } + +out_unlock: + mutex_unlock(&info->lock); + iio_device_release_direct_mode(indio_dev); + + return ret; +} + static int vf610_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, @@ -629,53 +685,15 @@ static int vf610_read_raw(struct iio_dev *indio_dev, long mask) { struct vf610_adc *info = iio_priv(indio_dev); - unsigned int hc_cfg; long ret; switch (mask) { case IIO_CHAN_INFO_RAW: case IIO_CHAN_INFO_PROCESSED: - mutex_lock(&indio_dev->mlock); - if (iio_buffer_enabled(indio_dev)) { - mutex_unlock(&indio_dev->mlock); - return -EBUSY; - } - - reinit_completion(&info->completion); - hc_cfg = VF610_ADC_ADCHC(chan->channel); - hc_cfg |= VF610_ADC_AIEN; - writel(hc_cfg, info->regs + VF610_REG_ADC_HC0); - ret = wait_for_completion_interruptible_timeout - (&info->completion, VF610_ADC_TIMEOUT); - if (ret == 0) { - mutex_unlock(&indio_dev->mlock); - return -ETIMEDOUT; - } - if (ret < 0) { 
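The ads131e08 probe change above lets the driver bind without an OF/ACPI match: when device_get_match_data() returns NULL, the chip info falls back to the driver_data of the new spi_device_id table. The generic shape of that fallback, with a placeholder "demo" type rather than the ads131e08 structures:

#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/spi/spi.h>

struct demo_chip_info {
	unsigned int num_channels;
};

static int demo_probe(struct spi_device *spi)
{
	const struct demo_chip_info *info;

	info = device_get_match_data(&spi->dev);
	if (!info) {
		/* no firmware node match: use the spi_device_id entry */
		info = (void *)spi_get_device_id(spi)->driver_data;
	}
	if (!info)
		return -ENODEV;

	return 0;
}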
- mutex_unlock(&indio_dev->mlock); + ret = vf610_read_sample(indio_dev, chan, val); + if (ret < 0) return ret; - } - switch (chan->type) { - case IIO_VOLTAGE: - *val = info->value; - break; - case IIO_TEMP: - /* - * Calculate in degree Celsius times 1000 - * Using the typical sensor slope of 1.84 mV/°C - * and VREFH_ADC at 3.3V, V at 25°C of 699 mV - */ - *val = 25000 - ((int)info->value - VF610_VTEMP25_3V3) * - 1000000 / VF610_TEMP_SLOPE_COEFF; - - break; - default: - mutex_unlock(&indio_dev->mlock); - return -EINVAL; - } - - mutex_unlock(&indio_dev->mlock); return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: @@ -878,6 +896,8 @@ static int vf610_adc_probe(struct platform_device *pdev) goto error_iio_device_register; } + mutex_init(&info->lock); + ret = iio_device_register(indio_dev); if (ret) { dev_err(&pdev->dev, "Couldn't register the device.\n"); diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig index fcf6d2269bfc25d9f497a25ef4ccf3ae5018e63a..2843fcb70e24b419a7ebf009e6cd943e2040467b 100644 --- a/drivers/iio/addac/Kconfig +++ b/drivers/iio/addac/Kconfig @@ -5,6 +5,20 @@ menu "Analog to digital and digital to analog converters" +config AD74115 + tristate "Analog Devices AD74115H driver" + depends on GPIOLIB && SPI + select CRC8 + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + select REGMAP_SPI + help + Say yes here to build support for Analog Devices AD74115H + single-channel software configurable input/output solution. + + To compile this driver as a module, choose M here: the + module will be called ad74115. + config AD74413R tristate "Analog Devices AD74412R/AD74413R driver" depends on GPIOLIB && SPI diff --git a/drivers/iio/addac/Makefile b/drivers/iio/addac/Makefile index 17de20ef0d8e9df868a33076b537e56f1ef2ee0f..577777276e439e1b743d2d1794717f8433901956 100644 --- a/drivers/iio/addac/Makefile +++ b/drivers/iio/addac/Makefile @@ -4,5 +4,6 @@ # # When adding new entries keep the list in alphabetical order +obj-$(CONFIG_AD74115) += ad74115.o obj-$(CONFIG_AD74413R) += ad74413r.o obj-$(CONFIG_STX104) += stx104.o diff --git a/drivers/iio/addac/ad74115.c b/drivers/iio/addac/ad74115.c new file mode 100644 index 0000000000000000000000000000000000000000..e6bc5eb3788df6579789059d369603a9dc874d05 --- /dev/null +++ b/drivers/iio/addac/ad74115.c @@ -0,0 +1,1943 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Analog Devices, Inc. 
+ * Author: Cosmin Tanislav + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#define AD74115_NAME "ad74115" + +#define AD74115_CH_FUNC_SETUP_REG 0x01 + +#define AD74115_ADC_CONFIG_REG 0x02 +#define AD74115_ADC_CONFIG_CONV2_RATE_MASK GENMASK(15, 13) +#define AD74115_ADC_CONFIG_CONV1_RATE_MASK GENMASK(12, 10) +#define AD74115_ADC_CONFIG_CONV2_RANGE_MASK GENMASK(9, 7) +#define AD74115_ADC_CONFIG_CONV1_RANGE_MASK GENMASK(6, 4) + +#define AD74115_PWR_OPTIM_CONFIG_REG 0x03 + +#define AD74115_DIN_CONFIG1_REG 0x04 +#define AD74115_DIN_COMPARATOR_EN_MASK BIT(13) +#define AD74115_DIN_SINK_MASK GENMASK(11, 7) +#define AD74115_DIN_DEBOUNCE_MASK GENMASK(4, 0) + +#define AD74115_DIN_CONFIG2_REG 0x05 +#define AD74115_COMP_THRESH_MASK GENMASK(6, 0) + +#define AD74115_OUTPUT_CONFIG_REG 0x06 +#define AD74115_OUTPUT_SLEW_EN_MASK GENMASK(6, 5) +#define AD74115_OUTPUT_SLEW_LIN_STEP_MASK GENMASK(4, 3) +#define AD74115_OUTPUT_SLEW_LIN_RATE_MASK GENMASK(2, 1) + +#define AD74115_RTD3W4W_CONFIG_REG 0x07 + +#define AD74115_BURNOUT_CONFIG_REG 0x0a +#define AD74115_BURNOUT_EXT2_EN_MASK BIT(10) +#define AD74115_BURNOUT_EXT1_EN_MASK BIT(5) +#define AD74115_BURNOUT_VIOUT_EN_MASK BIT(0) + +#define AD74115_DAC_CODE_REG 0x0b + +#define AD74115_DAC_ACTIVE_REG 0x0d + +#define AD74115_GPIO_CONFIG_X_REG(x) (0x35 + (x)) +#define AD74115_GPIO_CONFIG_GPI_DATA BIT(5) +#define AD74115_GPIO_CONFIG_GPO_DATA BIT(4) +#define AD74115_GPIO_CONFIG_SELECT_MASK GENMASK(2, 0) + +#define AD74115_CHARGE_PUMP_REG 0x3a + +#define AD74115_ADC_CONV_CTRL_REG 0x3b +#define AD74115_ADC_CONV_SEQ_MASK GENMASK(13, 12) + +#define AD74115_DIN_COMP_OUT_REG 0x40 + +#define AD74115_LIVE_STATUS_REG 0x42 +#define AD74115_ADC_DATA_RDY_MASK BIT(3) + +#define AD74115_READ_SELECT_REG 0x64 + +#define AD74115_CMD_KEY_REG 0x78 +#define AD74115_CMD_KEY_RESET1 0x15fa +#define AD74115_CMD_KEY_RESET2 0xaf51 + +#define AD74115_CRC_POLYNOMIAL 0x7 +DECLARE_CRC8_TABLE(ad74115_crc8_table); + +#define AD74115_ADC_CODE_MAX ((int)GENMASK(15, 0)) +#define AD74115_ADC_CODE_HALF (AD74115_ADC_CODE_MAX / 2) + +#define AD74115_DAC_VOLTAGE_MAX 12000 +#define AD74115_DAC_CURRENT_MAX 25 +#define AD74115_DAC_CODE_MAX ((int)GENMASK(13, 0)) +#define AD74115_DAC_CODE_HALF (AD74115_DAC_CODE_MAX / 2) + +#define AD74115_COMP_THRESH_MAX 98 + +#define AD74115_SENSE_RESISTOR_OHMS 100 +#define AD74115_REF_RESISTOR_OHMS 2100 + +#define AD74115_DIN_SINK_LOW_STEP 120 +#define AD74115_DIN_SINK_HIGH_STEP 240 +#define AD74115_DIN_SINK_MAX 31 + +#define AD74115_FRAME_SIZE 4 +#define AD74115_GPIO_NUM 4 + +#define AD74115_CONV_TIME_US 1000000 + +enum ad74115_dac_ch { + AD74115_DAC_CH_MAIN, + AD74115_DAC_CH_COMPARATOR, +}; + +enum ad74115_adc_ch { + AD74115_ADC_CH_CONV1, + AD74115_ADC_CH_CONV2, + AD74115_ADC_CH_NUM +}; + +enum ad74115_ch_func { + AD74115_CH_FUNC_HIGH_IMPEDANCE, + AD74115_CH_FUNC_VOLTAGE_OUTPUT, + AD74115_CH_FUNC_CURRENT_OUTPUT, + AD74115_CH_FUNC_VOLTAGE_INPUT, + AD74115_CH_FUNC_CURRENT_INPUT_EXT_POWER, + AD74115_CH_FUNC_CURRENT_INPUT_LOOP_POWER, + AD74115_CH_FUNC_2_WIRE_RESISTANCE_INPUT, + AD74115_CH_FUNC_3_4_WIRE_RESISTANCE_INPUT, + AD74115_CH_FUNC_DIGITAL_INPUT_LOGIC, + AD74115_CH_FUNC_DIGITAL_INPUT_LOOP_POWER, + AD74115_CH_FUNC_CURRENT_OUTPUT_HART, + AD74115_CH_FUNC_CURRENT_INPUT_EXT_POWER_HART, + AD74115_CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART, + AD74115_CH_FUNC_MAX = AD74115_CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART, + AD74115_CH_FUNC_NUM +}; + +enum 
ad74115_adc_range { + AD74115_ADC_RANGE_12V, + AD74115_ADC_RANGE_12V_BIPOLAR, + AD74115_ADC_RANGE_2_5V_BIPOLAR, + AD74115_ADC_RANGE_2_5V_NEG, + AD74115_ADC_RANGE_2_5V, + AD74115_ADC_RANGE_0_625V, + AD74115_ADC_RANGE_104MV_BIPOLAR, + AD74115_ADC_RANGE_12V_OTHER, + AD74115_ADC_RANGE_MAX = AD74115_ADC_RANGE_12V_OTHER, + AD74115_ADC_RANGE_NUM +}; + +enum ad74115_adc_conv_seq { + AD74115_ADC_CONV_SEQ_STANDBY = 0b00, + AD74115_ADC_CONV_SEQ_SINGLE = 0b01, + AD74115_ADC_CONV_SEQ_CONTINUOUS = 0b10, +}; + +enum ad74115_din_threshold_mode { + AD74115_DIN_THRESHOLD_MODE_AVDD, + AD74115_DIN_THRESHOLD_MODE_FIXED, + AD74115_DIN_THRESHOLD_MODE_MAX = AD74115_DIN_THRESHOLD_MODE_FIXED, +}; + +enum ad74115_slew_mode { + AD74115_SLEW_MODE_DISABLED, + AD74115_SLEW_MODE_LINEAR, + AD74115_SLEW_MODE_HART, +}; + +enum ad74115_slew_step { + AD74115_SLEW_STEP_0_8_PERCENT, + AD74115_SLEW_STEP_1_5_PERCENT, + AD74115_SLEW_STEP_6_1_PERCENT, + AD74115_SLEW_STEP_22_2_PERCENT, +}; + +enum ad74115_slew_rate { + AD74115_SLEW_RATE_4KHZ, + AD74115_SLEW_RATE_64KHZ, + AD74115_SLEW_RATE_150KHZ, + AD74115_SLEW_RATE_240KHZ, +}; + +enum ad74115_gpio_config { + AD74115_GPIO_CONFIG_OUTPUT_BUFFERED = 0b010, + AD74115_GPIO_CONFIG_INPUT = 0b011, +}; + +enum ad74115_gpio_mode { + AD74115_GPIO_MODE_LOGIC = 1, + AD74115_GPIO_MODE_SPECIAL = 2, +}; + +struct ad74115_channels { + struct iio_chan_spec *channels; + unsigned int num_channels; +}; + +struct ad74115_state { + struct spi_device *spi; + struct regmap *regmap; + struct iio_trigger *trig; + struct regulator *avdd; + + /* + * Synchronize consecutive operations when doing a one-shot + * conversion and when updating the ADC samples SPI message. + */ + struct mutex lock; + struct gpio_chip gc; + struct gpio_chip comp_gc; + int irq; + + unsigned int avdd_mv; + unsigned long gpio_valid_mask; + bool dac_bipolar; + bool dac_hart_slew; + bool rtd_mode_4_wire; + enum ad74115_ch_func ch_func; + enum ad74115_din_threshold_mode din_threshold_mode; + + struct completion adc_data_completion; + struct spi_message adc_samples_msg; + struct spi_transfer adc_samples_xfer[AD74115_ADC_CH_NUM + 1]; + + /* + * DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. 
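+	 *
+	 * An illustrative failure mode: if an ordinary field shared a
+	 * cache line with these buffers, the cache invalidation done
+	 * around a DMA'd SPI transfer could discard a concurrent CPU
+	 * update of that field, hence grouping the buffers at the end
+	 * of the struct and aligning the first one.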
+ */ + u8 reg_tx_buf[AD74115_FRAME_SIZE] __aligned(IIO_DMA_MINALIGN); + u8 reg_rx_buf[AD74115_FRAME_SIZE]; + u8 adc_samples_tx_buf[AD74115_FRAME_SIZE * AD74115_ADC_CH_NUM]; + u8 adc_samples_rx_buf[AD74115_FRAME_SIZE * AD74115_ADC_CH_NUM]; +}; + +struct ad74115_fw_prop { + const char *name; + bool is_boolean; + bool negate; + unsigned int max; + unsigned int reg; + unsigned int mask; + const unsigned int *lookup_tbl; + unsigned int lookup_tbl_len; +}; + +#define AD74115_FW_PROP(_name, _max, _reg, _mask) \ +{ \ + .name = (_name), \ + .max = (_max), \ + .reg = (_reg), \ + .mask = (_mask), \ +} + +#define AD74115_FW_PROP_TBL(_name, _tbl, _reg, _mask) \ +{ \ + .name = (_name), \ + .reg = (_reg), \ + .mask = (_mask), \ + .lookup_tbl = (_tbl), \ + .lookup_tbl_len = ARRAY_SIZE(_tbl), \ +} + +#define AD74115_FW_PROP_BOOL(_name, _reg, _mask) \ +{ \ + .name = (_name), \ + .is_boolean = true, \ + .reg = (_reg), \ + .mask = (_mask), \ +} + +#define AD74115_FW_PROP_BOOL_NEG(_name, _reg, _mask) \ +{ \ + .name = (_name), \ + .is_boolean = true, \ + .negate = true, \ + .reg = (_reg), \ + .mask = (_mask), \ +} + +static const int ad74115_dac_rate_tbl[] = { + 0, + 4 * 8, + 4 * 15, + 4 * 61, + 4 * 222, + 64 * 8, + 64 * 15, + 64 * 61, + 64 * 222, + 150 * 8, + 150 * 15, + 150 * 61, + 150 * 222, + 240 * 8, + 240 * 15, + 240 * 61, + 240 * 222, +}; + +static const unsigned int ad74115_dac_rate_step_tbl[][3] = { + { AD74115_SLEW_MODE_DISABLED }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_0_8_PERCENT, AD74115_SLEW_RATE_4KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_1_5_PERCENT, AD74115_SLEW_RATE_4KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_6_1_PERCENT, AD74115_SLEW_RATE_4KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_22_2_PERCENT, AD74115_SLEW_RATE_4KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_0_8_PERCENT, AD74115_SLEW_RATE_64KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_1_5_PERCENT, AD74115_SLEW_RATE_64KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_6_1_PERCENT, AD74115_SLEW_RATE_64KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_22_2_PERCENT, AD74115_SLEW_RATE_64KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_0_8_PERCENT, AD74115_SLEW_RATE_150KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_1_5_PERCENT, AD74115_SLEW_RATE_150KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_6_1_PERCENT, AD74115_SLEW_RATE_150KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_22_2_PERCENT, AD74115_SLEW_RATE_150KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_0_8_PERCENT, AD74115_SLEW_RATE_240KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_1_5_PERCENT, AD74115_SLEW_RATE_240KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_6_1_PERCENT, AD74115_SLEW_RATE_240KHZ }, + { AD74115_SLEW_MODE_LINEAR, AD74115_SLEW_STEP_22_2_PERCENT, AD74115_SLEW_RATE_240KHZ }, +}; + +static const unsigned int ad74115_rtd_excitation_current_ua_tbl[] = { + 250, 500, 750, 1000 +}; + +static const unsigned int ad74115_burnout_current_na_tbl[] = { + 0, 50, 0, 500, 1000, 0, 10000, 0 +}; + +static const unsigned int ad74115_viout_burnout_current_na_tbl[] = { + 0, 0, 0, 0, 1000, 0, 10000, 0 +}; + +static const unsigned int ad74115_gpio_mode_tbl[] = { + 0, 0, 0, 1, 2, 3, 4, 5 +}; + +static const unsigned int ad74115_adc_conv_rate_tbl[] = { + 10, 20, 1200, 4800, 9600 +}; + +static const unsigned int ad74115_debounce_tbl[] = { + 0, 13, 18, 24, 32, 42, 56, 75, + 100, 130, 180, 240, 320, 420, 560, 750, + 1000, 1300, 1800, 2400, 3200, 4200, 5600, 7500, + 10000, 13000, 
18000, 24000, 32000, 42000, 56000, 75000, +}; + +static const unsigned int ad74115_adc_ch_data_regs_tbl[] = { + [AD74115_ADC_CH_CONV1] = 0x44, + [AD74115_ADC_CH_CONV2] = 0x46, +}; + +static const unsigned int ad74115_adc_ch_en_bit_tbl[] = { + [AD74115_ADC_CH_CONV1] = BIT(0), + [AD74115_ADC_CH_CONV2] = BIT(1), +}; + +static const bool ad74115_adc_bipolar_tbl[AD74115_ADC_RANGE_NUM] = { + [AD74115_ADC_RANGE_12V_BIPOLAR] = true, + [AD74115_ADC_RANGE_2_5V_BIPOLAR] = true, + [AD74115_ADC_RANGE_104MV_BIPOLAR] = true, +}; + +static const unsigned int ad74115_adc_conv_mul_tbl[AD74115_ADC_RANGE_NUM] = { + [AD74115_ADC_RANGE_12V] = 12000, + [AD74115_ADC_RANGE_12V_BIPOLAR] = 24000, + [AD74115_ADC_RANGE_2_5V_BIPOLAR] = 5000, + [AD74115_ADC_RANGE_2_5V_NEG] = 2500, + [AD74115_ADC_RANGE_2_5V] = 2500, + [AD74115_ADC_RANGE_0_625V] = 625, + [AD74115_ADC_RANGE_104MV_BIPOLAR] = 208, + [AD74115_ADC_RANGE_12V_OTHER] = 12000, +}; + +static const unsigned int ad74115_adc_gain_tbl[AD74115_ADC_RANGE_NUM][2] = { + [AD74115_ADC_RANGE_12V] = { 5, 24 }, + [AD74115_ADC_RANGE_12V_BIPOLAR] = { 5, 24 }, + [AD74115_ADC_RANGE_2_5V_BIPOLAR] = { 1, 1 }, + [AD74115_ADC_RANGE_2_5V_NEG] = { 1, 1 }, + [AD74115_ADC_RANGE_2_5V] = { 1, 1 }, + [AD74115_ADC_RANGE_0_625V] = { 4, 1 }, + [AD74115_ADC_RANGE_104MV_BIPOLAR] = { 24, 1 }, + [AD74115_ADC_RANGE_12V_OTHER] = { 5, 24 }, +}; + +static const int ad74115_adc_range_tbl[AD74115_ADC_RANGE_NUM][2] = { + [AD74115_ADC_RANGE_12V] = { 0, 12000000 }, + [AD74115_ADC_RANGE_12V_BIPOLAR] = { -12000000, 12000000 }, + [AD74115_ADC_RANGE_2_5V_BIPOLAR] = { -2500000, 2500000 }, + [AD74115_ADC_RANGE_2_5V_NEG] = { -2500000, 0 }, + [AD74115_ADC_RANGE_2_5V] = { 0, 2500000 }, + [AD74115_ADC_RANGE_0_625V] = { 0, 625000 }, + [AD74115_ADC_RANGE_104MV_BIPOLAR] = { -104000, 104000 }, + [AD74115_ADC_RANGE_12V_OTHER] = { 0, 12000000 }, +}; + +static int _ad74115_find_tbl_index(const unsigned int *tbl, unsigned int tbl_len, + unsigned int val, unsigned int *index) +{ + unsigned int i; + + for (i = 0; i < tbl_len; i++) + if (val == tbl[i]) { + *index = i; + return 0; + } + + return -EINVAL; +} + +#define ad74115_find_tbl_index(tbl, val, index) \ + _ad74115_find_tbl_index(tbl, ARRAY_SIZE(tbl), val, index) + +static int ad74115_crc(u8 *buf) +{ + return crc8(ad74115_crc8_table, buf, 3, 0); +} + +static void ad74115_format_reg_write(u8 reg, u16 val, u8 *buf) +{ + buf[0] = reg; + put_unaligned_be16(val, &buf[1]); + buf[3] = ad74115_crc(buf); +} + +static int ad74115_reg_write(void *context, unsigned int reg, unsigned int val) +{ + struct ad74115_state *st = context; + + ad74115_format_reg_write(reg, val, st->reg_tx_buf); + + return spi_write(st->spi, st->reg_tx_buf, AD74115_FRAME_SIZE); +} + +static int ad74115_crc_check(struct ad74115_state *st, u8 *buf) +{ + struct device *dev = &st->spi->dev; + u8 expected_crc = ad74115_crc(buf); + + if (buf[3] != expected_crc) { + dev_err(dev, "Bad CRC %02x for %02x%02x%02x, expected %02x\n", + buf[3], buf[0], buf[1], buf[2], expected_crc); + return -EINVAL; + } + + return 0; +} + +static int ad74115_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + struct ad74115_state *st = context; + struct spi_transfer reg_read_xfer[] = { + { + .tx_buf = st->reg_tx_buf, + .len = sizeof(st->reg_tx_buf), + .cs_change = 1, + }, + { + .rx_buf = st->reg_rx_buf, + .len = sizeof(st->reg_rx_buf), + }, + }; + int ret; + + ad74115_format_reg_write(AD74115_READ_SELECT_REG, reg, st->reg_tx_buf); + + ret = spi_sync_transfer(st->spi, reg_read_xfer, ARRAY_SIZE(reg_read_xfer)); + if (ret) + 
return ret;
+
+	ret = ad74115_crc_check(st, st->reg_rx_buf);
+	if (ret)
+		return ret;
+
+	*val = get_unaligned_be16(&st->reg_rx_buf[1]);
+
+	return 0;
+}
+
+static const struct regmap_config ad74115_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 16,
+	.reg_read = ad74115_reg_read,
+	.reg_write = ad74115_reg_write,
+};
+
+static int ad74115_gpio_config_set(struct ad74115_state *st, unsigned int offset,
+				   enum ad74115_gpio_config cfg)
+{
+	return regmap_update_bits(st->regmap, AD74115_GPIO_CONFIG_X_REG(offset),
+				  AD74115_GPIO_CONFIG_SELECT_MASK,
+				  FIELD_PREP(AD74115_GPIO_CONFIG_SELECT_MASK, cfg));
+}
+
+static int ad74115_gpio_init_valid_mask(struct gpio_chip *gc,
+					unsigned long *valid_mask,
+					unsigned int ngpios)
+{
+	struct ad74115_state *st = gpiochip_get_data(gc);
+
+	*valid_mask = st->gpio_valid_mask;
+
+	return 0;
+}
+
+static int ad74115_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+	struct ad74115_state *st = gpiochip_get_data(gc);
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(st->regmap, AD74115_GPIO_CONFIG_X_REG(offset), &val);
+	if (ret)
+		return ret;
+
+	return FIELD_GET(AD74115_GPIO_CONFIG_SELECT_MASK, val) == AD74115_GPIO_CONFIG_INPUT;
+}
+
+static int ad74115_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+	struct ad74115_state *st = gpiochip_get_data(gc);
+
+	return ad74115_gpio_config_set(st, offset, AD74115_GPIO_CONFIG_INPUT);
+}
+
+static int ad74115_gpio_direction_output(struct gpio_chip *gc, unsigned int offset,
+					 int value)
+{
+	struct ad74115_state *st = gpiochip_get_data(gc);
+
+	return ad74115_gpio_config_set(st, offset, AD74115_GPIO_CONFIG_OUTPUT_BUFFERED);
+}
+
+static int ad74115_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+	struct ad74115_state *st = gpiochip_get_data(gc);
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(st->regmap, AD74115_GPIO_CONFIG_X_REG(offset), &val);
+	if (ret)
+		return ret;
+
+	return FIELD_GET(AD74115_GPIO_CONFIG_GPI_DATA, val);
+}
+
+static void ad74115_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+	struct ad74115_state *st = gpiochip_get_data(gc);
+	struct device *dev = &st->spi->dev;
+	int ret;
+
+	ret = regmap_update_bits(st->regmap, AD74115_GPIO_CONFIG_X_REG(offset),
+				 AD74115_GPIO_CONFIG_GPO_DATA,
+				 FIELD_PREP(AD74115_GPIO_CONFIG_GPO_DATA, value));
+	if (ret)
+		dev_err(dev, "Failed to set GPIO %u output value, err: %d\n",
+			offset, ret);
+}
+
+static int ad74115_set_comp_debounce(struct ad74115_state *st, unsigned int val)
+{
+	unsigned int len = ARRAY_SIZE(ad74115_debounce_tbl);
+	unsigned int i;
+
+	for (i = 0; i < len; i++)
+		if (val <= ad74115_debounce_tbl[i])
+			break;
+
+	if (i == len)
+		i = len - 1;
+
+	return regmap_update_bits(st->regmap, AD74115_DIN_CONFIG1_REG,
+				  AD74115_DIN_DEBOUNCE_MASK,
+				  FIELD_PREP(AD74115_DIN_DEBOUNCE_MASK, i));
+}
+
+static int ad74115_comp_gpio_get_direction(struct gpio_chip *chip,
+					   unsigned int offset)
+{
+	return GPIO_LINE_DIRECTION_IN;
+}
+
+static int ad74115_comp_gpio_set_config(struct gpio_chip *chip,
+					unsigned int offset,
+					unsigned long config)
+{
+	struct ad74115_state *st = gpiochip_get_data(chip);
+	u32 param = pinconf_to_config_param(config);
+	u32 arg = pinconf_to_config_argument(config);
+
+	switch (param) {
+	case PIN_CONFIG_INPUT_DEBOUNCE:
+		return ad74115_set_comp_debounce(st, arg);
+	default:
+		return -ENOTSUPP;
+	}
+}
+
+static int ad74115_comp_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+	struct ad74115_state *st = gpiochip_get_data(chip);
+	unsigned int val;
+	int ret;
+
+	ret
= regmap_read(st->regmap, AD74115_DIN_COMP_OUT_REG, &val); + if (ret) + return ret; + + return !!val; +} + +static irqreturn_t ad74115_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct ad74115_state *st = iio_priv(indio_dev); + int ret; + + ret = spi_sync(st->spi, &st->adc_samples_msg); + if (ret) + goto out; + + iio_push_to_buffers(indio_dev, st->adc_samples_rx_buf); + +out: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static irqreturn_t ad74115_adc_data_interrupt(int irq, void *data) +{ + struct iio_dev *indio_dev = data; + struct ad74115_state *st = iio_priv(indio_dev); + + if (iio_buffer_enabled(indio_dev)) + iio_trigger_poll(st->trig); + else + complete(&st->adc_data_completion); + + return IRQ_HANDLED; +} + +static int ad74115_set_adc_ch_en(struct ad74115_state *st, + enum ad74115_adc_ch channel, bool status) +{ + unsigned int mask = ad74115_adc_ch_en_bit_tbl[channel]; + + return regmap_update_bits(st->regmap, AD74115_ADC_CONV_CTRL_REG, mask, + status ? mask : 0); +} + +static int ad74115_set_adc_conv_seq(struct ad74115_state *st, + enum ad74115_adc_conv_seq conv_seq) +{ + return regmap_update_bits(st->regmap, AD74115_ADC_CONV_CTRL_REG, + AD74115_ADC_CONV_SEQ_MASK, + FIELD_PREP(AD74115_ADC_CONV_SEQ_MASK, conv_seq)); +} + +static int ad74115_update_scan_mode(struct iio_dev *indio_dev, + const unsigned long *active_scan_mask) +{ + struct ad74115_state *st = iio_priv(indio_dev); + struct spi_transfer *xfer = st->adc_samples_xfer; + u8 *rx_buf = st->adc_samples_rx_buf; + u8 *tx_buf = st->adc_samples_tx_buf; + unsigned int i; + int ret = 0; + + mutex_lock(&st->lock); + + spi_message_init(&st->adc_samples_msg); + + for_each_clear_bit(i, active_scan_mask, AD74115_ADC_CH_NUM) { + ret = ad74115_set_adc_ch_en(st, i, false); + if (ret) + goto out; + } + + /* + * The read select register is used to select which register's value + * will be sent by the slave on the next SPI frame. + * + * Create an SPI message that, on each step, writes to the read select + * register to select the ADC result of the next enabled channel, and + * reads the ADC result of the previous enabled channel. 
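+	 *
+	 * For N enabled channels this produces N + 1 transfers: the first
+	 * transfer has nothing to read back yet (its rx_buf stays NULL
+	 * below), and a final read-only transfer is appended after the
+	 * loop to collect the last channel's result.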
+	 *
+	 * Example:
+	 * W: [WCH1] [WCH2] [WCH3] [WCH4] [    ]
+	 * R: [    ] [RCH1] [RCH2] [RCH3] [RCH4]
+	 */
+	for_each_set_bit(i, active_scan_mask, AD74115_ADC_CH_NUM) {
+		ret = ad74115_set_adc_ch_en(st, i, true);
+		if (ret)
+			goto out;
+
+		if (xfer == st->adc_samples_xfer)
+			xfer->rx_buf = NULL;
+		else
+			xfer->rx_buf = rx_buf;
+
+		xfer->tx_buf = tx_buf;
+		xfer->len = AD74115_FRAME_SIZE;
+		xfer->cs_change = 1;
+
+		ad74115_format_reg_write(AD74115_READ_SELECT_REG,
+					 ad74115_adc_ch_data_regs_tbl[i], tx_buf);
+
+		spi_message_add_tail(xfer, &st->adc_samples_msg);
+
+		tx_buf += AD74115_FRAME_SIZE;
+		if (xfer != st->adc_samples_xfer)
+			rx_buf += AD74115_FRAME_SIZE;
+		xfer++;
+	}
+
+	xfer->rx_buf = rx_buf;
+	xfer->tx_buf = NULL;
+	xfer->len = AD74115_FRAME_SIZE;
+	xfer->cs_change = 0;
+
+	spi_message_add_tail(xfer, &st->adc_samples_msg);
+
+out:
+	mutex_unlock(&st->lock);
+
+	return ret;
+}
+
+static int ad74115_buffer_postenable(struct iio_dev *indio_dev)
+{
+	struct ad74115_state *st = iio_priv(indio_dev);
+
+	return ad74115_set_adc_conv_seq(st, AD74115_ADC_CONV_SEQ_CONTINUOUS);
+}
+
+static int ad74115_buffer_predisable(struct iio_dev *indio_dev)
+{
+	struct ad74115_state *st = iio_priv(indio_dev);
+	unsigned int i;
+	int ret;
+
+	mutex_lock(&st->lock);
+
+	ret = ad74115_set_adc_conv_seq(st, AD74115_ADC_CONV_SEQ_STANDBY);
+	if (ret)
+		goto out;
+
+	/*
+	 * update_scan_mode() is not called in the disable path, disable all
+	 * channels here.
+	 */
+	for (i = 0; i < AD74115_ADC_CH_NUM; i++) {
+		ret = ad74115_set_adc_ch_en(st, i, false);
+		if (ret)
+			goto out;
+	}
+
+out:
+	mutex_unlock(&st->lock);
+
+	return ret;
+}
+
+static const struct iio_buffer_setup_ops ad74115_buffer_ops = {
+	.postenable = &ad74115_buffer_postenable,
+	.predisable = &ad74115_buffer_predisable,
+};
+
+static const struct iio_trigger_ops ad74115_trigger_ops = {
+	.validate_device = iio_trigger_validate_own_device,
+};
+
+static int ad74115_get_adc_rate(struct ad74115_state *st,
+				enum ad74115_adc_ch channel, int *val)
+{
+	unsigned int i;
+	int ret;
+
+	ret = regmap_read(st->regmap, AD74115_ADC_CONFIG_REG, &i);
+	if (ret)
+		return ret;
+
+	if (channel == AD74115_ADC_CH_CONV1)
+		i = FIELD_GET(AD74115_ADC_CONFIG_CONV1_RATE_MASK, i);
+	else
+		i = FIELD_GET(AD74115_ADC_CONFIG_CONV2_RATE_MASK, i);
+
+	*val = ad74115_adc_conv_rate_tbl[i];
+
+	return IIO_VAL_INT;
+}
+
+static int _ad74115_get_adc_code(struct ad74115_state *st,
+				 enum ad74115_adc_ch channel, int *val)
+{
+	unsigned int uval;
+	int ret;
+
+	reinit_completion(&st->adc_data_completion);
+
+	ret = ad74115_set_adc_ch_en(st, channel, true);
+	if (ret)
+		return ret;
+
+	ret = ad74115_set_adc_conv_seq(st, AD74115_ADC_CONV_SEQ_SINGLE);
+	if (ret)
+		return ret;
+
+	if (st->irq) {
+		ret = wait_for_completion_timeout(&st->adc_data_completion,
+						  msecs_to_jiffies(1000));
+		if (!ret)
+			return -ETIMEDOUT;
+	} else {
+		unsigned int regval, wait_time;
+		int rate;
+
+		ret = ad74115_get_adc_rate(st, channel, &rate);
+		if (ret < 0)
+			return ret;
+
+		wait_time = DIV_ROUND_CLOSEST(AD74115_CONV_TIME_US, rate);
+
+		ret = regmap_read_poll_timeout(st->regmap, AD74115_LIVE_STATUS_REG,
+					       regval, regval & AD74115_ADC_DATA_RDY_MASK,
+					       wait_time, 5 * wait_time);
+		if (ret)
+			return ret;
+
+		/*
+		 * The ADC_DATA_RDY bit is W1C.
+		 * See datasheet page 98, Table 62. Bit Descriptions for
+		 * LIVE_STATUS.
+		 * Although the datasheet mentions that the bit will auto-clear
+		 * when writing to the ADC_CONV_CTRL register, this does not
+		 * seem to happen.
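+		 *
+		 * Hence the explicit write-one-to-clear below.
+		 * regmap_write_bits() is used rather than
+		 * regmap_update_bits(): the bit reads back as set, so an
+		 * update would see old and new values match, treat the
+		 * write as a no-op and skip it.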
+ */ + ret = regmap_write_bits(st->regmap, AD74115_LIVE_STATUS_REG, + AD74115_ADC_DATA_RDY_MASK, + FIELD_PREP(AD74115_ADC_DATA_RDY_MASK, 1)); + if (ret) + return ret; + } + + ret = regmap_read(st->regmap, ad74115_adc_ch_data_regs_tbl[channel], &uval); + if (ret) + return ret; + + ret = ad74115_set_adc_conv_seq(st, AD74115_ADC_CONV_SEQ_STANDBY); + if (ret) + return ret; + + ret = ad74115_set_adc_ch_en(st, channel, false); + if (ret) + return ret; + + *val = uval; + + return IIO_VAL_INT; +} + +static int ad74115_get_adc_code(struct iio_dev *indio_dev, + enum ad74115_adc_ch channel, int *val) +{ + struct ad74115_state *st = iio_priv(indio_dev); + int ret; + + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + + mutex_lock(&st->lock); + ret = _ad74115_get_adc_code(st, channel, val); + mutex_unlock(&st->lock); + + iio_device_release_direct_mode(indio_dev); + + return ret; +} + +static int ad74115_adc_code_to_resistance(int code, int *val, int *val2) +{ + if (code == AD74115_ADC_CODE_MAX) + code--; + + *val = code * AD74115_REF_RESISTOR_OHMS; + *val2 = AD74115_ADC_CODE_MAX - code; + + return IIO_VAL_FRACTIONAL; +} + +static int ad74115_set_dac_code(struct ad74115_state *st, + enum ad74115_dac_ch channel, int val) +{ + if (val < 0) + return -EINVAL; + + if (channel == AD74115_DAC_CH_COMPARATOR) { + if (val > AD74115_COMP_THRESH_MAX) + return -EINVAL; + + return regmap_update_bits(st->regmap, AD74115_DIN_CONFIG2_REG, + AD74115_COMP_THRESH_MASK, + FIELD_PREP(AD74115_COMP_THRESH_MASK, val)); + } + + if (val > AD74115_DAC_CODE_MAX) + return -EINVAL; + + return regmap_write(st->regmap, AD74115_DAC_CODE_REG, val); +} + +static int ad74115_get_dac_code(struct ad74115_state *st, + enum ad74115_dac_ch channel, int *val) +{ + unsigned int uval; + int ret; + + if (channel == AD74115_DAC_CH_COMPARATOR) + return -EINVAL; + + ret = regmap_read(st->regmap, AD74115_DAC_ACTIVE_REG, &uval); + if (ret) + return ret; + + *val = uval; + + return IIO_VAL_INT; +} + +static int ad74115_set_adc_rate(struct ad74115_state *st, + enum ad74115_adc_ch channel, int val) +{ + unsigned int i; + int ret; + + ret = ad74115_find_tbl_index(ad74115_adc_conv_rate_tbl, val, &i); + if (ret) + return ret; + + if (channel == AD74115_ADC_CH_CONV1) + return regmap_update_bits(st->regmap, AD74115_ADC_CONFIG_REG, + AD74115_ADC_CONFIG_CONV1_RATE_MASK, + FIELD_PREP(AD74115_ADC_CONFIG_CONV1_RATE_MASK, i)); + + return regmap_update_bits(st->regmap, AD74115_ADC_CONFIG_REG, + AD74115_ADC_CONFIG_CONV2_RATE_MASK, + FIELD_PREP(AD74115_ADC_CONFIG_CONV2_RATE_MASK, i)); +} + +static int ad74115_get_dac_rate(struct ad74115_state *st, int *val) +{ + unsigned int i, en_val, step_val, rate_val, tmp; + int ret; + + ret = regmap_read(st->regmap, AD74115_OUTPUT_CONFIG_REG, &tmp); + if (ret) + return ret; + + en_val = FIELD_GET(AD74115_OUTPUT_SLEW_EN_MASK, tmp); + step_val = FIELD_GET(AD74115_OUTPUT_SLEW_LIN_STEP_MASK, tmp); + rate_val = FIELD_GET(AD74115_OUTPUT_SLEW_LIN_RATE_MASK, tmp); + + for (i = 0; i < ARRAY_SIZE(ad74115_dac_rate_step_tbl); i++) + if (en_val == ad74115_dac_rate_step_tbl[i][0] && + step_val == ad74115_dac_rate_step_tbl[i][1] && + rate_val == ad74115_dac_rate_step_tbl[i][2]) + break; + + if (i == ARRAY_SIZE(ad74115_dac_rate_step_tbl)) + return -EINVAL; + + *val = ad74115_dac_rate_tbl[i]; + + return IIO_VAL_INT; +} + +static int ad74115_set_dac_rate(struct ad74115_state *st, int val) +{ + unsigned int i, en_val, step_val, rate_val, mask, tmp; + int ret; + + ret = ad74115_find_tbl_index(ad74115_dac_rate_tbl, val, &i); + 
if (ret) + return ret; + + en_val = ad74115_dac_rate_step_tbl[i][0]; + step_val = ad74115_dac_rate_step_tbl[i][1]; + rate_val = ad74115_dac_rate_step_tbl[i][2]; + + mask = AD74115_OUTPUT_SLEW_EN_MASK; + mask |= AD74115_OUTPUT_SLEW_LIN_STEP_MASK; + mask |= AD74115_OUTPUT_SLEW_LIN_RATE_MASK; + + tmp = FIELD_PREP(AD74115_OUTPUT_SLEW_EN_MASK, en_val); + tmp |= FIELD_PREP(AD74115_OUTPUT_SLEW_LIN_STEP_MASK, step_val); + tmp |= FIELD_PREP(AD74115_OUTPUT_SLEW_LIN_RATE_MASK, rate_val); + + return regmap_update_bits(st->regmap, AD74115_OUTPUT_CONFIG_REG, mask, tmp); +} + +static int ad74115_get_dac_scale(struct ad74115_state *st, + struct iio_chan_spec const *chan, + int *val, int *val2) +{ + if (chan->channel == AD74115_DAC_CH_MAIN) { + if (chan->type == IIO_VOLTAGE) { + *val = AD74115_DAC_VOLTAGE_MAX; + + if (st->dac_bipolar) + *val *= 2; + + } else { + *val = AD74115_DAC_CURRENT_MAX; + } + + *val2 = AD74115_DAC_CODE_MAX; + } else { + if (st->din_threshold_mode == AD74115_DIN_THRESHOLD_MODE_AVDD) { + *val = 196 * st->avdd_mv; + *val2 = 10 * AD74115_COMP_THRESH_MAX; + } else { + *val = 49000; + *val2 = AD74115_COMP_THRESH_MAX; + } + } + + return IIO_VAL_FRACTIONAL; +} + +static int ad74115_get_dac_offset(struct ad74115_state *st, + struct iio_chan_spec const *chan, int *val) +{ + if (chan->channel == AD74115_DAC_CH_MAIN) { + if (chan->type == IIO_VOLTAGE && st->dac_bipolar) + *val = -AD74115_DAC_CODE_HALF; + else + *val = 0; + } else { + if (st->din_threshold_mode == AD74115_DIN_THRESHOLD_MODE_AVDD) + *val = -48; + else + *val = -38; + } + + return IIO_VAL_INT; +} + +static int ad74115_get_adc_range(struct ad74115_state *st, + enum ad74115_adc_ch channel, unsigned int *val) +{ + int ret; + + ret = regmap_read(st->regmap, AD74115_ADC_CONFIG_REG, val); + if (ret) + return ret; + + if (channel == AD74115_ADC_CH_CONV1) + *val = FIELD_GET(AD74115_ADC_CONFIG_CONV1_RANGE_MASK, *val); + else + *val = FIELD_GET(AD74115_ADC_CONFIG_CONV2_RANGE_MASK, *val); + + return 0; +} + +static int ad74115_get_adc_resistance_scale(struct ad74115_state *st, + unsigned int range, + int *val, int *val2) +{ + *val = ad74115_adc_gain_tbl[range][1] * AD74115_REF_RESISTOR_OHMS; + *val2 = ad74115_adc_gain_tbl[range][0]; + + if (ad74115_adc_bipolar_tbl[range]) + *val2 *= AD74115_ADC_CODE_HALF; + else + *val2 *= AD74115_ADC_CODE_MAX; + + return IIO_VAL_FRACTIONAL; +} + +static int ad74115_get_adc_scale(struct ad74115_state *st, + struct iio_chan_spec const *chan, + int *val, int *val2) +{ + unsigned int range; + int ret; + + ret = ad74115_get_adc_range(st, chan->channel, &range); + if (ret) + return ret; + + if (chan->type == IIO_RESISTANCE) + return ad74115_get_adc_resistance_scale(st, range, val, val2); + + *val = ad74115_adc_conv_mul_tbl[range]; + *val2 = AD74115_ADC_CODE_MAX; + + if (chan->type == IIO_CURRENT) + *val2 *= AD74115_SENSE_RESISTOR_OHMS; + + return IIO_VAL_FRACTIONAL; +} + +static int ad74115_get_adc_resistance_offset(struct ad74115_state *st, + unsigned int range, + int *val, int *val2) +{ + unsigned int d = 10 * AD74115_REF_RESISTOR_OHMS + * ad74115_adc_gain_tbl[range][1]; + + *val = 5; + + if (ad74115_adc_bipolar_tbl[range]) + *val -= AD74115_ADC_CODE_HALF; + + *val *= d; + + if (!st->rtd_mode_4_wire) { + /* Add 0.2 Ohm to the final result for 3-wire RTD. 
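+		 * The term below is pre-scaled so that, once multiplied
+		 * by the resistance scale
+		 * (gain1 * R_ref) / (gain0 * span), it collapses to
+		 * exactly 2/10 = 0.2 Ohm:
+		 *   (2 * gain0 * span) / (10 * R_ref * gain1) * scale = 0.2
+		 * where span is the same unipolar or bipolar code range
+		 * used in ad74115_get_adc_resistance_scale().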
*/ + unsigned int v = 2 * ad74115_adc_gain_tbl[range][0]; + + if (ad74115_adc_bipolar_tbl[range]) + v *= AD74115_ADC_CODE_HALF; + else + v *= AD74115_ADC_CODE_MAX; + + *val += v; + } + + *val2 = d; + + return IIO_VAL_FRACTIONAL; +} + +static int ad74115_get_adc_offset(struct ad74115_state *st, + struct iio_chan_spec const *chan, + int *val, int *val2) +{ + unsigned int range; + int ret; + + ret = ad74115_get_adc_range(st, chan->channel, &range); + if (ret) + return ret; + + if (chan->type == IIO_RESISTANCE) + return ad74115_get_adc_resistance_offset(st, range, val, val2); + + if (ad74115_adc_bipolar_tbl[range]) + *val = -AD74115_ADC_CODE_HALF; + else if (range == AD74115_ADC_RANGE_2_5V_NEG) + *val = -AD74115_ADC_CODE_MAX; + else + *val = 0; + + return IIO_VAL_INT; +} + +static int ad74115_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long info) +{ + struct ad74115_state *st = iio_priv(indio_dev); + int ret; + + switch (info) { + case IIO_CHAN_INFO_RAW: + if (chan->output) + return ad74115_get_dac_code(st, chan->channel, val); + + return ad74115_get_adc_code(indio_dev, chan->channel, val); + case IIO_CHAN_INFO_PROCESSED: + ret = ad74115_get_adc_code(indio_dev, chan->channel, val); + if (ret) + return ret; + + return ad74115_adc_code_to_resistance(*val, val, val2); + case IIO_CHAN_INFO_SCALE: + if (chan->output) + return ad74115_get_dac_scale(st, chan, val, val2); + + return ad74115_get_adc_scale(st, chan, val, val2); + case IIO_CHAN_INFO_OFFSET: + if (chan->output) + return ad74115_get_dac_offset(st, chan, val); + + return ad74115_get_adc_offset(st, chan, val, val2); + case IIO_CHAN_INFO_SAMP_FREQ: + if (chan->output) + return ad74115_get_dac_rate(st, val); + + return ad74115_get_adc_rate(st, chan->channel, val); + default: + return -EINVAL; + } +} + +static int ad74115_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int val, int val2, + long info) +{ + struct ad74115_state *st = iio_priv(indio_dev); + + switch (info) { + case IIO_CHAN_INFO_RAW: + if (!chan->output) + return -EINVAL; + + return ad74115_set_dac_code(st, chan->channel, val); + case IIO_CHAN_INFO_SAMP_FREQ: + if (chan->output) + return ad74115_set_dac_rate(st, val); + + return ad74115_set_adc_rate(st, chan->channel, val); + default: + return -EINVAL; + } +} + +static int ad74115_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, long info) +{ + switch (info) { + case IIO_CHAN_INFO_SAMP_FREQ: + if (chan->output) { + *vals = ad74115_dac_rate_tbl; + *length = ARRAY_SIZE(ad74115_dac_rate_tbl); + } else { + *vals = ad74115_adc_conv_rate_tbl; + *length = ARRAY_SIZE(ad74115_adc_conv_rate_tbl); + } + + *type = IIO_VAL_INT; + + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + +static int ad74115_reg_access(struct iio_dev *indio_dev, unsigned int reg, + unsigned int writeval, unsigned int *readval) +{ + struct ad74115_state *st = iio_priv(indio_dev); + + if (readval) + return regmap_read(st->regmap, reg, readval); + + return regmap_write(st->regmap, reg, writeval); +} + +static const struct iio_info ad74115_info = { + .read_raw = ad74115_read_raw, + .write_raw = ad74115_write_raw, + .read_avail = ad74115_read_avail, + .update_scan_mode = ad74115_update_scan_mode, + .debugfs_reg_access = ad74115_reg_access, +}; + +#define AD74115_DAC_CHANNEL(_type, index) \ + { \ + .type = (_type), \ + .channel = (index), \ + .indexed = 1, \ + .output = 1, \ + .scan_index = -1, \ + .info_mask_separate = 
BIT(IIO_CHAN_INFO_RAW) \ + | BIT(IIO_CHAN_INFO_SCALE) \ + | BIT(IIO_CHAN_INFO_OFFSET), \ + } + +#define _AD74115_ADC_CHANNEL(_type, index, extra_mask_separate) \ + { \ + .type = (_type), \ + .channel = (index), \ + .indexed = 1, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \ + | BIT(IIO_CHAN_INFO_SAMP_FREQ) \ + | (extra_mask_separate), \ + .info_mask_separate_available = \ + BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .scan_index = index, \ + .scan_type = { \ + .sign = 'u', \ + .realbits = 16, \ + .storagebits = 32, \ + .shift = 8, \ + .endianness = IIO_BE, \ + }, \ + } + +#define AD74115_ADC_CHANNEL(_type, index) \ + _AD74115_ADC_CHANNEL(_type, index, BIT(IIO_CHAN_INFO_SCALE) \ + | BIT(IIO_CHAN_INFO_OFFSET)) + +static struct iio_chan_spec ad74115_voltage_input_channels[] = { + AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV1), + AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), +}; + +static struct iio_chan_spec ad74115_voltage_output_channels[] = { + AD74115_DAC_CHANNEL(IIO_VOLTAGE, AD74115_DAC_CH_MAIN), + AD74115_ADC_CHANNEL(IIO_CURRENT, AD74115_ADC_CH_CONV1), + AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), +}; + +static struct iio_chan_spec ad74115_current_input_channels[] = { + AD74115_ADC_CHANNEL(IIO_CURRENT, AD74115_ADC_CH_CONV1), + AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), +}; + +static struct iio_chan_spec ad74115_current_output_channels[] = { + AD74115_DAC_CHANNEL(IIO_CURRENT, AD74115_DAC_CH_MAIN), + AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV1), + AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), +}; + +static struct iio_chan_spec ad74115_2_wire_resistance_input_channels[] = { + _AD74115_ADC_CHANNEL(IIO_RESISTANCE, AD74115_ADC_CH_CONV1, + BIT(IIO_CHAN_INFO_PROCESSED)), + AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), +}; + +static struct iio_chan_spec ad74115_3_4_wire_resistance_input_channels[] = { + AD74115_ADC_CHANNEL(IIO_RESISTANCE, AD74115_ADC_CH_CONV1), + AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), +}; + +static struct iio_chan_spec ad74115_digital_input_logic_channels[] = { + AD74115_DAC_CHANNEL(IIO_VOLTAGE, AD74115_DAC_CH_COMPARATOR), + AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV1), + AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), +}; + +static struct iio_chan_spec ad74115_digital_input_loop_channels[] = { + AD74115_DAC_CHANNEL(IIO_CURRENT, AD74115_DAC_CH_MAIN), + AD74115_DAC_CHANNEL(IIO_VOLTAGE, AD74115_DAC_CH_COMPARATOR), + AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV1), + AD74115_ADC_CHANNEL(IIO_VOLTAGE, AD74115_ADC_CH_CONV2), +}; + +#define _AD74115_CHANNELS(_channels) \ + { \ + .channels = _channels, \ + .num_channels = ARRAY_SIZE(_channels), \ + } + +#define AD74115_CHANNELS(name) \ + _AD74115_CHANNELS(ad74115_ ## name ## _channels) + +static const struct ad74115_channels ad74115_channels_map[AD74115_CH_FUNC_NUM] = { + [AD74115_CH_FUNC_HIGH_IMPEDANCE] = AD74115_CHANNELS(voltage_input), + [AD74115_CH_FUNC_VOLTAGE_INPUT] = AD74115_CHANNELS(voltage_input), + + [AD74115_CH_FUNC_VOLTAGE_OUTPUT] = AD74115_CHANNELS(voltage_output), + + [AD74115_CH_FUNC_CURRENT_INPUT_EXT_POWER] = AD74115_CHANNELS(current_input), + [AD74115_CH_FUNC_CURRENT_INPUT_LOOP_POWER] = AD74115_CHANNELS(current_input), + [AD74115_CH_FUNC_CURRENT_INPUT_EXT_POWER_HART] = AD74115_CHANNELS(current_input), + [AD74115_CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART] = AD74115_CHANNELS(current_input), + + [AD74115_CH_FUNC_CURRENT_OUTPUT] = AD74115_CHANNELS(current_output), + [AD74115_CH_FUNC_CURRENT_OUTPUT_HART] = 
AD74115_CHANNELS(current_output), + + [AD74115_CH_FUNC_2_WIRE_RESISTANCE_INPUT] = AD74115_CHANNELS(2_wire_resistance_input), + [AD74115_CH_FUNC_3_4_WIRE_RESISTANCE_INPUT] = AD74115_CHANNELS(3_4_wire_resistance_input), + + [AD74115_CH_FUNC_DIGITAL_INPUT_LOGIC] = AD74115_CHANNELS(digital_input_logic), + + [AD74115_CH_FUNC_DIGITAL_INPUT_LOOP_POWER] = AD74115_CHANNELS(digital_input_loop), +}; + +#define AD74115_GPIO_MODE_FW_PROP(i) \ +{ \ + .name = "adi,gpio" __stringify(i) "-mode", \ + .reg = AD74115_GPIO_CONFIG_X_REG(i), \ + .mask = AD74115_GPIO_CONFIG_SELECT_MASK, \ + .lookup_tbl = ad74115_gpio_mode_tbl, \ + .lookup_tbl_len = ARRAY_SIZE(ad74115_gpio_mode_tbl), \ +} + +static const struct ad74115_fw_prop ad74115_gpio_mode_fw_props[] = { + AD74115_GPIO_MODE_FW_PROP(0), + AD74115_GPIO_MODE_FW_PROP(1), + AD74115_GPIO_MODE_FW_PROP(2), + AD74115_GPIO_MODE_FW_PROP(3), +}; + +static const struct ad74115_fw_prop ad74115_din_threshold_mode_fw_prop = + AD74115_FW_PROP_BOOL("adi,digital-input-threshold-mode-fixed", + AD74115_DIN_CONFIG2_REG, BIT(7)); + +static const struct ad74115_fw_prop ad74115_dac_bipolar_fw_prop = + AD74115_FW_PROP_BOOL("adi,dac-bipolar", AD74115_OUTPUT_CONFIG_REG, BIT(7)); + +static const struct ad74115_fw_prop ad74115_ch_func_fw_prop = + AD74115_FW_PROP("adi,ch-func", AD74115_CH_FUNC_MAX, + AD74115_CH_FUNC_SETUP_REG, GENMASK(3, 0)); + +static const struct ad74115_fw_prop ad74115_rtd_mode_fw_prop = + AD74115_FW_PROP_BOOL("adi,4-wire-rtd", AD74115_RTD3W4W_CONFIG_REG, BIT(3)); + +static const struct ad74115_fw_prop ad74115_din_range_fw_prop = + AD74115_FW_PROP_BOOL("adi,digital-input-sink-range-high", + AD74115_DIN_CONFIG1_REG, BIT(12)); + +static const struct ad74115_fw_prop ad74115_ext2_burnout_current_fw_prop = + AD74115_FW_PROP_TBL("adi,ext2-burnout-current-nanoamp", + ad74115_burnout_current_na_tbl, + AD74115_BURNOUT_CONFIG_REG, GENMASK(14, 12)); + +static const struct ad74115_fw_prop ad74115_ext1_burnout_current_fw_prop = + AD74115_FW_PROP_TBL("adi,ext1-burnout-current-nanoamp", + ad74115_burnout_current_na_tbl, + AD74115_BURNOUT_CONFIG_REG, GENMASK(9, 7)); + +static const struct ad74115_fw_prop ad74115_viout_burnout_current_fw_prop = + AD74115_FW_PROP_TBL("adi,viout-burnout-current-nanoamp", + ad74115_viout_burnout_current_na_tbl, + AD74115_BURNOUT_CONFIG_REG, GENMASK(4, 2)); + +static const struct ad74115_fw_prop ad74115_fw_props[] = { + AD74115_FW_PROP("adi,conv2-mux", 3, + AD74115_ADC_CONFIG_REG, GENMASK(3, 2)), + + AD74115_FW_PROP_BOOL_NEG("adi,sense-agnd-buffer-low-power", + AD74115_PWR_OPTIM_CONFIG_REG, BIT(4)), + AD74115_FW_PROP_BOOL_NEG("adi,lf-buffer-low-power", + AD74115_PWR_OPTIM_CONFIG_REG, BIT(3)), + AD74115_FW_PROP_BOOL_NEG("adi,hf-buffer-low-power", + AD74115_PWR_OPTIM_CONFIG_REG, BIT(2)), + AD74115_FW_PROP_BOOL_NEG("adi,ext2-buffer-low-power", + AD74115_PWR_OPTIM_CONFIG_REG, BIT(1)), + AD74115_FW_PROP_BOOL_NEG("adi,ext1-buffer-low-power", + AD74115_PWR_OPTIM_CONFIG_REG, BIT(0)), + + AD74115_FW_PROP_BOOL("adi,comparator-invert", + AD74115_DIN_CONFIG1_REG, BIT(14)), + AD74115_FW_PROP_BOOL("adi,digital-input-debounce-mode-counter-reset", + AD74115_DIN_CONFIG1_REG, BIT(6)), + + AD74115_FW_PROP_BOOL("adi,digital-input-unbuffered", + AD74115_DIN_CONFIG2_REG, BIT(10)), + AD74115_FW_PROP_BOOL("adi,digital-input-short-circuit-detection", + AD74115_DIN_CONFIG2_REG, BIT(9)), + AD74115_FW_PROP_BOOL("adi,digital-input-open-circuit-detection", + AD74115_DIN_CONFIG2_REG, BIT(8)), + + AD74115_FW_PROP_BOOL("adi,dac-current-limit-low", + AD74115_OUTPUT_CONFIG_REG, BIT(0)), + + 
AD74115_FW_PROP_BOOL("adi,3-wire-rtd-excitation-swap", + AD74115_RTD3W4W_CONFIG_REG, BIT(2)), + AD74115_FW_PROP_TBL("adi,rtd-excitation-current-microamp", + ad74115_rtd_excitation_current_ua_tbl, + AD74115_RTD3W4W_CONFIG_REG, GENMASK(1, 0)), + + AD74115_FW_PROP_BOOL("adi,ext2-burnout-current-polarity-sourcing", + AD74115_BURNOUT_CONFIG_REG, BIT(11)), + AD74115_FW_PROP_BOOL("adi,ext1-burnout-current-polarity-sourcing", + AD74115_BURNOUT_CONFIG_REG, BIT(6)), + AD74115_FW_PROP_BOOL("adi,viout-burnout-current-polarity-sourcing", + AD74115_BURNOUT_CONFIG_REG, BIT(1)), + + AD74115_FW_PROP_BOOL("adi,charge-pump", + AD74115_CHARGE_PUMP_REG, BIT(0)), +}; + +static int ad74115_apply_fw_prop(struct ad74115_state *st, + const struct ad74115_fw_prop *prop, u32 *retval) +{ + struct device *dev = &st->spi->dev; + u32 val = 0; + int ret; + + if (prop->is_boolean) { + val = device_property_read_bool(dev, prop->name); + } else { + ret = device_property_read_u32(dev, prop->name, &val); + if (ret && prop->lookup_tbl) + val = prop->lookup_tbl[0]; + } + + *retval = val; + + if (prop->negate) + val = !val; + + if (prop->lookup_tbl) + ret = _ad74115_find_tbl_index(prop->lookup_tbl, + prop->lookup_tbl_len, val, &val); + else if (prop->max && val > prop->max) + ret = -EINVAL; + else + ret = 0; + + if (ret) + return dev_err_probe(dev, -EINVAL, + "Invalid value %u for prop %s\n", + val, prop->name); + + WARN(!prop->mask, "Prop %s mask is empty\n", prop->name); + + val = (val << __ffs(prop->mask)) & prop->mask; + + return regmap_update_bits(st->regmap, prop->reg, prop->mask, val); +} + +static int ad74115_setup_adc_conv2_range(struct ad74115_state *st) +{ + unsigned int tbl_len = ARRAY_SIZE(ad74115_adc_range_tbl); + const char *prop_name = "adi,conv2-range-microvolt"; + s32 vals[2] = { + ad74115_adc_range_tbl[0][0], + ad74115_adc_range_tbl[0][1], + }; + struct device *dev = &st->spi->dev; + unsigned int i; + + device_property_read_u32_array(dev, prop_name, vals, 2); + + for (i = 0; i < tbl_len; i++) + if (vals[0] == ad74115_adc_range_tbl[i][0] && + vals[1] == ad74115_adc_range_tbl[i][1]) + break; + + if (i == tbl_len) + return dev_err_probe(dev, -EINVAL, + "Invalid value %d, %d for prop %s\n", + vals[0], vals[1], prop_name); + + return regmap_update_bits(st->regmap, AD74115_ADC_CONFIG_REG, + AD74115_ADC_CONFIG_CONV2_RANGE_MASK, + FIELD_PREP(AD74115_ADC_CONFIG_CONV2_RANGE_MASK, i)); +} + +static int ad74115_setup_iio_channels(struct iio_dev *indio_dev) +{ + struct ad74115_state *st = iio_priv(indio_dev); + struct device *dev = &st->spi->dev; + struct iio_chan_spec *channels; + + channels = devm_kcalloc(dev, sizeof(*channels), + indio_dev->num_channels, GFP_KERNEL); + if (!channels) + return -ENOMEM; + + indio_dev->channels = channels; + + memcpy(channels, ad74115_channels_map[st->ch_func].channels, + sizeof(*channels) * ad74115_channels_map[st->ch_func].num_channels); + + if (channels[0].output && channels[0].channel == AD74115_DAC_CH_MAIN && + channels[0].type == IIO_VOLTAGE && !st->dac_hart_slew) { + channels[0].info_mask_separate |= BIT(IIO_CHAN_INFO_SAMP_FREQ); + channels[0].info_mask_separate_available |= BIT(IIO_CHAN_INFO_SAMP_FREQ); + } + + return 0; +} + +static int ad74115_setup_gpio_chip(struct ad74115_state *st) +{ + struct device *dev = &st->spi->dev; + + if (!st->gpio_valid_mask) + return 0; + + st->gc = (struct gpio_chip) { + .owner = THIS_MODULE, + .label = AD74115_NAME, + .base = -1, + .ngpio = AD74115_GPIO_NUM, + .parent = dev, + .can_sleep = true, + .init_valid_mask = ad74115_gpio_init_valid_mask, + 
.get_direction = ad74115_gpio_get_direction,
+		.direction_input = ad74115_gpio_direction_input,
+		.direction_output = ad74115_gpio_direction_output,
+		.get = ad74115_gpio_get,
+		.set = ad74115_gpio_set,
+	};
+
+	return devm_gpiochip_add_data(dev, &st->gc, st);
+}
+
+static int ad74115_setup_comp_gpio_chip(struct ad74115_state *st)
+{
+	struct device *dev = &st->spi->dev;
+	u32 val;
+	int ret;
+
+	ret = regmap_read(st->regmap, AD74115_DIN_CONFIG1_REG, &val);
+	if (ret)
+		return ret;
+
+	if (!(val & AD74115_DIN_COMPARATOR_EN_MASK))
+		return 0;
+
+	st->comp_gc = (struct gpio_chip) {
+		.owner = THIS_MODULE,
+		.label = AD74115_NAME,
+		.base = -1,
+		.ngpio = 1,
+		.parent = dev,
+		.can_sleep = true,
+		.get_direction = ad74115_comp_gpio_get_direction,
+		.get = ad74115_comp_gpio_get,
+		.set_config = ad74115_comp_gpio_set_config,
+	};
+
+	return devm_gpiochip_add_data(dev, &st->comp_gc, st);
+}
+
+static int ad74115_setup(struct iio_dev *indio_dev)
+{
+	struct ad74115_state *st = iio_priv(indio_dev);
+	struct device *dev = &st->spi->dev;
+	u32 val, din_range_high;
+	unsigned int i;
+	int ret;
+
+	ret = ad74115_apply_fw_prop(st, &ad74115_ch_func_fw_prop, &val);
+	if (ret)
+		return ret;
+
+	indio_dev->num_channels += ad74115_channels_map[val].num_channels;
+	st->ch_func = val;
+
+	ret = ad74115_setup_adc_conv2_range(st);
+	if (ret)
+		return ret;
+
+	val = device_property_read_bool(dev, "adi,dac-hart-slew");
+	if (val) {
+		st->dac_hart_slew = val;
+
+		ret = regmap_update_bits(st->regmap, AD74115_OUTPUT_CONFIG_REG,
+					 AD74115_OUTPUT_SLEW_EN_MASK,
+					 FIELD_PREP(AD74115_OUTPUT_SLEW_EN_MASK,
+						    AD74115_SLEW_MODE_HART));
+		if (ret)
+			return ret;
+	}
+
+	ret = ad74115_apply_fw_prop(st, &ad74115_din_range_fw_prop,
+				    &din_range_high);
+	if (ret)
+		return ret;
+
+	ret = device_property_read_u32(dev, "adi,digital-input-sink-microamp", &val);
+	if (!ret) {
+		if (din_range_high)
+			val = DIV_ROUND_CLOSEST(val, AD74115_DIN_SINK_HIGH_STEP);
+		else
+			val = DIV_ROUND_CLOSEST(val, AD74115_DIN_SINK_LOW_STEP);
+
+		if (val > AD74115_DIN_SINK_MAX)
+			val = AD74115_DIN_SINK_MAX;
+
+		ret = regmap_update_bits(st->regmap, AD74115_DIN_CONFIG1_REG,
+					 AD74115_DIN_SINK_MASK,
+					 FIELD_PREP(AD74115_DIN_SINK_MASK, val));
+		if (ret)
+			return ret;
+	}
+
+	ret = ad74115_apply_fw_prop(st, &ad74115_din_threshold_mode_fw_prop, &val);
+	if (ret)
+		return ret;
+
+	if (val == AD74115_DIN_THRESHOLD_MODE_AVDD) {
+		ret = regulator_get_voltage(st->avdd);
+		if (ret < 0)
+			return ret;
+
+		st->avdd_mv = ret / 1000;
+	}
+
+	st->din_threshold_mode = val;
+
+	ret = ad74115_apply_fw_prop(st, &ad74115_dac_bipolar_fw_prop, &val);
+	if (ret)
+		return ret;
+
+	st->dac_bipolar = val;
+
+	ret = ad74115_apply_fw_prop(st, &ad74115_rtd_mode_fw_prop, &val);
+	if (ret)
+		return ret;
+
+	st->rtd_mode_4_wire = val;
+
+	ret = ad74115_apply_fw_prop(st, &ad74115_ext2_burnout_current_fw_prop, &val);
+	if (ret)
+		return ret;
+
+	if (val) {
+		ret = regmap_update_bits(st->regmap, AD74115_BURNOUT_CONFIG_REG,
+					 AD74115_BURNOUT_EXT2_EN_MASK,
+					 FIELD_PREP(AD74115_BURNOUT_EXT2_EN_MASK, 1));
+		if (ret)
+			return ret;
+	}
+
+	ret = ad74115_apply_fw_prop(st, &ad74115_ext1_burnout_current_fw_prop, &val);
+	if (ret)
+		return ret;
+
+	if (val) {
+		ret = regmap_update_bits(st->regmap, AD74115_BURNOUT_CONFIG_REG,
+					 AD74115_BURNOUT_EXT1_EN_MASK,
+					 FIELD_PREP(AD74115_BURNOUT_EXT1_EN_MASK, 1));
+		if (ret)
+			return ret;
+	}
+
+	ret = ad74115_apply_fw_prop(st, &ad74115_viout_burnout_current_fw_prop, &val);
+	if (ret)
+		return ret;
+
+	if (val) {
+		ret = regmap_update_bits(st->regmap,
AD74115_BURNOUT_CONFIG_REG, + AD74115_BURNOUT_VIOUT_EN_MASK, + FIELD_PREP(AD74115_BURNOUT_VIOUT_EN_MASK, 1)); + if (ret) + return ret; + } + + for (i = 0; i < AD74115_GPIO_NUM; i++) { + ret = ad74115_apply_fw_prop(st, &ad74115_gpio_mode_fw_props[i], &val); + if (ret) + return ret; + + if (val == AD74115_GPIO_MODE_LOGIC) + st->gpio_valid_mask |= BIT(i); + } + + for (i = 0; i < ARRAY_SIZE(ad74115_fw_props); i++) { + ret = ad74115_apply_fw_prop(st, &ad74115_fw_props[i], &val); + if (ret) + return ret; + } + + ret = ad74115_setup_gpio_chip(st); + if (ret) + return ret; + + ret = ad74115_setup_comp_gpio_chip(st); + if (ret) + return ret; + + return ad74115_setup_iio_channels(indio_dev); +} + +static int ad74115_reset(struct ad74115_state *st) +{ + struct device *dev = &st->spi->dev; + struct gpio_desc *reset_gpio; + int ret; + + reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(reset_gpio)) + return dev_err_probe(dev, PTR_ERR(reset_gpio), + "Failed to find reset GPIO\n"); + + if (reset_gpio) { + fsleep(100); + + gpiod_set_value_cansleep(reset_gpio, 0); + } else { + ret = regmap_write(st->regmap, AD74115_CMD_KEY_REG, + AD74115_CMD_KEY_RESET1); + if (ret) + return ret; + + ret = regmap_write(st->regmap, AD74115_CMD_KEY_REG, + AD74115_CMD_KEY_RESET2); + if (ret) + return ret; + } + + fsleep(1000); + + return 0; +} + +static void ad74115_regulator_disable(void *data) +{ + regulator_disable(data); +} + +static int ad74115_setup_trigger(struct iio_dev *indio_dev) +{ + struct ad74115_state *st = iio_priv(indio_dev); + struct device *dev = &st->spi->dev; + int ret; + + st->irq = fwnode_irq_get_byname(dev_fwnode(dev), "adc_rdy"); + + if (st->irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + + if (st->irq < 0) { + st->irq = 0; + return 0; + } + + ret = devm_request_irq(dev, st->irq, ad74115_adc_data_interrupt, + 0, AD74115_NAME, indio_dev); + if (ret) + return ret; + + st->trig = devm_iio_trigger_alloc(dev, "%s-dev%d", AD74115_NAME, + iio_device_id(indio_dev)); + if (!st->trig) + return -ENOMEM; + + st->trig->ops = &ad74115_trigger_ops; + iio_trigger_set_drvdata(st->trig, st); + + ret = devm_iio_trigger_register(dev, st->trig); + if (ret) + return ret; + + indio_dev->trig = iio_trigger_get(st->trig); + + return 0; +} + +static int ad74115_probe(struct spi_device *spi) +{ + static const char * const regulator_names[] = { + "avcc", "dvcc", "dovdd", "refin", + }; + struct device *dev = &spi->dev; + struct ad74115_state *st; + struct iio_dev *indio_dev; + int ret; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + st = iio_priv(indio_dev); + + st->spi = spi; + mutex_init(&st->lock); + init_completion(&st->adc_data_completion); + + indio_dev->name = AD74115_NAME; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->info = &ad74115_info; + + st->avdd = devm_regulator_get(dev, "avdd"); + if (IS_ERR(st->avdd)) + return PTR_ERR(st->avdd); + + ret = regulator_enable(st->avdd); + if (ret) { + dev_err(dev, "Failed to enable avdd regulator\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, ad74115_regulator_disable, st->avdd); + if (ret) + return ret; + + ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names), + regulator_names); + if (ret) + return ret; + + st->regmap = devm_regmap_init(dev, NULL, st, &ad74115_regmap_config); + if (IS_ERR(st->regmap)) + return PTR_ERR(st->regmap); + + ret = ad74115_reset(st); + if (ret) + return ret; + + ret = ad74115_setup(indio_dev); + if (ret) + return ret; + + ret = 
ad74115_setup_trigger(indio_dev); + if (ret) + return ret; + + ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, + ad74115_trigger_handler, + &ad74115_buffer_ops); + if (ret) + return ret; + + return devm_iio_device_register(dev, indio_dev); +} + +static int ad74115_unregister_driver(struct spi_driver *spi) +{ + spi_unregister_driver(spi); + + return 0; +} + +static int __init ad74115_register_driver(struct spi_driver *spi) +{ + crc8_populate_msb(ad74115_crc8_table, AD74115_CRC_POLYNOMIAL); + + return spi_register_driver(spi); +} + +static const struct spi_device_id ad74115_spi_id[] = { + { "ad74115h" }, + { } +}; + +MODULE_DEVICE_TABLE(spi, ad74115_spi_id); + +static const struct of_device_id ad74115_dt_id[] = { + { .compatible = "adi,ad74115h" }, + { } +}; +MODULE_DEVICE_TABLE(of, ad74115_dt_id); + +static struct spi_driver ad74115_driver = { + .driver = { + .name = "ad74115", + .of_match_table = ad74115_dt_id, + }, + .probe = ad74115_probe, + .id_table = ad74115_spi_id, +}; + +module_driver(ad74115_driver, + ad74115_register_driver, ad74115_unregister_driver); + +MODULE_AUTHOR("Cosmin Tanislav "); +MODULE_DESCRIPTION("Analog Devices AD74115 ADDAC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c index 899bcd83f40bc5551a1b4ac830ee91e3d1249a88..f32c8c2fb26d2167a4fa1d222acda9a45c8ce353 100644 --- a/drivers/iio/addac/ad74413r.c +++ b/drivers/iio/addac/ad74413r.c @@ -71,6 +71,7 @@ struct ad74413r_state { struct regmap *regmap; struct device *dev; struct iio_trigger *trig; + struct gpio_desc *reset_gpio; size_t adc_active_channels; struct spi_message adc_samples_msg; @@ -393,6 +394,13 @@ static int ad74413r_reset(struct ad74413r_state *st) { int ret; + if (st->reset_gpio) { + gpiod_set_value_cansleep(st->reset_gpio, 1); + fsleep(50); + gpiod_set_value_cansleep(st->reset_gpio, 0); + return 0; + } + ret = regmap_write(st->regmap, AD74413R_REG_CMD_KEY, AD74413R_CMD_KEY_RESET1); if (ret) @@ -691,7 +699,7 @@ static int ad74413_get_input_current_offset(struct ad74413r_state *st, if (ret) return ret; - *val = voltage_offset * AD74413R_ADC_RESULT_MAX / voltage_range; + *val = voltage_offset * (int)AD74413R_ADC_RESULT_MAX / voltage_range; return IIO_VAL_INT; } @@ -1305,6 +1313,16 @@ static int ad74413r_probe(struct spi_device *spi) st->spi = spi; st->dev = &spi->dev; st->chip_info = device_get_match_data(&spi->dev); + if (!st->chip_info) { + const struct spi_device_id *id = spi_get_device_id(spi); + + if (id) + st->chip_info = + (struct ad74413r_chip_info *)id->driver_data; + if (!st->chip_info) + return -EINVAL; + } + mutex_init(&st->lock); init_completion(&st->adc_data_completion); @@ -1313,6 +1331,10 @@ static int ad74413r_probe(struct spi_device *spi) if (IS_ERR(st->regmap)) return PTR_ERR(st->regmap); + st->reset_gpio = devm_gpiod_get_optional(st->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(st->reset_gpio)) + return PTR_ERR(st->reset_gpio); + st->refin_reg = devm_regulator_get(st->dev, "refin"); if (IS_ERR(st->refin_reg)) return dev_err_probe(st->dev, PTR_ERR(st->refin_reg), @@ -1457,12 +1479,20 @@ static const struct of_device_id ad74413r_dt_id[] = { }; MODULE_DEVICE_TABLE(of, ad74413r_dt_id); +static const struct spi_device_id ad74413r_spi_id[] = { + { .name = "ad74412r", .driver_data = (kernel_ulong_t)&ad74412r_chip_info_data }, + { .name = "ad74413r", .driver_data = (kernel_ulong_t)&ad74413r_chip_info_data }, + {} +}; +MODULE_DEVICE_TABLE(spi, ad74413r_spi_id); + static struct spi_driver ad74413r_driver = { .driver = { .name = 
"ad74413r", .of_match_table = ad74413r_dt_id, }, .probe = ad74413r_probe, + .id_table = ad74413r_spi_id, }; module_driver(ad74413r_driver, diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c index ce80e0c916f4a0db8c8704733bf58188c66857e7..108f0f1685ef1661f9ac6edbadb715d20f8bbc8e 100644 --- a/drivers/iio/amplifiers/hmc425a.c +++ b/drivers/iio/amplifiers/hmc425a.c @@ -34,7 +34,6 @@ struct hmc425a_chip_info { }; struct hmc425a_state { - struct regulator *reg; struct mutex lock; /* protect sensor state */ struct hmc425a_chip_info *chip_info; struct gpio_descs *gpios; @@ -162,13 +161,6 @@ static const struct of_device_id hmc425a_of_match[] = { }; MODULE_DEVICE_TABLE(of, hmc425a_of_match); -static void hmc425a_reg_disable(void *data) -{ - struct hmc425a_state *st = data; - - regulator_disable(st->reg); -} - static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = { [ID_HMC425A] = { .name = "hmc425a", @@ -211,14 +203,7 @@ static int hmc425a_probe(struct platform_device *pdev) return -ENODEV; } - st->reg = devm_regulator_get(&pdev->dev, "vcc-supply"); - if (IS_ERR(st->reg)) - return PTR_ERR(st->reg); - - ret = regulator_enable(st->reg); - if (ret) - return ret; - ret = devm_add_action_or_reset(&pdev->dev, hmc425a_reg_disable, st); + ret = devm_regulator_get_enable(&pdev->dev, "vcc-supply"); if (ret) return ret; diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c index f744b62a636ad5ef526cd9e6b981b2a729fbe288..5f85ba38e6f6e7a052ebd59b2aa5a0aed14d6bf4 100644 --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c @@ -142,8 +142,8 @@ static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev, static IIO_DEVICE_ATTR(length_align_bytes, 0444, iio_dmaengine_buffer_get_length_align, NULL, 0); -static const struct attribute *iio_dmaengine_buffer_attrs[] = { - &iio_dev_attr_length_align_bytes.dev_attr.attr, +static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = { + &iio_dev_attr_length_align_bytes, NULL, }; diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c index 8d4fc97d100598230b2ed1c02fa37c1ae11e45ca..c7671b1f5eadacf262b0ab62be176ef0e6751a70 100644 --- a/drivers/iio/buffer/industrialio-triggered-buffer.c +++ b/drivers/iio/buffer/industrialio-triggered-buffer.c @@ -41,7 +41,7 @@ int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev, irqreturn_t (*thread)(int irq, void *p), enum iio_buffer_direction direction, const struct iio_buffer_setup_ops *setup_ops, - const struct attribute **buffer_attrs) + const struct iio_dev_attr **buffer_attrs) { struct iio_buffer *buffer; int ret; @@ -110,7 +110,7 @@ int devm_iio_triggered_buffer_setup_ext(struct device *dev, irqreturn_t (*thread)(int irq, void *p), enum iio_buffer_direction direction, const struct iio_buffer_setup_ops *ops, - const struct attribute **buffer_attrs) + const struct iio_dev_attr **buffer_attrs) { int ret; diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c index 35d8b4077376c040bb605df9ddd06e837f8dfe91..05b285f0eb2258b1ffd052ddadbce67e70df4651 100644 --- a/drivers/iio/buffer/kfifo_buf.c +++ b/drivers/iio/buffer/kfifo_buf.c @@ -270,7 +270,7 @@ static struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev) int devm_iio_kfifo_buffer_setup_ext(struct device *dev, struct iio_dev *indio_dev, const struct iio_buffer_setup_ops *setup_ops, - const struct attribute 
**buffer_attrs) + const struct iio_dev_attr **buffer_attrs) { struct iio_buffer *buffer; diff --git a/drivers/iio/cdc/ad7150.c b/drivers/iio/cdc/ad7150.c index ebe112b4618b049f922d78941e84790c09aa576d..79aeb0aaea67d05e529755d2c29585d8ed8e392f 100644 --- a/drivers/iio/cdc/ad7150.c +++ b/drivers/iio/cdc/ad7150.c @@ -536,19 +536,11 @@ static const struct iio_info ad7150_info_no_irq = { .read_raw = &ad7150_read_raw, }; -static void ad7150_reg_disable(void *data) -{ - struct regulator *reg = data; - - regulator_disable(reg); -} - -static int ad7150_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ad7150_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct ad7150_chip_info *chip; struct iio_dev *indio_dev; - struct regulator *reg; int ret; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip)); @@ -563,15 +555,7 @@ static int ad7150_probe(struct i2c_client *client, indio_dev->modes = INDIO_DIRECT_MODE; - reg = devm_regulator_get(&client->dev, "vdd"); - if (IS_ERR(reg)) - return PTR_ERR(reg); - - ret = regulator_enable(reg); - if (ret) - return ret; - - ret = devm_add_action_or_reset(&client->dev, ad7150_reg_disable, reg); + ret = devm_regulator_get_enable(&client->dev, "vdd"); if (ret) return ret; @@ -663,7 +647,7 @@ static struct i2c_driver ad7150_driver = { .name = "ad7150", .of_match_table = ad7150_of_match, }, - .probe = ad7150_probe, + .probe_new = ad7150_probe, .id_table = ad7150_id, }; module_i2c_driver(ad7150_driver); diff --git a/drivers/iio/cdc/ad7746.c b/drivers/iio/cdc/ad7746.c index b266f532814090c05cca23cebc60d5ba2f29a3ee..6f68651ce1d5a83ca497226b9964c7872e9f3b6e 100644 --- a/drivers/iio/cdc/ad7746.c +++ b/drivers/iio/cdc/ad7746.c @@ -717,9 +717,9 @@ static const struct iio_info ad7746_info = { .write_raw = ad7746_write_raw, }; -static int ad7746_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ad7746_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; struct ad7746_chip_info *chip; struct iio_dev *indio_dev; @@ -810,7 +810,7 @@ static struct i2c_driver ad7746_driver = { .name = KBUILD_MODNAME, .of_match_table = ad7746_of_match, }, - .probe = ad7746_probe, + .probe_new = ad7746_probe, .id_table = ad7746_id, }; module_i2c_driver(ad7746_driver); diff --git a/drivers/iio/chemical/ams-iaq-core.c b/drivers/iio/chemical/ams-iaq-core.c index 97be3669c5543360a7e2e800262a93d513bb7f50..0a0fbcdd446922142627448c0aa2e21d3da7cc06 100644 --- a/drivers/iio/chemical/ams-iaq-core.c +++ b/drivers/iio/chemical/ams-iaq-core.c @@ -135,8 +135,7 @@ static const struct iio_info ams_iaqcore_info = { .read_raw = ams_iaqcore_read_raw, }; -static int ams_iaqcore_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ams_iaqcore_probe(struct i2c_client *client) { struct iio_dev *indio_dev; struct ams_iaqcore_data *data; @@ -180,7 +179,7 @@ static struct i2c_driver ams_iaqcore_driver = { .name = "ams-iaq-core", .of_match_table = ams_iaqcore_dt_ids, }, - .probe = ams_iaqcore_probe, + .probe_new = ams_iaqcore_probe, .id_table = ams_iaqcore_id, }; module_i2c_driver(ams_iaqcore_driver); diff --git a/drivers/iio/chemical/atlas-ezo-sensor.c b/drivers/iio/chemical/atlas-ezo-sensor.c index bbcf5a59c1f4e8ef79a6c027b07e32bdeaee1856..307c3488f4bdddaa78a460d7bcdf4614c9a95651 100644 --- a/drivers/iio/chemical/atlas-ezo-sensor.c +++ b/drivers/iio/chemical/atlas-ezo-sensor.c @@ -201,9 +201,9 @@ 
static const struct of_device_id atlas_ezo_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, atlas_ezo_dt_ids); -static int atlas_ezo_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int atlas_ezo_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); const struct atlas_ezo_device *chip; struct atlas_ezo_data *data; struct iio_dev *indio_dev; @@ -238,7 +238,7 @@ static struct i2c_driver atlas_ezo_driver = { .name = ATLAS_EZO_DRV_NAME, .of_match_table = atlas_ezo_dt_ids, }, - .probe = atlas_ezo_probe, + .probe_new = atlas_ezo_probe, .id_table = atlas_ezo_id, }; module_i2c_driver(atlas_ezo_driver); diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c index 7cac77a931c70a89f911213bd55877df6b852813..024657bc59e171e36673b484655fa2131e7ae649 100644 --- a/drivers/iio/chemical/atlas-sensor.c +++ b/drivers/iio/chemical/atlas-sensor.c @@ -608,9 +608,9 @@ static const struct of_device_id atlas_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, atlas_dt_ids); -static int atlas_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int atlas_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct atlas_data *data; struct atlas_device *chip; struct iio_trigger *trig; @@ -767,7 +767,7 @@ static struct i2c_driver atlas_driver = { .of_match_table = atlas_dt_ids, .pm = pm_ptr(&atlas_pm_ops), }, - .probe = atlas_probe, + .probe_new = atlas_probe, .remove = atlas_remove, .id_table = atlas_id, }; diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c index 20f2c20b6b022b131b2f21985cfe3958669bfcf4..61b12079858d21ec86f878388fb846d0d242ea63 100644 --- a/drivers/iio/chemical/bme680_i2c.c +++ b/drivers/iio/chemical/bme680_i2c.c @@ -17,9 +17,9 @@ #include "bme680.h" -static int bme680_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bme680_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct regmap *regmap; const char *name = NULL; @@ -52,7 +52,7 @@ static struct i2c_driver bme680_i2c_driver = { .name = "bme680_i2c", .of_match_table = bme680_of_i2c_match, }, - .probe = bme680_i2c_probe, + .probe_new = bme680_i2c_probe, .id_table = bme680_i2c_id, }; module_i2c_driver(bme680_i2c_driver); diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c index ba4045e20303585c8d6c3d7208d3472fb3321b34..6ead80c089240cfddc7110efb1a1016546e85b12 100644 --- a/drivers/iio/chemical/ccs811.c +++ b/drivers/iio/chemical/ccs811.c @@ -401,9 +401,9 @@ static int ccs811_reset(struct i2c_client *client) return 0; } -static int ccs811_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ccs811_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct iio_dev *indio_dev; struct ccs811_data *data; int ret; @@ -567,7 +567,7 @@ static struct i2c_driver ccs811_driver = { .name = "ccs811", .of_match_table = ccs811_dt_ids, }, - .probe = ccs811_probe, + .probe_new = ccs811_probe, .remove = ccs811_remove, .id_table = ccs811_id, }; diff --git a/drivers/iio/chemical/scd4x.c b/drivers/iio/chemical/scd4x.c index 54066532ea4589139a0e384ffec59a43a3db691b..f7ed9455b3c8fada9e5de7c2f35738092e581988 100644 --- a/drivers/iio/chemical/scd4x.c +++ b/drivers/iio/chemical/scd4x.c @@ -615,7 +615,7 @@ out: return IRQ_HANDLED; } -static int scd4x_probe(struct i2c_client *client, const struct 
i2c_device_id *id) +static int scd4x_probe(struct i2c_client *client) { static const unsigned long scd4x_scan_masks[] = { 0x07, 0x00 }; struct device *dev = &client->dev; @@ -690,7 +690,7 @@ static struct i2c_driver scd4x_i2c_driver = { .of_match_table = scd4x_dt_ids, .pm = pm_sleep_ptr(&scd4x_pm_ops), }, - .probe = scd4x_probe, + .probe_new = scd4x_probe, }; module_i2c_driver(scd4x_i2c_driver); diff --git a/drivers/iio/chemical/sgp30.c b/drivers/iio/chemical/sgp30.c index e2c13c78c7e0578d3bc6bac39e4a14e397768cfb..9d0c68485b63f2e2eb3a4d399a839c8a635be9e7 100644 --- a/drivers/iio/chemical/sgp30.c +++ b/drivers/iio/chemical/sgp30.c @@ -496,9 +496,9 @@ static const struct of_device_id sgp_dt_ids[] = { { } }; -static int sgp_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int sgp_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; struct iio_dev *indio_dev; struct sgp_data *data; @@ -575,7 +575,7 @@ static struct i2c_driver sgp_driver = { .name = "sgp30", .of_match_table = sgp_dt_ids, }, - .probe = sgp_probe, + .probe_new = sgp_probe, .remove = sgp_remove, .id_table = sgp_id, }; diff --git a/drivers/iio/chemical/sgp40.c b/drivers/iio/chemical/sgp40.c index 8a56394cea4eb2c471ad2986e32f8e8424daf25b..c0ea0130090810267b62a07ca4421fdf7ea945e6 100644 --- a/drivers/iio/chemical/sgp40.c +++ b/drivers/iio/chemical/sgp40.c @@ -311,9 +311,9 @@ static const struct iio_info sgp40_info = { .write_raw = sgp40_write_raw, }; -static int sgp40_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int sgp40_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; struct iio_dev *indio_dev; struct sgp40_data *data; @@ -368,7 +368,7 @@ static struct i2c_driver sgp40_driver = { .name = "sgp40", .of_match_table = sgp40_dt_ids, }, - .probe = sgp40_probe, + .probe_new = sgp40_probe, .id_table = sgp40_id, }; module_i2c_driver(sgp40_driver); diff --git a/drivers/iio/chemical/vz89x.c b/drivers/iio/chemical/vz89x.c index e7e1c74a351efbeeb676ad1f7677b37c40ad605f..d4604f7ccd1e0e559f34db0beb6759e2d2837096 100644 --- a/drivers/iio/chemical/vz89x.c +++ b/drivers/iio/chemical/vz89x.c @@ -348,9 +348,9 @@ static const struct of_device_id vz89x_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, vz89x_dt_ids); -static int vz89x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int vz89x_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; struct iio_dev *indio_dev; struct vz89x_data *data; @@ -402,7 +402,7 @@ static struct i2c_driver vz89x_driver = { .name = "vz89x", .of_match_table = vz89x_dt_ids, }, - .probe = vz89x_probe, + .probe_new = vz89x_probe, .id_table = vz89x_id, }; module_i2c_driver(vz89x_driver); diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index 05a28d353e343b03d2b9785f2f517bc6a4270f5c..943e9e14d1e99259d87b2daefcd5d56be12bc700 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -172,9 +172,9 @@ static ssize_t hwfifo_watermark_max_show(struct device *dev, static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0); -static const struct attribute *cros_ec_sensor_fifo_attributes[] = { - &iio_dev_attr_hwfifo_timeout.dev_attr.attr, - 
&iio_dev_attr_hwfifo_watermark_max.dev_attr.attr, +static const struct iio_dev_attr *cros_ec_sensor_fifo_attributes[] = { + &iio_dev_attr_hwfifo_timeout, + &iio_dev_attr_hwfifo_watermark_max, NULL, }; diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index 1151434038d40f16e25ab86bfa607dcf291228d6..ad8910e6ad59df1e7c306efc72586fd285670363 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c @@ -75,9 +75,9 @@ static IIO_DEVICE_ATTR(hwfifo_timeout, 0644, static IIO_DEVICE_ATTR(hwfifo_enabled, 0444, _hid_sensor_get_fifo_state, NULL, 0); -static const struct attribute *hid_sensor_fifo_attributes[] = { - &iio_dev_attr_hwfifo_timeout.dev_attr.attr, - &iio_dev_attr_hwfifo_enabled.dev_attr.attr, +static const struct iio_dev_attr *hid_sensor_fifo_attributes[] = { + &iio_dev_attr_hwfifo_timeout, + &iio_dev_attr_hwfifo_enabled, NULL, }; @@ -231,7 +231,7 @@ static const struct iio_trigger_ops hid_sensor_trigger_ops = { int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, struct hid_sensor_common *attrb) { - const struct attribute **fifo_attrs; + const struct iio_dev_attr **fifo_attrs; int ret; struct iio_trigger *trig; diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c index 54ccf19ab2bb1f892893b9dfb83c5240ffbc7c4b..d92f7f651f7b3f402f03f3132216ec60ad44c638 100644 --- a/drivers/iio/common/scmi_sensors/scmi_iio.c +++ b/drivers/iio/common/scmi_sensors/scmi_iio.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -27,6 +28,8 @@ struct scmi_iio_priv { struct scmi_protocol_handle *ph; const struct scmi_sensor_info *sensor_info; struct iio_dev *indio_dev; + /* lock to protect against multiple access to the device */ + struct mutex lock; /* adding one additional channel for timestamp */ s64 iio_buf[SCMI_IIO_NUM_OF_AXIS + 1]; struct notifier_block sensor_update_nb; @@ -198,13 +201,14 @@ static int scmi_iio_write_raw(struct iio_dev *iio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { + struct scmi_iio_priv *sensor = iio_priv(iio_dev); int err; switch (mask) { case IIO_CHAN_INFO_SAMP_FREQ: - mutex_lock(&iio_dev->mlock); + mutex_lock(&sensor->lock); err = scmi_iio_set_odr_val(iio_dev, val, val2); - mutex_unlock(&iio_dev->mlock); + mutex_unlock(&sensor->lock); return err; default: return -EINVAL; @@ -586,6 +590,7 @@ scmi_alloc_iiodev(struct scmi_device *sdev, sensor->sensor_info = sensor_info; sensor->sensor_update_nb.notifier_call = scmi_iio_sensor_update_cb; sensor->indio_dev = iiodev; + mutex_init(&sensor->lock); /* adding one additional channel for timestamp */ iiodev->num_channels = sensor_info->num_axis + 1; diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 35720c64fea8f0b0be96d87da40ac085696318f1..c77d7bdcc12163b974cb208906ac38db5adf34a8 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -219,47 +219,22 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable) } EXPORT_SYMBOL_NS(st_sensors_set_axis_enable, IIO_ST_SENSORS); -static void st_reg_disable(void *reg) -{ - regulator_disable(reg); -} int st_sensors_power_enable(struct iio_dev *indio_dev) { - struct st_sensor_data *pdata = iio_priv(indio_dev); + static const char * const regulator_names[] = { "vdd", "vddio" }; struct device *parent = 
indio_dev->dev.parent; int err; /* Regulators not mandatory, but if requested we should enable them. */ - pdata->vdd = devm_regulator_get(parent, "vdd"); - if (IS_ERR(pdata->vdd)) - return dev_err_probe(&indio_dev->dev, PTR_ERR(pdata->vdd), - "unable to get Vdd supply\n"); - - err = regulator_enable(pdata->vdd); - if (err != 0) { - dev_warn(&indio_dev->dev, - "Failed to enable specified Vdd supply\n"); - return err; - } - - err = devm_add_action_or_reset(parent, st_reg_disable, pdata->vdd); + err = devm_regulator_bulk_get_enable(parent, + ARRAY_SIZE(regulator_names), + regulator_names); if (err) - return err; + return dev_err_probe(&indio_dev->dev, err, + "unable to enable supplies\n"); - pdata->vdd_io = devm_regulator_get(parent, "vddio"); - if (IS_ERR(pdata->vdd_io)) - return dev_err_probe(&indio_dev->dev, PTR_ERR(pdata->vdd_io), - "unable to get Vdd_IO supply\n"); - - err = regulator_enable(pdata->vdd_io); - if (err != 0) { - dev_warn(&indio_dev->dev, - "Failed to enable specified Vdd_IO supply\n"); - return err; - } - - return devm_add_action_or_reset(parent, st_reg_disable, pdata->vdd_io); + return 0; } EXPORT_SYMBOL_NS(st_sensors_power_enable, IIO_ST_SENSORS); diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c index 4447b88118270730ed98157de87a7ad896f20e7c..f01249c1ba93cfbc89bf470b07bd8b49109b0e22 100644 --- a/drivers/iio/dac/ad5064.c +++ b/drivers/iio/dac/ad5064.c @@ -993,9 +993,9 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd, return 0; } -static int ad5064_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int ad5064_i2c_probe(struct i2c_client *i2c) { + const struct i2c_device_id *id = i2c_client_get_device_id(i2c); return ad5064_probe(&i2c->dev, id->driver_data, id->name, ad5064_i2c_write); } @@ -1056,7 +1056,7 @@ static struct i2c_driver ad5064_i2c_driver = { .driver = { .name = "ad5064", }, - .probe = ad5064_i2c_probe, + .probe_new = ad5064_i2c_probe, .id_table = ad5064_i2c_ids, }; diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c index a81bfa47a221a8c775f40372e5e73715b8caa5a2..64b4519f8f5e6d52a5b2387f44a59b653e17064d 100644 --- a/drivers/iio/dac/ad5380.c +++ b/drivers/iio/dac/ad5380.c @@ -546,9 +546,9 @@ static inline void ad5380_spi_unregister_driver(void) #if IS_ENABLED(CONFIG_I2C) -static int ad5380_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int ad5380_i2c_probe(struct i2c_client *i2c) { + const struct i2c_device_id *id = i2c_client_get_device_id(i2c); struct regmap *regmap; regmap = devm_regmap_init_i2c(i2c, &ad5380_regmap_config); @@ -589,7 +589,7 @@ static struct i2c_driver ad5380_i2c_driver = { .driver = { .name = "ad5380", }, - .probe = ad5380_i2c_probe, + .probe_new = ad5380_i2c_probe, .remove = ad5380_i2c_remove, .id_table = ad5380_i2c_ids, }; diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c index 7324065d3782173b05c344cef0e53b0a867e1cc6..aa3130b3345638e2d77056c942f7fe240e618140 100644 --- a/drivers/iio/dac/ad5446.c +++ b/drivers/iio/dac/ad5446.c @@ -568,9 +568,9 @@ static const struct ad5446_chip_info ad5446_i2c_chip_info[] = { }, }; -static int ad5446_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int ad5446_i2c_probe(struct i2c_client *i2c) { + const struct i2c_device_id *id = i2c_client_get_device_id(i2c); return ad5446_probe(&i2c->dev, id->name, &ad5446_i2c_chip_info[id->driver_data]); } @@ -595,7 +595,7 @@ static struct i2c_driver ad5446_i2c_driver = { .driver = { .name = "ad5446", }, - .probe = 
ad5446_i2c_probe, + .probe_new = ad5446_i2c_probe, .remove = ad5446_i2c_remove, .id_table = ad5446_i2c_ids, }; diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c index 8e5e014e0c28b1b207db462de8b6e390657e1784..d311567ab324a68db56bd80bb7f78ba716483c0e 100644 --- a/drivers/iio/dac/ad5593r.c +++ b/drivers/iio/dac/ad5593r.c @@ -99,9 +99,9 @@ static const struct ad5592r_rw_ops ad5593r_rw_ops = { .gpio_read = ad5593r_gpio_read, }; -static int ad5593r_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int ad5593r_i2c_probe(struct i2c_client *i2c) { + const struct i2c_device_id *id = i2c_client_get_device_id(i2c); if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C)) return -EOPNOTSUPP; @@ -138,7 +138,7 @@ static struct i2c_driver ad5593r_driver = { .of_match_table = ad5593r_of_match, .acpi_match_table = ad5593r_acpi_match, }, - .probe = ad5593r_i2c_probe, + .probe_new = ad5593r_i2c_probe, .remove = ad5593r_i2c_remove, .id_table = ad5593r_i2c_ids, }; diff --git a/drivers/iio/dac/ad5696-i2c.c b/drivers/iio/dac/ad5696-i2c.c index aa36cbf0137c1f7ef33d8d9f73af974435514313..160e80cf9135f5062b56c1f64dbd7fb7f170cdcf 100644 --- a/drivers/iio/dac/ad5696-i2c.c +++ b/drivers/iio/dac/ad5696-i2c.c @@ -58,9 +58,9 @@ static int ad5686_i2c_write(struct ad5686_state *st, return (ret != 3) ? -EIO : 0; } -static int ad5686_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int ad5686_i2c_probe(struct i2c_client *i2c) { + const struct i2c_device_id *id = i2c_client_get_device_id(i2c); return ad5686_probe(&i2c->dev, id->driver_data, id->name, ad5686_i2c_write, ad5686_i2c_read); } @@ -113,7 +113,7 @@ static struct i2c_driver ad5686_i2c_driver = { .name = "ad5696", .of_match_table = ad5686_of_match, }, - .probe = ad5686_i2c_probe, + .probe_new = ad5686_i2c_probe, .remove = ad5686_i2c_remove, .id_table = ad5686_i2c_id, }; diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c index 3e17a681174e8a91ff012d860ab3cae043a4bf49..a16a6a934d9d97b987028e2ba12788b68c44a7e4 100644 --- a/drivers/iio/dac/ds4424.c +++ b/drivers/iio/dac/ds4424.c @@ -213,9 +213,9 @@ static const struct iio_info ds4424_info = { .write_raw = ds4424_write_raw, }; -static int ds4424_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ds4424_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct ds4424_data *data; struct iio_dev *indio_dev; int ret; @@ -312,7 +312,7 @@ static struct i2c_driver ds4424_driver = { .of_match_table = ds4424_of_match, .pm = pm_sleep_ptr(&ds4424_pm_ops), }, - .probe = ds4424_probe, + .probe_new = ds4424_probe, .remove = ds4424_remove, .id_table = ds4424_id, }; diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c index 28bdde2d30889df87a427e71595cc2828abe35ca..fc8eb53c65be3b365f04a1b47e167613184d684e 100644 --- a/drivers/iio/dac/ltc2688.c +++ b/drivers/iio/dac/ltc2688.c @@ -84,7 +84,6 @@ struct ltc2688_chan { struct ltc2688_state { struct spi_device *spi; struct regmap *regmap; - struct regulator_bulk_data regulators[2]; struct ltc2688_chan channels[LTC2688_DAC_CHANNELS]; struct iio_chan_spec *iio_chan; /* lock to protect against multiple access to the device and shared data */ @@ -902,13 +901,6 @@ static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref) LTC2688_CONFIG_EXT_REF); } -static void ltc2688_disable_regulators(void *data) -{ - struct ltc2688_state *st = data; - - 
regulator_bulk_disable(ARRAY_SIZE(st->regulators), st->regulators); -} - static void ltc2688_disable_regulator(void *regulator) { regulator_disable(regulator); @@ -965,6 +957,7 @@ static const struct iio_info ltc2688_info = { static int ltc2688_probe(struct spi_device *spi) { + static const char * const regulators[] = { "vcc", "iovcc" }; struct ltc2688_state *st; struct iio_dev *indio_dev; struct regulator *vref_reg; @@ -988,21 +981,11 @@ static int ltc2688_probe(struct spi_device *spi) return dev_err_probe(dev, PTR_ERR(st->regmap), "Failed to init regmap"); - st->regulators[0].supply = "vcc"; - st->regulators[1].supply = "iovcc"; - ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(st->regulators), - st->regulators); - if (ret) - return dev_err_probe(dev, ret, "Failed to get regulators\n"); - - ret = regulator_bulk_enable(ARRAY_SIZE(st->regulators), st->regulators); + ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulators), + regulators); if (ret) return dev_err_probe(dev, ret, "Failed to enable regulators\n"); - ret = devm_add_action_or_reset(dev, ltc2688_disable_regulators, st); - if (ret) - return ret; - vref_reg = devm_regulator_get_optional(dev, "vref"); if (IS_ERR(vref_reg)) { if (PTR_ERR(vref_reg) != -ENODEV) diff --git a/drivers/iio/dac/m62332.c b/drivers/iio/dac/m62332.c index 5a812f87970c02a2c3ed0aa0445fb8a9c90d5d66..b692459b0f231599e31470a77bba096a2022f971 100644 --- a/drivers/iio/dac/m62332.c +++ b/drivers/iio/dac/m62332.c @@ -176,8 +176,7 @@ static const struct iio_chan_spec m62332_channels[M62332_CHANNELS] = { M62332_CHANNEL(1) }; -static int m62332_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int m62332_probe(struct i2c_client *client) { struct m62332_data *data; struct iio_dev *indio_dev; @@ -239,7 +238,7 @@ static struct i2c_driver m62332_driver = { .name = "m62332", .pm = pm_sleep_ptr(&m62332_pm_ops), }, - .probe = m62332_probe, + .probe_new = m62332_probe, .remove = m62332_remove, .id_table = m62332_id, }; diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c index 373ce6ff83b78363b10e57fa6253824f6c909bba..25967c39365d2bbd841b49da4d230e888b61bbd7 100644 --- a/drivers/iio/dac/max517.c +++ b/drivers/iio/dac/max517.c @@ -141,9 +141,9 @@ static const struct iio_chan_spec max517_channels[] = { MAX517_CHANNEL(7), }; -static int max517_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max517_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct max517_data *data; struct iio_dev *indio_dev; struct max517_platform_data *platform_data = client->dev.platform_data; @@ -203,7 +203,7 @@ static struct i2c_driver max517_driver = { .name = MAX517_DRV_NAME, .pm = pm_sleep_ptr(&max517_pm_ops), }, - .probe = max517_probe, + .probe_new = max517_probe, .id_table = max517_id, }; module_i2c_driver(max517_driver); diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c index e001b594d5b19a07a853355b67fca3bbaed59c38..23da345b9250cf7570bf752c0795ccbcac785de3 100644 --- a/drivers/iio/dac/max5821.c +++ b/drivers/iio/dac/max5821.c @@ -300,9 +300,9 @@ static void max5821_regulator_disable(void *reg) regulator_disable(reg); } -static int max5821_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max5821_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct max5821_data *data; struct iio_dev *indio_dev; u32 tmp; @@ -377,7 +377,7 @@ static struct i2c_driver max5821_driver = { 
.of_match_table = max5821_of_match, .pm = pm_sleep_ptr(&max5821_pm_ops), }, - .probe = max5821_probe, + .probe_new = max5821_probe, .id_table = max5821_id, }; module_i2c_driver(max5821_driver); diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c index 446d1a8fe4bef5fb20b3607ffafcc57f9b9656ee..46bf758760f85bbd9500da020c3740fef1529e11 100644 --- a/drivers/iio/dac/mcp4725.c +++ b/drivers/iio/dac/mcp4725.c @@ -369,9 +369,9 @@ static int mcp4725_probe_dt(struct device *dev, return 0; } -static int mcp4725_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mcp4725_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct mcp4725_data *data; struct iio_dev *indio_dev; struct mcp4725_platform_data *pdata, pdata_dt; @@ -524,7 +524,7 @@ static struct i2c_driver mcp4725_driver = { .of_match_table = mcp4725_of_match, .pm = pm_sleep_ptr(&mcp4725_pm_ops), }, - .probe = mcp4725_probe, + .probe_new = mcp4725_probe, .remove = mcp4725_remove, .id_table = mcp4725_id, }; diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c index 3210e3098f9ae277fd69d734fd5e324a22efcfdb..40191947fea321aa2ffe54aead757e9ac9110475 100644 --- a/drivers/iio/dac/ti-dac5571.c +++ b/drivers/iio/dac/ti-dac5571.c @@ -306,9 +306,9 @@ static const struct iio_info dac5571_info = { .write_raw_get_fmt = dac5571_write_raw_get_fmt, }; -static int dac5571_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int dac5571_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; const struct dac5571_spec *spec; struct dac5571_data *data; @@ -426,7 +426,7 @@ static struct i2c_driver dac5571_driver = { .name = "ti-dac5571", .of_match_table = dac5571_of_id, }, - .probe = dac5571_probe, + .probe_new = dac5571_probe, .remove = dac5571_remove, .id_table = dac5571_id, }; diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c index 68de45fe21b44854329764e95824e81ef5f52e1f..fe8d46cb7f1d1b2db1dfb75ac965381da0b7ac41 100644 --- a/drivers/iio/filter/admv8818.c +++ b/drivers/iio/filter/admv8818.c @@ -265,7 +265,7 @@ static int __admv8818_read_hpf_freq(struct admv8818_state *st, u64 *hpf_freq) return ret; hpf_band = FIELD_GET(ADMV8818_SW_IN_WR0_MSK, data); - if (!hpf_band) { + if (!hpf_band || hpf_band > 4) { *hpf_freq = 0; return ret; } @@ -303,7 +303,7 @@ static int __admv8818_read_lpf_freq(struct admv8818_state *st, u64 *lpf_freq) return ret; lpf_band = FIELD_GET(ADMV8818_SW_OUT_WR0_MSK, data); - if (!lpf_band) { + if (!lpf_band || lpf_band > 4) { *lpf_freq = 0; return ret; } diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig index f3702f36436cb85b78cd121fb754d91079f55d1d..9e85dfa585081cc02167a410cf4f353ec4cdc4a9 100644 --- a/drivers/iio/frequency/Kconfig +++ b/drivers/iio/frequency/Kconfig @@ -50,6 +50,16 @@ config ADF4371 To compile this driver as a module, choose M here: the module will be called adf4371. +config ADF4377 + tristate "Analog Devices ADF4377 Microwave Wideband Synthesizer" + depends on SPI && COMMON_CLK + help + Say yes here to build support for Analog Devices ADF4377 Microwave + Wideband Synthesizer. + + To compile this driver as a module, choose M here: the + module will be called adf4377. 
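(Usage sketch, not part of the patch: once this option is enabled and a device probes, the new adf4377 driver added below exposes its output frequency through an IIO_ALTVOLTAGE output channel, i.e. an out_altvoltage0_frequency sysfs attribute. The iio:device0 index and the 10 GHz value in this minimal userspace example are illustrative assumptions.)

    /* Hypothetical helper: tune the ADF4377 via its IIO sysfs attribute. */
    #include <stdio.h>
    #include <stdlib.h>

    #define FREQ_ATTR "/sys/bus/iio/devices/iio:device0/out_altvoltage0_frequency"

    int main(void)
    {
        unsigned long long freq = 10000000000ULL; /* 10 GHz, within CLKPN range */
        char buf[32];
        FILE *f;

        f = fopen(FREQ_ATTR, "w");
        if (!f) {
            perror("open for write");
            return EXIT_FAILURE;
        }
        /* The driver parses this with kstrtoull() in adf4377_write() */
        fprintf(f, "%llu\n", freq);
        fclose(f);

        f = fopen(FREQ_ATTR, "r");
        if (!f) {
            perror("open for read");
            return EXIT_FAILURE;
        }
        /* Read back the value emitted by sysfs_emit() in adf4377_read() */
        if (fgets(buf, sizeof(buf), f))
            printf("frequency: %s", buf);
        fclose(f);

        return EXIT_SUCCESS;
    }
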
+ config ADMV1013 tristate "Analog Devices ADMV1013 Microwave Upconverter" depends on SPI && COMMON_CLK diff --git a/drivers/iio/frequency/Makefile b/drivers/iio/frequency/Makefile index 48add732f1d3699107f0f558c5778ee14f6588ab..b616c29b4a08736515242558aa2d2c8645b55552 100644 --- a/drivers/iio/frequency/Makefile +++ b/drivers/iio/frequency/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_AD9523) += ad9523.o obj-$(CONFIG_ADF4350) += adf4350.o obj-$(CONFIG_ADF4371) += adf4371.o +obj-$(CONFIG_ADF4377) += adf4377.o obj-$(CONFIG_ADMV1013) += admv1013.o obj-$(CONFIG_ADMV1014) += admv1014.o obj-$(CONFIG_ADMV4420) += admv4420.o diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c index 97662ca1ca966f57b1163b1021e03b7f5e3a5e47..b391c6e27ab0dc5c7cac05116670533c04040077 100644 --- a/drivers/iio/frequency/ad9523.c +++ b/drivers/iio/frequency/ad9523.c @@ -265,7 +265,6 @@ enum { struct ad9523_state { struct spi_device *spi; - struct regulator *reg; struct ad9523_platform_data *pdata; struct iio_chan_spec ad9523_channels[AD9523_NUM_CHAN]; struct gpio_desc *pwrdown_gpio; @@ -969,13 +968,6 @@ static int ad9523_setup(struct iio_dev *indio_dev) return 0; } -static void ad9523_reg_disable(void *data) -{ - struct regulator *reg = data; - - regulator_disable(reg); -} - static int ad9523_probe(struct spi_device *spi) { struct ad9523_platform_data *pdata = spi->dev.platform_data; @@ -996,17 +988,9 @@ static int ad9523_probe(struct spi_device *spi) mutex_init(&st->lock); - st->reg = devm_regulator_get(&spi->dev, "vcc"); - if (!IS_ERR(st->reg)) { - ret = regulator_enable(st->reg); - if (ret) - return ret; - - ret = devm_add_action_or_reset(&spi->dev, ad9523_reg_disable, - st->reg); - if (ret) - return ret; - } + ret = devm_regulator_get_enable(&spi->dev, "vcc"); + if (ret) + return ret; st->pwrdown_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown", GPIOD_OUT_HIGH); diff --git a/drivers/iio/frequency/adf4377.c b/drivers/iio/frequency/adf4377.c new file mode 100644 index 0000000000000000000000000000000000000000..26abecbd51e06611b23f3d54fc09d32430216de4 --- /dev/null +++ b/drivers/iio/frequency/adf4377.c @@ -0,0 +1,994 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ADF4377 driver + * + * Copyright 2022 Analog Devices Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* ADF4377 REG0000 Map */ +#define ADF4377_0000_SOFT_RESET_R_MSK BIT(7) +#define ADF4377_0000_LSB_FIRST_R_MSK BIT(6) +#define ADF4377_0000_ADDRESS_ASC_R_MSK BIT(5) +#define ADF4377_0000_SDO_ACTIVE_R_MSK BIT(4) +#define ADF4377_0000_SDO_ACTIVE_MSK BIT(3) +#define ADF4377_0000_ADDRESS_ASC_MSK BIT(2) +#define ADF4377_0000_LSB_FIRST_MSK BIT(1) +#define ADF4377_0000_SOFT_RESET_MSK BIT(0) + +/* ADF4377 REG0000 Bit Definition */ +#define ADF4377_0000_SDO_ACTIVE_SPI_3W 0x0 +#define ADF4377_0000_SDO_ACTIVE_SPI_4W 0x1 + +#define ADF4377_0000_ADDR_ASC_AUTO_DECR 0x0 +#define ADF4377_0000_ADDR_ASC_AUTO_INCR 0x1 + +#define ADF4377_0000_LSB_FIRST_MSB 0x0 +#define ADF4377_0000_LSB_FIRST_LSB 0x1 + +#define ADF4377_0000_SOFT_RESET_N_OP 0x0 +#define ADF4377_0000_SOFT_RESET_EN 0x1 + +/* ADF4377 REG0001 Map */ +#define ADF4377_0001_SINGLE_INSTR_MSK BIT(7) +#define ADF4377_0001_MASTER_RB_CTRL_MSK BIT(5) + +/* ADF4377 REG0003 Bit Definition */ +#define ADF4377_0003_CHIP_TYPE 0x06 + +/* ADF4377 REG0004 Bit Definition */ +#define ADF4377_0004_PRODUCT_ID_LSB 0x0005 + +/* ADF4377 REG0005 Bit Definition */ +#define ADF4377_0005_PRODUCT_ID_MSB 0x0005 + +/* ADF4377 REG000A Map */ +#define ADF4377_000A_SCRATCHPAD_MSK GENMASK(7, 0) + +/* ADF4377 REG000C Bit Definition */ +#define ADF4377_000C_VENDOR_ID_LSB 0x56 + +/* ADF4377 REG000D Bit Definition */ +#define ADF4377_000D_VENDOR_ID_MSB 0x04 + +/* ADF4377 REG000F Bit Definition */ +#define ADF4377_000F_R00F_RSV1_MSK GENMASK(7, 0) + +/* ADF4377 REG0010 Map*/ +#define ADF4377_0010_N_INT_LSB_MSK GENMASK(7, 0) + +/* ADF4377 REG0011 Map*/ +#define ADF4377_0011_EN_AUTOCAL_MSK BIT(7) +#define ADF4377_0011_EN_RDBLR_MSK BIT(6) +#define ADF4377_0011_DCLK_DIV2_MSK GENMASK(5, 4) +#define ADF4377_0011_N_INT_MSB_MSK GENMASK(3, 0) + +/* ADF4377 REG0011 Bit Definition */ +#define ADF4377_0011_DCLK_DIV2_1 0x0 +#define ADF4377_0011_DCLK_DIV2_2 0x1 +#define ADF4377_0011_DCLK_DIV2_4 0x2 +#define ADF4377_0011_DCLK_DIV2_8 0x3 + +/* ADF4377 REG0012 Map*/ +#define ADF4377_0012_CLKOUT_DIV_MSK GENMASK(7, 6) +#define ADF4377_0012_R_DIV_MSK GENMASK(5, 0) + +/* ADF4377 REG0012 Bit Definition */ +#define ADF4377_0012_CLKOUT_DIV_1 0x0 +#define ADF4377_0012_CLKOUT_DIV_2 0x1 +#define ADF4377_0012_CLKOUT_DIV_4 0x2 +#define ADF4377_0012_CLKOUT_DIV_8 0x3 + +/* ADF4377 REG0013 Map */ +#define ADF4377_0013_M_VCO_CORE_MSK GENMASK(5, 4) +#define ADF4377_0013_VCO_BIAS_MSK GENMASK(3, 0) + +/* ADF4377 REG0013 Bit Definition */ +#define ADF4377_0013_M_VCO_0 0x0 +#define ADF4377_0013_M_VCO_1 0x1 +#define ADF4377_0013_M_VCO_2 0x2 +#define ADF4377_0013_M_VCO_3 0x3 + +/* ADF4377 REG0014 Map */ +#define ADF4377_0014_M_VCO_BAND_MSK GENMASK(7, 0) + +/* ADF4377 REG0015 Map */ +#define ADF4377_0015_BLEED_I_LSB_MSK GENMASK(7, 6) +#define ADF4377_0015_BLEED_POL_MSK BIT(5) +#define ADF4377_0015_EN_BLEED_MSK BIT(4) +#define ADF4377_0015_CP_I_MSK GENMASK(3, 0) + +/* ADF4377 REG0015 Bit Definition */ +#define ADF4377_CURRENT_SINK 0x0 +#define ADF4377_CURRENT_SOURCE 0x1 + +#define ADF4377_0015_CP_0MA7 0x0 +#define ADF4377_0015_CP_0MA9 0x1 +#define ADF4377_0015_CP_1MA1 0x2 +#define ADF4377_0015_CP_1MA3 0x3 +#define ADF4377_0015_CP_1MA4 0x4 +#define ADF4377_0015_CP_1MA8 0x5 +#define ADF4377_0015_CP_2MA2 0x6 +#define ADF4377_0015_CP_2MA5 0x7 +#define ADF4377_0015_CP_2MA9 0x8 +#define ADF4377_0015_CP_3MA6 0x9 +#define ADF4377_0015_CP_4MA3 0xA +#define ADF4377_0015_CP_5MA0 0xB 
+#define ADF4377_0015_CP_5MA7 0xC +#define ADF4377_0015_CP_7MA2 0xD +#define ADF4377_0015_CP_8MA6 0xE +#define ADF4377_0015_CP_10MA1 0xF + +/* ADF4377 REG0016 Map */ +#define ADF4377_0016_BLEED_I_MSB_MSK GENMASK(7, 0) + +/* ADF4377 REG0017 Map */ +#define ADF4377_0016_INV_CLKOUT_MSK BIT(7) +#define ADF4377_0016_N_DEL_MSK GENMASK(6, 0) + +/* ADF4377 REG0018 Map */ +#define ADF4377_0018_CMOS_OV_MSK BIT(7) +#define ADF4377_0018_R_DEL_MSK GENMASK(6, 0) + +/* ADF4377 REG0018 Bit Definition */ +#define ADF4377_0018_1V8_LOGIC 0x0 +#define ADF4377_0018_3V3_LOGIC 0x1 + +/* ADF4377 REG0019 Map */ +#define ADF4377_0019_CLKOUT2_OP_MSK GENMASK(7, 6) +#define ADF4377_0019_CLKOUT1_OP_MSK GENMASK(5, 4) +#define ADF4377_0019_PD_CLK_MSK BIT(3) +#define ADF4377_0019_PD_RDET_MSK BIT(2) +#define ADF4377_0019_PD_ADC_MSK BIT(1) +#define ADF4377_0019_PD_CALADC_MSK BIT(0) + +/* ADF4377 REG0019 Bit Definition */ +#define ADF4377_0019_CLKOUT_320MV 0x0 +#define ADF4377_0019_CLKOUT_420MV 0x1 +#define ADF4377_0019_CLKOUT_530MV 0x2 +#define ADF4377_0019_CLKOUT_640MV 0x3 + +/* ADF4377 REG001A Map */ +#define ADF4377_001A_PD_ALL_MSK BIT(7) +#define ADF4377_001A_PD_RDIV_MSK BIT(6) +#define ADF4377_001A_PD_NDIV_MSK BIT(5) +#define ADF4377_001A_PD_VCO_MSK BIT(4) +#define ADF4377_001A_PD_LD_MSK BIT(3) +#define ADF4377_001A_PD_PFDCP_MSK BIT(2) +#define ADF4377_001A_PD_CLKOUT1_MSK BIT(1) +#define ADF4377_001A_PD_CLKOUT2_MSK BIT(0) + +/* ADF4377 REG001B Map */ +#define ADF4377_001B_EN_LOL_MSK BIT(7) +#define ADF4377_001B_LDWIN_PW_MSK BIT(6) +#define ADF4377_001B_EN_LDWIN_MSK BIT(5) +#define ADF4377_001B_LD_COUNT_MSK GENMASK(4, 0) + +/* ADF4377 REG001B Bit Definition */ +#define ADF4377_001B_LDWIN_PW_NARROW 0x0 +#define ADF4377_001B_LDWIN_PW_WIDE 0x1 + +/* ADF4377 REG001C Map */ +#define ADF4377_001C_EN_DNCLK_MSK BIT(7) +#define ADF4377_001C_EN_DRCLK_MSK BIT(6) +#define ADF4377_001C_RST_LD_MSK BIT(2) +#define ADF4377_001C_R01C_RSV1_MSK BIT(0) + +/* ADF4377 REG001C Bit Definition */ +#define ADF4377_001C_RST_LD_INACTIVE 0x0 +#define ADF4377_001C_RST_LD_ACTIVE 0x1 + +#define ADF4377_001C_R01C_RSV1 0x1 + +/* ADF4377 REG001D Map */ +#define ADF4377_001D_MUXOUT_MSK GENMASK(7, 4) +#define ADF4377_001D_EN_CPTEST_MSK BIT(2) +#define ADF4377_001D_CP_DOWN_MSK BIT(1) +#define ADF4377_001D_CP_UP_MSK BIT(0) + +#define ADF4377_001D_EN_CPTEST_OFF 0x0 +#define ADF4377_001D_EN_CPTEST_ON 0x1 + +#define ADF4377_001D_CP_DOWN_OFF 0x0 +#define ADF4377_001D_CP_DOWN_ON 0x1 + +#define ADF4377_001D_CP_UP_OFF 0x0 +#define ADF4377_001D_CP_UP_ON 0x1 + +/* ADF4377 REG001F Map */ +#define ADF4377_001F_BST_REF_MSK BIT(7) +#define ADF4377_001F_FILT_REF_MSK BIT(6) +#define ADF4377_001F_REF_SEL_MSK BIT(5) +#define ADF4377_001F_R01F_RSV1_MSK GENMASK(4, 0) + +/* ADF4377 REG001F Bit Definition */ +#define ADF4377_001F_BST_LARGE_REF_IN 0x0 +#define ADF4377_001F_BST_SMALL_REF_IN 0x1 + +#define ADF4377_001F_FILT_REF_OFF 0x0 +#define ADF4377_001F_FILT_REF_ON 0x1 + +#define ADF4377_001F_REF_SEL_DMA 0x0 +#define ADF4377_001F_REF_SEL_LNA 0x1 + +#define ADF4377_001F_R01F_RSV1 0x7 + +/* ADF4377 REG0020 Map */ +#define ADF4377_0020_RST_SYS_MSK BIT(4) +#define ADF4377_0020_EN_ADC_CLK_MSK BIT(3) +#define ADF4377_0020_R020_RSV1_MSK BIT(0) + +/* ADF4377 REG0021 Bit Definition */ +#define ADF4377_0021_R021_RSV1 0xD3 + +/* ADF4377 REG0022 Bit Definition */ +#define ADF4377_0022_R022_RSV1 0x32 + +/* ADF4377 REG0023 Map */ +#define ADF4377_0023_CAT_CT_SEL BIT(7) +#define ADF4377_0023_R023_RSV1_MSK GENMASK(6, 0) + +/* ADF4377 REG0023 Bit Definition */ +#define ADF4377_0023_R023_RSV1 
0x18 + +/* ADF4377 REG0024 Map */ +#define ADF4377_0024_DCLK_MODE_MSK BIT(2) + +/* ADF4377 REG0025 Map */ +#define ADF4377_0025_CLKODIV_DB_MSK BIT(7) +#define ADF4377_0025_DCLK_DB_MSK BIT(6) +#define ADF4377_0025_R025_RSV1_MSK GENMASK(5, 0) + +/* ADF4377 REG0025 Bit Definition */ +#define ADF4377_0025_R025_RSV1 0x16 + +/* ADF4377 REG0026 Map */ +#define ADF4377_0026_VCO_BAND_DIV_MSK GENMASK(7, 0) + +/* ADF4377 REG0027 Map */ +#define ADF4377_0027_SYNTH_LOCK_TO_LSB_MSK GENMASK(7, 0) + +/* ADF4377 REG0028 Map */ +#define ADF4377_0028_O_VCO_DB_MSK BIT(7) +#define ADF4377_0028_SYNTH_LOCK_TO_MSB_MSK GENMASK(6, 0) + +/* ADF4377 REG0029 Map */ +#define ADF4377_0029_VCO_ALC_TO_LSB_MSK GENMASK(7, 0) + +/* ADF4377 REG002A Map */ +#define ADF4377_002A_DEL_CTRL_DB_MSK BIT(7) +#define ADF4377_002A_VCO_ALC_TO_MSB_MSK GENMASK(6, 0) + +/* ADF4377 REG002C Map */ +#define ADF4377_002C_R02C_RSV1 0xC0 + +/* ADF4377 REG002D Map */ +#define ADF4377_002D_ADC_CLK_DIV_MSK GENMASK(7, 0) + +/* ADF4377 REG002E Map */ +#define ADF4377_002E_EN_ADC_CNV_MSK BIT(7) +#define ADF4377_002E_EN_ADC_MSK BIT(1) +#define ADF4377_002E_ADC_A_CONV_MSK BIT(0) + +/* ADF4377 REG002E Bit Definition */ +#define ADF4377_002E_ADC_A_CONV_ADC_ST_CNV 0x0 +#define ADF4377_002E_ADC_A_CONV_VCO_CALIB 0x1 + +/* ADF4377 REG002F Map */ +#define ADF4377_002F_DCLK_DIV1_MSK GENMASK(1, 0) + +/* ADF4377 REG002F Bit Definition */ +#define ADF4377_002F_DCLK_DIV1_1 0x0 +#define ADF4377_002F_DCLK_DIV1_2 0x1 +#define ADF4377_002F_DCLK_DIV1_8 0x2 +#define ADF4377_002F_DCLK_DIV1_32 0x3 + +/* ADF4377 REG0031 Bit Definition */ +#define ADF4377_0031_R031_RSV1 0x09 + +/* ADF4377 REG0032 Map */ +#define ADF4377_0032_ADC_CLK_SEL_MSK BIT(6) +#define ADF4377_0032_R032_RSV1_MSK GENMASK(5, 0) + +/* ADF4377 REG0032 Bit Definition */ +#define ADF4377_0032_ADC_CLK_SEL_N_OP 0x0 +#define ADF4377_0032_ADC_CLK_SEL_SPI_CLK 0x1 + +#define ADF4377_0032_R032_RSV1 0x9 + +/* ADF4377 REG0033 Bit Definition */ +#define ADF4377_0033_R033_RSV1 0x18 + +/* ADF4377 REG0034 Bit Definition */ +#define ADF4377_0034_R034_RSV1 0x08 + +/* ADF4377 REG003A Bit Definition */ +#define ADF4377_003A_R03A_RSV1 0x5D + +/* ADF4377 REG003B Bit Definition */ +#define ADF4377_003B_R03B_RSV1 0x2B + +/* ADF4377 REG003D Map */ +#define ADF4377_003D_O_VCO_BAND_MSK BIT(3) +#define ADF4377_003D_O_VCO_CORE_MSK BIT(2) +#define ADF4377_003D_O_VCO_BIAS_MSK BIT(1) + +/* ADF4377 REG003D Bit Definition */ +#define ADF4377_003D_O_VCO_BAND_VCO_CALIB 0x0 +#define ADF4377_003D_O_VCO_BAND_M_VCO 0x1 + +#define ADF4377_003D_O_VCO_CORE_VCO_CALIB 0x0 +#define ADF4377_003D_O_VCO_CORE_M_VCO 0x1 + +#define ADF4377_003D_O_VCO_BIAS_VCO_CALIB 0x0 +#define ADF4377_003D_O_VCO_BIAS_M_VCO 0x1 + +/* ADF4377 REG0042 Map */ +#define ADF4377_0042_R042_RSV1 0x05 + +/* ADF4377 REG0045 Map */ +#define ADF4377_0045_ADC_ST_CNV_MSK BIT(0) + +/* ADF4377 REG0049 Map */ +#define ADF4377_0049_EN_CLK2_MSK BIT(7) +#define ADF4377_0049_EN_CLK1_MSK BIT(6) +#define ADF4377_0049_REF_OK_MSK BIT(3) +#define ADF4377_0049_ADC_BUSY_MSK BIT(2) +#define ADF4377_0049_FSM_BUSY_MSK BIT(1) +#define ADF4377_0049_LOCKED_MSK BIT(0) + +/* ADF4377 REG004B Map */ +#define ADF4377_004B_VCO_CORE_MSK GENMASK(1, 0) + +/* ADF4377 REG004C Map */ +#define ADF4377_004C_CHIP_TEMP_LSB_MSK GENMASK(7, 0) + +/* ADF4377 REG004D Map */ +#define ADF4377_004D_CHIP_TEMP_MSB_MSK BIT(0) + +/* ADF4377 REG004F Map */ +#define ADF4377_004F_VCO_BAND_MSK GENMASK(7, 0) + +/* ADF4377 REG0051 Map */ +#define ADF4377_0051_VCO_BIAS_MSK GENMASK(3, 0) + +/* ADF4377 REG0054 Map */ +#define 
ADF4377_0054_CHIP_VERSION_MSK GENMASK(7, 0) + +/* Specifications */ +#define ADF4377_SPI_READ_CMD BIT(7) +#define ADF4377_MAX_VCO_FREQ (12800ULL * HZ_PER_MHZ) +#define ADF4377_MIN_VCO_FREQ (6400ULL * HZ_PER_MHZ) +#define ADF4377_MAX_REFIN_FREQ (1000 * HZ_PER_MHZ) +#define ADF4377_MIN_REFIN_FREQ (10 * HZ_PER_MHZ) +#define ADF4377_MAX_FREQ_PFD (500 * HZ_PER_MHZ) +#define ADF4377_MIN_FREQ_PFD (3 * HZ_PER_MHZ) +#define ADF4377_MAX_CLKPN_FREQ ADF4377_MAX_VCO_FREQ +#define ADF4377_MIN_CLKPN_FREQ (ADF4377_MIN_VCO_FREQ / 8) +#define ADF4377_FREQ_PFD_80MHZ (80 * HZ_PER_MHZ) +#define ADF4377_FREQ_PFD_125MHZ (125 * HZ_PER_MHZ) +#define ADF4377_FREQ_PFD_160MHZ (160 * HZ_PER_MHZ) +#define ADF4377_FREQ_PFD_250MHZ (250 * HZ_PER_MHZ) +#define ADF4377_FREQ_PFD_320MHZ (320 * HZ_PER_MHZ) + +enum { + ADF4377_FREQ, +}; + +enum muxout_select_mode { + ADF4377_MUXOUT_HIGH_Z = 0x0, + ADF4377_MUXOUT_LKDET = 0x1, + ADF4377_MUXOUT_LOW = 0x2, + ADF4377_MUXOUT_DIV_RCLK_2 = 0x4, + ADF4377_MUXOUT_DIV_NCLK_2 = 0x5, + ADF4377_MUXOUT_HIGH = 0x8, +}; + +struct adf4377_state { + struct spi_device *spi; + struct regmap *regmap; + struct clk *clkin; + /* Protect against concurrent accesses to the device and data content */ + struct mutex lock; + struct notifier_block nb; + /* Reference Divider */ + unsigned int ref_div_factor; + /* PFD Frequency */ + unsigned int f_pfd; + /* Input Reference Clock */ + unsigned int clkin_freq; + /* CLKOUT Divider */ + u8 clkout_div_sel; + /* Feedback Divider (N) */ + u16 n_int; + u16 synth_lock_timeout; + u16 vco_alc_timeout; + u16 adc_clk_div; + u16 vco_band_div; + u8 dclk_div1; + u8 dclk_div2; + u8 dclk_mode; + unsigned int f_div_rclk; + enum muxout_select_mode muxout_select; + struct gpio_desc *gpio_ce; + struct gpio_desc *gpio_enclk1; + struct gpio_desc *gpio_enclk2; + u8 buf[2] __aligned(IIO_DMA_MINALIGN); +}; + +static const char * const adf4377_muxout_modes[] = { + [ADF4377_MUXOUT_HIGH_Z] = "high_z", + [ADF4377_MUXOUT_LKDET] = "lock_detect", + [ADF4377_MUXOUT_LOW] = "muxout_low", + [ADF4377_MUXOUT_DIV_RCLK_2] = "f_div_rclk_2", + [ADF4377_MUXOUT_DIV_NCLK_2] = "f_div_nclk_2", + [ADF4377_MUXOUT_HIGH] = "muxout_high", +}; + +static const struct reg_sequence adf4377_reg_defaults[] = { + { 0x42, ADF4377_0042_R042_RSV1 }, + { 0x3B, ADF4377_003B_R03B_RSV1 }, + { 0x3A, ADF4377_003A_R03A_RSV1 }, + { 0x34, ADF4377_0034_R034_RSV1 }, + { 0x33, ADF4377_0033_R033_RSV1 }, + { 0x32, ADF4377_0032_R032_RSV1 }, + { 0x31, ADF4377_0031_R031_RSV1 }, + { 0x2C, ADF4377_002C_R02C_RSV1 }, + { 0x25, ADF4377_0025_R025_RSV1 }, + { 0x23, ADF4377_0023_R023_RSV1 }, + { 0x22, ADF4377_0022_R022_RSV1 }, + { 0x21, ADF4377_0021_R021_RSV1 }, + { 0x1f, ADF4377_001F_R01F_RSV1 }, + { 0x1c, ADF4377_001C_R01C_RSV1 }, +}; + +static const struct regmap_config adf4377_regmap_config = { + .reg_bits = 16, + .val_bits = 8, + .read_flag_mask = BIT(7), + .max_register = 0x54, +}; + +static int adf4377_reg_access(struct iio_dev *indio_dev, + unsigned int reg, + unsigned int write_val, + unsigned int *read_val) +{ + struct adf4377_state *st = iio_priv(indio_dev); + + if (read_val) + return regmap_read(st->regmap, reg, read_val); + + return regmap_write(st->regmap, reg, write_val); +} + +static const struct iio_info adf4377_info = { + .debugfs_reg_access = &adf4377_reg_access, +}; + +static int adf4377_soft_reset(struct adf4377_state *st) +{ + unsigned int read_val; + int ret; + + ret = regmap_update_bits(st->regmap, 0x0, ADF4377_0000_SOFT_RESET_MSK | + ADF4377_0000_SOFT_RESET_R_MSK, + FIELD_PREP(ADF4377_0000_SOFT_RESET_MSK, 1) | + 
FIELD_PREP(ADF4377_0000_SOFT_RESET_R_MSK, 1));
+	if (ret)
+		return ret;
+
+	return regmap_read_poll_timeout(st->regmap, 0x0, read_val,
+					!(read_val & (ADF4377_0000_SOFT_RESET_R_MSK |
+					ADF4377_0000_SOFT_RESET_MSK)), 200, 200 * 100);
+}
+
+static int adf4377_get_freq(struct adf4377_state *st, u64 *freq)
+{
+	unsigned int ref_div_factor, n_int;
+	u64 clkin_freq;
+	int ret;
+
+	mutex_lock(&st->lock);
+	ret = regmap_read(st->regmap, 0x12, &ref_div_factor);
+	if (ret)
+		goto exit;
+
+	ret = regmap_bulk_read(st->regmap, 0x10, st->buf, sizeof(st->buf));
+	if (ret)
+		goto exit;
+
+	clkin_freq = clk_get_rate(st->clkin);
+	ref_div_factor = FIELD_GET(ADF4377_0012_R_DIV_MSK, ref_div_factor);
+	n_int = FIELD_GET(ADF4377_0010_N_INT_LSB_MSK | ADF4377_0011_N_INT_MSB_MSK,
+			  get_unaligned_le16(&st->buf));
+
+	*freq = div_u64(clkin_freq, ref_div_factor) * n_int;
+exit:
+	mutex_unlock(&st->lock);
+
+	return ret;
+}
+
+static int adf4377_set_freq(struct adf4377_state *st, u64 freq)
+{
+	unsigned int read_val;
+	u64 f_vco;
+	int ret;
+
+	mutex_lock(&st->lock);
+
+	if (freq > ADF4377_MAX_CLKPN_FREQ || freq < ADF4377_MIN_CLKPN_FREQ) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = regmap_update_bits(st->regmap, 0x1C, ADF4377_001C_EN_DNCLK_MSK |
+				 ADF4377_001C_EN_DRCLK_MSK,
+				 FIELD_PREP(ADF4377_001C_EN_DNCLK_MSK, 1) |
+				 FIELD_PREP(ADF4377_001C_EN_DRCLK_MSK, 1));
+	if (ret)
+		goto exit;
+
+	ret = regmap_update_bits(st->regmap, 0x11, ADF4377_0011_EN_AUTOCAL_MSK |
+				 ADF4377_0011_DCLK_DIV2_MSK,
+				 FIELD_PREP(ADF4377_0011_EN_AUTOCAL_MSK, 1) |
+				 FIELD_PREP(ADF4377_0011_DCLK_DIV2_MSK, st->dclk_div2));
+	if (ret)
+		goto exit;
+
+	ret = regmap_update_bits(st->regmap, 0x2E, ADF4377_002E_EN_ADC_CNV_MSK |
+				 ADF4377_002E_EN_ADC_MSK |
+				 ADF4377_002E_ADC_A_CONV_MSK,
+				 FIELD_PREP(ADF4377_002E_EN_ADC_CNV_MSK, 1) |
+				 FIELD_PREP(ADF4377_002E_EN_ADC_MSK, 1) |
+				 FIELD_PREP(ADF4377_002E_ADC_A_CONV_MSK,
+					    ADF4377_002E_ADC_A_CONV_VCO_CALIB));
+	if (ret)
+		goto exit;
+
+	ret = regmap_update_bits(st->regmap, 0x20, ADF4377_0020_EN_ADC_CLK_MSK,
+				 FIELD_PREP(ADF4377_0020_EN_ADC_CLK_MSK, 1));
+	if (ret)
+		goto exit;
+
+	ret = regmap_update_bits(st->regmap, 0x2F, ADF4377_002F_DCLK_DIV1_MSK,
+				 FIELD_PREP(ADF4377_002F_DCLK_DIV1_MSK, st->dclk_div1));
+	if (ret)
+		goto exit;
+
+	ret = regmap_update_bits(st->regmap, 0x24, ADF4377_0024_DCLK_MODE_MSK,
+				 FIELD_PREP(ADF4377_0024_DCLK_MODE_MSK, st->dclk_mode));
+	if (ret)
+		goto exit;
+
+	ret = regmap_write(st->regmap, 0x27,
+			   FIELD_PREP(ADF4377_0027_SYNTH_LOCK_TO_LSB_MSK,
+				      st->synth_lock_timeout));
+	if (ret)
+		goto exit;
+
+	ret = regmap_update_bits(st->regmap, 0x28, ADF4377_0028_SYNTH_LOCK_TO_MSB_MSK,
+				 FIELD_PREP(ADF4377_0028_SYNTH_LOCK_TO_MSB_MSK,
+					    st->synth_lock_timeout >> 8));
+	if (ret)
+		goto exit;
+
+	ret = regmap_write(st->regmap, 0x29,
+			   FIELD_PREP(ADF4377_0029_VCO_ALC_TO_LSB_MSK,
+				      st->vco_alc_timeout));
+	if (ret)
+		goto exit;
+
+	ret = regmap_update_bits(st->regmap, 0x2A, ADF4377_002A_VCO_ALC_TO_MSB_MSK,
+				 FIELD_PREP(ADF4377_002A_VCO_ALC_TO_MSB_MSK,
+					    st->vco_alc_timeout >> 8));
+	if (ret)
+		goto exit;
+
+	ret = regmap_write(st->regmap, 0x26,
+			   FIELD_PREP(ADF4377_0026_VCO_BAND_DIV_MSK, st->vco_band_div));
+	if (ret)
+		goto exit;
+
+	ret = regmap_write(st->regmap, 0x2D,
+			   FIELD_PREP(ADF4377_002D_ADC_CLK_DIV_MSK, st->adc_clk_div));
+	if (ret)
+		goto exit;
+
+	st->clkout_div_sel = 0;
+
+	f_vco = freq;
+
+	/* Scale up into the VCO range; each doubling adds one /2 output divider stage */
+	while (f_vco < ADF4377_MIN_VCO_FREQ) {
+		f_vco <<= 1;
+		st->clkout_div_sel++;
+	}
+
+	st->n_int = div_u64(freq, st->f_pfd);
+
+	ret = regmap_update_bits(st->regmap, 0x11,
ADF4377_0011_EN_RDBLR_MSK | + ADF4377_0011_N_INT_MSB_MSK, + FIELD_PREP(ADF4377_0011_EN_RDBLR_MSK, 0) | + FIELD_PREP(ADF4377_0011_N_INT_MSB_MSK, st->n_int >> 8)); + if (ret) + goto exit; + + ret = regmap_update_bits(st->regmap, 0x12, ADF4377_0012_R_DIV_MSK | + ADF4377_0012_CLKOUT_DIV_MSK, + FIELD_PREP(ADF4377_0012_CLKOUT_DIV_MSK, st->clkout_div_sel) | + FIELD_PREP(ADF4377_0012_R_DIV_MSK, st->ref_div_factor)); + if (ret) + goto exit; + + ret = regmap_write(st->regmap, 0x10, + FIELD_PREP(ADF4377_0010_N_INT_LSB_MSK, st->n_int)); + if (ret) + goto exit; + + ret = regmap_read_poll_timeout(st->regmap, 0x49, read_val, + !(read_val & (ADF4377_0049_FSM_BUSY_MSK)), 200, 200 * 100); + if (ret) + goto exit; + + /* Disable EN_DNCLK, EN_DRCLK */ + ret = regmap_update_bits(st->regmap, 0x1C, ADF4377_001C_EN_DNCLK_MSK | + ADF4377_001C_EN_DRCLK_MSK, + FIELD_PREP(ADF4377_001C_EN_DNCLK_MSK, 0) | + FIELD_PREP(ADF4377_001C_EN_DRCLK_MSK, 0)); + if (ret) + goto exit; + + /* Disable EN_ADC_CLK */ + ret = regmap_update_bits(st->regmap, 0x20, ADF4377_0020_EN_ADC_CLK_MSK, + FIELD_PREP(ADF4377_0020_EN_ADC_CLK_MSK, 0)); + if (ret) + goto exit; + + /* Set output Amplitude */ + ret = regmap_update_bits(st->regmap, 0x19, ADF4377_0019_CLKOUT2_OP_MSK | + ADF4377_0019_CLKOUT1_OP_MSK, + FIELD_PREP(ADF4377_0019_CLKOUT1_OP_MSK, + ADF4377_0019_CLKOUT_420MV) | + FIELD_PREP(ADF4377_0019_CLKOUT2_OP_MSK, + ADF4377_0019_CLKOUT_420MV)); + +exit: + mutex_unlock(&st->lock); + + return ret; +} + +static void adf4377_gpio_init(struct adf4377_state *st) +{ + if (st->gpio_ce) { + gpiod_set_value(st->gpio_ce, 1); + + /* Delay for SPI register bits to settle to their power-on reset state */ + fsleep(200); + } + + if (st->gpio_enclk1) + gpiod_set_value(st->gpio_enclk1, 1); + + if (st->gpio_enclk2) + gpiod_set_value(st->gpio_enclk2, 1); +} + +static int adf4377_init(struct adf4377_state *st) +{ + struct spi_device *spi = st->spi; + int ret; + + adf4377_gpio_init(st); + + ret = adf4377_soft_reset(st); + if (ret) { + dev_err(&spi->dev, "Failed to soft reset.\n"); + return ret; + } + + ret = regmap_multi_reg_write(st->regmap, adf4377_reg_defaults, + ARRAY_SIZE(adf4377_reg_defaults)); + if (ret) { + dev_err(&spi->dev, "Failed to set default registers.\n"); + return ret; + } + + ret = regmap_update_bits(st->regmap, 0x00, + ADF4377_0000_SDO_ACTIVE_MSK | ADF4377_0000_SDO_ACTIVE_R_MSK, + FIELD_PREP(ADF4377_0000_SDO_ACTIVE_MSK, + ADF4377_0000_SDO_ACTIVE_SPI_4W) | + FIELD_PREP(ADF4377_0000_SDO_ACTIVE_R_MSK, + ADF4377_0000_SDO_ACTIVE_SPI_4W)); + if (ret) { + dev_err(&spi->dev, "Failed to set 4-Wire Operation.\n"); + return ret; + } + + st->clkin_freq = clk_get_rate(st->clkin); + + /* Power Up */ + ret = regmap_write(st->regmap, 0x1a, + FIELD_PREP(ADF4377_001A_PD_ALL_MSK, 0) | + FIELD_PREP(ADF4377_001A_PD_RDIV_MSK, 0) | + FIELD_PREP(ADF4377_001A_PD_NDIV_MSK, 0) | + FIELD_PREP(ADF4377_001A_PD_VCO_MSK, 0) | + FIELD_PREP(ADF4377_001A_PD_LD_MSK, 0) | + FIELD_PREP(ADF4377_001A_PD_PFDCP_MSK, 0) | + FIELD_PREP(ADF4377_001A_PD_CLKOUT1_MSK, 0) | + FIELD_PREP(ADF4377_001A_PD_CLKOUT2_MSK, 0)); + if (ret) { + dev_err(&spi->dev, "Failed to set power down registers.\n"); + return ret; + } + + /* Set Mux Output */ + ret = regmap_update_bits(st->regmap, 0x1D, + ADF4377_001D_MUXOUT_MSK, + FIELD_PREP(ADF4377_001D_MUXOUT_MSK, st->muxout_select)); + if (ret) + return ret; + + /* Compute PFD */ + st->ref_div_factor = 0; + do { + st->ref_div_factor++; + st->f_pfd = st->clkin_freq / st->ref_div_factor; + } while (st->f_pfd > ADF4377_MAX_FREQ_PFD); + + if (st->f_pfd > 
ADF4377_MAX_FREQ_PFD || st->f_pfd < ADF4377_MIN_FREQ_PFD) + return -EINVAL; + + st->f_div_rclk = st->f_pfd; + + if (st->f_pfd <= ADF4377_FREQ_PFD_80MHZ) { + st->dclk_div1 = ADF4377_002F_DCLK_DIV1_1; + st->dclk_div2 = ADF4377_0011_DCLK_DIV2_1; + st->dclk_mode = 0; + } else if (st->f_pfd <= ADF4377_FREQ_PFD_125MHZ) { + st->dclk_div1 = ADF4377_002F_DCLK_DIV1_1; + st->dclk_div2 = ADF4377_0011_DCLK_DIV2_1; + st->dclk_mode = 1; + } else if (st->f_pfd <= ADF4377_FREQ_PFD_160MHZ) { + st->dclk_div1 = ADF4377_002F_DCLK_DIV1_2; + st->dclk_div2 = ADF4377_0011_DCLK_DIV2_1; + st->dclk_mode = 0; + st->f_div_rclk /= 2; + } else if (st->f_pfd <= ADF4377_FREQ_PFD_250MHZ) { + st->dclk_div1 = ADF4377_002F_DCLK_DIV1_2; + st->dclk_div2 = ADF4377_0011_DCLK_DIV2_1; + st->dclk_mode = 1; + st->f_div_rclk /= 2; + } else if (st->f_pfd <= ADF4377_FREQ_PFD_320MHZ) { + st->dclk_div1 = ADF4377_002F_DCLK_DIV1_2; + st->dclk_div2 = ADF4377_0011_DCLK_DIV2_2; + st->dclk_mode = 0; + st->f_div_rclk /= 4; + } else { + st->dclk_div1 = ADF4377_002F_DCLK_DIV1_2; + st->dclk_div2 = ADF4377_0011_DCLK_DIV2_2; + st->dclk_mode = 1; + st->f_div_rclk /= 4; + } + + st->synth_lock_timeout = DIV_ROUND_UP(st->f_div_rclk, 50000); + st->vco_alc_timeout = DIV_ROUND_UP(st->f_div_rclk, 20000); + st->vco_band_div = DIV_ROUND_UP(st->f_div_rclk, 150000 * 16 * (1 << st->dclk_mode)); + st->adc_clk_div = DIV_ROUND_UP((st->f_div_rclk / 400000 - 2), 4); + + return 0; +} + +static ssize_t adf4377_read(struct iio_dev *indio_dev, uintptr_t private, + const struct iio_chan_spec *chan, char *buf) +{ + struct adf4377_state *st = iio_priv(indio_dev); + u64 val = 0; + int ret; + + switch ((u32)private) { + case ADF4377_FREQ: + ret = adf4377_get_freq(st, &val); + if (ret) + return ret; + + return sysfs_emit(buf, "%llu\n", val); + default: + return -EINVAL; + } +} + +static ssize_t adf4377_write(struct iio_dev *indio_dev, uintptr_t private, + const struct iio_chan_spec *chan, const char *buf, + size_t len) +{ + struct adf4377_state *st = iio_priv(indio_dev); + unsigned long long freq; + int ret; + + switch ((u32)private) { + case ADF4377_FREQ: + ret = kstrtoull(buf, 10, &freq); + if (ret) + return ret; + + ret = adf4377_set_freq(st, freq); + if (ret) + return ret; + + return len; + default: + return -EINVAL; + } +} + +#define _ADF4377_EXT_INFO(_name, _shared, _ident) { \ + .name = _name, \ + .read = adf4377_read, \ + .write = adf4377_write, \ + .private = _ident, \ + .shared = _shared, \ + } + +static const struct iio_chan_spec_ext_info adf4377_ext_info[] = { + /* + * Usually we use IIO_CHAN_INFO_FREQUENCY, but there are + * values > 2^32 in order to support the entire frequency range + * in Hz. 
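+	 * The adf4377_read()/adf4377_write() callbacks above therefore move
+	 * the value as a decimal string (sysfs_emit()/kstrtoull()), so the
+	 * full u64 range stays addressable from userspace.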
+ */ + _ADF4377_EXT_INFO("frequency", IIO_SEPARATE, ADF4377_FREQ), + { } +}; + +static const struct iio_chan_spec adf4377_channels[] = { + { + .type = IIO_ALTVOLTAGE, + .indexed = 1, + .output = 1, + .channel = 0, + .ext_info = adf4377_ext_info, + }, +}; + +static int adf4377_properties_parse(struct adf4377_state *st) +{ + struct spi_device *spi = st->spi; + const char *str; + int ret; + + st->clkin = devm_clk_get_enabled(&spi->dev, "ref_in"); + if (IS_ERR(st->clkin)) + return dev_err_probe(&spi->dev, PTR_ERR(st->clkin), + "failed to get the reference input clock\n"); + + st->gpio_ce = devm_gpiod_get_optional(&st->spi->dev, "chip-enable", + GPIOD_OUT_LOW); + if (IS_ERR(st->gpio_ce)) + return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_ce), + "failed to get the CE GPIO\n"); + + st->gpio_enclk1 = devm_gpiod_get_optional(&st->spi->dev, "clk1-enable", + GPIOD_OUT_LOW); + if (IS_ERR(st->gpio_enclk1)) + return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk1), + "failed to get the CE GPIO\n"); + + st->gpio_enclk2 = devm_gpiod_get_optional(&st->spi->dev, "clk2-enable", + GPIOD_OUT_LOW); + if (IS_ERR(st->gpio_enclk2)) + return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk2), + "failed to get the CE GPIO\n"); + + ret = device_property_read_string(&spi->dev, "adi,muxout-select", &str); + if (ret) { + st->muxout_select = ADF4377_MUXOUT_HIGH_Z; + } else { + ret = match_string(adf4377_muxout_modes, ARRAY_SIZE(adf4377_muxout_modes), str); + if (ret < 0) + return ret; + + st->muxout_select = ret; + } + + return 0; +} + +static int adf4377_freq_change(struct notifier_block *nb, unsigned long action, void *data) +{ + struct adf4377_state *st = container_of(nb, struct adf4377_state, nb); + int ret; + + if (action == POST_RATE_CHANGE) { + mutex_lock(&st->lock); + ret = notifier_from_errno(adf4377_init(st)); + mutex_unlock(&st->lock); + return ret; + } + + return NOTIFY_OK; +} + +static int adf4377_probe(struct spi_device *spi) +{ + struct iio_dev *indio_dev; + struct regmap *regmap; + struct adf4377_state *st; + int ret; + + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + regmap = devm_regmap_init_spi(spi, &adf4377_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + st = iio_priv(indio_dev); + + indio_dev->info = &adf4377_info; + indio_dev->name = "adf4377"; + indio_dev->channels = adf4377_channels; + indio_dev->num_channels = ARRAY_SIZE(adf4377_channels); + + st->regmap = regmap; + st->spi = spi; + mutex_init(&st->lock); + + ret = adf4377_properties_parse(st); + if (ret) + return ret; + + st->nb.notifier_call = adf4377_freq_change; + ret = devm_clk_notifier_register(&spi->dev, st->clkin, &st->nb); + if (ret) + return ret; + + ret = adf4377_init(st); + if (ret) + return ret; + + return devm_iio_device_register(&spi->dev, indio_dev); +} + +static const struct spi_device_id adf4377_id[] = { + { "adf4377", 0 }, + {} +}; +MODULE_DEVICE_TABLE(spi, adf4377_id); + +static const struct of_device_id adf4377_of_match[] = { + { .compatible = "adi,adf4377" }, + {} +}; +MODULE_DEVICE_TABLE(of, adf4377_of_match); + +static struct spi_driver adf4377_driver = { + .driver = { + .name = "adf4377", + .of_match_table = adf4377_of_match, + }, + .probe = adf4377_probe, + .id_table = adf4377_id, +}; +module_spi_driver(adf4377_driver); + +MODULE_AUTHOR("Antoniu Miclaus "); +MODULE_DESCRIPTION("Analog Devices ADF4377"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c index 
71295709f2b969e32488b0e4648d14e0b0dca5e0..c95cf41be34b606bd971f7e9c272a2fed1ff473c 100644 --- a/drivers/iio/gyro/adis16136.c +++ b/drivers/iio/gyro/adis16136.c @@ -429,7 +429,7 @@ static int adis16136_initial_setup(struct iio_dev *indio_dev) uint16_t prod_id; int ret; - ret = adis_initial_startup(&adis16136->adis); + ret = __adis_initial_startup(&adis16136->adis); if (ret) return ret; diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c index eaf57bd339edd52ec9b2a470da82a56c97d7dc26..112d635b7dfd3ded387cc2336f6294d9f4ddf027 100644 --- a/drivers/iio/gyro/adis16260.c +++ b/drivers/iio/gyro/adis16260.c @@ -395,7 +395,7 @@ static int adis16260_probe(struct spi_device *spi) return ret; /* Get the device into a sane initial state */ - ret = adis_initial_startup(&adis16260->adis); + ret = __adis_initial_startup(&adis16260->adis); if (ret) return ret; diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index cedd9f02ea2112b2601c61b3d8ea06855ea79d92..0e2eb0e98235f547d4c7206ec59ea2a013f23f09 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -93,7 +93,6 @@ struct bmg160_data { struct regmap *regmap; - struct regulator_bulk_data regulators[2]; struct iio_trigger *dready_trig; struct iio_trigger *motion_trig; struct iio_mount_matrix orientation; @@ -1067,16 +1066,10 @@ static const char *bmg160_match_acpi_device(struct device *dev) return dev_name(dev); } -static void bmg160_disable_regulators(void *d) -{ - struct bmg160_data *data = d; - - regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators); -} - int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq, const char *name) { + static const char * const regulators[] = { "vdd", "vddio" }; struct bmg160_data *data; struct iio_dev *indio_dev; int ret; @@ -1090,22 +1083,11 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq, data->irq = irq; data->regmap = regmap; - data->regulators[0].supply = "vdd"; - data->regulators[1].supply = "vddio"; - ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->regulators), - data->regulators); + ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulators), + regulators); if (ret) return dev_err_probe(dev, ret, "Failed to get regulators\n"); - ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators), - data->regulators); - if (ret) - return ret; - - ret = devm_add_action_or_reset(dev, bmg160_disable_regulators, data); - if (ret) - return ret; - ret = iio_read_mount_matrix(dev, &data->orientation); if (ret) return ret; diff --git a/drivers/iio/gyro/bmg160_i2c.c b/drivers/iio/gyro/bmg160_i2c.c index 908ccc385254a8a7ad2d8403cab69f6f81d5f087..2b019ee5b2eb5ed1a27f7022698662150002a59d 100644 --- a/drivers/iio/gyro/bmg160_i2c.c +++ b/drivers/iio/gyro/bmg160_i2c.c @@ -13,9 +13,9 @@ static const struct regmap_config bmg160_regmap_i2c_conf = { .max_register = 0x3f }; -static int bmg160_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bmg160_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct regmap *regmap; const char *name = NULL; @@ -70,7 +70,7 @@ static struct i2c_driver bmg160_i2c_driver = { .of_match_table = bmg160_of_match, .pm = &bmg160_pm_ops, }, - .probe = bmg160_i2c_probe, + .probe_new = bmg160_i2c_probe, .remove = bmg160_i2c_remove, .id_table = bmg160_i2c_id, }; diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c index 
a36d71d9e3ea932a97ff38a2c84e30bf9d590239..3ea1d461308088b4e7df14f5c74a8190fb933e2f 100644 --- a/drivers/iio/gyro/fxas21002c_core.c +++ b/drivers/iio/gyro/fxas21002c_core.c @@ -998,7 +998,7 @@ pm_disable: return ret; } -EXPORT_SYMBOL_GPL(fxas21002c_core_probe); +EXPORT_SYMBOL_NS_GPL(fxas21002c_core_probe, IIO_FXAS21002C); void fxas21002c_core_remove(struct device *dev) { @@ -1009,9 +1009,9 @@ void fxas21002c_core_remove(struct device *dev) pm_runtime_disable(dev); pm_runtime_set_suspended(dev); } -EXPORT_SYMBOL_GPL(fxas21002c_core_remove); +EXPORT_SYMBOL_NS_GPL(fxas21002c_core_remove, IIO_FXAS21002C); -static int __maybe_unused fxas21002c_suspend(struct device *dev) +static int fxas21002c_suspend(struct device *dev) { struct fxas21002c_data *data = iio_priv(dev_get_drvdata(dev)); @@ -1021,7 +1021,7 @@ static int __maybe_unused fxas21002c_suspend(struct device *dev) return 0; } -static int __maybe_unused fxas21002c_resume(struct device *dev) +static int fxas21002c_resume(struct device *dev) { struct fxas21002c_data *data = iio_priv(dev_get_drvdata(dev)); int ret; @@ -1033,26 +1033,25 @@ static int __maybe_unused fxas21002c_resume(struct device *dev) return fxas21002c_mode_set(data, data->prev_mode); } -static int __maybe_unused fxas21002c_runtime_suspend(struct device *dev) +static int fxas21002c_runtime_suspend(struct device *dev) { struct fxas21002c_data *data = iio_priv(dev_get_drvdata(dev)); return fxas21002c_mode_set(data, FXAS21002C_MODE_READY); } -static int __maybe_unused fxas21002c_runtime_resume(struct device *dev) +static int fxas21002c_runtime_resume(struct device *dev) { struct fxas21002c_data *data = iio_priv(dev_get_drvdata(dev)); return fxas21002c_mode_set(data, FXAS21002C_MODE_ACTIVE); } -const struct dev_pm_ops fxas21002c_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(fxas21002c_suspend, fxas21002c_resume) - SET_RUNTIME_PM_OPS(fxas21002c_runtime_suspend, - fxas21002c_runtime_resume, NULL) +EXPORT_NS_GPL_DEV_PM_OPS(fxas21002c_pm_ops, IIO_FXAS21002C) = { + SYSTEM_SLEEP_PM_OPS(fxas21002c_suspend, fxas21002c_resume) + RUNTIME_PM_OPS(fxas21002c_runtime_suspend, fxas21002c_runtime_resume, + NULL) }; -EXPORT_SYMBOL_GPL(fxas21002c_pm_ops); MODULE_AUTHOR("Rui Miguel Silva "); MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/gyro/fxas21002c_i2c.c b/drivers/iio/gyro/fxas21002c_i2c.c index 13bb52c594d12cfc2a10a70ccbdf2e8c4b25da4c..9e2d0f34a672ea0a473b91cd1270bbbea3f31070 100644 --- a/drivers/iio/gyro/fxas21002c_i2c.c +++ b/drivers/iio/gyro/fxas21002c_i2c.c @@ -53,7 +53,7 @@ MODULE_DEVICE_TABLE(of, fxas21002c_i2c_of_match); static struct i2c_driver fxas21002c_i2c_driver = { .driver = { .name = "fxas21002c_i2c", - .pm = &fxas21002c_pm_ops, + .pm = pm_ptr(&fxas21002c_pm_ops), .of_match_table = fxas21002c_i2c_of_match, }, .probe_new = fxas21002c_i2c_probe, @@ -65,3 +65,4 @@ module_i2c_driver(fxas21002c_i2c_driver); MODULE_AUTHOR("Rui Miguel Silva "); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("FXAS21002C I2C Gyro driver"); +MODULE_IMPORT_NS(IIO_FXAS21002C); diff --git a/drivers/iio/gyro/fxas21002c_spi.c b/drivers/iio/gyro/fxas21002c_spi.c index c3ac169facf9beaf0ce52546c6c8fe0da9a95892..4f633826547c20f2cf6ffd39793f45803685e6f1 100644 --- a/drivers/iio/gyro/fxas21002c_spi.c +++ b/drivers/iio/gyro/fxas21002c_spi.c @@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(of, fxas21002c_spi_of_match); static struct spi_driver fxas21002c_spi_driver = { .driver = { .name = "fxas21002c_spi", - .pm = &fxas21002c_pm_ops, + .pm = pm_ptr(&fxas21002c_pm_ops), .of_match_table = fxas21002c_spi_of_match, }, .probe = 
fxas21002c_spi_probe, @@ -66,3 +66,4 @@ module_spi_driver(fxas21002c_spi_driver); MODULE_AUTHOR("Rui Miguel Silva "); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("FXAS21002C SPI Gyro driver"); +MODULE_IMPORT_NS(IIO_FXAS21002C); diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c index 4215015845875131d3e535ac14633ad5cc27f2bf..ceacd863d3eaaf23be14418e72fc5c164363c1be 100644 --- a/drivers/iio/gyro/itg3200_core.c +++ b/drivers/iio/gyro/itg3200_core.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -131,6 +132,7 @@ static int itg3200_write_raw(struct iio_dev *indio_dev, int val2, long mask) { + struct itg3200 *st = iio_priv(indio_dev); int ret; u8 t; @@ -139,11 +141,11 @@ static int itg3200_write_raw(struct iio_dev *indio_dev, if (val == 0 || val2 != 0) return -EINVAL; - mutex_lock(&indio_dev->mlock); + mutex_lock(&st->lock); ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_DLPF, &t); if (ret) { - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&st->lock); return ret; } t = ((t & ITG3200_DLPF_CFG_MASK) ? 1000u : 8000u) / val - 1; @@ -152,7 +154,7 @@ static int itg3200_write_raw(struct iio_dev *indio_dev, ITG3200_REG_SAMPLE_RATE_DIV, t); - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&st->lock); return ret; default: @@ -293,8 +295,7 @@ static const struct iio_info itg3200_info = { static const unsigned long itg3200_available_scan_masks[] = { 0xffffffff, 0x0 }; -static int itg3200_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int itg3200_probe(struct i2c_client *client) { int ret; struct itg3200 *st; @@ -336,6 +337,8 @@ static int itg3200_probe(struct i2c_client *client, if (ret) goto error_remove_trigger; + mutex_init(&st->lock); + ret = iio_device_register(indio_dev); if (ret) goto error_remove_trigger; @@ -402,7 +405,7 @@ static struct i2c_driver itg3200_driver = { .pm = pm_sleep_ptr(&itg3200_pm_ops), }, .id_table = itg3200_id, - .probe = itg3200_probe, + .probe_new = itg3200_probe, .remove = itg3200_remove, }; diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c index 12e3afa9dd11dec7605b70671b026e6f11a42268..2116798226bf28b6440540eef7c42442b756a84b 100644 --- a/drivers/iio/gyro/mpu3050-i2c.c +++ b/drivers/iio/gyro/mpu3050-i2c.c @@ -32,9 +32,9 @@ static int mpu3050_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id) return 0; } -static int mpu3050_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mpu3050_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct regmap *regmap; const char *name; struct mpu3050 *mpu3050; @@ -108,7 +108,7 @@ static const struct of_device_id mpu3050_i2c_of_match[] = { MODULE_DEVICE_TABLE(of, mpu3050_i2c_of_match); static struct i2c_driver mpu3050_i2c_driver = { - .probe = mpu3050_i2c_probe, + .probe_new = mpu3050_i2c_probe, .remove = mpu3050_i2c_remove, .id_table = mpu3050_i2c_id, .driver = { diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c index 8c7af42b655874b8f8d662fde4d2a4295cda1301..797a1c6a0402063f8124b8f79d00fe69b165d71f 100644 --- a/drivers/iio/gyro/st_gyro_i2c.c +++ b/drivers/iio/gyro/st_gyro_i2c.c @@ -58,8 +58,7 @@ static const struct of_device_id st_gyro_of_match[] = { }; MODULE_DEVICE_TABLE(of, st_gyro_of_match); -static int st_gyro_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int st_gyro_i2c_probe(struct i2c_client *client) { const struct st_sensor_settings *settings; struct 
st_sensor_data *gdata; @@ -112,7 +111,7 @@ static struct i2c_driver st_gyro_driver = { .name = "st-gyro-i2c", .of_match_table = st_gyro_of_match, }, - .probe = st_gyro_i2c_probe, + .probe_new = st_gyro_i2c_probe, .id_table = st_gyro_id_table, }; module_i2c_driver(st_gyro_driver); diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c index 836da31b7e30cb67eeee3daf748aa656c5e37745..21a6378b70524930e038ca0eea0bc1570b14587c 100644 --- a/drivers/iio/health/afe4404.c +++ b/drivers/iio/health/afe4404.c @@ -461,8 +461,7 @@ static int afe4404_resume(struct device *dev) static DEFINE_SIMPLE_DEV_PM_OPS(afe4404_pm_ops, afe4404_suspend, afe4404_resume); -static int afe4404_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int afe4404_probe(struct i2c_client *client) { struct iio_dev *indio_dev; struct afe4404_data *afe; @@ -610,7 +609,7 @@ static struct i2c_driver afe4404_i2c_driver = { .of_match_table = afe4404_of_match, .pm = pm_sleep_ptr(&afe4404_pm_ops), }, - .probe = afe4404_probe, + .probe_new = afe4404_probe, .remove = afe4404_remove, .id_table = afe4404_ids, }; diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c index 2cca5e0519f89d5f5a2199014bbe59302a8a96d5..a80fa9852c220fe995c0515be9b0e45ae97aa7bc 100644 --- a/drivers/iio/health/max30100.c +++ b/drivers/iio/health/max30100.c @@ -387,18 +387,21 @@ static int max30100_read_raw(struct iio_dev *indio_dev, * Temperature reading can only be acquired while engine * is running */ - mutex_lock(&indio_dev->mlock); - - if (!iio_buffer_enabled(indio_dev)) + if (iio_device_claim_buffer_mode(indio_dev)) { + /* + * Replacing -EBUSY or other error code + * returned by iio_device_claim_buffer_mode() + * because user space may rely on the current + * one. + */ ret = -EAGAIN; - else { + } else { ret = max30100_get_temp(data, val); if (!ret) ret = IIO_VAL_INT; + iio_device_release_buffer_mode(indio_dev); } - - mutex_unlock(&indio_dev->mlock); break; case IIO_CHAN_INFO_SCALE: *val = 1; /* 0.0625 */ @@ -414,8 +417,7 @@ static const struct iio_info max30100_info = { .read_raw = max30100_read_raw, }; -static int max30100_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max30100_probe(struct i2c_client *client) { struct max30100_data *data; struct iio_dev *indio_dev; @@ -497,7 +499,7 @@ static struct i2c_driver max30100_driver = { .name = MAX30100_DRV_NAME, .of_match_table = max30100_dt_ids, }, - .probe = max30100_probe, + .probe_new = max30100_probe, .remove = max30100_remove, .id_table = max30100_id, }; diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c index 437298a29f2d323dcad83ece6438cb857a0d5d58..7edcf9e05687bf8475eb2a989c187a523c594942 100644 --- a/drivers/iio/health/max30102.c +++ b/drivers/iio/health/max30102.c @@ -477,12 +477,23 @@ static int max30102_read_raw(struct iio_dev *indio_dev, * Temperature reading can only be acquired when not in * shutdown; leave shutdown briefly when buffer not running */ - mutex_lock(&indio_dev->mlock); - if (!iio_buffer_enabled(indio_dev)) +any_mode_retry: + if (iio_device_claim_buffer_mode(indio_dev)) { + /* + * This one is a *bit* hacky. If we cannot claim buffer + * mode, then try direct mode so that we make sure + * things cannot concurrently change. And we just keep + * trying until we get one of the modes... 
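+ * In sketch form, the claim/release pairing used here (the
+ * read_temp*() helpers are illustrative, not this driver's names):
+ *
+ *	if (!iio_device_claim_buffer_mode(indio_dev)) {
+ *		ret = read_temp(data, val);	/* engine running */
+ *		iio_device_release_buffer_mode(indio_dev);
+ *	} else if (!iio_device_claim_direct_mode(indio_dev)) {
+ *		ret = read_temp_oneshot(data, val);
+ *		iio_device_release_direct_mode(indio_dev);
+ *	} else {
+ *		ret = -EBUSY;	/* both modes contended, retry */
+ *	}
+ *
+ * with the release always matching the mode actually claimed.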
+ */ + if (iio_device_claim_direct_mode(indio_dev)) + goto any_mode_retry; + ret = max30102_get_temp(data, val, true); - else + iio_device_release_direct_mode(indio_dev); + } else { ret = max30102_get_temp(data, val, false); - mutex_unlock(&indio_dev->mlock); + iio_device_release_buffer_mode(indio_dev); + } if (ret) return ret; @@ -502,9 +513,9 @@ static const struct iio_info max30102_info = { .read_raw = max30102_read_raw, }; -static int max30102_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max30102_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct max30102_data *data; struct iio_dev *indio_dev; int ret; @@ -620,7 +631,7 @@ static struct i2c_driver max30102_driver = { .name = MAX30102_DRV_NAME, .of_match_table = max30102_dt_ids, }, - .probe = max30102_probe, + .probe_new = max30102_probe, .remove = max30102_remove, .id_table = max30102_id, }; diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c index 4a39f10193471a485ffe92f05c08f4b171032d2e..f246516bd45eab7b886fd7893c7f0574aba1ded7 100644 --- a/drivers/iio/humidity/am2315.c +++ b/drivers/iio/humidity/am2315.c @@ -218,8 +218,7 @@ static const struct iio_info am2315_info = { .read_raw = am2315_read_raw, }; -static int am2315_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int am2315_probe(struct i2c_client *client) { int ret; struct iio_dev *indio_dev; @@ -263,7 +262,7 @@ static struct i2c_driver am2315_driver = { .driver = { .name = "am2315", }, - .probe = am2315_probe, + .probe_new = am2315_probe, .id_table = am2315_i2c_id, }; diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index 47f8e8ef56d6873a7e4cd4736b88a2610e64ea41..49a950d739e44301c431b3a47fc554a948d388f5 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -351,8 +351,7 @@ static const struct iio_info hdc100x_info = { .attrs = &hdc100x_attribute_group, }; -static int hdc100x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int hdc100x_probe(struct i2c_client *client) { struct iio_dev *indio_dev; struct hdc100x_data *data; @@ -429,7 +428,7 @@ static struct i2c_driver hdc100x_driver = { .of_match_table = hdc100x_dt_ids, .acpi_match_table = hdc100x_acpi_match, }, - .probe = hdc100x_probe, + .probe_new = hdc100x_probe, .id_table = hdc100x_id, }; module_i2c_driver(hdc100x_driver); diff --git a/drivers/iio/humidity/hdc2010.c b/drivers/iio/humidity/hdc2010.c index d6858ccb056e4eeaadfb796e83f43ea6c57970cd..c8fddd612e060a77d79a85bab1c19cdd562e07e8 100644 --- a/drivers/iio/humidity/hdc2010.c +++ b/drivers/iio/humidity/hdc2010.c @@ -251,8 +251,7 @@ static const struct iio_info hdc2010_info = { .attrs = &hdc2010_attribute_group, }; -static int hdc2010_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int hdc2010_probe(struct i2c_client *client) { struct iio_dev *indio_dev; struct hdc2010_data *data; @@ -339,7 +338,7 @@ static struct i2c_driver hdc2010_driver = { .name = "hdc2010", .of_match_table = hdc2010_dt_ids, }, - .probe = hdc2010_probe, + .probe_new = hdc2010_probe, .remove = hdc2010_remove, .id_table = hdc2010_id, }; diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h index cf3d8d2dccd6bd402a35d6be74f1718dd24bcf4b..721359e226cb97f7df1c32ad5ddfa20913d82592 100644 --- a/drivers/iio/humidity/hts221.h +++ b/drivers/iio/humidity/hts221.h @@ -13,7 +13,6 @@ #define HTS221_DEV_NAME "hts221" #include -#include enum 
hts221_sensor_type { HTS221_SENSOR_H, @@ -30,7 +29,6 @@ struct hts221_hw { const char *name; struct device *dev; struct regmap *regmap; - struct regulator *vdd; struct iio_trigger *trig; int irq; diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c index 517158307d8c22ca129e1b5a4c060487c30e43cd..2a413da87b760f42e74a80e7515606743eb3d1c4 100644 --- a/drivers/iio/humidity/hts221_core.c +++ b/drivers/iio/humidity/hts221_core.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include "hts221.h" @@ -549,33 +550,17 @@ static const unsigned long hts221_scan_masks[] = {0x3, 0x0}; static int hts221_init_regulators(struct device *dev) { - struct iio_dev *iio_dev = dev_get_drvdata(dev); - struct hts221_hw *hw = iio_priv(iio_dev); int err; - hw->vdd = devm_regulator_get(dev, "vdd"); - if (IS_ERR(hw->vdd)) - return dev_err_probe(dev, PTR_ERR(hw->vdd), - "failed to get vdd regulator\n"); - - err = regulator_enable(hw->vdd); - if (err) { - dev_err(dev, "failed to enable vdd regulator: %d\n", err); - return err; - } + err = devm_regulator_get_enable(dev, "vdd"); + if (err) + return dev_err_probe(dev, err, "failed to get vdd regulator\n"); msleep(50); return 0; } -static void hts221_chip_uninit(void *data) -{ - struct hts221_hw *hw = data; - - regulator_disable(hw->vdd); -} - int hts221_probe(struct device *dev, int irq, const char *name, struct regmap *regmap) { @@ -600,10 +585,6 @@ int hts221_probe(struct device *dev, int irq, const char *name, if (err) return err; - err = devm_add_action_or_reset(dev, hts221_chip_uninit, hw); - if (err) - return err; - err = hts221_check_whoami(hw); if (err < 0) return err; diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c index afbc611f7712f53bbe08c3a8bc75ee8fadbce4de..d81869423cf031564a07d095e20339b68bbaf918 100644 --- a/drivers/iio/humidity/hts221_i2c.c +++ b/drivers/iio/humidity/hts221_i2c.c @@ -25,8 +25,7 @@ static const struct regmap_config hts221_i2c_regmap_config = { .read_flag_mask = HTS221_I2C_AUTO_INCREMENT, }; -static int hts221_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int hts221_i2c_probe(struct i2c_client *client) { struct regmap *regmap; @@ -66,7 +65,7 @@ static struct i2c_driver hts221_driver = { .of_match_table = hts221_i2c_of_match, .acpi_match_table = ACPI_PTR(hts221_acpi_match), }, - .probe = hts221_i2c_probe, + .probe_new = hts221_i2c_probe, .id_table = hts221_i2c_id_table, }; module_i2c_driver(hts221_driver); diff --git a/drivers/iio/humidity/htu21.c b/drivers/iio/humidity/htu21.c index fd9e2565f8a2d8d8bcc69f7aaa2e48a3585067e2..8411a9f3e828aaca3cb2cbdc9ddbc083a76c2b4d 100644 --- a/drivers/iio/humidity/htu21.c +++ b/drivers/iio/humidity/htu21.c @@ -177,9 +177,9 @@ static const struct iio_info htu21_info = { .attrs = &htu21_attribute_group, }; -static int htu21_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int htu21_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct ms_ht_dev *dev_data; struct iio_dev *indio_dev; int ret; @@ -244,7 +244,7 @@ static const struct of_device_id htu21_of_match[] = { MODULE_DEVICE_TABLE(of, htu21_of_match); static struct i2c_driver htu21_driver = { - .probe = htu21_probe, + .probe_new = htu21_probe, .id_table = htu21_id, .driver = { .name = "htu21", diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c index 160b3d92df619c3ece14db020b84d8966b748d8f..fa1faf168c8d9e00304cf0e7d878ee429b186e94 100644 
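bmg160 (above), hts221 (here), st_lsm6dsx and st_lsm9ds0 (below) all collapse open-coded regulator handling into the same devm pattern. A minimal sketch, assuming a single "vdd" supply and a hypothetical foo driver:

	static int foo_power_init(struct device *dev)
	{
		int err;

		/*
		 * Get the supply, enable it, and register a devm action
		 * that disables it again on detach: no struct regulator
		 * pointer has to live in the driver state anymore.
		 */
		err = devm_regulator_get_enable(dev, "vdd");
		if (err)
			return dev_err_probe(dev, err, "failed to enable vdd\n");

		return 0;
	}

For several supplies, devm_regulator_bulk_get_enable() takes a static array of names instead, as the bmg160 and st_lsm6dsx hunks do.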
--- a/drivers/iio/humidity/si7005.c +++ b/drivers/iio/humidity/si7005.c @@ -123,8 +123,7 @@ static const struct iio_info si7005_info = { .read_raw = si7005_read_raw, }; -static int si7005_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int si7005_probe(struct i2c_client *client) { struct iio_dev *indio_dev; struct si7005_data *data; @@ -174,7 +173,7 @@ static struct i2c_driver si7005_driver = { .driver = { .name = "si7005", }, - .probe = si7005_probe, + .probe_new = si7005_probe, .id_table = si7005_id, }; module_i2c_driver(si7005_driver); diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c index ab6537f136ba1bb9263ffa8f585ef7a64e70041b..3e50592e8e68bee1b1343422a4db19bb2674e9a7 100644 --- a/drivers/iio/humidity/si7020.c +++ b/drivers/iio/humidity/si7020.c @@ -103,8 +103,7 @@ static const struct iio_info si7020_info = { .read_raw = si7020_read_raw, }; -static int si7020_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int si7020_probe(struct i2c_client *client) { struct iio_dev *indio_dev; struct i2c_client **data; @@ -156,7 +155,7 @@ static struct i2c_driver si7020_driver = { .name = "si7020", .of_match_table = si7020_dt_ids, }, - .probe = si7020_probe, + .probe_new = si7020_probe, .id_table = si7020_id, }; diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c index f7fcfd04f659da8cec1138f40203aa5b5960f20d..bc40240b29e26b1887e5b9d0e43f91aeef5d3774 100644 --- a/drivers/iio/imu/adis.c +++ b/drivers/iio/imu/adis.c @@ -270,23 +270,19 @@ EXPORT_SYMBOL_NS(adis_debugfs_reg_access, IIO_ADISLIB); #endif /** - * adis_enable_irq() - Enable or disable data ready IRQ + * __adis_enable_irq() - Enable or disable data ready IRQ (unlocked) * @adis: The adis device * @enable: Whether to enable the IRQ * * Returns 0 on success, negative error code otherwise */ -int adis_enable_irq(struct adis *adis, bool enable) +int __adis_enable_irq(struct adis *adis, bool enable) { - int ret = 0; + int ret; u16 msc; - mutex_lock(&adis->state_lock); - - if (adis->data->enable_irq) { - ret = adis->data->enable_irq(adis, enable); - goto out_unlock; - } + if (adis->data->enable_irq) + return adis->data->enable_irq(adis, enable); if (adis->data->unmasked_drdy) { if (enable) @@ -294,12 +290,12 @@ int adis_enable_irq(struct adis *adis, bool enable) else disable_irq(adis->spi->irq); - goto out_unlock; + return 0; } ret = __adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc); if (ret) - goto out_unlock; + return ret; msc |= ADIS_MSC_CTRL_DATA_RDY_POL_HIGH; msc &= ~ADIS_MSC_CTRL_DATA_RDY_DIO2; @@ -308,13 +304,9 @@ int adis_enable_irq(struct adis *adis, bool enable) else msc &= ~ADIS_MSC_CTRL_DATA_RDY_EN; - ret = __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc); - -out_unlock: - mutex_unlock(&adis->state_lock); - return ret; + return __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc); } -EXPORT_SYMBOL_NS(adis_enable_irq, IIO_ADISLIB); +EXPORT_SYMBOL_NS(__adis_enable_irq, IIO_ADISLIB); /** * __adis_check_status() - Check the device for error conditions (unlocked) @@ -445,7 +437,7 @@ int __adis_initial_startup(struct adis *adis) * with 'IRQF_NO_AUTOEN' anyways. 
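 * (__adis_enable_irq() below is the unlocked variant; by the library's
 * convention the __-prefixed helpers leave adis->state_lock to the
 * caller, who would otherwise wrap the call as:
 *
 *	mutex_lock(&adis->state_lock);
 *	ret = __adis_enable_irq(adis, enable);
 *	mutex_unlock(&adis->state_lock);
 *
 * During initial startup serialization is the caller's responsibility,
 * so no locking is taken here.)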
*/ if (!adis->data->unmasked_drdy) - adis_enable_irq(adis, false); + __adis_enable_irq(adis, false); if (!adis->data->prod_id_reg) return 0; diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c index 17bb0c40a1495cb55c677633427c40c8404e2b2b..c02fc35dceb40407fbdbdb17b7d9571760440217 100644 --- a/drivers/iio/imu/adis16400.c +++ b/drivers/iio/imu/adis16400.c @@ -445,7 +445,7 @@ static int adis16400_initial_setup(struct iio_dev *indio_dev) st->adis.spi->mode = SPI_MODE_3; spi_setup(st->adis.spi); - ret = adis_initial_startup(&st->adis); + ret = __adis_initial_startup(&st->adis); if (ret) return ret; diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c index d93f4fa2ad55425e9efafb6a17c5df846a8b21cf..2ca907d396a01579e724c11fc633af6d7a6b1125 100644 --- a/drivers/iio/imu/bmi160/bmi160_i2c.c +++ b/drivers/iio/imu/bmi160/bmi160_i2c.c @@ -15,9 +15,9 @@ #include "bmi160.h" -static int bmi160_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bmi160_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct regmap *regmap; const char *name; @@ -60,7 +60,7 @@ static struct i2c_driver bmi160_i2c_driver = { .acpi_match_table = bmi160_acpi_match, .of_match_table = bmi160_of_match, }, - .probe = bmi160_i2c_probe, + .probe_new = bmi160_i2c_probe, .id_table = bmi160_i2c_id, }; module_i2c_driver(bmi160_i2c_driver); diff --git a/drivers/iio/imu/fxos8700_i2c.c b/drivers/iio/imu/fxos8700_i2c.c index 40a570325b0aca3b7c6783d6ae121b30409732d3..a74a15fda8cbb5975d650e565c7ddb893fb00ab3 100644 --- a/drivers/iio/imu/fxos8700_i2c.c +++ b/drivers/iio/imu/fxos8700_i2c.c @@ -18,9 +18,9 @@ #include "fxos8700.h" -static int fxos8700_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int fxos8700_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct regmap *regmap; const char *name = NULL; @@ -60,7 +60,7 @@ static struct i2c_driver fxos8700_i2c_driver = { .acpi_match_table = ACPI_PTR(fxos8700_acpi_match), .of_match_table = fxos8700_of_match, }, - .probe = fxos8700_i2c_probe, + .probe_new = fxos8700_i2c_probe, .id_table = fxos8700_i2c_id, }; module_i2c_driver(fxos8700_i2c_driver); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h index 3d91469beccbb523962be0980f9fa2920b6923be..0e290c807b0f912b94d8b6562d06339e59d1c28f 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h @@ -22,6 +22,7 @@ enum inv_icm42600_chip { INV_CHIP_ICM42602, INV_CHIP_ICM42605, INV_CHIP_ICM42622, + INV_CHIP_ICM42631, INV_CHIP_NB, }; @@ -303,6 +304,7 @@ struct inv_icm42600_state { #define INV_ICM42600_WHOAMI_ICM42602 0x41 #define INV_ICM42600_WHOAMI_ICM42605 0x42 #define INV_ICM42600_WHOAMI_ICM42622 0x46 +#define INV_ICM42600_WHOAMI_ICM42631 0x5C /* User bank 1 (MSB 0x10) */ #define INV_ICM42600_REG_SENSOR_CONFIG0 0x1003 diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c index ca85fccc98393a2d3a8302bda6be318b0cac3e4f..7b3a2a0dc2cb0b04d132760f5ba0a4876db134f3 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c @@ -41,7 +41,7 @@ const struct regmap_config inv_icm42600_regmap_config = { .ranges = inv_icm42600_regmap_ranges, .num_ranges = ARRAY_SIZE(inv_icm42600_regmap_ranges), }; 
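The EXPORT_SYMBOL_GPL() to EXPORT_SYMBOL_NS_GPL() moves here (and in the fxas21002c hunks above) apply the symbol-namespace pattern. A minimal sketch with a hypothetical IIO_FOO namespace:

	/* core module: export the symbol into a namespace */
	int foo_core_probe(struct device *dev, struct regmap *regmap);
	EXPORT_SYMBOL_NS_GPL(foo_core_probe, IIO_FOO);

	/* each bus glue module must import that namespace explicitly */
	MODULE_IMPORT_NS(IIO_FOO);

Without the import, modpost warns at build time and the consumer module is refused the symbol at load time, which is why every glue driver in this series gains a MODULE_IMPORT_NS() line.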
-EXPORT_SYMBOL_GPL(inv_icm42600_regmap_config); +EXPORT_SYMBOL_NS_GPL(inv_icm42600_regmap_config, IIO_ICM42600); struct inv_icm42600_hw { uint8_t whoami; @@ -87,6 +87,11 @@ static const struct inv_icm42600_hw inv_icm42600_hw[INV_CHIP_NB] = { .name = "icm42622", .conf = &inv_icm42600_default_conf, }, + [INV_CHIP_ICM42631] = { + .whoami = INV_ICM42600_WHOAMI_ICM42631, + .name = "icm42631", + .conf = &inv_icm42600_default_conf, + }, }; const struct iio_mount_matrix * @@ -660,13 +665,13 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq, return devm_add_action_or_reset(dev, inv_icm42600_disable_pm, dev); } -EXPORT_SYMBOL_GPL(inv_icm42600_core_probe); +EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, IIO_ICM42600); /* * Suspend saves sensors state and turns everything off. * Check first if runtime suspend has not already done the job. */ -static int __maybe_unused inv_icm42600_suspend(struct device *dev) +static int inv_icm42600_suspend(struct device *dev) { struct inv_icm42600_state *st = dev_get_drvdata(dev); int ret; @@ -706,7 +711,7 @@ out_unlock: * System resume gets the system back on and restores the sensors state. * Manually put runtime power management in system active state. */ -static int __maybe_unused inv_icm42600_resume(struct device *dev) +static int inv_icm42600_resume(struct device *dev) { struct inv_icm42600_state *st = dev_get_drvdata(dev); int ret; @@ -739,7 +744,7 @@ out_unlock: } /* Runtime suspend will turn off sensors that are enabled by iio devices. */ -static int __maybe_unused inv_icm42600_runtime_suspend(struct device *dev) +static int inv_icm42600_runtime_suspend(struct device *dev) { struct inv_icm42600_state *st = dev_get_drvdata(dev); int ret; @@ -761,7 +766,7 @@ error_unlock: } /* Sensors are enabled by iio devices, no need to turn them back on here. 
*/ -static int __maybe_unused inv_icm42600_runtime_resume(struct device *dev) +static int inv_icm42600_runtime_resume(struct device *dev) { struct inv_icm42600_state *st = dev_get_drvdata(dev); int ret; @@ -774,12 +779,11 @@ static int __maybe_unused inv_icm42600_runtime_resume(struct device *dev) return ret; } -const struct dev_pm_ops inv_icm42600_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(inv_icm42600_suspend, inv_icm42600_resume) - SET_RUNTIME_PM_OPS(inv_icm42600_runtime_suspend, - inv_icm42600_runtime_resume, NULL) +EXPORT_NS_GPL_DEV_PM_OPS(inv_icm42600_pm_ops, IIO_ICM42600) = { + SYSTEM_SLEEP_PM_OPS(inv_icm42600_suspend, inv_icm42600_resume) + RUNTIME_PM_OPS(inv_icm42600_runtime_suspend, + inv_icm42600_runtime_resume, NULL) }; -EXPORT_SYMBOL_GPL(inv_icm42600_pm_ops); MODULE_AUTHOR("InvenSense, Inc."); MODULE_DESCRIPTION("InvenSense ICM-426xx device driver"); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c index d4a692b838d0f0c6dff8d779a7e6004a74ebc22e..eb2681ad375f224b3b0f07dfbbc49ba59abf1c8c 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c @@ -84,6 +84,9 @@ static const struct of_device_id inv_icm42600_of_matches[] = { }, { .compatible = "invensense,icm42622", .data = (void *)INV_CHIP_ICM42622, + }, { + .compatible = "invensense,icm42631", + .data = (void *)INV_CHIP_ICM42631, }, {} }; @@ -93,7 +96,7 @@ static struct i2c_driver inv_icm42600_driver = { .driver = { .name = "inv-icm42600-i2c", .of_match_table = inv_icm42600_of_matches, - .pm = &inv_icm42600_pm_ops, + .pm = pm_ptr(&inv_icm42600_pm_ops), }, .probe_new = inv_icm42600_probe, }; @@ -102,3 +105,4 @@ module_i2c_driver(inv_icm42600_driver); MODULE_AUTHOR("InvenSense, Inc."); MODULE_DESCRIPTION("InvenSense ICM-426xx I2C driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_ICM42600); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c index e6305e5fa9756d7857b40b8213612e26036ebc56..6be4ac79493794a92006d61bb66106eca85305f9 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c @@ -80,6 +80,9 @@ static const struct of_device_id inv_icm42600_of_matches[] = { }, { .compatible = "invensense,icm42622", .data = (void *)INV_CHIP_ICM42622, + }, { + .compatible = "invensense,icm42631", + .data = (void *)INV_CHIP_ICM42631, }, {} }; @@ -89,7 +92,7 @@ static struct spi_driver inv_icm42600_driver = { .driver = { .name = "inv-icm42600-spi", .of_match_table = inv_icm42600_of_matches, - .pm = &inv_icm42600_pm_ops, + .pm = pm_ptr(&inv_icm42600_pm_ops), }, .probe = inv_icm42600_probe, }; @@ -98,3 +101,4 @@ module_spi_driver(inv_icm42600_driver); MODULE_AUTHOR("InvenSense, Inc."); MODULE_DESCRIPTION("InvenSense ICM-426xx SPI driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_ICM42600); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 86fbbe9040503b0b66f39cbf80908fc0f6b5546d..8a129120b73dd1f7fd1016ea7e5fc762d0a136ce 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -1653,9 +1653,9 @@ error_power_off: inv_mpu6050_set_power_itg(st, false); return result; } -EXPORT_SYMBOL_GPL(inv_mpu_core_probe); +EXPORT_SYMBOL_NS_GPL(inv_mpu_core_probe, IIO_MPU6050); -static int __maybe_unused inv_mpu_resume(struct device *dev) +static int inv_mpu_resume(struct device *dev) { struct iio_dev *indio_dev = 
dev_get_drvdata(dev); struct inv_mpu6050_state *st = iio_priv(indio_dev); @@ -1687,7 +1687,7 @@ out_unlock: return result; } -static int __maybe_unused inv_mpu_suspend(struct device *dev) +static int inv_mpu_suspend(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct inv_mpu6050_state *st = iio_priv(indio_dev); @@ -1730,7 +1730,7 @@ out_unlock: return result; } -static int __maybe_unused inv_mpu_runtime_suspend(struct device *dev) +static int inv_mpu_runtime_suspend(struct device *dev) { struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev)); unsigned int sensors; @@ -1755,7 +1755,7 @@ out_unlock: return ret; } -static int __maybe_unused inv_mpu_runtime_resume(struct device *dev) +static int inv_mpu_runtime_resume(struct device *dev) { struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev)); int ret; @@ -1767,11 +1767,10 @@ static int __maybe_unused inv_mpu_runtime_resume(struct device *dev) return inv_mpu6050_set_power_itg(st, true); } -const struct dev_pm_ops inv_mpu_pmops = { - SET_SYSTEM_SLEEP_PM_OPS(inv_mpu_suspend, inv_mpu_resume) - SET_RUNTIME_PM_OPS(inv_mpu_runtime_suspend, inv_mpu_runtime_resume, NULL) +EXPORT_NS_GPL_DEV_PM_OPS(inv_mpu_pmops, IIO_MPU6050) = { + SYSTEM_SLEEP_PM_OPS(inv_mpu_suspend, inv_mpu_resume) + RUNTIME_PM_OPS(inv_mpu_runtime_suspend, inv_mpu_runtime_resume, NULL) }; -EXPORT_SYMBOL_GPL(inv_mpu_pmops); MODULE_AUTHOR("Invensense Corporation"); MODULE_DESCRIPTION("Invensense device MPU6050 driver"); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c index 14255a918eb1d797abc38f047d5fabfd8aba65bc..2f2da4cb732156fc4e7874027464ca17f2e4b9e7 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c @@ -91,13 +91,12 @@ static int inv_mpu_i2c_aux_setup(struct iio_dev *indio_dev) /** * inv_mpu_probe() - probe function. * @client: i2c client. - * @id: i2c device id. * * Returns 0 on success, a negative error code otherwise. 
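 * (With the probe_new() prototype there is no @id parameter; drivers
 * that still need the match data look it up on demand, as the body
 * below does:
 *
 *	const struct i2c_device_id *id = i2c_client_get_device_id(client);
 *
 * which returns the id-table entry the client matched, or NULL.)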
*/ -static int inv_mpu_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int inv_mpu_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); const void *match; struct inv_mpu6050_state *st; int result; @@ -260,14 +259,14 @@ static const struct acpi_device_id inv_acpi_match[] = { MODULE_DEVICE_TABLE(acpi, inv_acpi_match); static struct i2c_driver inv_mpu_driver = { - .probe = inv_mpu_probe, + .probe_new = inv_mpu_probe, .remove = inv_mpu_remove, .id_table = inv_mpu_id, .driver = { .of_match_table = inv_of_match, .acpi_match_table = inv_acpi_match, .name = "inv-mpu6050-i2c", - .pm = &inv_mpu_pmops, + .pm = pm_ptr(&inv_mpu_pmops), }, }; @@ -276,3 +275,4 @@ module_i2c_driver(inv_mpu_driver); MODULE_AUTHOR("Invensense Corporation"); MODULE_DESCRIPTION("Invensense device MPU6050 driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_MPU6050); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c index e6107b0cc38ff6dcaa3c5df1a9c32952b5d8c6c9..89f46c2f213d8fb179075027cce6305ca6cc3dce 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c @@ -154,7 +154,7 @@ static struct spi_driver inv_mpu_driver = { .of_match_table = inv_of_match, .acpi_match_table = inv_acpi_match, .name = "inv-mpu6000-spi", - .pm = &inv_mpu_pmops, + .pm = pm_ptr(&inv_mpu_pmops), }, }; @@ -163,3 +163,4 @@ module_spi_driver(inv_mpu_driver); MODULE_AUTHOR("Adriana Reus "); MODULE_DESCRIPTION("Invensense device MPU6000 driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_MPU6050); diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c index b10c0dcac0bbec3f4870d38b625bf927cdd02d8a..e692dfeeda44c5887d0aaeb6239a4cd89ef792f7 100644 --- a/drivers/iio/imu/kmx61.c +++ b/drivers/iio/imu/kmx61.c @@ -1276,9 +1276,9 @@ static struct iio_trigger *kmx61_trigger_setup(struct kmx61_data *data, return trig; } -static int kmx61_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int kmx61_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); int ret; struct kmx61_data *data; const char *name = NULL; @@ -1517,7 +1517,7 @@ static struct i2c_driver kmx61_driver = { .acpi_match_table = ACPI_PTR(kmx61_acpi_match), .pm = pm_ptr(&kmx61_pm_ops), }, - .probe = kmx61_probe, + .probe_new = kmx61_probe, .remove = kmx61_remove, .id_table = kmx61_id, }; diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig index 2ed2b3f40c0b15524756d3ce5c5dc089a347addf..f6660847fb58d4c3c886d9911e6925f4193541bf 100644 --- a/drivers/iio/imu/st_lsm6dsx/Kconfig +++ b/drivers/iio/imu/st_lsm6dsx/Kconfig @@ -13,7 +13,8 @@ config IIO_ST_LSM6DSX sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm, ism330dlc, lsm6dso, lsm6dsox, asm330lhh, asm330lhhx, lsm6dsr, lsm6ds3tr-c, ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop, lsm6dstx, - the accelerometer/gyroscope of lsm9ds1 and lsm6dst. + lsm6dsv, lsm6dsv16x, lsm6dso16is, ism330is, lsm6dst and the + accelerometer/gyroscope of lsm9ds1. To compile this driver as a module, choose M here: the module will be called st_lsm6dsx. 
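The dev_pm_ops conversions repeated across fxas21002c, inv_icm42600 and inv_mpu6050 share one shape: drop the __maybe_unused annotations, build the ops from SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS(), export them into the driver's symbol namespace, and reference them through pm_ptr(). A condensed sketch with hypothetical foo names:

	static int foo_suspend(struct device *dev)
	{
		return 0;	/* save state, power down */
	}

	static int foo_resume(struct device *dev)
	{
		return 0;	/* power up, restore state */
	}

	EXPORT_NS_GPL_DEV_PM_OPS(foo_pm_ops, IIO_FOO) = {
		SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	};

	/* bus glue driver */
	static struct spi_driver foo_driver = {
		.driver = {
			.name = "foo",
			.pm = pm_ptr(&foo_pm_ops),
		},
	};

When CONFIG_PM is disabled, pm_ptr() evaluates to NULL and the now-unreferenced callbacks are discarded by the compiler, which is what makes the __maybe_unused markers unnecessary.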
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h index 6b57d47be69ea9e1ef6dec4384fe97e07c877d64..5b6f195748fc693bd78acccc70d6da22f765d269 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h @@ -33,6 +33,10 @@ #define ST_LSM6DSOP_DEV_NAME "lsm6dsop" #define ST_ASM330LHHX_DEV_NAME "asm330lhhx" #define ST_LSM6DSTX_DEV_NAME "lsm6dstx" +#define ST_LSM6DSV_DEV_NAME "lsm6dsv" +#define ST_LSM6DSV16X_DEV_NAME "lsm6dsv16x" +#define ST_LSM6DSO16IS_DEV_NAME "lsm6dso16is" +#define ST_ISM330IS_DEV_NAME "ism330is" enum st_lsm6dsx_hw_id { ST_LSM6DS3_ID, @@ -53,6 +57,10 @@ enum st_lsm6dsx_hw_id { ST_LSM6DSOP_ID, ST_ASM330LHHX_ID, ST_LSM6DSTX_ID, + ST_LSM6DSV_ID, + ST_LSM6DSV16X_ID, + ST_LSM6DSO16IS_ID, + ST_ISM330IS_ID, ST_LSM6DSX_MAX_ID, }; @@ -374,7 +382,6 @@ struct st_lsm6dsx_sensor { * struct st_lsm6dsx_hw - ST IMU MEMS hw instance * @dev: Pointer to instance of struct device (I2C or SPI). * @regmap: Register map of the device. - * @regulators: VDD/VDDIO voltage regulators. * @irq: Device interrupt line (I2C or SPI). * @fifo_lock: Mutex to prevent concurrent access to the hw FIFO. * @conf_lock: Mutex to prevent concurrent FIFO configuration update. @@ -397,7 +404,6 @@ struct st_lsm6dsx_sensor { struct st_lsm6dsx_hw { struct device *dev; struct regmap *regmap; - struct regulator_bulk_data regulators[2]; int irq; struct mutex fifo_lock; @@ -426,7 +432,7 @@ struct st_lsm6dsx_hw { struct { __le16 channels[3]; s64 ts __aligned(8); - } scan[3]; + } scan[ST_LSM6DSX_ID_MAX]; }; static __maybe_unused const struct iio_event_spec st_lsm6dsx_event = { @@ -458,6 +464,7 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw); int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u32 odr, u8 *val); int st_lsm6dsx_shub_probe(struct st_lsm6dsx_hw *hw, const char *name); int st_lsm6dsx_shub_set_enable(struct st_lsm6dsx_sensor *sensor, bool enable); +int st_lsm6dsx_shub_read_output(struct st_lsm6dsx_hw *hw, u8 *data, int len); int st_lsm6dsx_set_page(struct st_lsm6dsx_hw *hw, bool enable); static inline int @@ -509,6 +516,17 @@ st_lsm6dsx_get_mount_matrix(const struct iio_dev *iio_dev, return &hw->orientation; } +static inline int +st_lsm6dsx_device_set_enable(struct st_lsm6dsx_sensor *sensor, bool enable) +{ + if (sensor->id == ST_LSM6DSX_ID_EXT0 || + sensor->id == ST_LSM6DSX_ID_EXT1 || + sensor->id == ST_LSM6DSX_ID_EXT2) + return st_lsm6dsx_shub_set_enable(sensor, enable); + + return st_lsm6dsx_sensor_set_enable(sensor, enable); +} + static const struct iio_chan_spec_ext_info __maybe_unused st_lsm6dsx_accel_ext_info[] = { IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, st_lsm6dsx_get_mount_matrix), diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index e49f2d120ed38bf9558e37e789619f96c5f9d58f..7dd5205aea5b4867484e211fe83a0a51fd91e7a9 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c @@ -15,7 +15,7 @@ * value of the decimation factor and ODR set for each FIFO data set. * * LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/LSM6DSRX/ISM330DHCX/ - * LSM6DST/LSM6DSOP/LSM6DSTX: + * LSM6DST/LSM6DSOP/LSM6DSTX/LSM6DSV: * The FIFO buffer can be configured to store data from gyroscope and * accelerometer. Each sample is queued with a tag (1B) indicating data * source (gyroscope, accelerometer, hw timer). 
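As the comment above says, each FIFO word on the tagged-FIFO parts is one tag byte plus a fixed payload. A simplified sketch of the demux this implies (tag constants and push helpers hypothetical, not the driver's names):

	struct fifo_word {
		u8 tag;		/* data source plus counter bits */
		u8 data[6];	/* one 3-axis sample, 16 bits per axis */
	} __packed;

	static void demux_fifo(const struct fifo_word *words, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			switch (tag_to_source(words[i].tag)) {
			case SRC_GYRO:
				push_gyro_sample(words[i].data);
				break;
			case SRC_ACC:
				push_accel_sample(words[i].data);
				break;
			default:
				break;	/* hw timer, ext sensors, ... */
			}
		}
	}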
@@ -673,17 +673,9 @@ int st_lsm6dsx_update_fifo(struct st_lsm6dsx_sensor *sensor, bool enable) goto out; } - if (sensor->id == ST_LSM6DSX_ID_EXT0 || - sensor->id == ST_LSM6DSX_ID_EXT1 || - sensor->id == ST_LSM6DSX_ID_EXT2) { - err = st_lsm6dsx_shub_set_enable(sensor, enable); - if (err < 0) - goto out; - } else { - err = st_lsm6dsx_sensor_set_enable(sensor, enable); - if (err < 0) - goto out; - } + err = st_lsm6dsx_device_set_enable(sensor, enable); + if (err < 0) + goto out; err = st_lsm6dsx_set_fifo_odr(sensor, enable); if (err < 0) diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index f8bbb005718edaa17a0dfdf167e20920d7e4695b..3f6060c64f32bf0117779a87e594077c9d2fb49c 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -27,13 +27,20 @@ * - FIFO size: 4KB * * - LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP/ - * LSM6DSTX: + * LSM6DSTX/LSM6DSO16IS/ISM330IS: * - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416, * 833 * - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16 * - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000 * - FIFO size: 3KB * + * - LSM6DSV/LSM6DSV16X: + * - Accelerometer/Gyroscope supported ODR [Hz]: 7.5, 15, 30, 60, 120, 240, + * 480, 960 + * - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16 + * - Gyroscope supported full-scale [dps]: +-125/+-250/+-500/+-1000/+-2000 + * - FIFO size: 3KB + * * - LSM9DS1/LSM6DS0: * - Accelerometer supported ODR [Hz]: 10, 50, 119, 238, 476, 952 * - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16 @@ -53,6 +60,8 @@ #include #include #include +#include +#include #include #include #include @@ -1160,6 +1169,342 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .wakeup_src_x_mask = BIT(2), }, }, + { + .reset = { + .addr = 0x12, + .mask = BIT(0), + }, + .boot = { + .addr = 0x12, + .mask = BIT(7), + }, + .bdu = { + .addr = 0x12, + .mask = BIT(6), + }, + .id = { + { + .hw_id = ST_LSM6DSV_ID, + .name = ST_LSM6DSV_DEV_NAME, + .wai = 0x70, + }, { + .hw_id = ST_LSM6DSV16X_ID, + .name = ST_LSM6DSV16X_DEV_NAME, + .wai = 0x70, + }, + }, + .channels = { + [ST_LSM6DSX_ID_ACC] = { + .chan = st_lsm6dsx_acc_channels, + .len = ARRAY_SIZE(st_lsm6dsx_acc_channels), + }, + [ST_LSM6DSX_ID_GYRO] = { + .chan = st_lsm6dsx_gyro_channels, + .len = ARRAY_SIZE(st_lsm6dsx_gyro_channels), + }, + }, + .drdy_mask = { + .addr = 0x13, + .mask = BIT(3), + }, + .odr_table = { + [ST_LSM6DSX_ID_ACC] = { + .reg = { + .addr = 0x10, + .mask = GENMASK(3, 0), + }, + .odr_avl[0] = { 7500, 0x02 }, + .odr_avl[1] = { 15000, 0x03 }, + .odr_avl[2] = { 30000, 0x04 }, + .odr_avl[3] = { 60000, 0x05 }, + .odr_avl[4] = { 120000, 0x06 }, + .odr_avl[5] = { 240000, 0x07 }, + .odr_avl[6] = { 480000, 0x08 }, + .odr_avl[7] = { 960000, 0x09 }, + .odr_len = 8, + }, + [ST_LSM6DSX_ID_GYRO] = { + .reg = { + .addr = 0x11, + .mask = GENMASK(3, 0), + }, + .odr_avl[0] = { 7500, 0x02 }, + .odr_avl[1] = { 15000, 0x03 }, + .odr_avl[2] = { 30000, 0x04 }, + .odr_avl[3] = { 60000, 0x05 }, + .odr_avl[4] = { 120000, 0x06 }, + .odr_avl[5] = { 240000, 0x07 }, + .odr_avl[6] = { 480000, 0x08 }, + .odr_avl[7] = { 960000, 0x09 }, + .odr_len = 8, + }, + }, + .fs_table = { + [ST_LSM6DSX_ID_ACC] = { + .reg = { + .addr = 0x17, + .mask = GENMASK(1, 0), + }, + .fs_avl[0] = { IIO_G_TO_M_S_2(61000), 0x0 }, + .fs_avl[1] = { IIO_G_TO_M_S_2(122000), 0x1 }, + .fs_avl[2] = { IIO_G_TO_M_S_2(244000), 0x2 }, + 
.fs_avl[3] = { IIO_G_TO_M_S_2(488000), 0x3 }, + .fs_len = 4, + }, + [ST_LSM6DSX_ID_GYRO] = { + .reg = { + .addr = 0x15, + .mask = GENMASK(3, 0), + }, + .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750000), 0x1 }, + .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500000), 0x2 }, + .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000000), 0x3 }, + .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000000), 0x4 }, + .fs_len = 4, + }, + }, + .irq_config = { + .irq1 = { + .addr = 0x0d, + .mask = BIT(3), + }, + .irq2 = { + .addr = 0x0e, + .mask = BIT(3), + }, + .lir = { + .addr = 0x56, + .mask = BIT(0), + }, + .irq1_func = { + .addr = 0x5e, + .mask = BIT(5), + }, + .irq2_func = { + .addr = 0x5f, + .mask = BIT(5), + }, + .hla = { + .addr = 0x03, + .mask = BIT(4), + }, + .od = { + .addr = 0x03, + .mask = BIT(3), + }, + }, + .batch = { + [ST_LSM6DSX_ID_ACC] = { + .addr = 0x09, + .mask = GENMASK(3, 0), + }, + [ST_LSM6DSX_ID_GYRO] = { + .addr = 0x09, + .mask = GENMASK(7, 4), + }, + }, + .fifo_ops = { + .update_fifo = st_lsm6dsx_update_fifo, + .read_fifo = st_lsm6dsx_read_tagged_fifo, + .fifo_th = { + .addr = 0x07, + .mask = GENMASK(7, 0), + }, + .fifo_diff = { + .addr = 0x1b, + .mask = GENMASK(8, 0), + }, + .max_size = 512, + .th_wl = 1, + }, + .ts_settings = { + .timer_en = { + .addr = 0x50, + .mask = BIT(6), + }, + .decimator = { + .addr = 0x0a, + .mask = GENMASK(7, 6), + }, + .freq_fine = 0x4f, + }, + .shub_settings = { + .page_mux = { + .addr = 0x01, + .mask = BIT(6), + }, + .master_en = { + .sec_page = true, + .addr = 0x14, + .mask = BIT(2), + }, + .pullup_en = { + .addr = 0x03, + .mask = BIT(6), + }, + .aux_sens = { + .addr = 0x14, + .mask = GENMASK(1, 0), + }, + .wr_once = { + .addr = 0x14, + .mask = BIT(6), + }, + .num_ext_dev = 3, + .shub_out = { + .sec_page = true, + .addr = 0x02, + }, + .slv0_addr = 0x15, + .dw_slv0_addr = 0x21, + .batch_en = BIT(3), + }, + .event_settings = { + .enable_reg = { + .addr = 0x50, + .mask = BIT(7), + }, + .wakeup_reg = { + .addr = 0x5b, + .mask = GENMASK(5, 0), + }, + .wakeup_src_reg = 0x45, + .wakeup_src_status_mask = BIT(3), + .wakeup_src_z_mask = BIT(0), + .wakeup_src_y_mask = BIT(1), + .wakeup_src_x_mask = BIT(2), + }, + }, + { + .reset = { + .addr = 0x12, + .mask = BIT(0), + }, + .boot = { + .addr = 0x12, + .mask = BIT(7), + }, + .bdu = { + .addr = 0x12, + .mask = BIT(6), + }, + .id = { + { + .hw_id = ST_LSM6DSO16IS_ID, + .name = ST_LSM6DSO16IS_DEV_NAME, + .wai = 0x22, + }, { + .hw_id = ST_ISM330IS_ID, + .name = ST_ISM330IS_DEV_NAME, + .wai = 0x22, + } + }, + .channels = { + [ST_LSM6DSX_ID_ACC] = { + .chan = st_lsm6dsx_acc_channels, + .len = ARRAY_SIZE(st_lsm6dsx_acc_channels), + }, + [ST_LSM6DSX_ID_GYRO] = { + .chan = st_lsm6dsx_gyro_channels, + .len = ARRAY_SIZE(st_lsm6dsx_gyro_channels), + }, + }, + .odr_table = { + [ST_LSM6DSX_ID_ACC] = { + .reg = { + .addr = 0x10, + .mask = GENMASK(7, 4), + }, + .odr_avl[0] = { 12500, 0x01 }, + .odr_avl[1] = { 26000, 0x02 }, + .odr_avl[2] = { 52000, 0x03 }, + .odr_avl[3] = { 104000, 0x04 }, + .odr_avl[4] = { 208000, 0x05 }, + .odr_avl[5] = { 416000, 0x06 }, + .odr_avl[6] = { 833000, 0x07 }, + .odr_len = 7, + }, + [ST_LSM6DSX_ID_GYRO] = { + .reg = { + .addr = 0x11, + .mask = GENMASK(7, 4), + }, + .odr_avl[0] = { 12500, 0x01 }, + .odr_avl[1] = { 26000, 0x02 }, + .odr_avl[2] = { 52000, 0x03 }, + .odr_avl[3] = { 104000, 0x04 }, + .odr_avl[4] = { 208000, 0x05 }, + .odr_avl[5] = { 416000, 0x06 }, + .odr_avl[6] = { 833000, 0x07 }, + .odr_len = 7, + }, + }, + .fs_table = { + [ST_LSM6DSX_ID_ACC] = { + .reg = { + .addr = 0x10, + .mask = GENMASK(3, 2), + }, + .fs_avl[0] = { 
IIO_G_TO_M_S_2(61000), 0x0 }, + .fs_avl[1] = { IIO_G_TO_M_S_2(122000), 0x2 }, + .fs_avl[2] = { IIO_G_TO_M_S_2(244000), 0x3 }, + .fs_avl[3] = { IIO_G_TO_M_S_2(488000), 0x1 }, + .fs_len = 4, + }, + [ST_LSM6DSX_ID_GYRO] = { + .reg = { + .addr = 0x11, + .mask = GENMASK(3, 2), + }, + .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750000), 0x0 }, + .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500000), 0x1 }, + .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000000), 0x2 }, + .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000000), 0x3 }, + .fs_len = 4, + }, + }, + .irq_config = { + .hla = { + .addr = 0x12, + .mask = BIT(5), + }, + .od = { + .addr = 0x12, + .mask = BIT(4), + }, + }, + .shub_settings = { + .page_mux = { + .addr = 0x01, + .mask = BIT(6), + }, + .master_en = { + .sec_page = true, + .addr = 0x14, + .mask = BIT(2), + }, + .pullup_en = { + .sec_page = true, + .addr = 0x14, + .mask = BIT(3), + }, + .aux_sens = { + .addr = 0x14, + .mask = GENMASK(1, 0), + }, + .wr_once = { + .addr = 0x14, + .mask = BIT(6), + }, + .num_ext_dev = 3, + .shub_out = { + .sec_page = true, + .addr = 0x02, + }, + .slv0_addr = 0x15, + .dw_slv0_addr = 0x21, + }, + }, }; int st_lsm6dsx_set_page(struct st_lsm6dsx_hw *hw, bool enable) @@ -2117,6 +2462,32 @@ static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private) return fifo_len || event ? IRQ_HANDLED : IRQ_NONE; } +static irqreturn_t st_lsm6dsx_sw_trigger_handler_thread(int irq, + void *private) +{ + struct iio_poll_func *pf = private; + struct iio_dev *iio_dev = pf->indio_dev; + struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev); + struct st_lsm6dsx_hw *hw = sensor->hw; + + if (sensor->id == ST_LSM6DSX_ID_EXT0 || + sensor->id == ST_LSM6DSX_ID_EXT1 || + sensor->id == ST_LSM6DSX_ID_EXT2) + st_lsm6dsx_shub_read_output(hw, + (u8 *)hw->scan[sensor->id].channels, + sizeof(hw->scan[sensor->id].channels)); + else + st_lsm6dsx_read_locked(hw, iio_dev->channels[0].address, + hw->scan[sensor->id].channels, + sizeof(hw->scan[sensor->id].channels)); + + iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan[sensor->id], + iio_get_time_ns(iio_dev)); + iio_trigger_notify_done(iio_dev->trig); + + return IRQ_HANDLED; +} + static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw) { struct st_sensors_platform_data *pdata; @@ -2175,36 +2546,60 @@ static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw) return 0; } -static int st_lsm6dsx_init_regulators(struct device *dev) +static int st_lsm6dsx_sw_buffer_preenable(struct iio_dev *iio_dev) { - struct st_lsm6dsx_hw *hw = dev_get_drvdata(dev); - int err; + struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev); - /* vdd-vddio power regulators */ - hw->regulators[0].supply = "vdd"; - hw->regulators[1].supply = "vddio"; - err = devm_regulator_bulk_get(dev, ARRAY_SIZE(hw->regulators), - hw->regulators); - if (err) - return dev_err_probe(dev, err, "failed to get regulators\n"); + return st_lsm6dsx_device_set_enable(sensor, true); +} - err = regulator_bulk_enable(ARRAY_SIZE(hw->regulators), - hw->regulators); - if (err) { - dev_err(dev, "failed to enable regulators: %d\n", err); - return err; - } +static int st_lsm6dsx_sw_buffer_postdisable(struct iio_dev *iio_dev) +{ + struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev); - msleep(50); + return st_lsm6dsx_device_set_enable(sensor, false); +} + +static const struct iio_buffer_setup_ops st_lsm6dsx_sw_buffer_ops = { + .preenable = st_lsm6dsx_sw_buffer_preenable, + .postdisable = st_lsm6dsx_sw_buffer_postdisable, +}; + +static int st_lsm6dsx_sw_buffers_setup(struct st_lsm6dsx_hw *hw) +{ + int i; + + for (i = 0; i < 
ST_LSM6DSX_ID_MAX; i++) { + int err; + + if (!hw->iio_devs[i]) + continue; + + err = devm_iio_triggered_buffer_setup(hw->dev, + hw->iio_devs[i], NULL, + st_lsm6dsx_sw_trigger_handler_thread, + &st_lsm6dsx_sw_buffer_ops); + if (err) + return err; + } return 0; } -static void st_lsm6dsx_chip_uninit(void *data) +static int st_lsm6dsx_init_regulators(struct device *dev) { - struct st_lsm6dsx_hw *hw = data; + /* vdd-vddio power regulators */ + static const char * const regulators[] = { "vdd", "vddio" }; + int err; - regulator_bulk_disable(ARRAY_SIZE(hw->regulators), hw->regulators); + err = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulators), + regulators); + if (err) + return dev_err_probe(dev, err, "failed to enable regulators\n"); + + msleep(50); + + return 0; } int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, @@ -2230,10 +2625,6 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, if (err) return err; - err = devm_add_action_or_reset(dev, st_lsm6dsx_chip_uninit, hw); - if (err) - return err; - hw->buff = devm_kzalloc(dev, ST_LSM6DSX_BUFF_SIZE, GFP_KERNEL); if (!hw->buff) return -ENOMEM; @@ -2275,6 +2666,16 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, return err; } + if (!hw->irq || !hw->settings->fifo_ops.read_fifo) { + /* + * Rely on sw triggers (e.g. hr-timers) if irq pin is not + * connected or if the device does not support HW FIFO + */ + err = st_lsm6dsx_sw_buffers_setup(hw); + if (err) + return err; + } + err = iio_read_mount_matrix(hw->dev, &hw->orientation); if (err) return err; @@ -2317,12 +2718,7 @@ static int st_lsm6dsx_suspend(struct device *dev) continue; } - if (sensor->id == ST_LSM6DSX_ID_EXT0 || - sensor->id == ST_LSM6DSX_ID_EXT1 || - sensor->id == ST_LSM6DSX_ID_EXT2) - err = st_lsm6dsx_shub_set_enable(sensor, false); - else - err = st_lsm6dsx_sensor_set_enable(sensor, false); + err = st_lsm6dsx_device_set_enable(sensor, false); if (err < 0) return err; @@ -2353,12 +2749,7 @@ static int st_lsm6dsx_resume(struct device *dev) if (!(hw->suspend_mask & BIT(sensor->id))) continue; - if (sensor->id == ST_LSM6DSX_ID_EXT0 || - sensor->id == ST_LSM6DSX_ID_EXT1 || - sensor->id == ST_LSM6DSX_ID_EXT2) - err = st_lsm6dsx_shub_set_enable(sensor, true); - else - err = st_lsm6dsx_sensor_set_enable(sensor, true); + err = st_lsm6dsx_device_set_enable(sensor, true); if (err < 0) return err; diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c index 307c8c436862aa31de1a93339423131fcba4a231..df5f609252600a7908ccfeed9b8e143a500101e8 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c @@ -21,9 +21,9 @@ static const struct regmap_config st_lsm6dsx_i2c_regmap_config = { .val_bits = 8, }; -static int st_lsm6dsx_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int st_lsm6dsx_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); int hw_id = id->driver_data; struct regmap *regmap; @@ -109,6 +109,22 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = { .compatible = "st,lsm6dstx", .data = (void *)ST_LSM6DSTX_ID, }, + { + .compatible = "st,lsm6dsv", + .data = (void *)ST_LSM6DSV_ID, + }, + { + .compatible = "st,lsm6dsv16x", + .data = (void *)ST_LSM6DSV16X_ID, + }, + { + .compatible = "st,lsm6dso16is", + .data = (void *)ST_LSM6DSO16IS_ID, + }, + { + .compatible = "st,ism330is", + .data = (void *)ST_ISM330IS_ID, + }, {}, }; MODULE_DEVICE_TABLE(of, 
st_lsm6dsx_i2c_of_match); @@ -132,6 +148,10 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = { { ST_LSM6DSOP_DEV_NAME, ST_LSM6DSOP_ID }, { ST_ASM330LHHX_DEV_NAME, ST_ASM330LHHX_ID }, { ST_LSM6DSTX_DEV_NAME, ST_LSM6DSTX_ID }, + { ST_LSM6DSV_DEV_NAME, ST_LSM6DSV_ID }, + { ST_LSM6DSV16X_DEV_NAME, ST_LSM6DSV16X_ID }, + { ST_LSM6DSO16IS_DEV_NAME, ST_LSM6DSO16IS_ID }, + { ST_ISM330IS_DEV_NAME, ST_ISM330IS_ID }, {}, }; MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table); @@ -142,7 +162,7 @@ static struct i2c_driver st_lsm6dsx_driver = { .pm = pm_sleep_ptr(&st_lsm6dsx_pm_ops), .of_match_table = st_lsm6dsx_i2c_of_match, }, - .probe = st_lsm6dsx_i2c_probe, + .probe_new = st_lsm6dsx_i2c_probe, .id_table = st_lsm6dsx_i2c_id_table, }; module_i2c_driver(st_lsm6dsx_driver); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c index 99562ba85ee430989beca58cca7f234d98d1c596..f2b64b4956a33e51c1ec6664b0679e9893efb23b 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c @@ -170,9 +170,7 @@ static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw) * * Read st_lsm6dsx i2c controller register */ -static int -st_lsm6dsx_shub_read_output(struct st_lsm6dsx_hw *hw, u8 *data, - int len) +int st_lsm6dsx_shub_read_output(struct st_lsm6dsx_hw *hw, u8 *data, int len) { const struct st_lsm6dsx_shub_settings *hub_settings; int err; diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c index 6a4eecf4bb050b8e0a92ce9e392ed7045c224f5c..974584bda875ea2d44a9c578247523023669609f 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c @@ -109,6 +109,22 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = { .compatible = "st,lsm6dstx", .data = (void *)ST_LSM6DSTX_ID, }, + { + .compatible = "st,lsm6dsv", + .data = (void *)ST_LSM6DSV_ID, + }, + { + .compatible = "st,lsm6dsv16x", + .data = (void *)ST_LSM6DSV16X_ID, + }, + { + .compatible = "st,lsm6dso16is", + .data = (void *)ST_LSM6DSO16IS_ID, + }, + { + .compatible = "st,ism330is", + .data = (void *)ST_ISM330IS_ID, + }, {}, }; MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match); @@ -132,6 +148,10 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = { { ST_LSM6DSOP_DEV_NAME, ST_LSM6DSOP_ID }, { ST_ASM330LHHX_DEV_NAME, ST_ASM330LHHX_ID }, { ST_LSM6DSTX_DEV_NAME, ST_LSM6DSTX_ID }, + { ST_LSM6DSV_DEV_NAME, ST_LSM6DSV_ID }, + { ST_LSM6DSV16X_DEV_NAME, ST_LSM6DSV16X_ID }, + { ST_LSM6DSO16IS_DEV_NAME, ST_LSM6DSO16IS_ID }, + { ST_ISM330IS_DEV_NAME, ST_ISM330IS_ID }, {}, }; MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table); diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c index ae7bc815382fb62f4ff6bb6d75c8a55936f4d196..e887b45cdbcd7f741bd3d823ce80a148ef1d6689 100644 --- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c +++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c @@ -18,58 +18,6 @@ #include "st_lsm9ds0.h" -static int st_lsm9ds0_power_enable(struct device *dev, struct st_lsm9ds0 *lsm9ds0) -{ - int ret; - - /* Regulators not mandatory, but if requested we should enable them. 
*/ - lsm9ds0->vdd = devm_regulator_get(dev, "vdd"); - if (IS_ERR(lsm9ds0->vdd)) - return dev_err_probe(dev, PTR_ERR(lsm9ds0->vdd), - "unable to get Vdd supply\n"); - - ret = regulator_enable(lsm9ds0->vdd); - if (ret) { - dev_warn(dev, "Failed to enable specified Vdd supply\n"); - return ret; - } - - lsm9ds0->vdd_io = devm_regulator_get(dev, "vddio"); - if (IS_ERR(lsm9ds0->vdd_io)) { - regulator_disable(lsm9ds0->vdd); - return dev_err_probe(dev, PTR_ERR(lsm9ds0->vdd_io), - "unable to get Vdd_IO supply\n"); - } - ret = regulator_enable(lsm9ds0->vdd_io); - if (ret) { - dev_warn(dev, "Failed to enable specified Vdd_IO supply\n"); - regulator_disable(lsm9ds0->vdd); - return ret; - } - - return 0; -} - -static void st_lsm9ds0_power_disable(void *data) -{ - struct st_lsm9ds0 *lsm9ds0 = data; - - regulator_disable(lsm9ds0->vdd_io); - regulator_disable(lsm9ds0->vdd); -} - -static int devm_st_lsm9ds0_power_enable(struct st_lsm9ds0 *lsm9ds0) -{ - struct device *dev = lsm9ds0->dev; - int ret; - - ret = st_lsm9ds0_power_enable(dev, lsm9ds0); - if (ret) - return ret; - - return devm_add_action_or_reset(dev, st_lsm9ds0_power_disable, lsm9ds0); -} - static int st_lsm9ds0_probe_accel(struct st_lsm9ds0 *lsm9ds0, struct regmap *regmap) { const struct st_sensor_settings *settings; @@ -92,8 +40,6 @@ static int st_lsm9ds0_probe_accel(struct st_lsm9ds0 *lsm9ds0, struct regmap *reg data->sensor_settings = (struct st_sensor_settings *)settings; data->irq = lsm9ds0->irq; data->regmap = regmap; - data->vdd = lsm9ds0->vdd; - data->vdd_io = lsm9ds0->vdd_io; return st_accel_common_probe(lsm9ds0->accel); } @@ -120,19 +66,22 @@ static int st_lsm9ds0_probe_magn(struct st_lsm9ds0 *lsm9ds0, struct regmap *regm data->sensor_settings = (struct st_sensor_settings *)settings; data->irq = lsm9ds0->irq; data->regmap = regmap; - data->vdd = lsm9ds0->vdd; - data->vdd_io = lsm9ds0->vdd_io; return st_magn_common_probe(lsm9ds0->magn); } int st_lsm9ds0_probe(struct st_lsm9ds0 *lsm9ds0, struct regmap *regmap) { + struct device *dev = lsm9ds0->dev; + static const char * const regulator_names[] = { "vdd", "vddio" }; int ret; - ret = devm_st_lsm9ds0_power_enable(lsm9ds0); + /* Regulators not mandatory, but if requested we should enable them. */ + ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names), + regulator_names); if (ret) - return ret; + return dev_err_probe(dev, ret, + "unable to enable Vdd supply\n"); /* Setup accelerometer device */ ret = st_lsm9ds0_probe_accel(lsm9ds0, regmap); diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 228598b82a2f3609527edef61f4f5351c8ce5db7..80c78bd6bbef4970fe39149bddd06b361ad9c74c 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -507,13 +507,14 @@ static ssize_t iio_scan_el_store(struct device *dev, int ret; bool state; struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); struct iio_buffer *buffer = this_attr->buffer; ret = kstrtobool(buf, &state); if (ret < 0) return ret; - mutex_lock(&indio_dev->mlock); + mutex_lock(&iio_dev_opaque->mlock); if (iio_buffer_is_active(buffer)) { ret = -EBUSY; goto error_ret; @@ -532,7 +533,7 @@ static ssize_t iio_scan_el_store(struct device *dev, } error_ret: - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&iio_dev_opaque->mlock); return ret < 0 ? 
ret : len; @@ -554,6 +555,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev, { int ret; struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; bool state; @@ -561,14 +563,14 @@ static ssize_t iio_scan_el_ts_store(struct device *dev, if (ret < 0) return ret; - mutex_lock(&indio_dev->mlock); + mutex_lock(&iio_dev_opaque->mlock); if (iio_buffer_is_active(buffer)) { ret = -EBUSY; goto error_ret; } buffer->scan_timestamp = state; error_ret: - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&iio_dev_opaque->mlock); return ret ? ret : len; } @@ -642,6 +644,7 @@ static ssize_t length_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; unsigned int val; int ret; @@ -653,7 +656,7 @@ static ssize_t length_store(struct device *dev, struct device_attribute *attr, if (val == buffer->length) return len; - mutex_lock(&indio_dev->mlock); + mutex_lock(&iio_dev_opaque->mlock); if (iio_buffer_is_active(buffer)) { ret = -EBUSY; } else { @@ -665,7 +668,7 @@ static ssize_t length_store(struct device *dev, struct device_attribute *attr, if (buffer->length && buffer->length < buffer->watermark) buffer->watermark = buffer->length; out: - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&iio_dev_opaque->mlock); return ret ? ret : len; } @@ -1256,7 +1259,7 @@ int iio_update_buffers(struct iio_dev *indio_dev, return -EINVAL; mutex_lock(&iio_dev_opaque->info_exist_lock); - mutex_lock(&indio_dev->mlock); + mutex_lock(&iio_dev_opaque->mlock); if (insert_buffer && iio_buffer_is_active(insert_buffer)) insert_buffer = NULL; @@ -1277,7 +1280,7 @@ int iio_update_buffers(struct iio_dev *indio_dev, ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer); out_unlock: - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&iio_dev_opaque->mlock); mutex_unlock(&iio_dev_opaque->info_exist_lock); return ret; @@ -1296,6 +1299,7 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr, int ret; bool requested_state; struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; bool inlist; @@ -1303,7 +1307,7 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr, if (ret < 0) return ret; - mutex_lock(&indio_dev->mlock); + mutex_lock(&iio_dev_opaque->mlock); /* Find out if it is in the list */ inlist = iio_buffer_is_active(buffer); @@ -1317,7 +1321,7 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr, ret = __iio_update_buffers(indio_dev, NULL, buffer); done: - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&iio_dev_opaque->mlock); return (ret < 0) ? 
ret : len; } @@ -1334,6 +1338,7 @@ static ssize_t watermark_store(struct device *dev, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; unsigned int val; int ret; @@ -1344,7 +1349,7 @@ static ssize_t watermark_store(struct device *dev, if (!val) return -EINVAL; - mutex_lock(&indio_dev->mlock); + mutex_lock(&iio_dev_opaque->mlock); if (val > buffer->length) { ret = -EINVAL; @@ -1358,7 +1363,7 @@ static ssize_t watermark_store(struct device *dev, buffer->watermark = val; out: - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&iio_dev_opaque->mlock); return ret ? ret : len; } @@ -1600,6 +1605,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer, { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_dev_attr *p; + const struct iio_dev_attr *id_attr; struct attribute **attr; int ret, i, attrn, scan_el_attrcount, buffer_attrcount; const struct iio_chan_spec *channels; @@ -1609,6 +1615,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer, while (buffer->attrs[buffer_attrcount] != NULL) buffer_attrcount++; } + buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs); scan_el_attrcount = 0; INIT_LIST_HEAD(&buffer->buffer_attr_list); @@ -1651,7 +1658,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer, } } - attrn = buffer_attrcount + scan_el_attrcount + ARRAY_SIZE(iio_buffer_attrs); + attrn = buffer_attrcount + scan_el_attrcount; attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL); if (!attr) { ret = -ENOMEM; @@ -1666,10 +1673,11 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer, attr[2] = &dev_attr_watermark_ro.attr; if (buffer->attrs) - memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs, - sizeof(struct attribute *) * buffer_attrcount); + for (i = 0, id_attr = buffer->attrs[i]; + (id_attr = buffer->attrs[i]); i++) + attr[ARRAY_SIZE(iio_buffer_attrs) + i] = + (struct attribute *)&id_attr->dev_attr.attr; - buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs); buffer->buffer_group.attrs = attr; for (i = 0; i < buffer_attrcount; i++) { diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 151ff39933548c73b3362b89e230ad7bb30c807d..52e690f031cb8794a0fd56c8a2a4062f77d29ae0 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -285,16 +285,16 @@ int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; - ret = mutex_lock_interruptible(&indio_dev->mlock); + ret = mutex_lock_interruptible(&iio_dev_opaque->mlock); if (ret) return ret; if ((ev_int && iio_event_enabled(ev_int)) || iio_buffer_enabled(indio_dev)) { - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&iio_dev_opaque->mlock); return -EBUSY; } iio_dev_opaque->clock_id = clock_id; - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&iio_dev_opaque->mlock); return 0; } @@ -1674,7 +1674,7 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) indio_dev->dev.type = &iio_device_type; indio_dev->dev.bus = &iio_bus_type; device_initialize(&indio_dev->dev); - mutex_init(&indio_dev->mlock); + mutex_init(&iio_dev_opaque->mlock); mutex_init(&iio_dev_opaque->info_exist_lock); INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list); @@ -1696,7 +1696,7 @@ 
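The mlock conversions above are mechanical, but they matter because mlock now lives in struct iio_dev_opaque and is private to the IIO core: drivers are expected to take it only through the claim/release helpers. A minimal sketch of the established direct-mode pattern in a driver read callback, assuming only the iio_device_claim_direct_mode()/iio_device_release_direct_mode() API touched by this series (the foo_* names are hypothetical):

#include <linux/iio/iio.h>

/* Hypothetical hardware read used by the sketch below. */
static int foo_read_channel(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan, int *val)
{
	*val = 0;
	return 0;
}

static int foo_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		/* Fails with -EBUSY while a buffer is enabled. */
		ret = iio_device_claim_direct_mode(indio_dev);
		if (ret)
			return ret;
		ret = foo_read_channel(indio_dev, chan, val);
		iio_device_release_direct_mode(indio_dev);
		return ret ? ret : IIO_VAL_INT;
	default:
		return -EINVAL;
	}
}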
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers); lockdep_register_key(&iio_dev_opaque->mlock_key); - lockdep_set_class(&indio_dev->mlock, &iio_dev_opaque->mlock_key); + lockdep_set_class(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key); return indio_dev; } @@ -2058,10 +2058,12 @@ EXPORT_SYMBOL_GPL(__devm_iio_device_register); */ int iio_device_claim_direct_mode(struct iio_dev *indio_dev) { - mutex_lock(&indio_dev->mlock); + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + + mutex_lock(&iio_dev_opaque->mlock); if (iio_buffer_enabled(indio_dev)) { - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&iio_dev_opaque->mlock); return -EBUSY; } return 0; @@ -2079,10 +2081,50 @@ EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode); */ void iio_device_release_direct_mode(struct iio_dev *indio_dev) { - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock); } EXPORT_SYMBOL_GPL(iio_device_release_direct_mode); +/** + * iio_device_claim_buffer_mode - Keep device in buffer mode + * @indio_dev: the iio_dev associated with the device + * + * If the device is in buffer mode it is guaranteed to stay + * that way until iio_device_release_buffer_mode() is called. + * + * Use with iio_device_release_buffer_mode(). + * + * Returns: 0 on success, -EBUSY on failure. + */ +int iio_device_claim_buffer_mode(struct iio_dev *indio_dev) +{ + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + + mutex_lock(&iio_dev_opaque->mlock); + + if (iio_buffer_enabled(indio_dev)) + return 0; + + mutex_unlock(&iio_dev_opaque->mlock); + return -EBUSY; +} +EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode); + +/** + * iio_device_release_buffer_mode - releases claim on buffer mode + * @indio_dev: the iio_dev associated with the device + * + * Release the claim. Device is no longer guaranteed to stay + * in buffer mode. + * + * Use with iio_device_claim_buffer_mode(). 
+ */ +void iio_device_release_buffer_mode(struct iio_dev *indio_dev) +{ + mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock); +} +EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode); + /** * iio_device_get_current_mode() - helper function providing read-only access to * the opaque @currentmode variable diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c index 3d78da2531a9a2c7d5992d2fb5e762341ab4309b..f77ce49d4c363475e686598d4ee639b07d7811f6 100644 --- a/drivers/iio/industrialio-event.c +++ b/drivers/iio/industrialio-event.c @@ -198,7 +198,7 @@ static int iio_event_getfd(struct iio_dev *indio_dev) if (ev_int == NULL) return -ENODEV; - fd = mutex_lock_interruptible(&indio_dev->mlock); + fd = mutex_lock_interruptible(&iio_dev_opaque->mlock); if (fd) return fd; @@ -219,7 +219,7 @@ static int iio_event_getfd(struct iio_dev *indio_dev) } unlock: - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&iio_dev_opaque->mlock); return fd; } @@ -556,7 +556,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev) ret = iio_device_register_sysfs_group(indio_dev, &ev_int->group); if (ret) - goto error_free_setup_event_lines; + goto error_free_group_attrs; ev_int->ioctl_handler.ioctl = iio_event_ioctl; iio_device_ioctl_handler_register(&iio_dev_opaque->indio_dev, @@ -564,6 +564,8 @@ int iio_device_register_eventset(struct iio_dev *indio_dev) return 0; +error_free_group_attrs: + kfree(ev_int->group.attrs); error_free_setup_event_lines: iio_free_chan_devattr_list(&ev_int->dev_attr_list); kfree(ev_int); diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index 6885a186fe27ad8bd100524aa7f9ab1aebfa4335..a2f3cc2f65efbeaebe8470d12fd9452e244e7a90 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c @@ -120,12 +120,12 @@ int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *tri return -EINVAL; iio_dev_opaque = to_iio_dev_opaque(indio_dev); - mutex_lock(&indio_dev->mlock); + mutex_lock(&iio_dev_opaque->mlock); WARN_ON(iio_dev_opaque->trig_readonly); indio_dev->trig = iio_trigger_get(trig); iio_dev_opaque->trig_readonly = true; - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&iio_dev_opaque->mlock); return 0; } @@ -438,16 +438,16 @@ static ssize_t current_trigger_store(struct device *dev, struct iio_trigger *trig; int ret; - mutex_lock(&indio_dev->mlock); + mutex_lock(&iio_dev_opaque->mlock); if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&iio_dev_opaque->mlock); return -EBUSY; } if (iio_dev_opaque->trig_readonly) { - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&iio_dev_opaque->mlock); return -EPERM; } - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&iio_dev_opaque->mlock); trig = iio_trigger_acquire_by_name(buf); if (oldtrig == trig) { diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c index 6b33975c8d7380fd5114ed2eb0c227580f0e490d..210a90f44c5323d2cccb635fd38e5ceece6233ab 100644 --- a/drivers/iio/light/adjd_s311.c +++ b/drivers/iio/light/adjd_s311.c @@ -233,8 +233,7 @@ static const struct iio_info adjd_s311_info = { .write_raw = adjd_s311_write_raw, }; -static int adjd_s311_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int adjd_s311_probe(struct i2c_client *client) { struct adjd_s311_data *data; struct iio_dev *indio_dev; @@ -271,7 +270,7 @@ static struct i2c_driver adjd_s311_driver = { .driver = { .name = ADJD_S311_DRV_NAME, }, - .probe = adjd_s311_probe, + .probe_new = 
adjd_s311_probe, .id_table = adjd_s311_id, }; module_i2c_driver(adjd_s311_driver); diff --git a/drivers/iio/light/adux1020.c b/drivers/iio/light/adux1020.c index 9aa28695e6f19cc5246d38591618fb0521b9698e..606075350d01c559ea5ceee132855b5f2d5db05d 100644 --- a/drivers/iio/light/adux1020.c +++ b/drivers/iio/light/adux1020.c @@ -774,8 +774,7 @@ static int adux1020_chip_init(struct adux1020_data *data) ADUX1020_MODE_INT_MASK, ADUX1020_MODE_INT_DISABLE); } -static int adux1020_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int adux1020_probe(struct i2c_client *client) { struct adux1020_data *data; struct iio_dev *indio_dev; @@ -838,7 +837,7 @@ static struct i2c_driver adux1020_driver = { .name = ADUX1020_DRV_NAME, .of_match_table = adux1020_of_match, }, - .probe = adux1020_probe, + .probe_new = adux1020_probe, .id_table = adux1020_id, }; module_i2c_driver(adux1020_driver); diff --git a/drivers/iio/light/al3010.c b/drivers/iio/light/al3010.c index ce5363845b225ed4103ac3b0b7a9026f30c10d4d..69cc723e2ac4ad6b9b36d50f0f8d1f1f5493b093 100644 --- a/drivers/iio/light/al3010.c +++ b/drivers/iio/light/al3010.c @@ -164,8 +164,7 @@ static const struct iio_info al3010_info = { .attrs = &al3010_attribute_group, }; -static int al3010_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int al3010_probe(struct i2c_client *client) { struct al3010_data *data; struct iio_dev *indio_dev; @@ -230,7 +229,7 @@ static struct i2c_driver al3010_driver = { .of_match_table = al3010_of_match, .pm = pm_sleep_ptr(&al3010_pm_ops), }, - .probe = al3010_probe, + .probe_new = al3010_probe, .id_table = al3010_id, }; module_i2c_driver(al3010_driver); diff --git a/drivers/iio/light/al3320a.c b/drivers/iio/light/al3320a.c index bc99179728edcfd6fc8452715c400ca64afc09fd..9ff28bbf34bbed0d8d6ec71b1f63b2d25a008178 100644 --- a/drivers/iio/light/al3320a.c +++ b/drivers/iio/light/al3320a.c @@ -187,8 +187,7 @@ static const struct iio_info al3320a_info = { .attrs = &al3320a_attribute_group, }; -static int al3320a_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int al3320a_probe(struct i2c_client *client) { struct al3320a_data *data; struct iio_dev *indio_dev; @@ -254,7 +253,7 @@ static struct i2c_driver al3320a_driver = { .of_match_table = al3320a_of_match, .pm = pm_sleep_ptr(&al3320a_pm_ops), }, - .probe = al3320a_probe, + .probe_new = al3320a_probe, .id_table = al3320a_id, }; diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c index b70f2681bcb33dd11682b31880d8fb05769342c7..15dfb753734fa0ceea947128a39274eec166862e 100644 --- a/drivers/iio/light/apds9300.c +++ b/drivers/iio/light/apds9300.c @@ -398,8 +398,7 @@ static irqreturn_t apds9300_interrupt_handler(int irq, void *private) return IRQ_HANDLED; } -static int apds9300_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int apds9300_probe(struct i2c_client *client) { struct apds9300_data *data; struct iio_dev *indio_dev; @@ -505,7 +504,7 @@ static struct i2c_driver apds9300_driver = { .name = APDS9300_DRV_NAME, .pm = pm_sleep_ptr(&apds9300_pm_ops), }, - .probe = apds9300_probe, + .probe_new = apds9300_probe, .remove = apds9300_remove, .id_table = apds9300_id, }; diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c index 38d4c7644befc71031aca0f75d4b93b9c01dd274..cc5974a95bd3364995a991bd91edeea1dfacd7b2 100644 --- a/drivers/iio/light/apds9960.c +++ b/drivers/iio/light/apds9960.c @@ -223,14 +223,16 @@ static const struct iio_event_spec 
apds9960_pxs_event_spec[] = { { .type = IIO_EV_TYPE_THRESH, .dir = IIO_EV_DIR_RISING, - .mask_separate = BIT(IIO_EV_INFO_VALUE) | - BIT(IIO_EV_INFO_ENABLE), + .mask_separate = BIT(IIO_EV_INFO_VALUE), }, { .type = IIO_EV_TYPE_THRESH, .dir = IIO_EV_DIR_FALLING, - .mask_separate = BIT(IIO_EV_INFO_VALUE) | - BIT(IIO_EV_INFO_ENABLE), + .mask_separate = BIT(IIO_EV_INFO_VALUE), + }, + { + .type = IIO_EV_TYPE_THRESH, + .mask_separate = BIT(IIO_EV_INFO_ENABLE), }, }; @@ -238,14 +240,16 @@ static const struct iio_event_spec apds9960_als_event_spec[] = { { .type = IIO_EV_TYPE_THRESH, .dir = IIO_EV_DIR_RISING, - .mask_separate = BIT(IIO_EV_INFO_VALUE) | - BIT(IIO_EV_INFO_ENABLE), + .mask_separate = BIT(IIO_EV_INFO_VALUE), }, { .type = IIO_EV_TYPE_THRESH, .dir = IIO_EV_DIR_FALLING, - .mask_separate = BIT(IIO_EV_INFO_VALUE) | - BIT(IIO_EV_INFO_ENABLE), + .mask_separate = BIT(IIO_EV_INFO_VALUE), + }, + { + .type = IIO_EV_TYPE_THRESH, + .mask_separate = BIT(IIO_EV_INFO_ENABLE), }, }; @@ -984,8 +988,7 @@ static int apds9960_chip_init(struct apds9960_data *data) return apds9960_set_powermode(data, 1); } -static int apds9960_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int apds9960_probe(struct i2c_client *client) { struct apds9960_data *data; struct iio_dev *indio_dev; @@ -1128,7 +1131,7 @@ static struct i2c_driver apds9960_driver = { .pm = &apds9960_pm_ops, .acpi_match_table = apds9960_acpi_match, }, - .probe = apds9960_probe, + .probe_new = apds9960_probe, .remove = apds9960_remove, .id_table = apds9960_id, }; diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c index 3e92820bc82007111ff4a87f9da5fb528be183de..390c5b3ad4f6717ee3bb29bff44b6441c49c3921 100644 --- a/drivers/iio/light/bh1750.c +++ b/drivers/iio/light/bh1750.c @@ -228,9 +228,9 @@ static const struct iio_chan_spec bh1750_channels[] = { } }; -static int bh1750_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bh1750_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); int ret, usec; struct bh1750_data *data; struct iio_dev *indio_dev; @@ -320,7 +320,7 @@ static struct i2c_driver bh1750_driver = { .of_match_table = bh1750_of_match, .pm = pm_sleep_ptr(&bh1750_pm_ops), }, - .probe = bh1750_probe, + .probe_new = bh1750_probe, .remove = bh1750_remove, .id_table = bh1750_id, diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c index 90bca392b262009502859799b40b536fd0fcc032..da9039e5a83918f364b00f9f761e1da2c4a0e071 100644 --- a/drivers/iio/light/bh1780.c +++ b/drivers/iio/light/bh1780.c @@ -141,8 +141,7 @@ static const struct iio_chan_spec bh1780_channels[] = { } }; -static int bh1780_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bh1780_probe(struct i2c_client *client) { int ret; struct bh1780_data *bh1780; @@ -270,7 +269,7 @@ static const struct of_device_id of_bh1780_match[] = { MODULE_DEVICE_TABLE(of, of_bh1780_match); static struct i2c_driver bh1780_driver = { - .probe = bh1780_probe, + .probe_new = bh1780_probe, .remove = bh1780_remove, .id_table = bh1780_id, .driver = { diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c index 5214cd014cf8b747c3232751a29dc3258cd66b19..43e492f5051d1718a6e0daa6e8947ea800ffa5a7 100644 --- a/drivers/iio/light/cm3232.c +++ b/drivers/iio/light/cm3232.c @@ -325,9 +325,9 @@ static const struct iio_info cm3232_info = { .attrs = &cm3232_attribute_group, }; -static int cm3232_probe(struct i2c_client *client, - const struct i2c_device_id 
*id) +static int cm3232_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct cm3232_chip *chip; struct iio_dev *indio_dev; int ret; @@ -417,7 +417,7 @@ static struct i2c_driver cm3232_driver = { .pm = pm_sleep_ptr(&cm3232_pm_ops), }, .id_table = cm3232_id, - .probe = cm3232_probe, + .probe_new = cm3232_probe, .remove = cm3232_remove, }; diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c index fd9a8c27de2e278b7d78353b7706d3f2d9489254..e5ce7d0fd272e0f629e757541eff9b053a23ec27 100644 --- a/drivers/iio/light/cm3323.c +++ b/drivers/iio/light/cm3323.c @@ -214,8 +214,7 @@ static const struct iio_info cm3323_info = { .attrs = &cm3323_attribute_group, }; -static int cm3323_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int cm3323_probe(struct i2c_client *client) { struct cm3323_data *data; struct iio_dev *indio_dev; @@ -267,7 +266,7 @@ static struct i2c_driver cm3323_driver = { .name = CM3323_DRV_NAME, .of_match_table = cm3323_of_match, }, - .probe = cm3323_probe, + .probe_new = cm3323_probe, .id_table = cm3323_id, }; diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c index 6615c98b601c3feed34ce17feada7b79c9977c66..1707dbf2ce26784b5f62f1c7542de17d37717e32 100644 --- a/drivers/iio/light/cm36651.c +++ b/drivers/iio/light/cm36651.c @@ -618,9 +618,9 @@ static const struct iio_info cm36651_info = { .attrs = &cm36651_attribute_group, }; -static int cm36651_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int cm36651_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct cm36651_data *cm36651; struct iio_dev *indio_dev; int ret; @@ -730,7 +730,7 @@ static struct i2c_driver cm36651_driver = { .name = "cm36651", .of_match_table = cm36651_of_match, }, - .probe = cm36651_probe, + .probe_new = cm36651_probe, .remove = cm36651_remove, .id_table = cm36651_id, }; diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c index 8000fa347344fd82f11b202546cb63aea9b82cde..c0430db0038afde7db8fde3a5ca98ca27a355b05 100644 --- a/drivers/iio/light/gp2ap002.c +++ b/drivers/iio/light/gp2ap002.c @@ -425,8 +425,7 @@ static struct regmap_bus gp2ap002_regmap_bus = { .reg_write = gp2ap002_regmap_i2c_write, }; -static int gp2ap002_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int gp2ap002_probe(struct i2c_client *client) { struct gp2ap002 *gp2ap002; struct iio_dev *indio_dev; @@ -711,7 +710,7 @@ static struct i2c_driver gp2ap002_driver = { .of_match_table = gp2ap002_of_match, .pm = pm_ptr(&gp2ap002_dev_pm_ops), }, - .probe = gp2ap002_probe, + .probe_new = gp2ap002_probe, .remove = gp2ap002_remove, .id_table = gp2ap002_id_table, }; diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c index 826439299e8b03899425cab94064bfb6a765931e..a5bf9da0d2f38f0335121b303b689e70545930fa 100644 --- a/drivers/iio/light/gp2ap020a00f.c +++ b/drivers/iio/light/gp2ap020a00f.c @@ -1467,9 +1467,9 @@ static const struct iio_buffer_setup_ops gp2ap020a00f_buffer_setup_ops = { .predisable = &gp2ap020a00f_buffer_predisable, }; -static int gp2ap020a00f_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int gp2ap020a00f_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct gp2ap020a00f_data *data; struct iio_dev *indio_dev; struct regmap *regmap; @@ -1609,7 +1609,7 @@ static struct i2c_driver 
gp2ap020a00f_driver = { .name = GP2A_I2C_NAME, .of_match_table = gp2ap020a00f_of_match, }, - .probe = gp2ap020a00f_probe, + .probe_new = gp2ap020a00f_probe, .remove = gp2ap020a00f_remove, .id_table = gp2ap020a00f_id, }; diff --git a/drivers/iio/light/isl29018.c b/drivers/iio/light/isl29018.c index b36f8b7ca68e7c672a3c6f03ba43df03eba6c8e4..141845fb47f960e427c180c71dac9c5eeab35511 100644 --- a/drivers/iio/light/isl29018.c +++ b/drivers/iio/light/isl29018.c @@ -711,9 +711,9 @@ static void isl29018_disable_regulator_action(void *_data) pr_err("failed to disable isl29018's VCC regulator!\n"); } -static int isl29018_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int isl29018_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct isl29018_chip *chip; struct iio_dev *indio_dev; int err; @@ -865,7 +865,7 @@ static struct i2c_driver isl29018_driver = { .pm = pm_sleep_ptr(&isl29018_pm_ops), .of_match_table = isl29018_of_match, }, - .probe = isl29018_probe, + .probe_new = isl29018_probe, .id_table = isl29018_id, }; module_i2c_driver(isl29018_driver); diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c index 32d58e18f26da3485c1de7b4fe2f5f05a2c24a4c..bcf3a556e41ad711b8bbc279ae5a991086c31078 100644 --- a/drivers/iio/light/isl29028.c +++ b/drivers/iio/light/isl29028.c @@ -565,9 +565,9 @@ static const struct regmap_config isl29028_regmap_config = { .cache_type = REGCACHE_RBTREE, }; -static int isl29028_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int isl29028_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct isl29028_chip *chip; struct iio_dev *indio_dev; int ret; @@ -698,7 +698,7 @@ static struct i2c_driver isl29028_driver = { .pm = pm_ptr(&isl29028_pm_ops), .of_match_table = isl29028_of_match, }, - .probe = isl29028_probe, + .probe_new = isl29028_probe, .remove = isl29028_remove, .id_table = isl29028_id, }; diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c index c199e63cce8289ddda3c35738127ba3720414dca..b4bd656ca16984459c1074c908c32ecc0cdd63cf 100644 --- a/drivers/iio/light/isl29125.c +++ b/drivers/iio/light/isl29125.c @@ -241,8 +241,7 @@ static const struct iio_buffer_setup_ops isl29125_buffer_setup_ops = { .predisable = isl29125_buffer_predisable, }; -static int isl29125_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int isl29125_probe(struct i2c_client *client) { struct isl29125_data *data; struct iio_dev *indio_dev; @@ -338,7 +337,7 @@ static struct i2c_driver isl29125_driver = { .name = ISL29125_DRV_NAME, .pm = pm_sleep_ptr(&isl29125_pm_ops), }, - .probe = isl29125_probe, + .probe_new = isl29125_probe, .remove = isl29125_remove, .id_table = isl29125_id, }; diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c index 57ce6d75966c63d6ac907c4ff01510e233aa1fab..d3834d0a0635aa034feb743fe4a90d900a9113c2 100644 --- a/drivers/iio/light/jsa1212.c +++ b/drivers/iio/light/jsa1212.c @@ -308,8 +308,7 @@ static const struct regmap_config jsa1212_regmap_config = { .volatile_reg = jsa1212_is_volatile_reg, }; -static int jsa1212_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int jsa1212_probe(struct i2c_client *client) { struct jsa1212_data *data; struct iio_dev *indio_dev; @@ -441,7 +440,7 @@ static struct i2c_driver jsa1212_driver = { .pm = pm_sleep_ptr(&jsa1212_pm_ops), .acpi_match_table = ACPI_PTR(jsa1212_acpi_match), }, - 
.probe = jsa1212_probe, + .probe_new = jsa1212_probe, .remove = jsa1212_remove, .id_table = jsa1212_id, }; diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index 74a1ccda8b9c4a6d0ee8d059671eb62297d29c4d..bdbd918213e45cfc402e1afcab88397c33796485 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -153,7 +153,6 @@ struct ltr501_chip_info { struct ltr501_data { struct i2c_client *client; - struct regulator_bulk_data regulators[2]; struct mutex lock_als, lock_ps; const struct ltr501_chip_info *chip_info; u8 als_contr, ps_contr; @@ -1415,13 +1414,6 @@ static const struct regmap_config ltr501_regmap_config = { .volatile_reg = ltr501_is_volatile_reg, }; -static void ltr501_disable_regulators(void *d) -{ - struct ltr501_data *data = d; - - regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators); -} - static int ltr501_powerdown(struct ltr501_data *data) { return ltr501_write_contr(data, data->als_contr & @@ -1440,9 +1432,10 @@ static const char *ltr501_match_acpi_device(struct device *dev, int *chip_idx) return dev_name(dev); } -static int ltr501_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ltr501_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); + static const char * const regulator_names[] = { "vdd", "vddio" }; struct ltr501_data *data; struct iio_dev *indio_dev; struct regmap *regmap; @@ -1466,25 +1459,13 @@ static int ltr501_probe(struct i2c_client *client, mutex_init(&data->lock_als); mutex_init(&data->lock_ps); - data->regulators[0].supply = "vdd"; - data->regulators[1].supply = "vddio"; - ret = devm_regulator_bulk_get(&client->dev, - ARRAY_SIZE(data->regulators), - data->regulators); + ret = devm_regulator_bulk_get_enable(&client->dev, + ARRAY_SIZE(regulator_names), + regulator_names); if (ret) return dev_err_probe(&client->dev, ret, "Failed to get regulators\n"); - ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators), - data->regulators); - if (ret) - return ret; - - ret = devm_add_action_or_reset(&client->dev, - ltr501_disable_regulators, data); - if (ret) - return ret; - data->reg_it = devm_regmap_field_alloc(&client->dev, regmap, reg_field_it); if (IS_ERR(data->reg_it)) { @@ -1660,7 +1641,7 @@ static struct i2c_driver ltr501_driver = { .pm = pm_sleep_ptr(&ltr501_pm_ops), .acpi_match_table = ACPI_PTR(ltr_acpi_match), }, - .probe = ltr501_probe, + .probe_new = ltr501_probe, .remove = ltr501_remove, .id_table = ltr501_id, }; diff --git a/drivers/iio/light/lv0104cs.c b/drivers/iio/light/lv0104cs.c index c2aef88f4e6349f16918a5e3ae64a7f29ac402c5..c041fa0faa5df61cf0720fb02963a23eee29b105 100644 --- a/drivers/iio/light/lv0104cs.c +++ b/drivers/iio/light/lv0104cs.c @@ -474,8 +474,7 @@ static const struct iio_chan_spec lv0104cs_channels[] = { }, }; -static int lv0104cs_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int lv0104cs_probe(struct i2c_client *client) { struct iio_dev *indio_dev; struct lv0104cs_private *lv0104cs; @@ -521,7 +520,7 @@ static struct i2c_driver lv0104cs_i2c_driver = { .name = "lv0104cs", }, .id_table = lv0104cs_id, - .probe = lv0104cs_probe, + .probe_new = lv0104cs_probe, }; module_i2c_driver(lv0104cs_i2c_driver); diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c index 85689dffbcbf75e58b4e6628627603e87f0f307f..5dcabc43a30e6e3694f505180ae614cc8ea8325f 100644 --- a/drivers/iio/light/max44000.c +++ b/drivers/iio/light/max44000.c @@ -523,8 +523,7 @@ out_unlock: return IRQ_HANDLED; 
} -static int max44000_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max44000_probe(struct i2c_client *client) { struct max44000_data *data; struct iio_dev *indio_dev; @@ -617,7 +616,7 @@ static struct i2c_driver max44000_driver = { .name = MAX44000_DRV_NAME, .acpi_match_table = ACPI_PTR(max44000_acpi_match), }, - .probe = max44000_probe, + .probe_new = max44000_probe, .id_table = max44000_id, }; diff --git a/drivers/iio/light/noa1305.c b/drivers/iio/light/noa1305.c index ee81fe083e4ccec41dedbf0a616a2b564209ed90..eaf548d4649ea9ea811e2d718c27fcdf73a47548 100644 --- a/drivers/iio/light/noa1305.c +++ b/drivers/iio/light/noa1305.c @@ -46,7 +46,6 @@ struct noa1305_priv { struct i2c_client *client; struct regmap *regmap; - struct regulator *vin_reg; }; static int noa1305_measure(struct noa1305_priv *priv) @@ -187,15 +186,7 @@ static const struct regmap_config noa1305_regmap_config = { .writeable_reg = noa1305_writable_reg, }; -static void noa1305_reg_remove(void *data) -{ - struct noa1305_priv *priv = data; - - regulator_disable(priv->vin_reg); -} - -static int noa1305_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int noa1305_probe(struct i2c_client *client) { struct noa1305_priv *priv; struct iio_dev *indio_dev; @@ -216,23 +207,11 @@ static int noa1305_probe(struct i2c_client *client, priv = iio_priv(indio_dev); - priv->vin_reg = devm_regulator_get(&client->dev, "vin"); - if (IS_ERR(priv->vin_reg)) - return dev_err_probe(&client->dev, PTR_ERR(priv->vin_reg), + ret = devm_regulator_get_enable(&client->dev, "vin"); + if (ret) + return dev_err_probe(&client->dev, ret, "get regulator vin failed\n"); - ret = regulator_enable(priv->vin_reg); - if (ret) { - dev_err(&client->dev, "enable regulator vin failed\n"); - return ret; - } - - ret = devm_add_action_or_reset(&client->dev, noa1305_reg_remove, priv); - if (ret) { - dev_err(&client->dev, "addition of devm action failed\n"); - return ret; - } - i2c_set_clientdata(client, indio_dev); priv->client = client; priv->regmap = regmap; @@ -299,7 +278,7 @@ static struct i2c_driver noa1305_driver = { .name = NOA1305_DRIVER_NAME, .of_match_table = noa1305_of_match, }, - .probe = noa1305_probe, + .probe_new = noa1305_probe, .id_table = noa1305_ids, }; diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c index a26d1c3f954375d53c4a0eab3dae924dd0a40a44..ec4f5c2369c42640a2de69a026a6df6f4dba745c 100644 --- a/drivers/iio/light/opt3001.c +++ b/drivers/iio/light/opt3001.c @@ -735,8 +735,7 @@ out: return IRQ_HANDLED; } -static int opt3001_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int opt3001_probe(struct i2c_client *client) { struct device *dev = &client->dev; @@ -835,7 +834,7 @@ static const struct of_device_id opt3001_of_match[] = { MODULE_DEVICE_TABLE(of, opt3001_of_match); static struct i2c_driver opt3001_driver = { - .probe = opt3001_probe, + .probe_new = opt3001_probe, .remove = opt3001_remove, .id_table = opt3001_id, diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c index 3cb2de51f4aa8749c9d56832b6ab3834bc200462..15a666f15c275f23f44390c2807efcd75aedde83 100644 --- a/drivers/iio/light/pa12203001.c +++ b/drivers/iio/light/pa12203001.c @@ -338,8 +338,7 @@ out: return ret; } -static int pa12203001_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int pa12203001_probe(struct i2c_client *client) { struct pa12203001_data *data; struct iio_dev *indio_dev; @@ -475,7 +474,7 @@ static struct i2c_driver 
pa12203001_driver = { .pm = &pa12203001_pm_ops, .acpi_match_table = ACPI_PTR(pa12203001_acpi_match), }, - .probe = pa12203001_probe, + .probe_new = pa12203001_probe, .remove = pa12203001_remove, .id_table = pa12203001_id, diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c index d1c16dd76058398bb9b5bf65fb5edc894e37d4d6..668e444f604983fb00efc4755eed1c57e1aa821d 100644 --- a/drivers/iio/light/rpr0521.c +++ b/drivers/iio/light/rpr0521.c @@ -927,8 +927,7 @@ static const struct regmap_config rpr0521_regmap_config = { .volatile_reg = rpr0521_is_volatile_reg, }; -static int rpr0521_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int rpr0521_probe(struct i2c_client *client) { struct rpr0521_data *data; struct iio_dev *indio_dev; @@ -1122,7 +1121,7 @@ static struct i2c_driver rpr0521_driver = { .pm = pm_ptr(&rpr0521_pm_ops), .acpi_match_table = ACPI_PTR(rpr0521_acpi_match), }, - .probe = rpr0521_probe, + .probe_new = rpr0521_probe, .remove = rpr0521_remove, .id_table = rpr0521_id, }; diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c index f8c9b2cc322e8d0d41aa4711ee213e0bf8e2ae09..a08fbc8f5adb1bdfd0624b7f7672c510f68849c5 100644 --- a/drivers/iio/light/si1133.c +++ b/drivers/iio/light/si1133.c @@ -990,9 +990,9 @@ static int si1133_validate_ids(struct iio_dev *iio_dev) return 0; } -static int si1133_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int si1133_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct si1133_data *data; struct iio_dev *iio_dev; int err; @@ -1064,7 +1064,7 @@ static struct i2c_driver si1133_driver = { .driver = { .name = "si1133", }, - .probe = si1133_probe, + .probe_new = si1133_probe, .id_table = si1133_ids, }; diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c index e8f6cdf26f22a7356c8525a222c1de926c22cd9c..f7126235f94c91cbd954e774c2edf4fa388acd04 100644 --- a/drivers/iio/light/si1145.c +++ b/drivers/iio/light/si1145.c @@ -1269,9 +1269,9 @@ static int si1145_probe_trigger(struct iio_dev *indio_dev) return 0; } -static int si1145_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int si1145_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct si1145_data *data; struct iio_dev *indio_dev; u8 part_id, rev_id, seq_id; @@ -1352,7 +1352,7 @@ static struct i2c_driver si1145_driver = { .driver = { .name = "si1145", }, - .probe = si1145_probe, + .probe_new = si1145_probe, .id_table = si1145_ids, }; diff --git a/drivers/iio/light/st_uvis25_i2c.c b/drivers/iio/light/st_uvis25_i2c.c index c982b0b255cf858b0aa8aa7c750b84def5cc44c6..2160e87bb498e7790153a328aa15ff4bf29be439 100644 --- a/drivers/iio/light/st_uvis25_i2c.c +++ b/drivers/iio/light/st_uvis25_i2c.c @@ -25,8 +25,7 @@ static const struct regmap_config st_uvis25_i2c_regmap_config = { .read_flag_mask = UVIS25_I2C_AUTO_INCREMENT, }; -static int st_uvis25_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int st_uvis25_i2c_probe(struct i2c_client *client) { struct regmap *regmap; @@ -58,7 +57,7 @@ static struct i2c_driver st_uvis25_driver = { .pm = pm_sleep_ptr(&st_uvis25_pm_ops), .of_match_table = st_uvis25_i2c_of_match, }, - .probe = st_uvis25_i2c_probe, + .probe_new = st_uvis25_i2c_probe, .id_table = st_uvis25_i2c_id_table, }; module_i2c_driver(st_uvis25_driver); diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c index 
7b8e0da6aabc143296fba096f64bb15321311388..48ae6ff0015e1eb91b9ad26638a41f5667537105 100644 --- a/drivers/iio/light/stk3310.c +++ b/drivers/iio/light/stk3310.c @@ -586,8 +586,7 @@ out: return IRQ_HANDLED; } -static int stk3310_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int stk3310_probe(struct i2c_client *client) { int ret; struct iio_dev *indio_dev; @@ -715,7 +714,7 @@ static struct i2c_driver stk3310_driver = { .pm = pm_sleep_ptr(&stk3310_pm_ops), .acpi_match_table = ACPI_PTR(stk3310_acpi_id), }, - .probe = stk3310_probe, + .probe_new = stk3310_probe, .remove = stk3310_remove, .id_table = stk3310_i2c_id, }; diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c index 3951536022b3d78bb0e63a5d618de6985ab14240..5100732fbaf0a1255c89e17e622ada20601a5c0e 100644 --- a/drivers/iio/light/tcs3414.c +++ b/drivers/iio/light/tcs3414.c @@ -279,8 +279,7 @@ static void tcs3414_powerdown_cleanup(void *data) tcs3414_powerdown(data); } -static int tcs3414_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int tcs3414_probe(struct i2c_client *client) { struct tcs3414_data *data; struct iio_dev *indio_dev; @@ -374,7 +373,7 @@ static struct i2c_driver tcs3414_driver = { .name = TCS3414_DRV_NAME, .pm = pm_sleep_ptr(&tcs3414_pm_ops), }, - .probe = tcs3414_probe, + .probe_new = tcs3414_probe, .id_table = tcs3414_id, }; module_i2c_driver(tcs3414_driver); diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c index db17fec634be1e284f3b5e3bb51e456f5a915089..6187c54879162d035e0129737c3116eeda948d33 100644 --- a/drivers/iio/light/tcs3472.c +++ b/drivers/iio/light/tcs3472.c @@ -442,8 +442,7 @@ static const struct iio_info tcs3472_info = { .attrs = &tcs3472_attribute_group, }; -static int tcs3472_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int tcs3472_probe(struct i2c_client *client) { struct tcs3472_data *data; struct iio_dev *indio_dev; @@ -610,7 +609,7 @@ static struct i2c_driver tcs3472_driver = { .name = TCS3472_DRV_NAME, .pm = pm_sleep_ptr(&tcs3472_pm_ops), }, - .probe = tcs3472_probe, + .probe_new = tcs3472_probe, .remove = tcs3472_remove, .id_table = tcs3472_id, }; diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c index 951f35ef3f41de994dd88f5339c7eb862ae3eb93..d0e42b73203a65a07953f73cb00872add8abd8cc 100644 --- a/drivers/iio/light/tsl2563.c +++ b/drivers/iio/light/tsl2563.c @@ -699,8 +699,7 @@ static const struct iio_info tsl2563_info = { .write_event_config = &tsl2563_write_interrupt_config, }; -static int tsl2563_probe(struct i2c_client *client, - const struct i2c_device_id *device_id) +static int tsl2563_probe(struct i2c_client *client) { struct iio_dev *indio_dev; struct tsl2563_chip *chip; @@ -880,7 +879,7 @@ static struct i2c_driver tsl2563_i2c_driver = { .of_match_table = tsl2563_of_match, .pm = pm_sleep_ptr(&tsl2563_pm_ops), }, - .probe = tsl2563_probe, + .probe_new = tsl2563_probe, .remove = tsl2563_remove, .id_table = tsl2563_id, }; diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c index 7bcb5c7189224b9b35c2061f09806a8458a2e745..a05f1c0453d16d3455283b765b1cdce339141e02 100644 --- a/drivers/iio/light/tsl2583.c +++ b/drivers/iio/light/tsl2583.c @@ -809,8 +809,7 @@ static const struct iio_info tsl2583_info = { .write_raw = tsl2583_write_raw, }; -static int tsl2583_probe(struct i2c_client *clientp, - const struct i2c_device_id *idp) +static int tsl2583_probe(struct i2c_client *clientp) { int ret; struct tsl2583_chip *chip; @@ -943,7 +942,7 @@ 
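Every .probe_new conversion in this series follows the same recipe, worth spelling out once: the i2c_device_id argument disappears from the probe signature, and drivers that still need the matched ID recover it with i2c_client_get_device_id(). A stand-alone template sketch, assuming only those two pieces of the i2c API shown in the patches above (the "foo" names are invented for illustration):

#include <linux/i2c.h>
#include <linux/module.h>

/* Sketch of the .probe -> .probe_new conversion; "foo" is hypothetical. */
static int foo_probe(struct i2c_client *client)
{
	/* Replaces the old "const struct i2c_device_id *id" parameter. */
	const struct i2c_device_id *id = i2c_client_get_device_id(client);

	if (!id)	/* matched via an OF/ACPI table rather than the id table */
		return -ENODEV;

	dev_info(&client->dev, "matched id %s\n", id->name);
	return 0;
}

static const struct i2c_device_id foo_id_table[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id_table);

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.probe_new = foo_probe,	/* was: .probe = foo_probe */
	.id_table = foo_id_table,
};
module_i2c_driver(foo_driver);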
static struct i2c_driver tsl2583_driver = { .of_match_table = tsl2583_of_match, }, .id_table = tsl2583_idtable, - .probe = tsl2583_probe, + .probe_new = tsl2583_probe, .remove = tsl2583_remove, }; module_i2c_driver(tsl2583_driver); diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c index dd9051f1cc1a0a957d470ebc33630cf718100819..ad50baa0202cceda80600b8fdd187d478c4351fb 100644 --- a/drivers/iio/light/tsl2772.c +++ b/drivers/iio/light/tsl2772.c @@ -1750,9 +1750,9 @@ static const struct tsl2772_chip_info tsl2772_chip_info_tbl[] = { }, }; -static int tsl2772_probe(struct i2c_client *clientp, - const struct i2c_device_id *id) +static int tsl2772_probe(struct i2c_client *clientp) { + const struct i2c_device_id *id = i2c_client_get_device_id(clientp); struct iio_dev *indio_dev; struct tsl2772_chip *chip; int ret; @@ -1931,7 +1931,7 @@ static struct i2c_driver tsl2772_driver = { .pm = &tsl2772_pm_ops, }, .id_table = tsl2772_idtable, - .probe = tsl2772_probe, + .probe_new = tsl2772_probe, }; module_i2c_driver(tsl2772_driver); diff --git a/drivers/iio/light/tsl4531.c b/drivers/iio/light/tsl4531.c index 090038fed88919f49026134f74d30b220cd8b5e0..d95397eb15263bd456bef5922e4991e69207de15 100644 --- a/drivers/iio/light/tsl4531.c +++ b/drivers/iio/light/tsl4531.c @@ -160,8 +160,7 @@ static int tsl4531_check_id(struct i2c_client *client) } } -static int tsl4531_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int tsl4531_probe(struct i2c_client *client) { struct tsl4531_data *data; struct iio_dev *indio_dev; @@ -238,7 +237,7 @@ static struct i2c_driver tsl4531_driver = { .name = TSL4531_DRV_NAME, .pm = pm_sleep_ptr(&tsl4531_pm_ops), }, - .probe = tsl4531_probe, + .probe_new = tsl4531_probe, .remove = tsl4531_remove, .id_table = tsl4531_id, }; diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c index 3e652d7f3b0ebf6b370c278dde4477622acf082a..8b2a0c99c8e6f856be4f5e322512a4c7d1bdae2d 100644 --- a/drivers/iio/light/us5182d.c +++ b/drivers/iio/light/us5182d.c @@ -832,8 +832,7 @@ static irqreturn_t us5182d_irq_thread_handler(int irq, void *private) return IRQ_HANDLED; } -static int us5182d_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int us5182d_probe(struct i2c_client *client) { struct us5182d_data *data; struct iio_dev *indio_dev; @@ -975,7 +974,7 @@ static struct i2c_driver us5182d_driver = { .of_match_table = us5182d_of_match, .acpi_match_table = ACPI_PTR(us5182d_acpi_match), }, - .probe = us5182d_probe, + .probe_new = us5182d_probe, .remove = us5182d_remove, .id_table = us5182d_id, diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c index f6c83ecaad8b584db22266a0b9576fc4f9a2c4f3..cc1a2062e76d6cead8d2e4d468ac918a97c6091e 100644 --- a/drivers/iio/light/vcnl4000.c +++ b/drivers/iio/light/vcnl4000.c @@ -17,6 +17,7 @@ * interrupts (VCNL4040, VCNL4200) */ +#include <linux/bitfield.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/err.h> @@ -74,6 +75,10 @@ #define VCNL4000_PROX_EN BIT(1) /* start proximity measurement */ #define VCNL4000_SELF_TIMED_EN BIT(0) /* start self-timed measurement */ +#define VCNL4040_ALS_CONF_ALS_SHUTDOWN BIT(0) +#define VCNL4040_PS_CONF1_PS_SHUTDOWN BIT(0) +#define VCNL4040_PS_CONF2_PS_IT GENMASK(3, 1) /* Proximity integration time */ /* Bit masks for interrupt registers. 
*/ #define VCNL4010_INT_THR_SEL BIT(0) /* Select threshold interrupt source */ #define VCNL4010_INT_THR_EN BIT(1) /* Threshold interrupt type */ @@ -101,6 +106,17 @@ static const int vcnl4010_prox_sampling_frequency[][2] = { {250, 0}, }; +static const int vcnl4040_ps_it_times[][2] = { + {0, 100}, + {0, 150}, + {0, 200}, + {0, 250}, + {0, 300}, + {0, 350}, + {0, 400}, + {0, 800}, +}; + #define VCNL4000_SLEEP_DELAY_MS 2000 /* before we enter pm_runtime_suspend */ enum vcnl4000_device_ids { @@ -188,16 +204,61 @@ static int vcnl4000_init(struct vcnl4000_data *data) return data->chip_spec->set_power_state(data, true); }; +static ssize_t vcnl4000_write_als_enable(struct vcnl4000_data *data, bool en) +{ + int ret; + + mutex_lock(&data->vcnl4000_lock); + + ret = i2c_smbus_read_word_data(data->client, VCNL4200_AL_CONF); + if (ret < 0) + goto out; + + if (en) + ret &= ~VCNL4040_ALS_CONF_ALS_SHUTDOWN; + else + ret |= VCNL4040_ALS_CONF_ALS_SHUTDOWN; + + ret = i2c_smbus_write_word_data(data->client, VCNL4200_AL_CONF, ret); + +out: + mutex_unlock(&data->vcnl4000_lock); + + return ret; +} + +static ssize_t vcnl4000_write_ps_enable(struct vcnl4000_data *data, bool en) +{ + int ret; + + mutex_lock(&data->vcnl4000_lock); + + ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1); + if (ret < 0) + goto out; + + if (en) + ret &= ~VCNL4040_PS_CONF1_PS_SHUTDOWN; + else + ret |= VCNL4040_PS_CONF1_PS_SHUTDOWN; + + ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF1, ret); + +out: + mutex_unlock(&data->vcnl4000_lock); + + return ret; +} + static int vcnl4200_set_power_state(struct vcnl4000_data *data, bool on) { - u16 val = on ? 0 /* power on */ : 1 /* shut down */; int ret; - ret = i2c_smbus_write_word_data(data->client, VCNL4200_AL_CONF, val); + ret = vcnl4000_write_als_enable(data, on); if (ret < 0) return ret; - ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF1, val); + ret = vcnl4000_write_ps_enable(data, on); if (ret < 0) return ret; @@ -422,6 +483,57 @@ static int vcnl4000_set_pm_runtime_state(struct vcnl4000_data *data, bool on) return ret; } +static int vcnl4040_read_ps_it(struct vcnl4000_data *data, int *val, int *val2) +{ + int ret; + + ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1); + if (ret < 0) + return ret; + + ret = FIELD_GET(VCNL4040_PS_CONF2_PS_IT, ret); + + if (ret >= ARRAY_SIZE(vcnl4040_ps_it_times)) + return -EINVAL; + + *val = vcnl4040_ps_it_times[ret][0]; + *val2 = vcnl4040_ps_it_times[ret][1]; + + return 0; +} + +static ssize_t vcnl4040_write_ps_it(struct vcnl4000_data *data, int val) +{ + unsigned int i; + int ret, index = -1; + u16 regval; + + for (i = 0; i < ARRAY_SIZE(vcnl4040_ps_it_times); i++) { + if (val == vcnl4040_ps_it_times[i][1]) { + index = i; + break; + } + } + + if (index < 0) + return -EINVAL; + + mutex_lock(&data->vcnl4000_lock); + + ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1); + if (ret < 0) + goto out; + + regval = (ret & ~VCNL4040_PS_CONF2_PS_IT) | + FIELD_PREP(VCNL4040_PS_CONF2_PS_IT, index); + ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF1, + regval); + +out: + mutex_unlock(&data->vcnl4000_lock); + return ret; +} + static int vcnl4000_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) @@ -458,6 +570,47 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev, *val = 0; *val2 = data->al_scale; return IIO_VAL_INT_PLUS_MICRO; + case IIO_CHAN_INFO_INT_TIME: + if (chan->type != IIO_PROXIMITY) + return -EINVAL; + ret = 
vcnl4040_read_ps_it(data, val, val2); + if (ret < 0) + return ret; + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } +} + +static int vcnl4040_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct vcnl4000_data *data = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_INT_TIME: + if (val != 0) + return -EINVAL; + if (chan->type != IIO_PROXIMITY) + return -EINVAL; + return vcnl4040_write_ps_it(data, val2); + default: + return -EINVAL; + } +} + +static int vcnl4040_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_INT_TIME: + *vals = (int *)vcnl4040_ps_it_times; + *type = IIO_VAL_INT_PLUS_MICRO; + *length = 2 * ARRAY_SIZE(vcnl4040_ps_it_times); + return IIO_AVAIL_LIST; default: return -EINVAL; } @@ -796,6 +949,20 @@ static const struct iio_chan_spec vcnl4010_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(1), }; +static const struct iio_chan_spec vcnl4040_channels[] = { + { + .type = IIO_LIGHT, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + }, { + .type = IIO_PROXIMITY, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_INT_TIME), + .info_mask_separate_available = BIT(IIO_CHAN_INFO_INT_TIME), + .ext_info = vcnl4000_ext_info, + } +}; + static const struct iio_info vcnl4000_info = { .read_raw = vcnl4000_read_raw, }; @@ -810,6 +977,12 @@ static const struct iio_info vcnl4010_info = { .write_event_config = vcnl4010_write_event_config, }; +static const struct iio_info vcnl4040_info = { + .read_raw = vcnl4000_read_raw, + .write_raw = vcnl4040_write_raw, + .read_avail = vcnl4040_read_avail, +}; + static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = { [VCNL4000] = { .prod = "VCNL4000", @@ -839,9 +1012,9 @@ static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = { .measure_light = vcnl4200_measure_light, .measure_proximity = vcnl4200_measure_proximity, .set_power_state = vcnl4200_set_power_state, - .channels = vcnl4000_channels, - .num_channels = ARRAY_SIZE(vcnl4000_channels), - .info = &vcnl4000_info, + .channels = vcnl4040_channels, + .num_channels = ARRAY_SIZE(vcnl4040_channels), + .info = &vcnl4040_info, .irq_support = false, }, [VCNL4200] = { @@ -1007,9 +1180,9 @@ static int vcnl4010_probe_trigger(struct iio_dev *indio_dev) return devm_iio_trigger_register(&client->dev, trigger); } -static int vcnl4000_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int vcnl4000_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct vcnl4000_data *data; struct iio_dev *indio_dev; int ret; @@ -1153,7 +1326,7 @@ static struct i2c_driver vcnl4000_driver = { .pm = pm_ptr(&vcnl4000_pm_ops), .of_match_table = vcnl_4000_of_match, }, - .probe = vcnl4000_probe, + .probe_new = vcnl4000_probe, .id_table = vcnl4000_id, .remove = vcnl4000_remove, }; diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c index 3ed37f6057fbf429573054a8e801d9bedc6fd75c..84148b9440006d34c278cffbeea906a621f03688 100644 --- a/drivers/iio/light/vcnl4035.c +++ b/drivers/iio/light/vcnl4035.c @@ -539,8 +539,7 @@ static int vcnl4035_probe_trigger(struct iio_dev *indio_dev) return ret; } -static int vcnl4035_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int vcnl4035_probe(struct i2c_client *client) { struct vcnl4035_data *data; struct iio_dev 
*indio_dev; @@ -668,7 +667,7 @@ static struct i2c_driver vcnl4035_driver = { .pm = pm_ptr(&vcnl4035_pm_ops), .of_match_table = vcnl4035_of_match, }, - .probe = vcnl4035_probe, + .probe_new = vcnl4035_probe, .remove = vcnl4035_remove, .id_table = vcnl4035_id, }; diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c index 9a7800cdfee26c3916a1344c302719324022bfdd..e7d2d5d177d43c8cc28d3a8ddb4a59d4946c27b3 100644 --- a/drivers/iio/light/veml6030.c +++ b/drivers/iio/light/veml6030.c @@ -786,8 +786,7 @@ static int veml6030_hw_init(struct iio_dev *indio_dev) return ret; } -static int veml6030_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int veml6030_probe(struct i2c_client *client) { int ret; struct veml6030_data *data; @@ -893,7 +892,7 @@ static struct i2c_driver veml6030_driver = { .of_match_table = veml6030_of_match, .pm = pm_ptr(&veml6030_pm_ops), }, - .probe = veml6030_probe, + .probe_new = veml6030_probe, .id_table = veml6030_id, }; module_i2c_driver(veml6030_driver); diff --git a/drivers/iio/light/veml6070.c b/drivers/iio/light/veml6070.c index cfa4e9e7c8038395a9183c3687600deef9a9ed69..ee76a68deb24af1ed34dd149812af29149ea9b10 100644 --- a/drivers/iio/light/veml6070.c +++ b/drivers/iio/light/veml6070.c @@ -135,8 +135,7 @@ static const struct iio_info veml6070_info = { .read_raw = veml6070_read_raw, }; -static int veml6070_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int veml6070_probe(struct i2c_client *client) { struct veml6070_data *data; struct iio_dev *indio_dev; @@ -199,7 +198,7 @@ static struct i2c_driver veml6070_driver = { .driver = { .name = VEML6070_DRV_NAME, }, - .probe = veml6070_probe, + .probe_new = veml6070_probe, .remove = veml6070_remove, .id_table = veml6070_id, }; diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c index d47a4f6f4e8701acbf33b06c5bb6a16a7de04533..8b56df26c59e8fff7f6a2a79ad8dc8c3792cb888 100644 --- a/drivers/iio/light/vl6180.c +++ b/drivers/iio/light/vl6180.c @@ -493,8 +493,7 @@ static int vl6180_init(struct vl6180_data *data) return vl6180_hold(data, false); } -static int vl6180_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int vl6180_probe(struct i2c_client *client) { struct vl6180_data *data; struct iio_dev *indio_dev; @@ -539,7 +538,7 @@ static struct i2c_driver vl6180_driver = { .name = VL6180_DRV_NAME, .of_match_table = vl6180_of_match, }, - .probe = vl6180_probe, + .probe_new = vl6180_probe, .id_table = vl6180_id, }; diff --git a/drivers/iio/light/zopt2201.c b/drivers/iio/light/zopt2201.c index e0bc9df9c88b00106f36235a2d04f94017c9749c..e3bac8b563807427dd4f3982996682f75d99ccc9 100644 --- a/drivers/iio/light/zopt2201.c +++ b/drivers/iio/light/zopt2201.c @@ -501,8 +501,7 @@ static const struct iio_info zopt2201_info = { .attrs = &zopt2201_attribute_group, }; -static int zopt2201_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int zopt2201_probe(struct i2c_client *client) { struct zopt2201_data *data; struct iio_dev *indio_dev; @@ -555,7 +554,7 @@ static struct i2c_driver zopt2201_driver = { .driver = { .name = ZOPT2201_DRV_NAME, }, - .probe = zopt2201_probe, + .probe_new = zopt2201_probe, .id_table = zopt2201_id, }; diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c index 7ec9ab3beb45a97bda232ad56fea599182a4480c..45abdcce6bc0ad1f3849827238a215c2ad974846 100644 --- a/drivers/iio/magnetometer/ak8974.c +++ b/drivers/iio/magnetometer/ak8974.c @@ -814,8 +814,7 @@ static const 
struct regmap_config ak8974_regmap_config = { .precious_reg = ak8974_precious_reg, }; -static int ak8974_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int ak8974_probe(struct i2c_client *i2c) { struct iio_dev *indio_dev; struct ak8974 *ak8974; @@ -1047,7 +1046,7 @@ static struct i2c_driver ak8974_driver = { .pm = pm_ptr(&ak8974_dev_pm_ops), .of_match_table = ak8974_of_match, }, - .probe = ak8974_probe, + .probe_new = ak8974_probe, .remove = ak8974_remove, .id_table = ak8974_id, }; diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index caf03a2a98a5decc3433ca932f5af778bc5d6573..924b481a3034aac393a8d1bb0e621caba0bb139c 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -876,9 +876,9 @@ static irqreturn_t ak8975_handle_trigger(int irq, void *p) return IRQ_HANDLED; } -static int ak8975_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ak8975_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct ak8975_data *data; struct iio_dev *indio_dev; struct gpio_desc *eoc_gpiod; @@ -1110,7 +1110,7 @@ static struct i2c_driver ak8975_driver = { .of_match_table = ak8975_of_match, .acpi_match_table = ak_acpi_match, }, - .probe = ak8975_probe, + .probe_new = ak8975_probe, .remove = ak8975_remove, .id_table = ak8975_id, }; diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c index 570deaa87836b267ac657ab3a13d0d0c1ea42287..44b8960eea175fb744e772fa5dbad5bcb2bf3de3 100644 --- a/drivers/iio/magnetometer/bmc150_magn_i2c.c +++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c @@ -16,9 +16,9 @@ #include "bmc150_magn.h" -static int bmc150_magn_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bmc150_magn_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct regmap *regmap; const char *name = NULL; @@ -71,7 +71,7 @@ static struct i2c_driver bmc150_magn_driver = { .acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match), .pm = &bmc150_magn_pm_ops, }, - .probe = bmc150_magn_i2c_probe, + .probe_new = bmc150_magn_i2c_probe, .remove = bmc150_magn_i2c_remove, .id_table = bmc150_magn_i2c_id, }; diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c index 18a13dd512961feb6a8d912ee17780bb238f8a4a..7ef2b1d562898ee4d21c106759b9355e5e076371 100644 --- a/drivers/iio/magnetometer/hmc5843_i2c.c +++ b/drivers/iio/magnetometer/hmc5843_i2c.c @@ -52,9 +52,9 @@ static const struct regmap_config hmc5843_i2c_regmap_config = { .cache_type = REGCACHE_RBTREE, }; -static int hmc5843_i2c_probe(struct i2c_client *cli, - const struct i2c_device_id *id) +static int hmc5843_i2c_probe(struct i2c_client *cli) { + const struct i2c_device_id *id = i2c_client_get_device_id(cli); struct regmap *regmap = devm_regmap_init_i2c(cli, &hmc5843_i2c_regmap_config); if (IS_ERR(regmap)) @@ -95,7 +95,7 @@ static struct i2c_driver hmc5843_driver = { .of_match_table = hmc5843_of_match, }, .id_table = hmc5843_id, - .probe = hmc5843_i2c_probe, + .probe_new = hmc5843_i2c_probe, .remove = hmc5843_i2c_remove, }; module_i2c_driver(hmc5843_driver); diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c index b870ad8038626e7eed8fbe88409971544f49450e..661176a885ad763e8a387a88c6e184551c76de70 100644 --- a/drivers/iio/magnetometer/mag3110.c +++ b/drivers/iio/magnetometer/mag3110.c @@ -469,9 +469,9 @@ 
static const struct iio_info mag3110_info = { static const unsigned long mag3110_scan_masks[] = {0x7, 0xf, 0}; -static int mag3110_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mag3110_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct mag3110_data *data; struct iio_dev *indio_dev; int ret; @@ -641,7 +641,7 @@ static struct i2c_driver mag3110_driver = { .of_match_table = mag3110_of_match, .pm = pm_sleep_ptr(&mag3110_pm_ops), }, - .probe = mag3110_probe, + .probe_new = mag3110_probe, .remove = mag3110_remove, .id_table = mag3110_id, }; diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c index 186edfcda0b70be56ccfa07abfe3663cc8e584e3..756dadbad106f73d0736b1ca2af8188c38a05c1b 100644 --- a/drivers/iio/magnetometer/mmc35240.c +++ b/drivers/iio/magnetometer/mmc35240.c @@ -481,8 +481,7 @@ static const struct regmap_config mmc35240_regmap_config = { .num_reg_defaults = ARRAY_SIZE(mmc35240_reg_defaults), }; -static int mmc35240_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mmc35240_probe(struct i2c_client *client) { struct mmc35240_data *data; struct iio_dev *indio_dev; @@ -576,7 +575,7 @@ static struct i2c_driver mmc35240_driver = { .pm = pm_sleep_ptr(&mmc35240_pm_ops), .acpi_match_table = ACPI_PTR(mmc35240_acpi_match), }, - .probe = mmc35240_probe, + .probe_new = mmc35240_probe, .id_table = mmc35240_id, }; diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c index c5d8c303db4ef46ecd555e3c4f99eb79a2a8b143..b4098d3b38137b1b6af457746cd1ff6649fabd7d 100644 --- a/drivers/iio/magnetometer/st_magn_i2c.c +++ b/drivers/iio/magnetometer/st_magn_i2c.c @@ -54,8 +54,7 @@ static const struct of_device_id st_magn_of_match[] = { }; MODULE_DEVICE_TABLE(of, st_magn_of_match); -static int st_magn_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int st_magn_i2c_probe(struct i2c_client *client) { const struct st_sensor_settings *settings; struct st_sensor_data *mdata; @@ -107,7 +106,7 @@ static struct i2c_driver st_magn_driver = { .name = "st-magn-i2c", .of_match_table = st_magn_of_match, }, - .probe = st_magn_i2c_probe, + .probe_new = st_magn_i2c_probe, .id_table = st_magn_id_table, }; module_i2c_driver(st_magn_driver); diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c index 801c760feb4d19756e9ea5716609ec3cd910231d..753717158b073c24884dcad168107957733acd59 100644 --- a/drivers/iio/magnetometer/yamaha-yas530.c +++ b/drivers/iio/magnetometer/yamaha-yas530.c @@ -1384,9 +1384,9 @@ static const struct yas5xx_chip_info yas5xx_chip_info_tbl[] = { }, }; -static int yas5xx_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int yas5xx_probe(struct i2c_client *i2c) { + const struct i2c_device_id *id = i2c_client_get_device_id(i2c); struct iio_dev *indio_dev; struct device *dev = &i2c->dev; struct yas5xx *yas5xx; @@ -1605,7 +1605,7 @@ static struct i2c_driver yas5xx_driver = { .of_match_table = yas5xx_of_match, .pm = pm_ptr(&yas5xx_dev_pm_ops), }, - .probe = yas5xx_probe, + .probe_new = yas5xx_probe, .remove = yas5xx_remove, .id_table = yas5xx_id, }; diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c index 93558fddfa9b41a2a03ce4133cc08543be9452b4..edd8c69f6d2e1c9ffac0ddcf5f0b4a306bc2f2d9 100644 --- a/drivers/iio/multiplexer/iio-mux.c +++ b/drivers/iio/multiplexer/iio-mux.c @@ -416,11 +416,9 @@ static 
int mux_probe(struct platform_device *pdev) } mux->control = devm_mux_control_get(dev, NULL); - if (IS_ERR(mux->control)) { - if (PTR_ERR(mux->control) != -EPROBE_DEFER) - dev_err(dev, "failed to get control-mux\n"); - return PTR_ERR(mux->control); - } + if (IS_ERR(mux->control)) + return dev_err_probe(dev, PTR_ERR(mux->control), + "failed to get control-mux\n"); i = 0; for (state = 0; state < all_children; state++) { diff --git a/drivers/iio/potentiometer/ad5272.c b/drivers/iio/potentiometer/ad5272.c index ed5fc0b50fe979c9090f5397089c8731d01996a7..aa140d63210161d736900455ce88726abca57879 100644 --- a/drivers/iio/potentiometer/ad5272.c +++ b/drivers/iio/potentiometer/ad5272.c @@ -158,9 +158,9 @@ static int ad5272_reset(struct ad5272_data *data) return 0; } -static int ad5272_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ad5272_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; struct iio_dev *indio_dev; struct ad5272_data *data; @@ -218,7 +218,7 @@ static struct i2c_driver ad5272_driver = { .name = "ad5272", .of_match_table = ad5272_dt_ids, }, - .probe = ad5272_probe, + .probe_new = ad5272_probe, .id_table = ad5272_id, }; diff --git a/drivers/iio/potentiometer/ds1803.c b/drivers/iio/potentiometer/ds1803.c index 5c212ed7a93133de40c129e9f766b4c4eb490aa6..0b5e475807cbcaea920d7fe836090a9c8351e6be 100644 --- a/drivers/iio/potentiometer/ds1803.c +++ b/drivers/iio/potentiometer/ds1803.c @@ -202,8 +202,9 @@ static const struct iio_info ds1803_info = { .read_avail = ds1803_read_avail, }; -static int ds1803_probe(struct i2c_client *client, const struct i2c_device_id *id) +static int ds1803_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; struct ds1803_data *data; struct iio_dev *indio_dev; @@ -251,7 +252,7 @@ static struct i2c_driver ds1803_driver = { .name = "ds1803", .of_match_table = ds1803_dt_ids, }, - .probe = ds1803_probe, + .probe_new = ds1803_probe, .id_table = ds1803_id, }; diff --git a/drivers/iio/potentiometer/max5432.c b/drivers/iio/potentiometer/max5432.c index aed3b6ab82a21f9d2e247bd35d57acba669be96a..94ef27ef3fb57981947e37b665e0647383ad6ef9 100644 --- a/drivers/iio/potentiometer/max5432.c +++ b/drivers/iio/potentiometer/max5432.c @@ -85,8 +85,7 @@ static const struct iio_info max5432_info = { .write_raw = max5432_write_raw, }; -static int max5432_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max5432_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct iio_dev *indio_dev; @@ -124,7 +123,7 @@ static struct i2c_driver max5432_driver = { .name = "max5432", .of_match_table = max5432_dt_ids, }, - .probe = max5432_probe, + .probe_new = max5432_probe, }; module_i2c_driver(max5432_driver); diff --git a/drivers/iio/potentiometer/tpl0102.c b/drivers/iio/potentiometer/tpl0102.c index d996dc367fb77920646cb10ea173c1dfc6b25081..a3465b413b0caa4f22fbadb36464b2e968070568 100644 --- a/drivers/iio/potentiometer/tpl0102.c +++ b/drivers/iio/potentiometer/tpl0102.c @@ -120,9 +120,9 @@ static const struct iio_info tpl0102_info = { .write_raw = tpl0102_write_raw, }; -static int tpl0102_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int tpl0102_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; struct tpl0102_data *data; struct 
iio_dev *indio_dev; @@ -161,7 +161,7 @@ static struct i2c_driver tpl0102_driver = { .driver = { .name = "tpl0102", }, - .probe = tpl0102_probe, + .probe_new = tpl0102_probe, .id_table = tpl0102_id, }; diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c index 5ec7060d31d901cc88a4aefbc9d34f09e0c93304..b82f093f1e6a44b28dd715c602c2e0ea75ac2c1a 100644 --- a/drivers/iio/potentiostat/lmp91000.c +++ b/drivers/iio/potentiostat/lmp91000.c @@ -292,8 +292,7 @@ static const struct iio_buffer_setup_ops lmp91000_buffer_setup_ops = { .predisable = lmp91000_buffer_predisable, }; -static int lmp91000_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int lmp91000_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct lmp91000_data *data; @@ -417,7 +416,7 @@ static struct i2c_driver lmp91000_driver = { .name = LMP91000_DRV_NAME, .of_match_table = lmp91000_of_match, }, - .probe = lmp91000_probe, + .probe_new = lmp91000_probe, .remove = lmp91000_remove, .id_table = lmp91000_id, }; diff --git a/drivers/iio/pressure/abp060mg.c b/drivers/iio/pressure/abp060mg.c index e1c3bdb371ee6e31574b13a2bcb6c925516a9377..c0140779366aff6e6e14d7c7332f386dadb22e31 100644 --- a/drivers/iio/pressure/abp060mg.c +++ b/drivers/iio/pressure/abp060mg.c @@ -174,9 +174,9 @@ static void abp060mg_init_device(struct iio_dev *indio_dev, unsigned long id) state->offset -= ABP060MG_NUM_COUNTS >> 1; } -static int abp060mg_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int abp060mg_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct iio_dev *indio_dev; struct abp_state *state; unsigned long cfg_id = id->driver_data; @@ -255,7 +255,7 @@ static struct i2c_driver abp060mg_driver = { .driver = { .name = "abp060mg", }, - .probe = abp060mg_probe, + .probe_new = abp060mg_probe, .id_table = abp060mg_id_table, }; module_i2c_driver(abp060mg_driver); diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c index 0c27211f3ea076417143a6eb3fea15f0581a929f..14eab086d24a67fdfa3ef6e77ac58efcb722263c 100644 --- a/drivers/iio/pressure/bmp280-i2c.c +++ b/drivers/iio/pressure/bmp280-i2c.c @@ -5,11 +5,11 @@ #include "bmp280.h" -static int bmp280_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bmp280_i2c_probe(struct i2c_client *client) { struct regmap *regmap; const struct regmap_config *regmap_config; + const struct i2c_device_id *id = i2c_client_get_device_id(client); switch (id->driver_data) { case BMP180_CHIP_ID: @@ -65,7 +65,7 @@ static struct i2c_driver bmp280_i2c_driver = { .of_match_table = bmp280_of_i2c_match, .pm = pm_ptr(&bmp280_dev_pm_ops), }, - .probe = bmp280_i2c_probe, + .probe_new = bmp280_i2c_probe, .id_table = bmp280_i2c_id, }; module_i2c_driver(bmp280_i2c_driver); diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c index f0b0d198c6d472a5533a26c063775f595123bc10..43650b048d6295e2f9fac9cd4dc1e20f0e524977 100644 --- a/drivers/iio/pressure/dlhl60d.c +++ b/drivers/iio/pressure/dlhl60d.c @@ -282,9 +282,9 @@ static irqreturn_t dlh_interrupt(int irq, void *private) return IRQ_HANDLED; }; -static int dlh_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int dlh_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct dlh_state *st; struct iio_dev *indio_dev; int ret; @@ -362,7 +362,7 @@ static struct i2c_driver dlh_driver = { 
.name = "dlhl60d", .of_match_table = dlh_of_match, }, - .probe = dlh_probe, + .probe_new = dlh_probe, .id_table = dlh_id, }; module_i2c_driver(dlh_driver); diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c index 984a3f511a1ae31d14472edf1fe3edf5f3187c6f..2af275a24ff9e29fbab8d7aed58a5b63c7e03eea 100644 --- a/drivers/iio/pressure/dps310.c +++ b/drivers/iio/pressure/dps310.c @@ -827,9 +827,9 @@ static const struct iio_info dps310_info = { .write_raw = dps310_write_raw, }; -static int dps310_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int dps310_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct dps310_data *data; struct iio_dev *iio; int rc; @@ -887,7 +887,7 @@ static struct i2c_driver dps310_driver = { .name = DPS310_DEV_NAME, .acpi_match_table = dps310_acpi_match, }, - .probe = dps310_probe, + .probe_new = dps310_probe, .id_table = dps310_id, }; module_i2c_driver(dps310_driver); diff --git a/drivers/iio/pressure/hp03.c b/drivers/iio/pressure/hp03.c index 9538118c96489c89164c9a7c5af73fc8d47b3128..bd1f71a99cfa3d24f96b947514117224ce1de2c0 100644 --- a/drivers/iio/pressure/hp03.c +++ b/drivers/iio/pressure/hp03.c @@ -208,9 +208,9 @@ static const struct iio_info hp03_info = { .read_raw = &hp03_read_raw, }; -static int hp03_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int hp03_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; struct iio_dev *indio_dev; struct hp03_priv *priv; @@ -282,7 +282,7 @@ static struct i2c_driver hp03_driver = { .name = "hp03", .of_match_table = hp03_of_match, }, - .probe = hp03_probe, + .probe_new = hp03_probe, .id_table = hp03_id, }; module_i2c_driver(hp03_driver); diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c index 986b7a59712e01906d3d22e27be7893fd496299e..b6d2ff46434194a681bd9e96acf20fdb0256d97e 100644 --- a/drivers/iio/pressure/hp206c.c +++ b/drivers/iio/pressure/hp206c.c @@ -352,9 +352,9 @@ static const struct iio_info hp206c_info = { .write_raw = hp206c_write_raw, }; -static int hp206c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int hp206c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct iio_dev *indio_dev; struct hp206c_data *data; int ret; @@ -409,7 +409,7 @@ MODULE_DEVICE_TABLE(acpi, hp206c_acpi_match); #endif static struct i2c_driver hp206c_driver = { - .probe = hp206c_probe, + .probe_new = hp206c_probe, .id_table = hp206c_id, .driver = { .name = "hp206c", diff --git a/drivers/iio/pressure/icp10100.c b/drivers/iio/pressure/icp10100.c index b62f28585db5b8d66cd1b931a65fcf36f35adf13..407cf25ea0e335e7dde1373260db4d01b76f8323 100644 --- a/drivers/iio/pressure/icp10100.c +++ b/drivers/iio/pressure/icp10100.c @@ -530,8 +530,7 @@ static void icp10100_pm_disable(void *data) pm_runtime_disable(dev); } -static int icp10100_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int icp10100_probe(struct i2c_client *client) { struct iio_dev *indio_dev; struct icp10100_state *st; @@ -649,7 +648,7 @@ static struct i2c_driver icp10100_driver = { .pm = pm_ptr(&icp10100_pm), .of_match_table = icp10100_of_match, }, - .probe = icp10100_probe, + .probe_new = icp10100_probe, .id_table = icp10100_id, }; module_i2c_driver(icp10100_driver); diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c 
index 5bf5b9abe6f1a4e6161da0978fdf92ebf545ee96..02ea38c8a3e4312264ca3078d3c31b3c94158c96 100644 --- a/drivers/iio/pressure/mpl115.c +++ b/drivers/iio/pressure/mpl115.c @@ -4,12 +4,13 @@ * * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net> * - * TODO: shutdown pin + * TODO: synchronization with system suspend */ #include <linux/module.h> #include <linux/iio/iio.h> #include <linux/delay.h> +#include <linux/gpio/consumer.h> #include "mpl115.h" @@ -27,6 +28,7 @@ struct mpl115_data { s16 a0; s16 b1, b2; s16 c12; + struct gpio_desc *shutdown; const struct mpl115_ops *ops; }; @@ -102,16 +104,24 @@ static int mpl115_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_PROCESSED: + pm_runtime_get_sync(data->dev); ret = mpl115_comp_pressure(data, val, val2); if (ret < 0) return ret; + pm_runtime_mark_last_busy(data->dev); + pm_runtime_put_autosuspend(data->dev); + return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_RAW: + pm_runtime_get_sync(data->dev); /* temperature -5.35 C / LSB, 472 LSB is 25 C */ ret = mpl115_read_temp(data); if (ret < 0) return ret; + pm_runtime_mark_last_busy(data->dev); + pm_runtime_put_autosuspend(data->dev); *val = ret >> 6; + return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: *val = -605; @@ -168,6 +178,8 @@ int mpl115_probe(struct device *dev, const char *name, if (ret) return ret; + dev_set_drvdata(dev, indio_dev); + ret = data->ops->read(data->dev, MPL115_A0); if (ret < 0) return ret; @@ -185,10 +197,58 @@ int mpl115_probe(struct device *dev, const char *name, return ret; data->c12 = ret; + data->shutdown = devm_gpiod_get_optional(dev, "shutdown", + GPIOD_OUT_LOW); + if (IS_ERR(data->shutdown)) + return dev_err_probe(dev, PTR_ERR(data->shutdown), + "cannot get shutdown gpio\n"); + + if (data->shutdown) { + /* Enable runtime PM */ + pm_runtime_get_noresume(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + /* + * As the device takes 3 ms to come up with a fresh + * reading after power-on and 5 ms to actually power-on, + * do not shut it down unnecessarily. Set autosuspend to + * 2000 ms.
+ */ + pm_runtime_set_autosuspend_delay(dev, 2000); + pm_runtime_use_autosuspend(dev); + pm_runtime_put(dev); + + dev_dbg(dev, "low-power mode enabled"); + } else + dev_dbg(dev, "low-power mode disabled"); + return devm_iio_device_register(dev, indio_dev); } EXPORT_SYMBOL_NS_GPL(mpl115_probe, IIO_MPL115); +static int mpl115_runtime_suspend(struct device *dev) +{ + struct mpl115_data *data = iio_priv(dev_get_drvdata(dev)); + + gpiod_set_value(data->shutdown, 1); + + return 0; +} + +static int mpl115_runtime_resume(struct device *dev) +{ + struct mpl115_data *data = iio_priv(dev_get_drvdata(dev)); + + gpiod_set_value(data->shutdown, 0); + usleep_range(5000, 6000); + + return 0; +} + +EXPORT_NS_RUNTIME_DEV_PM_OPS(mpl115_dev_pm_ops, mpl115_runtime_suspend, + mpl115_runtime_resume, NULL, IIO_MPL115); + MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>"); MODULE_DESCRIPTION("Freescale MPL115 pressure/temperature driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/iio/pressure/mpl115.h b/drivers/iio/pressure/mpl115.h index 57d55eb8e661789f675b2993893774e7995af34a..78a0068a17bbcf415cd9c881c60524b5dfec2269 100644 --- a/drivers/iio/pressure/mpl115.h +++ b/drivers/iio/pressure/mpl115.h @@ -6,6 +6,8 @@ * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com> */ +#include <linux/pm_runtime.h> + #ifndef _MPL115_H_ #define _MPL115_H_ @@ -18,4 +20,7 @@ struct mpl115_ops { int mpl115_probe(struct device *dev, const char *name, const struct mpl115_ops *ops); +/* PM ops */ +extern const struct dev_pm_ops mpl115_dev_pm_ops; + #endif diff --git a/drivers/iio/pressure/mpl115_i2c.c b/drivers/iio/pressure/mpl115_i2c.c index 099ab1c6832c5d2b0a4039d7ee720925b497ac33..ade4dd854ddf1b57f3ef1aa30086d0d1efeb8313 100644 --- a/drivers/iio/pressure/mpl115_i2c.c +++ b/drivers/iio/pressure/mpl115_i2c.c @@ -35,9 +35,9 @@ static const struct mpl115_ops mpl115_i2c_ops = { .write = mpl115_i2c_write, }; -static int mpl115_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mpl115_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) return -EOPNOTSUPP; @@ -53,8 +53,9 @@ MODULE_DEVICE_TABLE(i2c, mpl115_i2c_id); static struct i2c_driver mpl115_i2c_driver = { .driver = { .name = "mpl115", + .pm = pm_ptr(&mpl115_dev_pm_ops), }, - .probe = mpl115_i2c_probe, + .probe_new = mpl115_i2c_probe, .id_table = mpl115_i2c_id, }; module_i2c_driver(mpl115_i2c_driver); diff --git a/drivers/iio/pressure/mpl115_spi.c b/drivers/iio/pressure/mpl115_spi.c index 7feec87e2704ef590000be5c24061f8113d46311..58d218fd90dcb9666a530b9011ad7394983f7d19 100644 --- a/drivers/iio/pressure/mpl115_spi.c +++ b/drivers/iio/pressure/mpl115_spi.c @@ -92,6 +92,7 @@ MODULE_DEVICE_TABLE(spi, mpl115_spi_ids); static struct spi_driver mpl115_spi_driver = { .driver = { .name = "mpl115", + .pm = pm_ptr(&mpl115_dev_pm_ops), }, .probe = mpl115_spi_probe, .id_table = mpl115_spi_ids, diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c index 2f22aba61e4dd39a9afcd1c768fdfb53f7ce02e2..72e811a5c96e607bf63e26eb5b755263d916cf26 100644 --- a/drivers/iio/pressure/mpl3115.c +++ b/drivers/iio/pressure/mpl3115.c @@ -230,9 +230,9 @@ static const struct iio_info mpl3115_info = { .read_raw = &mpl3115_read_raw, }; -static int mpl3115_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mpl3115_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct mpl3115_data *data; struct iio_dev *indio_dev; int
ret; @@ -335,7 +335,7 @@ static struct i2c_driver mpl3115_driver = { .of_match_table = mpl3115_of_match, .pm = pm_sleep_ptr(&mpl3115_pm_ops), }, - .probe = mpl3115_probe, + .probe_new = mpl3115_probe, .remove = mpl3115_remove, .id_table = mpl3115_id, }; diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c index b681a4183909891cda293561ba51b148c9af9b8e..caf882497656d57a6fb0d87acdfa1884b0bd5baa 100644 --- a/drivers/iio/pressure/ms5611_i2c.c +++ b/drivers/iio/pressure/ms5611_i2c.c @@ -79,9 +79,9 @@ static int ms5611_i2c_read_adc_temp_and_pressure(struct ms5611_state *st, return ms5611_i2c_read_adc(st, pressure); } -static int ms5611_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ms5611_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct ms5611_state *st; struct iio_dev *indio_dev; @@ -130,7 +130,7 @@ static struct i2c_driver ms5611_driver = { .of_match_table = ms5611_i2c_matches, }, .id_table = ms5611_id, - .probe = ms5611_i2c_probe, + .probe_new = ms5611_i2c_probe, .remove = ms5611_i2c_remove, }; module_i2c_driver(ms5611_driver); diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c index 70c70019142ae1badc73f6cc37d3d92c19ff7164..c4981b29dccb1716c87af5a793962d81c709c7d6 100644 --- a/drivers/iio/pressure/ms5637.c +++ b/drivers/iio/pressure/ms5637.c @@ -142,9 +142,9 @@ static const struct iio_info ms5637_info = { .attrs = &ms5637_attribute_group, }; -static int ms5637_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ms5637_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); const struct ms_tp_data *data; struct ms_tp_dev *dev_data; struct iio_dev *indio_dev; @@ -238,7 +238,7 @@ static const struct of_device_id ms5637_of_match[] = { MODULE_DEVICE_TABLE(of, ms5637_of_match); static struct i2c_driver ms5637_driver = { - .probe = ms5637_probe, + .probe_new = ms5637_probe, .id_table = ms5637_id, .driver = { .name = "ms5637", diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c index 58fede8618911a9bd2df50859ed04476749a1951..f2c3bb568d16ba3f1bb71b37f8ec450090757c96 100644 --- a/drivers/iio/pressure/st_pressure_i2c.c +++ b/drivers/iio/pressure/st_pressure_i2c.c @@ -76,8 +76,7 @@ static const struct i2c_device_id st_press_id_table[] = { }; MODULE_DEVICE_TABLE(i2c, st_press_id_table); -static int st_press_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int st_press_i2c_probe(struct i2c_client *client) { const struct st_sensor_settings *settings; struct st_sensor_data *press_data; @@ -117,7 +116,7 @@ static struct i2c_driver st_press_driver = { .of_match_table = st_press_of_match, .acpi_match_table = ACPI_PTR(st_press_acpi_match), }, - .probe = st_press_i2c_probe, + .probe_new = st_press_i2c_probe, .id_table = st_press_id_table, }; module_i2c_driver(st_press_driver); diff --git a/drivers/iio/pressure/t5403.c b/drivers/iio/pressure/t5403.c index 685fcf65334fcdeea0f6c8d6001ab229617c47e9..2fbf14aff03392e29d6b58bf585f85084d21ef6d 100644 --- a/drivers/iio/pressure/t5403.c +++ b/drivers/iio/pressure/t5403.c @@ -208,9 +208,9 @@ static const struct iio_info t5403_info = { .attrs = &t5403_attribute_group, }; -static int t5403_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int t5403_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); 
struct t5403_data *data; struct iio_dev *indio_dev; int ret; @@ -260,7 +260,7 @@ static struct i2c_driver t5403_driver = { .driver = { .name = "t5403", }, - .probe = t5403_probe, + .probe_new = t5403_probe, .id_table = t5403_id, }; module_i2c_driver(t5403_driver); diff --git a/drivers/iio/pressure/zpa2326_i2c.c b/drivers/iio/pressure/zpa2326_i2c.c index f26dd8cbb387c0e4e2783909aec526fc5445eda8..ade465014be1ed61f7910689ce442771b2200d97 100644 --- a/drivers/iio/pressure/zpa2326_i2c.c +++ b/drivers/iio/pressure/zpa2326_i2c.c @@ -38,9 +38,9 @@ static unsigned int zpa2326_i2c_hwid(const struct i2c_client *client) (ZPA2326_SA0(client->addr) << ZPA2326_DEVICE_ID_SA0_SHIFT)); } -static int zpa2326_probe_i2c(struct i2c_client *client, - const struct i2c_device_id *i2c_id) +static int zpa2326_probe_i2c(struct i2c_client *client) { + const struct i2c_device_id *i2c_id = i2c_client_get_device_id(client); struct regmap *regmap; regmap = devm_regmap_init_i2c(client, &zpa2326_regmap_i2c_config); @@ -76,7 +76,7 @@ static struct i2c_driver zpa2326_i2c_driver = { .of_match_table = zpa2326_i2c_matches, .pm = ZPA2326_PM_OPS, }, - .probe = zpa2326_probe_i2c, + .probe_new = zpa2326_probe_i2c, .remove = zpa2326_remove_i2c, .id_table = zpa2326_i2c_ids, }; diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c index 5b6ea783795d91dc0c6a40afd6c6e7058efd4454..7b8f40b7ccf33081654b6d4dcdf95f06677bd457 100644 --- a/drivers/iio/proximity/isl29501.c +++ b/drivers/iio/proximity/isl29501.c @@ -949,8 +949,7 @@ static irqreturn_t isl29501_trigger_handler(int irq, void *p) return IRQ_HANDLED; } -static int isl29501_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int isl29501_probe(struct i2c_client *client) { struct iio_dev *indio_dev; struct isl29501_private *isl29501; @@ -1009,7 +1008,7 @@ static struct i2c_driver isl29501_driver = { .name = "isl29501", }, .id_table = isl29501_id, - .probe = isl29501_probe, + .probe_new = isl29501_probe, }; module_i2c_driver(isl29501_driver); diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c index 0bca5f74de68afb8a9b44311ccfd20145b2a8fee..e70cac8240afc7d72d1d8d172b22ad3a6e76c0aa 100644 --- a/drivers/iio/proximity/mb1232.c +++ b/drivers/iio/proximity/mb1232.c @@ -180,9 +180,9 @@ static const struct iio_info mb1232_info = { .read_raw = mb1232_read_raw, }; -static int mb1232_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mb1232_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct iio_dev *indio_dev; struct mb1232_data *data; int ret; @@ -264,7 +264,7 @@ static struct i2c_driver mb1232_driver = { .name = "maxbotix-mb1232", .of_match_table = of_mb1232_match, }, - .probe = mb1232_probe, + .probe_new = mb1232_probe, .id_table = mb1232_id, }; module_i2c_driver(mb1232_driver); diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c index 791a33d5286c666f636b07ad5c47de5c48567f52..c9eead01a0314c92affc6cef7f3a7d33f4e8d7e4 100644 --- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c +++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c @@ -253,8 +253,7 @@ static const struct iio_info lidar_info = { .read_raw = lidar_read_raw, }; -static int lidar_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int lidar_probe(struct i2c_client *client) { struct lidar_data *data; struct iio_dev *indio_dev; @@ -366,7 +365,7 @@ static struct i2c_driver lidar_driver = 
{ .of_match_table = lidar_dt_ids, .pm = pm_ptr(&lidar_pm_ops), }, - .probe = lidar_probe, + .probe_new = lidar_probe, .remove = lidar_remove, .id_table = lidar_id, }; diff --git a/drivers/iio/proximity/rfd77402.c b/drivers/iio/proximity/rfd77402.c index cb80b3c9d07351ba1e771264b5bcf78a4f75284c..44f72b78bd5063a797c10e3f12d7f570575495fd 100644 --- a/drivers/iio/proximity/rfd77402.c +++ b/drivers/iio/proximity/rfd77402.c @@ -257,8 +257,7 @@ static void rfd77402_disable(void *client) rfd77402_powerdown(client); } -static int rfd77402_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int rfd77402_probe(struct i2c_client *client) { struct rfd77402_data *data; struct iio_dev *indio_dev; @@ -319,7 +318,7 @@ static struct i2c_driver rfd77402_driver = { .name = RFD77402_DRV_NAME, .pm = pm_sleep_ptr(&rfd77402_pm_ops), }, - .probe = rfd77402_probe, + .probe_new = rfd77402_probe, .id_table = rfd77402_id, }; diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c index 7ed11339c31e174823eafb723a65ce7fc92f7390..61866d0440f76f7827ee6d64aea51395d96d9da3 100644 --- a/drivers/iio/proximity/srf08.c +++ b/drivers/iio/proximity/srf08.c @@ -443,9 +443,9 @@ static const struct iio_info srf02_info = { .read_raw = srf08_read_raw, }; -static int srf08_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int srf08_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct iio_dev *indio_dev; struct srf08_data *data; int ret; @@ -549,7 +549,7 @@ static struct i2c_driver srf08_driver = { .name = "srf08", .of_match_table = of_srf08_match, }, - .probe = srf08_probe, + .probe_new = srf08_probe, .id_table = srf08_id, }; module_i2c_driver(srf08_driver); diff --git a/drivers/iio/proximity/sx9360.c b/drivers/iio/proximity/sx9360.c index 7fa2213d23bafe01056f807ffe48a06b6095f419..6e19d22e6a01503792584c1a9eebf548e36ed3d8 100644 --- a/drivers/iio/proximity/sx9360.c +++ b/drivers/iio/proximity/sx9360.c @@ -865,6 +865,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(sx9360_pm_ops, sx9360_suspend, sx9360_resume); static const struct acpi_device_id sx9360_acpi_match[] = { { "STH9360", SX9360_WHOAMI_VALUE }, + { "SAMM0208", SX9360_WHOAMI_VALUE }, { } }; MODULE_DEVICE_TABLE(acpi, sx9360_acpi_match); diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c index d4670864ddc7eb003602bcf8f16ae3a26a108fdb..8794e75e5bf9c45d96a674e3484449b76ebb3983 100644 --- a/drivers/iio/proximity/sx9500.c +++ b/drivers/iio/proximity/sx9500.c @@ -901,8 +901,7 @@ static void sx9500_gpio_probe(struct i2c_client *client, } } -static int sx9500_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int sx9500_probe(struct i2c_client *client) { int ret; struct iio_dev *indio_dev; @@ -1056,7 +1055,7 @@ static struct i2c_driver sx9500_driver = { .of_match_table = of_match_ptr(sx9500_of_match), .pm = pm_sleep_ptr(&sx9500_pm_ops), }, - .probe = sx9500_probe, + .probe_new = sx9500_probe, .remove = sx9500_remove, .id_table = sx9500_id, }; diff --git a/drivers/iio/proximity/sx_common.c b/drivers/iio/proximity/sx_common.c index d70a6b4f0bf864e2174957abcae397eba91e53af..eba9256730ec1a4cc4ea44dfbc6c786b941d0c2d 100644 --- a/drivers/iio/proximity/sx_common.c +++ b/drivers/iio/proximity/sx_common.c @@ -424,13 +424,6 @@ static const struct iio_buffer_setup_ops sx_common_buffer_setup_ops = { .postdisable = sx_common_buffer_postdisable, }; -static void sx_common_regulator_disable(void *_data) -{ - struct sx_common_data *data = 
_data; - - regulator_bulk_disable(ARRAY_SIZE(data->supplies), data->supplies); -} - #define SX_COMMON_SOFT_RESET 0xde static int sx_common_init_device(struct device *dev, struct iio_dev *indio_dev) @@ -474,6 +467,7 @@ int sx_common_probe(struct i2c_client *client, const struct sx_common_chip_info *chip_info, const struct regmap_config *regmap_config) { + static const char * const regulator_names[] = { "vdd", "svdd" }; struct device *dev = &client->dev; struct iio_dev *indio_dev; struct sx_common_data *data; @@ -487,8 +481,6 @@ int sx_common_probe(struct i2c_client *client, data->chip_info = chip_info; data->client = client; - data->supplies[0].supply = "vdd"; - data->supplies[1].supply = "svdd"; mutex_init(&data->mutex); init_completion(&data->completion); @@ -497,23 +489,14 @@ int sx_common_probe(struct i2c_client *client, return dev_err_probe(dev, PTR_ERR(data->regmap), "Could init register map\n"); - ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->supplies), - data->supplies); + ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names), + regulator_names); if (ret) return dev_err_probe(dev, ret, "Unable to get regulators\n"); - ret = regulator_bulk_enable(ARRAY_SIZE(data->supplies), data->supplies); - if (ret) - return dev_err_probe(dev, ret, "Unable to enable regulators\n"); - /* Must wait for Tpor time after initial power up */ usleep_range(1000, 1100); - ret = devm_add_action_or_reset(dev, sx_common_regulator_disable, data); - if (ret) - return dev_err_probe(dev, ret, - "Unable to register regulators deleter\n"); - ret = data->chip_info->ops.check_whoami(dev, indio_dev); if (ret) return dev_err_probe(dev, ret, "error reading WHOAMI\n"); diff --git a/drivers/iio/proximity/sx_common.h b/drivers/iio/proximity/sx_common.h index 5d3edeb75f4e026998196c14afe92f1c22f7d72e..49d4517103b0f49b064c58b5b4fe69e6a643c171 100644 --- a/drivers/iio/proximity/sx_common.h +++ b/drivers/iio/proximity/sx_common.h @@ -102,7 +102,6 @@ struct sx_common_chip_info { * @trig: IIO trigger object. * @regmap: Register map. * @num_default_regs: Number of default registers to set at init. - * @supplies: Power supplies object. * @chan_prox_stat: Last reading of the proximity status for each channel. * We only send an event to user space when this changes. * @trigger_enabled: True when the device trigger is enabled. @@ -120,7 +119,6 @@ struct sx_common_data { struct iio_trigger *trig; struct regmap *regmap; - struct regulator_bulk_data supplies[2]; unsigned long chan_prox_stat; bool trigger_enabled; diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig index e8ed849e3b7662c401633165badb0797d68a23fc..ed384f33e0c76809fcf8b40c7c4c994566bf05d2 100644 --- a/drivers/iio/temperature/Kconfig +++ b/drivers/iio/temperature/Kconfig @@ -128,6 +128,16 @@ config TSYS02D This driver can also be built as a module. If so, the module will be called tsys02d. +config MAX30208 + tristate "Maxim MAX30208 digital temperature sensor" + depends on I2C + help + If you say yes here you get support for Maxim MAX30208 + digital temperature sensor connected via I2C. + + This driver can also be built as a module. If so, the module + will be called max30208. 
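The sx_common rework a few hunks above is worth a closer look, since the same conversion appears across the tree: devm_regulator_bulk_get_enable() takes a list of supply names, gets and enables the regulators, and registers the matching disable with devres, so the driver no longer keeps a regulator_bulk_data array in its private struct or hand-rolls a disable callback. A minimal before/after sketch of that pattern, using hypothetical demo_* names with the same "vdd"/"svdd" supplies; nothing here is taken verbatim from the patch:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

/* Before: get the supplies, enable them, register a disable action by hand. */
static void demo_regulator_disable(void *_supplies)
{
	struct regulator_bulk_data *supplies = _supplies;

	regulator_bulk_disable(2, supplies);
}

static int demo_power_up_old(struct device *dev,
			     struct regulator_bulk_data *supplies)
{
	int ret;

	supplies[0].supply = "vdd";
	supplies[1].supply = "svdd";

	ret = devm_regulator_bulk_get(dev, 2, supplies);
	if (ret)
		return ret;

	ret = regulator_bulk_enable(2, supplies);
	if (ret)
		return ret;

	/* Make sure the enable is undone automatically on unbind. */
	return devm_add_action_or_reset(dev, demo_regulator_disable, supplies);
}

/* After: one devres call, no regulator state kept past probe. */
static int demo_power_up_new(struct device *dev)
{
	static const char * const supply_names[] = { "vdd", "svdd" };

	return devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(supply_names),
					      supply_names);
}

This is also why the sx_common.h hunk can delete the supplies[] member outright: with the managed variant, nothing about the regulators needs to outlive probe.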
+ config MAX31856 tristate "MAX31856 thermocouple sensor" depends on SPI diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile index dd08e562ffe0e6e74fa35f92de6f5a52bf5dfcc4..dfec8c6d301934befae6125b6bed6184d4a70301 100644 --- a/drivers/iio/temperature/Makefile +++ b/drivers/iio/temperature/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_IQS620AT_TEMP) += iqs620at-temp.o obj-$(CONFIG_LTC2983) += ltc2983.o obj-$(CONFIG_HID_SENSOR_TEMP) += hid-sensor-temperature.o obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o +obj-$(CONFIG_MAX30208) += max30208.o obj-$(CONFIG_MAX31856) += max31856.o obj-$(CONFIG_MAX31865) += max31865.o obj-$(CONFIG_MLX90614) += mlx90614.o diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c index a60ccf183687266fabc3ae062d2603c00de6f434..fcb96c44d9540793d20084579ab597155f3a3764 100644 --- a/drivers/iio/temperature/ltc2983.c +++ b/drivers/iio/temperature/ltc2983.c @@ -25,9 +25,12 @@ #define LTC2983_STATUS_REG 0x0000 #define LTC2983_TEMP_RES_START_REG 0x0010 #define LTC2983_TEMP_RES_END_REG 0x005F +#define LTC2983_EEPROM_KEY_REG 0x00B0 +#define LTC2983_EEPROM_READ_STATUS_REG 0x00D0 #define LTC2983_GLOBAL_CONFIG_REG 0x00F0 #define LTC2983_MULT_CHANNEL_START_REG 0x00F4 #define LTC2983_MULT_CHANNEL_END_REG 0x00F7 +#define LTC2986_EEPROM_STATUS_REG 0x00F9 #define LTC2983_MUX_CONFIG_REG 0x00FF #define LTC2983_CHAN_ASSIGN_START_REG 0x0200 #define LTC2983_CHAN_ASSIGN_END_REG 0x024F @@ -35,13 +38,21 @@ #define LTC2983_CUST_SENS_TBL_END_REG 0x03CF #define LTC2983_DIFFERENTIAL_CHAN_MIN 2 -#define LTC2983_MAX_CHANNELS_NR 20 #define LTC2983_MIN_CHANNELS_NR 1 #define LTC2983_SLEEP 0x97 #define LTC2983_CUSTOM_STEINHART_SIZE 24 #define LTC2983_CUSTOM_SENSOR_ENTRY_SZ 6 #define LTC2983_CUSTOM_STEINHART_ENTRY_SZ 4 +#define LTC2983_EEPROM_KEY 0xA53C0F5A +#define LTC2983_EEPROM_WRITE_CMD 0x15 +#define LTC2983_EEPROM_READ_CMD 0x16 +#define LTC2983_EEPROM_STATUS_FAILURE_MASK GENMASK(3, 1) +#define LTC2983_EEPROM_READ_FAILURE_MASK GENMASK(7, 0) + +#define LTC2983_EEPROM_WRITE_TIME_MS 2600 +#define LTC2983_EEPROM_READ_TIME_MS 20 + #define LTC2983_CHAN_START_ADDR(chan) \ (((chan - 1) * 4) + LTC2983_CHAN_ASSIGN_START_REG) #define LTC2983_CHAN_RES_ADDR(chan) \ @@ -171,6 +182,7 @@ enum { LTC2983_SENSOR_DIODE = 28, LTC2983_SENSOR_SENSE_RESISTOR = 29, LTC2983_SENSOR_DIRECT_ADC = 30, + LTC2983_SENSOR_ACTIVE_TEMP = 31, }; #define to_thermocouple(_sensor) \ @@ -191,7 +203,17 @@ enum { #define to_adc(_sensor) \ container_of(_sensor, struct ltc2983_adc, sensor) +#define to_temp(_sensor) \ + container_of(_sensor, struct ltc2983_temp, sensor) + +struct ltc2983_chip_info { + unsigned int max_channels_nr; + bool has_temp; + bool has_eeprom; +}; + struct ltc2983_data { + const struct ltc2983_chip_info *info; struct regmap *regmap; struct spi_device *spi; struct mutex lock; @@ -209,6 +231,8 @@ struct ltc2983_data { * Holds the converted temperature */ __be32 temp __aligned(IIO_DMA_MINALIGN); + __be32 chan_val; + __be32 eeprom_key; }; struct ltc2983_sensor { @@ -271,6 +295,12 @@ struct ltc2983_adc { bool single_ended; }; +struct ltc2983_temp { + struct ltc2983_sensor sensor; + struct ltc2983_custom_sensor *custom; + bool single_ended; +}; + /* * Convert to Q format numbers. These numbers are integers where * the number of integer and fractional bits are specified.
The resolution @@ -313,19 +343,18 @@ static int __ltc2983_fault_handler(const struct ltc2983_data *st, return 0; } -static int __ltc2983_chan_assign_common(const struct ltc2983_data *st, +static int __ltc2983_chan_assign_common(struct ltc2983_data *st, const struct ltc2983_sensor *sensor, u32 chan_val) { u32 reg = LTC2983_CHAN_START_ADDR(sensor->chan); - __be32 __chan_val; chan_val |= LTC2983_CHAN_TYPE(sensor->type); dev_dbg(&st->spi->dev, "Assign reg:0x%04X, val:0x%08X\n", reg, chan_val); - __chan_val = cpu_to_be32(chan_val); - return regmap_bulk_write(st->regmap, reg, &__chan_val, - sizeof(__chan_val)); + st->chan_val = cpu_to_be32(chan_val); + return regmap_bulk_write(st->regmap, reg, &st->chan_val, + sizeof(st->chan_val)); } static int __ltc2983_chan_custom_sensor_assign(struct ltc2983_data *st, @@ -606,6 +635,22 @@ static int ltc2983_adc_assign_chan(struct ltc2983_data *st, return __ltc2983_chan_assign_common(st, sensor, chan_val); } +static int ltc2983_temp_assign_chan(struct ltc2983_data *st, + const struct ltc2983_sensor *sensor) +{ + struct ltc2983_temp *temp = to_temp(sensor); + u32 chan_val; + int ret; + + chan_val = LTC2983_ADC_SINGLE_ENDED(temp->single_ended); + + ret = __ltc2983_chan_custom_sensor_assign(st, temp->custom, &chan_val); + if (ret) + return ret; + + return __ltc2983_chan_assign_common(st, sensor, chan_val); +} + static struct ltc2983_sensor * ltc2983_thermocouple_new(const struct fwnode_handle *child, struct ltc2983_data *st, const struct ltc2983_sensor *sensor) @@ -771,10 +816,10 @@ ltc2983_rtd_new(const struct fwnode_handle *child, struct ltc2983_data *st, if (rtd->sensor_config & LTC2983_RTD_4_WIRE_MASK) { /* 4-wire */ u8 min = LTC2983_DIFFERENTIAL_CHAN_MIN, - max = LTC2983_MAX_CHANNELS_NR; + max = st->info->max_channels_nr; if (rtd->sensor_config & LTC2983_RTD_ROTATION_MASK) - max = LTC2983_MAX_CHANNELS_NR - 1; + max = st->info->max_channels_nr - 1; if (((rtd->sensor_config & LTC2983_RTD_KELVIN_R_SENSE_MASK) == LTC2983_RTD_KELVIN_R_SENSE_MASK) && @@ -1143,6 +1188,38 @@ static struct ltc2983_sensor *ltc2983_adc_new(struct fwnode_handle *child, return &adc->sensor; } +static struct ltc2983_sensor *ltc2983_temp_new(struct fwnode_handle *child, + struct ltc2983_data *st, + const struct ltc2983_sensor *sensor) +{ + struct ltc2983_temp *temp; + + temp = devm_kzalloc(&st->spi->dev, sizeof(*temp), GFP_KERNEL); + if (!temp) + return ERR_PTR(-ENOMEM); + + if (fwnode_property_read_bool(child, "adi,single-ended")) + temp->single_ended = true; + + if (!temp->single_ended && + sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) { + dev_err(&st->spi->dev, "Invalid chan:%d for differential temp\n", + sensor->chan); + return ERR_PTR(-EINVAL); + } + + temp->custom = __ltc2983_custom_sensor_new(st, child, "adi,custom-temp", + false, 4096, true); + if (IS_ERR(temp->custom)) + return ERR_CAST(temp->custom); + + /* set common parameters */ + temp->sensor.assign_chan = ltc2983_temp_assign_chan; + temp->sensor.fault_handler = ltc2983_common_fault_handler; + + return &temp->sensor; +} + static int ltc2983_chan_read(struct ltc2983_data *st, const struct ltc2983_sensor *sensor, int *val) { @@ -1302,10 +1379,10 @@ static int ltc2983_parse_dt(struct ltc2983_data *st) /* check if we have a valid channel */ if (sensor.chan < LTC2983_MIN_CHANNELS_NR || - sensor.chan > LTC2983_MAX_CHANNELS_NR) { + sensor.chan > st->info->max_channels_nr) { ret = -EINVAL; dev_err(dev, "chan:%d must be from %u to %u\n", sensor.chan, - LTC2983_MIN_CHANNELS_NR, LTC2983_MAX_CHANNELS_NR); + LTC2983_MIN_CHANNELS_NR, 
st->info->max_channels_nr); goto put_child; } else if (channel_avail_mask & BIT(sensor.chan)) { ret = -EINVAL; @@ -1345,6 +1422,9 @@ static int ltc2983_parse_dt(struct ltc2983_data *st) st->iio_channels--; } else if (sensor.type == LTC2983_SENSOR_DIRECT_ADC) { st->sensors[chan] = ltc2983_adc_new(child, st, &sensor); + } else if (st->info->has_temp && + sensor.type == LTC2983_SENSOR_ACTIVE_TEMP) { + st->sensors[chan] = ltc2983_temp_new(child, st, &sensor); } else { dev_err(dev, "Unknown sensor type %d\n", sensor.type); ret = -EINVAL; @@ -1371,6 +1451,45 @@ put_child: return ret; } +static int ltc2983_eeprom_cmd(struct ltc2983_data *st, unsigned int cmd, + unsigned int wait_time, unsigned int status_reg, + unsigned long status_fail_mask) +{ + unsigned long time; + unsigned int val; + int ret; + + ret = regmap_bulk_write(st->regmap, LTC2983_EEPROM_KEY_REG, + &st->eeprom_key, sizeof(st->eeprom_key)); + if (ret) + return ret; + + reinit_completion(&st->completion); + + ret = regmap_write(st->regmap, LTC2983_STATUS_REG, + LTC2983_STATUS_START(true) | cmd); + if (ret) + return ret; + + time = wait_for_completion_timeout(&st->completion, + msecs_to_jiffies(wait_time)); + if (!time) { + dev_err(&st->spi->dev, "EEPROM command timed out\n"); + return -ETIMEDOUT; + } + + ret = regmap_read(st->regmap, status_reg, &val); + if (ret) + return ret; + + if (val & status_fail_mask) { + dev_err(&st->spi->dev, "EEPROM command failed: 0x%02X\n", val); + return -EINVAL; + } + + return 0; +} + static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio) { u32 iio_chan_t = 0, iio_chan_v = 0, chan, iio_idx = 0, status; @@ -1396,6 +1515,15 @@ static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio) if (ret) return ret; + if (st->info->has_eeprom && !assign_iio) { + ret = ltc2983_eeprom_cmd(st, LTC2983_EEPROM_READ_CMD, + LTC2983_EEPROM_READ_TIME_MS, + LTC2983_EEPROM_READ_STATUS_REG, + LTC2983_EEPROM_READ_FAILURE_MASK); + if (!ret) + return 0; + } + for (chan = 0; chan < st->num_channels; chan++) { u32 chan_type = 0, *iio_chan; @@ -1435,9 +1563,13 @@ static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio) static const struct regmap_range ltc2983_reg_ranges[] = { regmap_reg_range(LTC2983_STATUS_REG, LTC2983_STATUS_REG), regmap_reg_range(LTC2983_TEMP_RES_START_REG, LTC2983_TEMP_RES_END_REG), + regmap_reg_range(LTC2983_EEPROM_KEY_REG, LTC2983_EEPROM_KEY_REG), + regmap_reg_range(LTC2983_EEPROM_READ_STATUS_REG, + LTC2983_EEPROM_READ_STATUS_REG), regmap_reg_range(LTC2983_GLOBAL_CONFIG_REG, LTC2983_GLOBAL_CONFIG_REG), regmap_reg_range(LTC2983_MULT_CHANNEL_START_REG, LTC2983_MULT_CHANNEL_END_REG), + regmap_reg_range(LTC2986_EEPROM_STATUS_REG, LTC2986_EEPROM_STATUS_REG), regmap_reg_range(LTC2983_MUX_CONFIG_REG, LTC2983_MUX_CONFIG_REG), regmap_reg_range(LTC2983_CHAN_ASSIGN_START_REG, LTC2983_CHAN_ASSIGN_END_REG), @@ -1482,6 +1614,12 @@ static int ltc2983_probe(struct spi_device *spi) st = iio_priv(indio_dev); + st->info = device_get_match_data(&spi->dev); + if (!st->info) + st->info = (void *)spi_get_device_id(spi)->driver_data; + if (!st->info) + return -ENODEV; + st->regmap = devm_regmap_init_spi(spi, &ltc2983_regmap_config); if (IS_ERR(st->regmap)) { dev_err(&spi->dev, "Failed to initialize regmap\n"); @@ -1491,6 +1629,7 @@ static int ltc2983_probe(struct
spi_device *spi) return ret; } + if (st->info->has_eeprom) { + ret = ltc2983_eeprom_cmd(st, LTC2983_EEPROM_WRITE_CMD, + LTC2983_EEPROM_WRITE_TIME_MS, + LTC2986_EEPROM_STATUS_REG, + LTC2983_EEPROM_STATUS_FAILURE_MASK); + if (ret) + return ret; + } + indio_dev->name = name; indio_dev->num_channels = st->iio_channels; indio_dev->channels = st->iio_chan; @@ -1554,14 +1702,35 @@ static int ltc2983_suspend(struct device *dev) static DEFINE_SIMPLE_DEV_PM_OPS(ltc2983_pm_ops, ltc2983_suspend, ltc2983_resume); +static const struct ltc2983_chip_info ltc2983_chip_info_data = { + .max_channels_nr = 20, +}; + +static const struct ltc2983_chip_info ltc2984_chip_info_data = { + .max_channels_nr = 20, + .has_eeprom = true, +}; + +static const struct ltc2983_chip_info ltc2986_chip_info_data = { + .max_channels_nr = 10, + .has_temp = true, + .has_eeprom = true, +}; + static const struct spi_device_id ltc2983_id_table[] = { - { "ltc2983" }, + { "ltc2983", (kernel_ulong_t)&ltc2983_chip_info_data }, + { "ltc2984", (kernel_ulong_t)&ltc2984_chip_info_data }, + { "ltc2986", (kernel_ulong_t)&ltc2986_chip_info_data }, + { "ltm2985", (kernel_ulong_t)&ltc2986_chip_info_data }, {}, }; MODULE_DEVICE_TABLE(spi, ltc2983_id_table); static const struct of_device_id ltc2983_of_match[] = { - { .compatible = "adi,ltc2983" }, + { .compatible = "adi,ltc2983", .data = &ltc2983_chip_info_data }, + { .compatible = "adi,ltc2984", .data = &ltc2984_chip_info_data }, + { .compatible = "adi,ltc2986", .data = &ltc2986_chip_info_data }, + { .compatible = "adi,ltm2985", .data = &ltc2986_chip_info_data }, {}, }; MODULE_DEVICE_TABLE(of, ltc2983_of_match); diff --git a/drivers/iio/temperature/max30208.c b/drivers/iio/temperature/max30208.c new file mode 100644 index 0000000000000000000000000000000000000000..c85c21474711f47d8a345271b135d6c7645cfb5b --- /dev/null +++ b/drivers/iio/temperature/max30208.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright (c) Rajat Khandelwal <rajat.khandelwal@linux.intel.com> + * + * Maxim MAX30208 digital temperature sensor with 0.1°C accuracy + * (7-bit I2C slave address (0x50 - 0x53)) + */ + +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/iio/iio.h> + +#define MAX30208_STATUS 0x00 +#define MAX30208_STATUS_TEMP_RDY BIT(0) +#define MAX30208_INT_ENABLE 0x01 +#define MAX30208_INT_ENABLE_TEMP_RDY BIT(0) + +#define MAX30208_FIFO_OVF_CNTR 0x06 +#define MAX30208_FIFO_DATA_CNTR 0x07 +#define MAX30208_FIFO_DATA 0x08 + +#define MAX30208_FIFO_CONFIG 0x0a +#define MAX30208_FIFO_CONFIG_RO BIT(1) + +#define MAX30208_SYSTEM_CTRL 0x0c +#define MAX30208_SYSTEM_CTRL_RESET 0x01 + +#define MAX30208_TEMP_SENSOR_SETUP 0x14 +#define MAX30208_TEMP_SENSOR_SETUP_CONV BIT(0) + +struct max30208_data { + struct i2c_client *client; + struct iio_dev *indio_dev; + struct mutex lock; /* Lock to prevent concurrent reads of temperature readings */ +}; + +static const struct iio_chan_spec max30208_channels[] = { + { + .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), + }, +}; + +/** + * max30208_request() - Request a reading + * @data: Struct comprising member elements of the device + * + * Requests a reading from the device and waits until the conversion is ready. + */ +static int max30208_request(struct max30208_data *data) +{ + /* + * Sensor can take up to 500 ms to respond so execute a total of + * 10 retries to give the device sufficient time.
+ */ + int retries = 10; + u8 regval; + int ret; + + ret = i2c_smbus_read_byte_data(data->client, MAX30208_TEMP_SENSOR_SETUP); + if (ret < 0) + return ret; + + regval = ret | MAX30208_TEMP_SENSOR_SETUP_CONV; + + ret = i2c_smbus_write_byte_data(data->client, MAX30208_TEMP_SENSOR_SETUP, regval); + if (ret) + return ret; + + while (retries--) { + ret = i2c_smbus_read_byte_data(data->client, MAX30208_STATUS); + if (ret < 0) + return ret; + + if (ret & MAX30208_STATUS_TEMP_RDY) + return 0; + + msleep(50); + } + dev_err(&data->client->dev, "Temperature conversion failed\n"); + + return -ETIMEDOUT; +} + +static int max30208_update_temp(struct max30208_data *data) +{ + u8 data_count; + int ret; + + mutex_lock(&data->lock); + + ret = max30208_request(data); + if (ret) + goto unlock; + + ret = i2c_smbus_read_byte_data(data->client, MAX30208_FIFO_OVF_CNTR); + if (ret < 0) + goto unlock; + else if (!ret) { + ret = i2c_smbus_read_byte_data(data->client, MAX30208_FIFO_DATA_CNTR); + if (ret < 0) + goto unlock; + + data_count = ret; + } else + data_count = 1; + + while (data_count) { + ret = i2c_smbus_read_word_swapped(data->client, MAX30208_FIFO_DATA); + if (ret < 0) + goto unlock; + + data_count--; + } + +unlock: + mutex_unlock(&data->lock); + return ret; +} + +/** + * max30208_config_setup() - Set up FIFO configuration register + * @data: Struct comprising member elements of the device + * + * Sets the rollover bit to '1' to enable overwriting FIFO during overflow. + */ +static int max30208_config_setup(struct max30208_data *data) +{ + u8 regval; + int ret; + + ret = i2c_smbus_read_byte_data(data->client, MAX30208_FIFO_CONFIG); + if (ret < 0) + return ret; + + regval = ret | MAX30208_FIFO_CONFIG_RO; + + ret = i2c_smbus_write_byte_data(data->client, MAX30208_FIFO_CONFIG, regval); + if (ret) + return ret; + + return 0; +} + +static int max30208_read(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct max30208_data *data = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = max30208_update_temp(data); + if (ret < 0) + return ret; + + *val = sign_extend32(ret, 15); + return IIO_VAL_INT; + + case IIO_CHAN_INFO_SCALE: + *val = 5; + return IIO_VAL_INT; + + default: + return -EINVAL; + } +} + +static const struct iio_info max30208_info = { + .read_raw = max30208_read, +}; + +static int max30208_probe(struct i2c_client *i2c) +{ + struct device *dev = &i2c->dev; + struct max30208_data *data; + struct iio_dev *indio_dev; + int ret; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + data = iio_priv(indio_dev); + data->client = i2c; + mutex_init(&data->lock); + + indio_dev->name = "max30208"; + indio_dev->channels = max30208_channels; + indio_dev->num_channels = ARRAY_SIZE(max30208_channels); + indio_dev->info = &max30208_info; + indio_dev->modes = INDIO_DIRECT_MODE; + + ret = i2c_smbus_write_byte_data(data->client, MAX30208_SYSTEM_CTRL, + MAX30208_SYSTEM_CTRL_RESET); + if (ret) { + dev_err(dev, "Failure in performing reset\n"); + return ret; + } + + msleep(50); + + ret = max30208_config_setup(data); + if (ret) + return ret; + + ret = devm_iio_device_register(dev, indio_dev); + if (ret) { + dev_err(dev, "Failed to register IIO device\n"); + return ret; + } + + return 0; +} + +static const struct i2c_device_id max30208_id_table[] = { + { "max30208" }, + { } +}; +MODULE_DEVICE_TABLE(i2c, max30208_id_table); + +static const struct acpi_device_id max30208_acpi_match[] = { + { 
"MAX30208" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, max30208_acpi_match); + +static const struct of_device_id max30208_of_match[] = { + { .compatible = "maxim,max30208" }, + { } +}; +MODULE_DEVICE_TABLE(of, max30208_of_match); + +static struct i2c_driver max30208_driver = { + .driver = { + .name = "max30208", + .of_match_table = max30208_of_match, + .acpi_match_table = max30208_acpi_match, + }, + .probe_new = max30208_probe, + .id_table = max30208_id_table, +}; +module_i2c_driver(max30208_driver); + +MODULE_AUTHOR("Rajat Khandelwal "); +MODULE_DESCRIPTION("Maxim MAX30208 digital temperature sensor"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c index 8eb0f962ed25cdd638aa079e5549e3c7f4c13ea7..909fadb623491ee7fe0ef940772aeba48371cf6e 100644 --- a/drivers/iio/temperature/mlx90614.c +++ b/drivers/iio/temperature/mlx90614.c @@ -537,9 +537,9 @@ static int mlx90614_probe_num_ir_sensors(struct i2c_client *client) return (ret & MLX90614_CONFIG_DUAL_MASK) ? 1 : 0; } -static int mlx90614_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mlx90614_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct iio_dev *indio_dev; struct mlx90614_data *data; int ret; @@ -675,7 +675,7 @@ static struct i2c_driver mlx90614_driver = { .of_match_table = mlx90614_of_match, .pm = pm_ptr(&mlx90614_pm_ops), }, - .probe = mlx90614_probe, + .probe_new = mlx90614_probe, .remove = mlx90614_remove, .id_table = mlx90614_id, }; diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c index f6dec0e5f0973c4c9414e6d97010b152c5bba54e..753b7a4ccfddc2886f01f283fb692c3c16b582b7 100644 --- a/drivers/iio/temperature/mlx90632.c +++ b/drivers/iio/temperature/mlx90632.c @@ -6,11 +6,14 @@ * * Driver for the Melexis MLX90632 I2C 16-bit IR thermopile sensor */ +#include #include +#include #include #include #include #include +#include #include #include #include @@ -55,6 +58,12 @@ #define MLX90632_EE_Ha 0x2481 /* Ha customer calib value reg 16bit */ #define MLX90632_EE_Hb 0x2482 /* Hb customer calib value reg 16bit */ +#define MLX90632_EE_MEDICAL_MEAS1 0x24E1 /* Medical measurement 1 16bit */ +#define MLX90632_EE_MEDICAL_MEAS2 0x24E2 /* Medical measurement 2 16bit */ +#define MLX90632_EE_EXTENDED_MEAS1 0x24F1 /* Extended measurement 1 16bit */ +#define MLX90632_EE_EXTENDED_MEAS2 0x24F2 /* Extended measurement 2 16bit */ +#define MLX90632_EE_EXTENDED_MEAS3 0x24F3 /* Extended measurement 3 16bit */ + /* Register addresses - volatile */ #define MLX90632_REG_I2C_ADDR 0x3000 /* Chip I2C address register */ @@ -62,13 +71,19 @@ #define MLX90632_REG_CONTROL 0x3001 /* Control Register address */ #define MLX90632_CFG_PWR_MASK GENMASK(2, 1) /* PowerMode Mask */ #define MLX90632_CFG_MTYP_MASK GENMASK(8, 4) /* Meas select Mask */ +#define MLX90632_CFG_SOB_MASK BIT(11) /* PowerModes statuses */ #define MLX90632_PWR_STATUS(ctrl_val) (ctrl_val << 1) #define MLX90632_PWR_STATUS_HALT MLX90632_PWR_STATUS(0) /* hold */ -#define MLX90632_PWR_STATUS_SLEEP_STEP MLX90632_PWR_STATUS(1) /* sleep step*/ +#define MLX90632_PWR_STATUS_SLEEP_STEP MLX90632_PWR_STATUS(1) /* sleep step */ #define MLX90632_PWR_STATUS_STEP MLX90632_PWR_STATUS(2) /* step */ -#define MLX90632_PWR_STATUS_CONTINUOUS MLX90632_PWR_STATUS(3) /* continuous*/ +#define MLX90632_PWR_STATUS_CONTINUOUS MLX90632_PWR_STATUS(3) /* continuous */ + +#define MLX90632_EE_RR GENMASK(10, 8) /* Only Refresh Rate bits */ +#define 
MLX90632_REFRESH_RATE(ee_val) FIELD_GET(MLX90632_EE_RR, ee_val) + /* Extract Refresh Rate from ee register */ +#define MLX90632_REFRESH_RATE_STATUS(refresh_rate) (refresh_rate << 8) /* Measurement types */ #define MLX90632_MTYP_MEDICAL 0 @@ -116,8 +131,9 @@ #define MLX90632_REF_12 12LL /* ResCtrlRef value of Ch 1 or Ch 2 */ #define MLX90632_REF_3 12LL /* ResCtrlRef value of Channel 3 */ #define MLX90632_MAX_MEAS_NUM 31 /* Maximum measurements in list */ -#define MLX90632_SLEEP_DELAY_MS 3000 /* Autosleep delay */ +#define MLX90632_SLEEP_DELAY_MS 6000 /* Autosleep delay */ #define MLX90632_EXTENDED_LIMIT 27000 /* Extended mode raw value limit */ +#define MLX90632_MEAS_MAX_TIME 2000 /* Max measurement time in ms for the lowest refresh rate */ /** * struct mlx90632_data - private data for the MLX90632 device @@ -130,6 +146,9 @@ * @object_ambient_temperature: Ambient temperature at object (might differ of * the ambient temperature of sensor. * @regulator: Regulator of the device + * @powerstatus: Current POWER status of the device + * @interaction_ts: Timestamp of the last temperature read that is used + * for power management in jiffies */ struct mlx90632_data { struct i2c_client *client; @@ -139,6 +158,8 @@ struct mlx90632_data { u8 mtyp; u32 object_ambient_temperature; struct regulator *regulator; + int powerstatus; + unsigned long interaction_ts; }; static const struct regmap_range mlx90632_volatile_reg_range[] = { @@ -158,6 +179,8 @@ static const struct regmap_range mlx90632_read_reg_range[] = { regmap_reg_range(MLX90632_EE_VERSION, MLX90632_EE_Ka), regmap_reg_range(MLX90632_EE_CTRL, MLX90632_EE_I2C_ADDR), regmap_reg_range(MLX90632_EE_Ha, MLX90632_EE_Hb), + regmap_reg_range(MLX90632_EE_MEDICAL_MEAS1, MLX90632_EE_MEDICAL_MEAS2), + regmap_reg_range(MLX90632_EE_EXTENDED_MEAS1, MLX90632_EE_EXTENDED_MEAS3), regmap_reg_range(MLX90632_REG_I2C_ADDR, MLX90632_REG_CONTROL), regmap_reg_range(MLX90632_REG_I2C_CMD, MLX90632_REG_I2C_CMD), regmap_reg_range(MLX90632_REG_STATUS, MLX90632_REG_STATUS), @@ -196,18 +219,40 @@ static const struct regmap_config mlx90632_regmap = { .cache_type = REGCACHE_RBTREE, }; -static s32 mlx90632_pwr_set_sleep_step(struct regmap *regmap) +static int mlx90632_pwr_set_sleep_step(struct regmap *regmap) { - return regmap_update_bits(regmap, MLX90632_REG_CONTROL, - MLX90632_CFG_PWR_MASK, - MLX90632_PWR_STATUS_SLEEP_STEP); + struct mlx90632_data *data = + iio_priv(dev_get_drvdata(regmap_get_device(regmap))); + int ret; + + if (data->powerstatus == MLX90632_PWR_STATUS_SLEEP_STEP) + return 0; + + ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL, MLX90632_CFG_PWR_MASK, + MLX90632_PWR_STATUS_SLEEP_STEP); + if (ret < 0) + return ret; + + data->powerstatus = MLX90632_PWR_STATUS_SLEEP_STEP; + return 0; } -static s32 mlx90632_pwr_continuous(struct regmap *regmap) +static int mlx90632_pwr_continuous(struct regmap *regmap) { - return regmap_update_bits(regmap, MLX90632_REG_CONTROL, - MLX90632_CFG_PWR_MASK, - MLX90632_PWR_STATUS_CONTINUOUS); + struct mlx90632_data *data = + iio_priv(dev_get_drvdata(regmap_get_device(regmap))); + int ret; + + if (data->powerstatus == MLX90632_PWR_STATUS_CONTINUOUS) + return 0; + + ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL, MLX90632_CFG_PWR_MASK, + MLX90632_PWR_STATUS_CONTINUOUS); + if (ret < 0) + return ret; + + data->powerstatus = MLX90632_PWR_STATUS_CONTINUOUS; + return 0; } /** @@ -219,6 +264,63 @@ static void mlx90632_reset_delay(void) usleep_range(150, 200); } +static int mlx90632_get_measurement_time(struct regmap *regmap, u16 meas) +{ + 
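/* meas selects one of the MLX90632_EE_*_MEAS* EEPROM registers; bits 10:8 (MLX90632_EE_RR) hold the refresh rate decoded below. */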
unsigned int reg; + int ret; + + ret = regmap_read(regmap, meas, &reg); + if (ret < 0) + return ret; + + return MLX90632_MEAS_MAX_TIME >> FIELD_GET(MLX90632_EE_RR, reg); +} + +static int mlx90632_calculate_dataset_ready_time(struct mlx90632_data *data) +{ + unsigned int refresh_time; + int ret; + + if (data->mtyp == MLX90632_MTYP_MEDICAL) { + ret = mlx90632_get_measurement_time(data->regmap, + MLX90632_EE_MEDICAL_MEAS1); + if (ret < 0) + return ret; + + refresh_time = ret; + + ret = mlx90632_get_measurement_time(data->regmap, + MLX90632_EE_MEDICAL_MEAS2); + if (ret < 0) + return ret; + + refresh_time += ret; + } else { + ret = mlx90632_get_measurement_time(data->regmap, + MLX90632_EE_EXTENDED_MEAS1); + if (ret < 0) + return ret; + + refresh_time = ret; + + ret = mlx90632_get_measurement_time(data->regmap, + MLX90632_EE_EXTENDED_MEAS2); + if (ret < 0) + return ret; + + refresh_time += ret; + + ret = mlx90632_get_measurement_time(data->regmap, + MLX90632_EE_EXTENDED_MEAS3); + if (ret < 0) + return ret; + + refresh_time += ret; + } + + return refresh_time; +} + /** * mlx90632_perform_measurement() - Trigger and retrieve current measurement cycle * @data: pointer to mlx90632_data object containing regmap information @@ -249,26 +351,75 @@ static int mlx90632_perform_measurement(struct mlx90632_data *data) return (reg_status & MLX90632_STAT_CYCLE_POS) >> 2; } -static int mlx90632_set_meas_type(struct regmap *regmap, u8 type) +/** + * mlx90632_perform_measurement_burst() - Trigger and retrieve current measurement + * cycle in step sleep mode + * @data: pointer to mlx90632_data object containing regmap information + * + * Perform a measurement and return 2 as the measurement cycle position + * reported by the sensor. This is a blocking function for an amount of time + * dependent on the sensor refresh rate.
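+ * For example, per mlx90632_calculate_dataset_ready_time() above, a medical + * measurement at the lowest 0.5 Hz refresh rate sleeps for up to + * 2 * MLX90632_MEAS_MAX_TIME = 4000 ms before polling the status register.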
+ */ +static int mlx90632_perform_measurement_burst(struct mlx90632_data *data) +{ + unsigned int reg_status; + int ret; + + ret = regmap_write_bits(data->regmap, MLX90632_REG_CONTROL, + MLX90632_CFG_SOB_MASK, MLX90632_CFG_SOB_MASK); + if (ret < 0) + return ret; + + ret = mlx90632_calculate_dataset_ready_time(data); + if (ret < 0) + return ret; + + msleep(ret); /* Wait minimum time for dataset to be ready */ + + ret = regmap_read_poll_timeout(data->regmap, MLX90632_REG_STATUS, + reg_status, + (reg_status & MLX90632_STAT_BUSY) == 0, + 10000, 100 * 10000); + if (ret < 0) { + dev_err(&data->client->dev, "data not ready"); + return -ETIMEDOUT; + } + + return 2; +} + +static int mlx90632_set_meas_type(struct mlx90632_data *data, u8 type) { + int current_powerstatus; int ret; - if ((type != MLX90632_MTYP_MEDICAL) && (type != MLX90632_MTYP_EXTENDED)) - return -EINVAL; + if (data->mtyp == type) + return 0; + + current_powerstatus = data->powerstatus; + ret = mlx90632_pwr_continuous(data->regmap); + if (ret < 0) + return ret; - ret = regmap_write(regmap, MLX90632_REG_I2C_CMD, MLX90632_RESET_CMD); + ret = regmap_write(data->regmap, MLX90632_REG_I2C_CMD, MLX90632_RESET_CMD); if (ret < 0) return ret; mlx90632_reset_delay(); - ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL, + ret = regmap_update_bits(data->regmap, MLX90632_REG_CONTROL, (MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK), (MLX90632_MTYP_STATUS(type) | MLX90632_PWR_STATUS_HALT)); if (ret < 0) return ret; - return mlx90632_pwr_continuous(regmap); + data->mtyp = type; + data->powerstatus = MLX90632_PWR_STATUS_HALT; + + if (current_powerstatus == MLX90632_PWR_STATUS_SLEEP_STEP) + return mlx90632_pwr_set_sleep_step(data->regmap); + + return mlx90632_pwr_continuous(data->regmap); } static int mlx90632_channel_new_select(int perform_ret, uint8_t *channel_new, @@ -284,7 +435,7 @@ static int mlx90632_channel_new_select(int perform_ret, uint8_t *channel_new, *channel_old = 1; break; default: - return -EINVAL; + return -ECHRNG; } return 0; @@ -293,8 +444,8 @@ static int mlx90632_channel_new_select(int perform_ret, uint8_t *channel_new, static int mlx90632_read_ambient_raw(struct regmap *regmap, s16 *ambient_new_raw, s16 *ambient_old_raw) { - int ret; unsigned int read_tmp; + int ret; ret = regmap_read(regmap, MLX90632_RAM_3(1), &read_tmp); if (ret < 0) @@ -313,11 +464,11 @@ static int mlx90632_read_object_raw(struct regmap *regmap, int perform_measurement_ret, s16 *object_new_raw, s16 *object_old_raw) { - int ret; unsigned int read_tmp; - s16 read; - u8 channel = 0; u8 channel_old = 0; + u8 channel = 0; + s16 read; + int ret; ret = mlx90632_channel_new_select(perform_measurement_ret, &channel, &channel_old); @@ -352,14 +503,34 @@ static int mlx90632_read_all_channel(struct mlx90632_data *data, s16 *ambient_new_raw, s16 *ambient_old_raw, s16 *object_new_raw, s16 *object_old_raw) { - s32 ret, measurement; + s32 measurement; + int ret; mutex_lock(&data->lock); - measurement = mlx90632_perform_measurement(data); - if (measurement < 0) { - ret = measurement; + ret = mlx90632_set_meas_type(data, MLX90632_MTYP_MEDICAL); + if (ret < 0) + goto read_unlock; + + switch (data->powerstatus) { + case MLX90632_PWR_STATUS_CONTINUOUS: + ret = mlx90632_perform_measurement(data); + if (ret < 0) + goto read_unlock; + + break; + case MLX90632_PWR_STATUS_SLEEP_STEP: + ret = mlx90632_perform_measurement_burst(data); + if (ret < 0) + goto read_unlock; + + break; + default: + ret = -EOPNOTSUPP; goto read_unlock; } + + measurement = ret; /* If we came here ret holds the 
measurement position */ + ret = mlx90632_read_ambient_raw(data->regmap, ambient_new_raw, ambient_old_raw); if (ret < 0) @@ -441,14 +612,26 @@ static int mlx90632_read_all_channel_extended(struct mlx90632_data *data, s16 *o s32 ret, meas; mutex_lock(&data->lock); - ret = mlx90632_set_meas_type(data->regmap, MLX90632_MTYP_EXTENDED); + ret = mlx90632_set_meas_type(data, MLX90632_MTYP_EXTENDED); if (ret < 0) goto read_unlock; - ret = read_poll_timeout(mlx90632_perform_measurement, meas, meas == 19, - 50000, 800000, false, data); - if (ret != 0) + switch (data->powerstatus) { + case MLX90632_PWR_STATUS_CONTINUOUS: + ret = read_poll_timeout(mlx90632_perform_measurement, meas, meas == 19, + 50000, 800000, false, data); + if (ret) + goto read_unlock; + break; + case MLX90632_PWR_STATUS_SLEEP_STEP: + ret = mlx90632_perform_measurement_burst(data); + if (ret < 0) + goto read_unlock; + break; + default: + ret = -EOPNOTSUPP; goto read_unlock; + } ret = mlx90632_read_object_raw_extended(data->regmap, object_new_raw); if (ret < 0) @@ -457,8 +640,6 @@ static int mlx90632_read_all_channel_extended(struct mlx90632_data *data, s16 *o ret = mlx90632_read_ambient_raw_extended(data->regmap, ambient_new_raw, ambient_old_raw); read_unlock: - (void) mlx90632_set_meas_type(data->regmap, MLX90632_MTYP_MEDICAL); - mutex_unlock(&data->lock); return ret; } @@ -466,9 +647,9 @@ read_unlock: static int mlx90632_read_ee_register(struct regmap *regmap, u16 reg_lsb, s32 *reg_value) { - s32 ret; unsigned int read; u32 value; + int ret; ret = regmap_read(regmap, reg_lsb, &read); if (ret < 0) @@ -632,12 +813,12 @@ static s32 mlx90632_calc_temp_object_extended(s64 object, s64 ambient, s64 refle static int mlx90632_calc_object_dsp105(struct mlx90632_data *data, int *val) { - s32 ret; + s16 ambient_new_raw, ambient_old_raw, object_new_raw, object_old_raw; s32 Ea, Eb, Fa, Fb, Ga; unsigned int read_tmp; - s16 Ha, Hb, Gb, Ka; - s16 ambient_new_raw, ambient_old_raw, object_new_raw, object_old_raw; s64 object, ambient; + s16 Ha, Hb, Gb, Ka; + int ret; ret = mlx90632_read_ee_register(data->regmap, MLX90632_EE_Ea, &Ea); if (ret < 0) @@ -711,11 +892,11 @@ static int mlx90632_calc_object_dsp105(struct mlx90632_data *data, int *val) static int mlx90632_calc_ambient_dsp105(struct mlx90632_data *data, int *val) { - s32 ret; + s16 ambient_new_raw, ambient_old_raw; unsigned int read_tmp; s32 PT, PR, PG, PO; + int ret; s16 Gb; - s16 ambient_new_raw, ambient_old_raw; ret = mlx90632_read_ee_register(data->regmap, MLX90632_EE_P_R, &PR); if (ret < 0) @@ -743,12 +924,73 @@ static int mlx90632_calc_ambient_dsp105(struct mlx90632_data *data, int *val) return ret; } +static int mlx90632_get_refresh_rate(struct mlx90632_data *data, + int *refresh_rate) +{ + unsigned int meas1; + int ret; + + ret = regmap_read(data->regmap, MLX90632_EE_MEDICAL_MEAS1, &meas1); + if (ret < 0) + return ret; + + *refresh_rate = MLX90632_REFRESH_RATE(meas1); + + return ret; +} + +static const int mlx90632_freqs[][2] = { + {0, 500000}, + {1, 0}, + {2, 0}, + {4, 0}, + {8, 0}, + {16, 0}, + {32, 0}, + {64, 0} +}; + +/** + * mlx90632_pm_interraction_wakeup() - Measure time between user interactions to change powermode + * @data: pointer to mlx90632_data object containing interaction_ts information + * + * Switch to continuous mode when interaction is faster than MLX90632_MEAS_MAX_TIME. Update the + * interaction_ts for each function call with the jiffies to enable measurement between function + * calls. Initial value of the interaction_ts needs to be set before this function call. 
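+ * For example, with MLX90632_MEAS_MAX_TIME = 2000 the window checked below is + * 2100 ms: two reads arriving closer together than that switch a sensor that + * is in sleep-step mode back to continuous mode.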
+ */ +static int mlx90632_pm_interraction_wakeup(struct mlx90632_data *data) +{ + unsigned long now; + int ret; + + now = jiffies; + if (time_in_range(now, data->interaction_ts, + data->interaction_ts + + msecs_to_jiffies(MLX90632_MEAS_MAX_TIME + 100))) { + if (data->powerstatus == MLX90632_PWR_STATUS_SLEEP_STEP) { + ret = mlx90632_pwr_continuous(data->regmap); + if (ret < 0) + return ret; + } + } + + data->interaction_ts = now; + + return 0; +} + static int mlx90632_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *channel, int *val, int *val2, long mask) { struct mlx90632_data *data = iio_priv(indio_dev); int ret; + int cr; + + pm_runtime_get_sync(&data->client->dev); + ret = mlx90632_pm_interraction_wakeup(data); + if (ret < 0) + goto mlx90632_read_raw_pm; switch (mask) { case IIO_CHAN_INFO_PROCESSED: @@ -756,16 +998,22 @@ static int mlx90632_read_raw(struct iio_dev *indio_dev, case IIO_MOD_TEMP_AMBIENT: ret = mlx90632_calc_ambient_dsp105(data, val); if (ret < 0) - return ret; - return IIO_VAL_INT; + goto mlx90632_read_raw_pm; + + ret = IIO_VAL_INT; + break; case IIO_MOD_TEMP_OBJECT: ret = mlx90632_calc_object_dsp105(data, val); if (ret < 0) - return ret; - return IIO_VAL_INT; + goto mlx90632_read_raw_pm; + + ret = IIO_VAL_INT; + break; default: - return -EINVAL; + ret = -EINVAL; + break; } + break; case IIO_CHAN_INFO_CALIBEMISSIVITY: if (data->emissivity == 1000) { *val = 1; @@ -774,13 +1022,30 @@ static int mlx90632_read_raw(struct iio_dev *indio_dev, *val = 0; *val2 = data->emissivity * 1000; } - return IIO_VAL_INT_PLUS_MICRO; + ret = IIO_VAL_INT_PLUS_MICRO; + break; case IIO_CHAN_INFO_CALIBAMBIENT: *val = data->object_ambient_temperature; - return IIO_VAL_INT; + ret = IIO_VAL_INT; + break; + case IIO_CHAN_INFO_SAMP_FREQ: + ret = mlx90632_get_refresh_rate(data, &cr); + if (ret < 0) + goto mlx90632_read_raw_pm; + + *val = mlx90632_freqs[cr][0]; + *val2 = mlx90632_freqs[cr][1]; + ret = IIO_VAL_INT_PLUS_MICRO; + break; default: - return -EINVAL; + ret = -EINVAL; + break; } + +mlx90632_read_raw_pm: + pm_runtime_mark_last_busy(&data->client->dev); + pm_runtime_put_autosuspend(&data->client->dev); + return ret; } static int mlx90632_write_raw(struct iio_dev *indio_dev, @@ -805,12 +1070,30 @@ static int mlx90632_write_raw(struct iio_dev *indio_dev, } } +static int mlx90632_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_SAMP_FREQ: + *vals = (int *)mlx90632_freqs; + *type = IIO_VAL_INT_PLUS_MICRO; + *length = 2 * ARRAY_SIZE(mlx90632_freqs); + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + static const struct iio_chan_spec mlx90632_channels[] = { { .type = IIO_TEMP, .modified = 1, .channel2 = IIO_MOD_TEMP_AMBIENT, .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), + .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), }, { .type = IIO_TEMP, @@ -818,19 +1101,29 @@ static const struct iio_chan_spec mlx90632_channels[] = { .channel2 = IIO_MOD_TEMP_OBJECT, .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | BIT(IIO_CHAN_INFO_CALIBEMISSIVITY) | BIT(IIO_CHAN_INFO_CALIBAMBIENT), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), + .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), }, }; static const struct iio_info mlx90632_info = { .read_raw = mlx90632_read_raw, .write_raw = mlx90632_write_raw, + .read_avail = mlx90632_read_avail, }; -static int 
mlx90632_sleep(struct mlx90632_data *data) +static void mlx90632_sleep(void *_data) +{ + struct mlx90632_data *data = _data; + + mlx90632_pwr_set_sleep_step(data->regmap); +} + +static int mlx90632_suspend(struct mlx90632_data *data) { regcache_mark_dirty(data->regmap); - dev_dbg(&data->client->dev, "Requesting sleep"); + dev_dbg(&data->client->dev, "Requesting suspend"); return mlx90632_pwr_set_sleep_step(data->regmap); } @@ -875,14 +1168,14 @@ static int mlx90632_enable_regulator(struct mlx90632_data *data) return ret; } -static int mlx90632_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int mlx90632_probe(struct i2c_client *client) { - struct iio_dev *indio_dev; + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct mlx90632_data *mlx90632; + struct iio_dev *indio_dev; struct regmap *regmap; - int ret; unsigned int read; + int ret; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*mlx90632)); if (!indio_dev) { @@ -902,6 +1195,7 @@ static int mlx90632_probe(struct i2c_client *client, mlx90632->client = client; mlx90632->regmap = regmap; mlx90632->mtyp = MLX90632_MTYP_MEDICAL; + mlx90632->powerstatus = MLX90632_PWR_STATUS_HALT; mutex_init(&mlx90632->lock); indio_dev->name = id->name; @@ -933,6 +1227,13 @@ static int mlx90632_probe(struct i2c_client *client, return ret; } + ret = devm_add_action_or_reset(&client->dev, mlx90632_sleep, mlx90632); + if (ret < 0) { + dev_err(&client->dev, "Failed to setup low power cleanup action %d\n", + ret); + return ret; + } + ret = regmap_read(mlx90632->regmap, MLX90632_EE_VERSION, &read); if (ret < 0) { dev_err(&client->dev, "read of version failed: %d\n", ret); @@ -961,32 +1262,20 @@ static int mlx90632_probe(struct i2c_client *client, mlx90632->emissivity = 1000; mlx90632->object_ambient_temperature = 25000; /* 25 degrees milliCelsius */ + mlx90632->interaction_ts = jiffies; /* Set initial value */ - pm_runtime_disable(&client->dev); - ret = pm_runtime_set_active(&client->dev); - if (ret < 0) { - mlx90632_sleep(mlx90632); + pm_runtime_get_noresume(&client->dev); + pm_runtime_set_active(&client->dev); + + ret = devm_pm_runtime_enable(&client->dev); + if (ret) return ret; - } - pm_runtime_enable(&client->dev); + pm_runtime_set_autosuspend_delay(&client->dev, MLX90632_SLEEP_DELAY_MS); pm_runtime_use_autosuspend(&client->dev); + pm_runtime_put_autosuspend(&client->dev); - return iio_device_register(indio_dev); -} - -static void mlx90632_remove(struct i2c_client *client) -{ - struct iio_dev *indio_dev = i2c_get_clientdata(client); - struct mlx90632_data *data = iio_priv(indio_dev); - - iio_device_unregister(indio_dev); - - pm_runtime_disable(&client->dev); - pm_runtime_set_suspended(&client->dev); - pm_runtime_put_noidle(&client->dev); - - mlx90632_sleep(data); + return devm_iio_device_register(&client->dev, indio_dev); } static const struct i2c_device_id mlx90632_id[] = { @@ -1001,33 +1290,54 @@ static const struct of_device_id mlx90632_of_match[] = { }; MODULE_DEVICE_TABLE(of, mlx90632_of_match); -static int __maybe_unused mlx90632_pm_suspend(struct device *dev) +static int mlx90632_pm_suspend(struct device *dev) { - struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); - struct mlx90632_data *data = iio_priv(indio_dev); + struct mlx90632_data *data = iio_priv(dev_get_drvdata(dev)); + int ret; - return mlx90632_sleep(data); + ret = mlx90632_suspend(data); + if (ret < 0) + return ret; + + ret = regulator_disable(data->regulator); + if (ret < 0) + dev_err(regmap_get_device(data->regmap), + 
"Failed to disable power regulator: %d\n", ret); + + return ret; } -static int __maybe_unused mlx90632_pm_resume(struct device *dev) +static int mlx90632_pm_resume(struct device *dev) { - struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); - struct mlx90632_data *data = iio_priv(indio_dev); + struct mlx90632_data *data = iio_priv(dev_get_drvdata(dev)); + int ret; + + ret = mlx90632_enable_regulator(data); + if (ret < 0) + return ret; return mlx90632_wakeup(data); } -static UNIVERSAL_DEV_PM_OPS(mlx90632_pm_ops, mlx90632_pm_suspend, - mlx90632_pm_resume, NULL); +static int mlx90632_pm_runtime_suspend(struct device *dev) +{ + struct mlx90632_data *data = iio_priv(dev_get_drvdata(dev)); + + return mlx90632_pwr_set_sleep_step(data->regmap); +} + +static const struct dev_pm_ops mlx90632_pm_ops = { + SYSTEM_SLEEP_PM_OPS(mlx90632_pm_suspend, mlx90632_pm_resume) + RUNTIME_PM_OPS(mlx90632_pm_runtime_suspend, NULL, NULL) +}; static struct i2c_driver mlx90632_driver = { .driver = { .name = "mlx90632", .of_match_table = mlx90632_of_match, - .pm = &mlx90632_pm_ops, + .pm = pm_ptr(&mlx90632_pm_ops), }, - .probe = mlx90632_probe, - .remove = mlx90632_remove, + .probe_new = mlx90632_probe, .id_table = mlx90632_id, }; module_i2c_driver(mlx90632_driver); diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c index 706a760f30b4799ceea1ee194d522e3f53ef2744..cdf08477e63f7bc04649ce2f6ddad2643dba442d 100644 --- a/drivers/iio/temperature/tmp006.c +++ b/drivers/iio/temperature/tmp006.c @@ -212,8 +212,7 @@ static void tmp006_powerdown_cleanup(void *dev) tmp006_power(dev, false); } -static int tmp006_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int tmp006_probe(struct i2c_client *client) { struct iio_dev *indio_dev; struct tmp006_data *data; @@ -284,7 +283,7 @@ static struct i2c_driver tmp006_driver = { .name = "tmp006", .pm = pm_sleep_ptr(&tmp006_pm_ops), }, - .probe = tmp006_probe, + .probe_new = tmp006_probe, .id_table = tmp006_id, }; module_i2c_driver(tmp006_driver); diff --git a/drivers/iio/temperature/tmp007.c b/drivers/iio/temperature/tmp007.c index f3420d8a0e3594d6513a50b47d00be0748ccdb7a..8d27aa3bdd6dae45e4b5963d75d519531dfede0e 100644 --- a/drivers/iio/temperature/tmp007.c +++ b/drivers/iio/temperature/tmp007.c @@ -446,9 +446,9 @@ static void tmp007_powerdown_action_cb(void *priv) tmp007_powerdown(data); } -static int tmp007_probe(struct i2c_client *client, - const struct i2c_device_id *tmp007_id) +static int tmp007_probe(struct i2c_client *client) { + const struct i2c_device_id *tmp007_id = i2c_client_get_device_id(client); struct tmp007_data *data; struct iio_dev *indio_dev; int ret; @@ -574,7 +574,7 @@ static struct i2c_driver tmp007_driver = { .of_match_table = tmp007_of_match, .pm = pm_sleep_ptr(&tmp007_pm_ops), }, - .probe = tmp007_probe, + .probe_new = tmp007_probe, .id_table = tmp007_id, }; module_i2c_driver(tmp007_driver); diff --git a/drivers/iio/temperature/tsys01.c b/drivers/iio/temperature/tsys01.c index 60d58ec5b063619b9f474e6c019b36813adbc86c..30b268ba82ccf91521582b2132886c4db68f38db 100644 --- a/drivers/iio/temperature/tsys01.c +++ b/drivers/iio/temperature/tsys01.c @@ -176,8 +176,7 @@ static int tsys01_probe(struct iio_dev *indio_dev, struct device *dev) return devm_iio_device_register(dev, indio_dev); } -static int tsys01_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int tsys01_i2c_probe(struct i2c_client *client) { struct tsys01_dev *dev_data; struct iio_dev *indio_dev; @@ -219,7 
+218,7 @@ static const struct of_device_id tsys01_of_match[] = { MODULE_DEVICE_TABLE(of, tsys01_of_match); static struct i2c_driver tsys01_driver = { - .probe = tsys01_i2c_probe, + .probe_new = tsys01_i2c_probe, .id_table = tsys01_id, .driver = { .name = "tsys01", diff --git a/drivers/iio/temperature/tsys02d.c b/drivers/iio/temperature/tsys02d.c index 49c275e4f51068852032a7cc87aa76183226704e..cdefe046ab17d784f664f8ddd0119226b5e0fe39 100644 --- a/drivers/iio/temperature/tsys02d.c +++ b/drivers/iio/temperature/tsys02d.c @@ -121,9 +121,9 @@ static const struct iio_info tsys02d_info = { .attrs = &tsys02d_attribute_group, }; -static int tsys02d_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int tsys02d_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct ms_ht_dev *dev_data; struct iio_dev *indio_dev; int ret; @@ -174,7 +174,7 @@ static const struct i2c_device_id tsys02d_id[] = { MODULE_DEVICE_TABLE(i2c, tsys02d_id); static struct i2c_driver tsys02d_driver = { - .probe = tsys02d_probe, + .probe_new = tsys02d_probe, .id_table = tsys02d_id, .driver = { .name = "tsys02d", diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c index 6b05eed41612bdcb864fa837a6706a4d5d7895e9..575d725696a94030b2505454a55fd19b458d55e8 100644 --- a/drivers/iio/trigger/iio-trig-sysfs.c +++ b/drivers/iio/trigger/iio-trig-sysfs.c @@ -138,18 +138,18 @@ static int iio_sysfs_trigger_probe(int id) } if (foundit) { ret = -EINVAL; - goto out1; + goto err_unlock; } t = kmalloc(sizeof(*t), GFP_KERNEL); if (t == NULL) { ret = -ENOMEM; - goto out1; + goto err_unlock; } t->id = id; t->trig = iio_trigger_alloc(&iio_sysfs_trig_dev, "sysfstrig%d", id); if (!t->trig) { ret = -ENOMEM; - goto free_t; + goto err_free_sys_trig; } t->trig->dev.groups = iio_sysfs_trigger_attr_groups; @@ -159,17 +159,17 @@ static int iio_sysfs_trigger_probe(int id) ret = iio_trigger_register(t->trig); if (ret) - goto out2; + goto err_free_trig; list_add(&t->l, &iio_sysfs_trig_list); __module_get(THIS_MODULE); mutex_unlock(&iio_sysfs_trig_list_mut); return 0; -out2: +err_free_trig: iio_trigger_free(t->trig); -free_t: +err_free_sys_trig: kfree(t); -out1: +err_unlock: mutex_unlock(&iio_sysfs_trig_list_mut); return ret; } diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index aa36ac618e7291067aa218a8f5bce35e8266e021..a5827d11e9346a890c55804052c9bfa21076dde1 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -78,6 +78,7 @@ config INFINIBAND_VIRT_DMA def_bool !HIGHMEM if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS +if !UML source "drivers/infiniband/hw/bnxt_re/Kconfig" source "drivers/infiniband/hw/cxgb4/Kconfig" source "drivers/infiniband/hw/efa/Kconfig" @@ -85,6 +86,7 @@ source "drivers/infiniband/hw/erdma/Kconfig" source "drivers/infiniband/hw/hfi1/Kconfig" source "drivers/infiniband/hw/hns/Kconfig" source "drivers/infiniband/hw/irdma/Kconfig" +source "drivers/infiniband/hw/mana/Kconfig" source "drivers/infiniband/hw/mlx4/Kconfig" source "drivers/infiniband/hw/mlx5/Kconfig" source "drivers/infiniband/hw/mthca/Kconfig" @@ -94,6 +96,7 @@ source "drivers/infiniband/hw/qib/Kconfig" source "drivers/infiniband/hw/usnic/Kconfig" source "drivers/infiniband/hw/vmw_pvrdma/Kconfig" source "drivers/infiniband/sw/rdmavt/Kconfig" +endif # !UML source "drivers/infiniband/sw/rxe/Kconfig" source "drivers/infiniband/sw/siw/Kconfig" endif diff --git a/drivers/infiniband/core/cache.c 
b/drivers/infiniband/core/cache.c index 4084d05a45102dbbedaacf6e38d09d8231f53bac..2e91d88793265800acb86f6e8b9b320ccbc16be3 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -1422,7 +1422,7 @@ int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr, *vlan_id = vlan_dev_vlan_id(ndev); } else { /* If the netdev is upper device and if it's lower - * device is vlan device, consider vlan id of the + * device is vlan device, consider vlan id of * the lower vlan device for this gid entry. */ netdev_walk_all_lower_dev_rcu(attr->ndev, diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 1f9938a2c47522b8255239243edd26c7dacf402a..603c0aecc361444d475085016ff6cb60bab620f4 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -4094,9 +4094,18 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE; - if (cm_id_priv->responder_resources) + if (cm_id_priv->responder_resources) { + struct ib_device *ib_dev = cm_id_priv->id.device; + u64 support_flush = ib_dev->attrs.device_cap_flags & + (IB_DEVICE_FLUSH_GLOBAL | IB_DEVICE_FLUSH_PERSISTENT); + u32 flushable = support_flush ? + (IB_ACCESS_FLUSH_GLOBAL | + IB_ACCESS_FLUSH_PERSISTENT) : 0; + qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ | - IB_ACCESS_REMOTE_ATOMIC; + IB_ACCESS_REMOTE_ATOMIC | + flushable; + } qp_attr->pkey_index = cm_id_priv->av.pkey_index; if (cm_id_priv->av.port) qp_attr->port_num = cm_id_priv->av.port->port_num; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index aacd6254df77aa73aac87ee6af66abace597a37f..68721ff10255e2c60b15f9c3b5f4b66c73b1525e 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -47,7 +47,7 @@ MODULE_LICENSE("Dual BSD/GPL"); #define CMA_CM_RESPONSE_TIMEOUT 20 #define CMA_MAX_CM_RETRIES 15 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) -#define CMA_IBOE_PACKET_LIFETIME 18 +#define CMA_IBOE_PACKET_LIFETIME 16 #define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP static const char * const cma_events[] = { diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index b69e2c4e4d2a40443966836e602641a2767cf796..a666847bd7143e14e3af07e2c767b59a2a93420e 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -511,7 +511,7 @@ static void ib_device_release(struct device *device) kfree_rcu(dev, rcu_head); } -static int ib_device_uevent(struct device *device, +static int ib_device_uevent(const struct device *device, struct kobj_uevent_env *env) { if (add_uevent_var(env, "NAME=%s", dev_name(device))) @@ -524,9 +524,9 @@ static int ib_device_uevent(struct device *device, return 0; } -static const void *net_namespace(struct device *d) +static const void *net_namespace(const struct device *d) { - struct ib_core_device *coredev = + const struct ib_core_device *coredev = container_of(d, struct ib_core_device, dev); return read_pnet(&coredev->rdma_net); @@ -2159,14 +2159,16 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, return 0; } + if (old_ndev) + netdev_tracker_free(ndev, &pdata->netdev_tracker); if (ndev) - dev_hold(ndev); + netdev_hold(ndev, &pdata->netdev_tracker, GFP_ATOMIC); rcu_assign_pointer(pdata->netdev, ndev); spin_unlock_irqrestore(&pdata->netdev_lock, flags); add_ndev_hash(pdata); if (old_ndev) - dev_put(old_ndev); + 
__dev_put(old_ndev); return 0; } @@ -2199,7 +2201,7 @@ static void free_netdevs(struct ib_device *ib_dev) * comparisons after the put */ rcu_assign_pointer(pdata->netdev, NULL); - dev_put(ndev); + netdev_put(ndev, &pdata->netdev_tracker); } spin_unlock_irqrestore(&pdata->netdev_lock, flags); } @@ -2851,8 +2853,8 @@ err: static void __exit ib_core_cleanup(void) { roce_gid_mgmt_cleanup(); - nldev_exit(); rdma_nl_unregister(RDMA_NL_LS); + nldev_exit(); unregister_pernet_device(&rdma_dev_net_ops); unregister_blocking_lsm_notifier(&ibdev_lsm_nb); ib_sa_cleanup(); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 1893aa613ad734f096f0b9b3eaee346fc47ae954..674344eb8e2f48562f3c49694fd5bc86515a8e36 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -59,9 +59,6 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_qp_info *qp_info, struct trace_event_raw_ib_mad_send_template *entry) { - u16 pkey; - struct ib_device *dev = qp_info->port_priv->device; - u32 pnum = qp_info->port_priv->port_num; struct ib_ud_wr *wr = &mad_send_wr->send_wr; struct rdma_ah_attr attr = {}; @@ -69,8 +66,6 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr, /* These are common */ entry->sl = attr.sl; - ib_query_pkey(dev, pnum, wr->pkey_index, &pkey); - entry->pkey = pkey; entry->rqpn = wr->remote_qpn; entry->rqkey = wr->remote_qkey; entry->dlid = rdma_ah_get_dlid(&attr); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 12dc97067ed2b8e5c22e023101247efb4ba78ef9..d5d3e4f0de779ef1e82f06482287b3de355323e1 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -513,7 +513,7 @@ static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, /* In create_qp() port is not set yet */ if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) - return -EINVAL; + return -EMSGSIZE; ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); if (ret) @@ -552,7 +552,7 @@ static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_cm_id *cm_id = &id_priv->id; if (port && port != cm_id->port_num) - return 0; + return -EAGAIN; if (cm_id->port_num && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num)) @@ -894,6 +894,8 @@ static int fill_stat_counter_qps(struct sk_buff *msg, int ret = 0; table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); + if (!table_attr) + return -EMSGSIZE; rt = &counter->device->res[RDMA_RESTRACK_QP]; xa_lock(&rt->xa); @@ -1041,6 +1043,10 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, 0); + if (!nlh) { + err = -EMSGSIZE; + goto err_free; + } err = fill_dev_info(msg, device); if (err) @@ -1126,7 +1132,7 @@ static int _nldev_get_dumpit(struct ib_device *device, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, NLM_F_MULTI); - if (fill_dev_info(skb, device)) { + if (!nlh || fill_dev_info(skb, device)) { nlmsg_cancel(skb, nlh); goto out; } @@ -1185,6 +1191,10 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, 0); + if (!nlh) { + err = -EMSGSIZE; + goto err_free; + } err = fill_port_info(msg, device, port, sock_net(skb->sk)); if (err) @@ -1246,7 +1256,7 @@ static int 
nldev_port_get_dumpit(struct sk_buff *skb, RDMA_NLDEV_CMD_PORT_GET), 0, NLM_F_MULTI); - if (fill_port_info(skb, device, p, sock_net(skb->sk))) { + if (!nlh || fill_port_info(skb, device, p, sock_net(skb->sk))) { nlmsg_cancel(skb, nlh); goto out; } @@ -1288,6 +1298,10 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 0, 0); + if (!nlh) { + ret = -EMSGSIZE; + goto err_free; + } ret = fill_res_info(msg, device); if (ret) @@ -1319,7 +1333,7 @@ static int _nldev_res_get_dumpit(struct ib_device *device, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 0, NLM_F_MULTI); - if (fill_res_info(skb, device)) { + if (!nlh || fill_res_info(skb, device)) { nlmsg_cancel(skb, nlh); goto out; } @@ -1454,7 +1468,7 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, RDMA_NL_GET_OP(nlh->nlmsg_type)), 0, 0); - if (fill_nldev_handle(msg, device)) { + if (!nlh || fill_nldev_handle(msg, device)) { ret = -EMSGSIZE; goto err_free; } @@ -1533,7 +1547,7 @@ static int res_get_common_dumpit(struct sk_buff *skb, RDMA_NL_GET_OP(cb->nlh->nlmsg_type)), 0, NLM_F_MULTI); - if (fill_nldev_handle(skb, device)) { + if (!nlh || fill_nldev_handle(skb, device)) { ret = -EMSGSIZE; goto err; } @@ -1795,6 +1809,10 @@ static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET_CHARDEV), 0, 0); + if (!nlh) { + err = -EMSGSIZE; + goto out_nlmsg; + } data.nl_msg = msg; err = ib_get_client_nl_info(ibdev, client_name, &data); @@ -1852,6 +1870,10 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_SYS_GET), 0, 0); + if (!nlh) { + nlmsg_free(msg); + return -EMSGSIZE; + } err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE, (u8)ib_devices_shared_netns); @@ -2032,7 +2054,7 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_SET), 0, 0); - if (fill_nldev_handle(msg, device) || + if (!nlh || fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { ret = -EMSGSIZE; goto err_free_msg; @@ -2101,6 +2123,10 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_SET), 0, 0); + if (!nlh) { + ret = -EMSGSIZE; + goto err_fill; + } cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); @@ -2171,7 +2197,7 @@ static int stat_get_doit_default_counter(struct sk_buff *skb, RDMA_NLDEV_CMD_STAT_GET), 0, 0); - if (fill_nldev_handle(msg, device) || + if (!nlh || fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { ret = -EMSGSIZE; goto err_msg; @@ -2259,6 +2285,10 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET), 0, 0); + if (!nlh) { + ret = -EMSGSIZE; + goto err_msg; + } ret = rdma_counter_get_mode(device, port, &mode, &mask); if (ret) @@ -2391,7 +2421,7 @@ static int nldev_stat_get_counter_status_doit(struct sk_buff *skb, 0, 0); ret = -EMSGSIZE; - if (fill_nldev_handle(msg, device) || + if (!nlh || fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) goto err_msg; diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c index 
1f935d9f6178548548ddfe624d95b9e4ca76ecc7..01a499a8b88dbd5474542bc1c08ac0180f699777 100644 --- a/drivers/infiniband/core/restrack.c +++ b/drivers/infiniband/core/restrack.c @@ -343,8 +343,6 @@ void rdma_restrack_del(struct rdma_restrack_entry *res) rt = &dev->res[res->type]; old = xa_erase(&rt->xa, res->id); - if (res->type == RDMA_RESTRACK_MR) - return; WARN_ON(old != res); out: diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 84c53bd2a52db9f034d2cca76ea4a7c044651eca..ee59d7391568993ac24f3126d12842f2044d67ad 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -1213,6 +1213,9 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num, p->port_num = port_num; kobject_init(&p->kobj, &port_type); + if (device->port_data && is_full_dev) + device->port_data[port_num].sysfs = p; + cur_group = p->groups_list; ret = alloc_port_table_group("gids", &p->groups[0], p->attrs_list, attr->gid_tbl_len, show_port_gid); @@ -1258,9 +1261,6 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num, } list_add_tail(&p->kobj.entry, &coredev->port_list); - if (device->port_data && is_full_dev) - device->port_data[port_num].sysfs = p; - return p; err_groups: @@ -1268,6 +1268,8 @@ err_groups: err_del: kobject_del(&p->kobj); err_put: + if (device->port_data && is_full_dev) + device->port_data[port_num].sysfs = NULL; kobject_put(&p->kobj); return ERR_PTR(ret); } @@ -1276,14 +1278,17 @@ static void destroy_port(struct ib_core_device *coredev, struct ib_port *port) { bool is_full_dev = &port->ibdev->coredev == coredev; - if (port->ibdev->port_data && - port->ibdev->port_data[port->port_num].sysfs == port) - port->ibdev->port_data[port->port_num].sysfs = NULL; list_del(&port->kobj.entry); if (is_full_dev) sysfs_remove_groups(&port->kobj, port->ibdev->ops.port_groups); + sysfs_remove_groups(&port->kobj, port->groups_list); kobject_del(&port->kobj); + + if (port->ibdev->port_data && + port->ibdev->port_data[port->port_num].sysfs == port) + port->ibdev->port_data[port->port_num].sysfs = NULL; + kobject_put(&port->kobj); } diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 86d479772fbc64d4dcd1ad00d00a76dec0e58ad2..755a9c57db6f3a7e8809a5fd75b9a5182ee3a064 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -156,7 +156,7 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, struct mm_struct *mm; unsigned long npages; int pinned, ret; - unsigned int gup_flags = FOLL_WRITE; + unsigned int gup_flags = FOLL_LONGTERM; /* * If the combination of the addr and size requested for this memory @@ -210,8 +210,8 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, cur_base = addr & PAGE_MASK; - if (!umem->writable) - gup_flags |= FOLL_FORCE; + if (umem->writable) + gup_flags |= FOLL_WRITE; while (npages) { cond_resched(); @@ -219,7 +219,7 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, min_t(unsigned long, npages, PAGE_SIZE / sizeof(struct page *)), - gup_flags | FOLL_LONGTERM, page_list); + gup_flags, page_list); if (pinned < 0) { ret = pinned; goto umem_release; diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 98cb594cd9a699bbf4ddb27267a440130826efe6..f83954180a338997b7b7775d5d9ab719b5d79fa3 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1224,7 +1224,7 @@ static struct attribute 
*umad_class_dev_attrs[] = { }; ATTRIBUTE_GROUPS(umad_class_dev); -static char *umad_devnode(struct device *dev, umode_t *mode) +static char *umad_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); } diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index d544340887277368c6be58897be2f94b96ea850c..bdb179a09d77cfc35b9a13c66161dfcade4a3ff5 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -1237,7 +1237,7 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data) put_device(&uverbs_dev->dev); } -static char *uverbs_devnode(struct device *dev, umode_t *mode) +static char *uverbs_devnode(const struct device *dev, umode_t *mode) { if (mode) *mode = 0666; diff --git a/drivers/infiniband/core/uverbs_std_types_qp.c b/drivers/infiniband/core/uverbs_std_types_qp.c index dd1075466f61be67006017192a7edaf901abe052..7b4773fa4bc0bda36693465422ceac28b735116f 100644 --- a/drivers/infiniband/core/uverbs_std_types_qp.c +++ b/drivers/infiniband/core/uverbs_std_types_qp.c @@ -163,7 +163,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QP_CREATE)( UVERBS_ATTR_CREATE_QP_SRQ_HANDLE)) return -EINVAL; - /* send_cq is optinal */ + /* send_cq is optional */ if (cap.max_send_wr) { send_cq = uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE); diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile index 6b3a88046125adfa58944a22b33534f10a35d2fd..1211f4317a9f4fdab32278e2000c1b2b392e64d5 100644 --- a/drivers/infiniband/hw/Makefile +++ b/drivers/infiniband/hw/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_INFINIBAND_QIB) += qib/ obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/ obj-$(CONFIG_INFINIBAND_EFA) += efa/ obj-$(CONFIG_INFINIBAND_IRDMA) += irdma/ +obj-$(CONFIG_MANA_INFINIBAND) += mana/ obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/ obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/ diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h index 730783fbc8949217c7e4af3ebfc170411145495b..3d8c11aa23a26fd6847f76cb3293fa2795d6ec96 100644 --- a/drivers/infiniband/hw/erdma/erdma.h +++ b/drivers/infiniband/hw/erdma/erdma.h @@ -124,6 +124,7 @@ struct erdma_devattr { u32 fw_version; unsigned char peer_addr[ETH_ALEN]; + unsigned long cap_flags; int numa_node; enum erdma_cc_alg cc; @@ -189,6 +190,7 @@ struct erdma_dev { struct net_device *netdev; struct pci_dev *pdev; struct notifier_block netdev_nb; + struct workqueue_struct *reflush_wq; resource_size_t func_bar_addr; resource_size_t func_bar_len; @@ -218,7 +220,7 @@ struct erdma_dev { DECLARE_BITMAP(sdb_page, ERDMA_DWQE_TYPE0_CNT); /* * We provide max 496 uContexts that each has one SQ normal Db, - * and one directWQE db。 + * and one directWQE db. 
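+ * Allocation of these entries is tracked in the sdb_entry bitmap below.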
*/ DECLARE_BITMAP(sdb_entry, ERDMA_DWQE_TYPE1_CNT); diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c index 58e0dc5c75d1d26ae30c1536b7576d8fafca251a..cabd8678b3558963b3069304ba5372ea0233b3a0 100644 --- a/drivers/infiniband/hw/erdma/erdma_cq.c +++ b/drivers/infiniband/hw/erdma/erdma_cq.c @@ -64,6 +64,8 @@ static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = { [ERDMA_OP_REG_MR] = IB_WC_REG_MR, [ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV, [ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ, + [ERDMA_OP_ATOMIC_CAS] = IB_WC_COMP_SWAP, + [ERDMA_OP_ATOMIC_FAD] = IB_WC_FETCH_ADD, }; static const struct { diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h index e788887732e1f5e62185cc244d675ae99cebb1f4..ab371fec610c32e3ad236bac9828d4bd8098582f 100644 --- a/drivers/infiniband/hw/erdma/erdma_hw.h +++ b/drivers/infiniband/hw/erdma/erdma_hw.h @@ -145,6 +145,7 @@ enum CMDQ_RDMA_OPCODE { CMDQ_OPCODE_MODIFY_QP = 3, CMDQ_OPCODE_CREATE_CQ = 4, CMDQ_OPCODE_DESTROY_CQ = 5, + CMDQ_OPCODE_REFLUSH = 6, CMDQ_OPCODE_REG_MR = 8, CMDQ_OPCODE_DEREG_MR = 9 }; @@ -224,8 +225,7 @@ struct erdma_cmdq_create_cq_req { /* regmr cfg1 */ #define ERDMA_CMD_REGMR_PD_MASK GENMASK(31, 12) #define ERDMA_CMD_REGMR_TYPE_MASK GENMASK(7, 6) -#define ERDMA_CMD_REGMR_RIGHT_MASK GENMASK(5, 2) -#define ERDMA_CMD_REGMR_ACC_MODE_MASK GENMASK(1, 0) +#define ERDMA_CMD_REGMR_RIGHT_MASK GENMASK(5, 1) /* regmr cfg2 */ #define ERDMA_CMD_REGMR_PAGESIZE_MASK GENMASK(31, 27) @@ -302,8 +302,16 @@ struct erdma_cmdq_destroy_qp_req { u32 qpn; }; +struct erdma_cmdq_reflush_req { + u64 hdr; + u32 qpn; + u32 sq_pi; + u32 rq_pi; +}; + /* cap qword 0 definition */ #define ERDMA_CMD_DEV_CAP_MAX_CQE_MASK GENMASK_ULL(47, 40) +#define ERDMA_CMD_DEV_CAP_FLAGS_MASK GENMASK_ULL(31, 24) #define ERDMA_CMD_DEV_CAP_MAX_RECV_WR_MASK GENMASK_ULL(23, 16) #define ERDMA_CMD_DEV_CAP_MAX_MR_SIZE_MASK GENMASK_ULL(7, 0) @@ -315,6 +323,10 @@ struct erdma_cmdq_destroy_qp_req { #define ERDMA_NQP_PER_QBLOCK 1024 +enum { + ERDMA_DEV_CAP_FLAGS_ATOMIC = 1 << 7, +}; + #define ERDMA_CMD_INFO0_FW_VER_MASK GENMASK_ULL(31, 0) /* CQE hdr */ @@ -340,9 +352,9 @@ struct erdma_cqe { }; struct erdma_sge { - __aligned_le64 laddr; + __aligned_le64 addr; __le32 length; - __le32 lkey; + __le32 key; }; /* Receive Queue Element */ @@ -370,8 +382,7 @@ struct erdma_rqe { #define ERDMA_SQE_HDR_WQEBB_INDEX_MASK GENMASK_ULL(15, 0) /* REG MR attrs */ -#define ERDMA_SQE_MR_MODE_MASK GENMASK(1, 0) -#define ERDMA_SQE_MR_ACCESS_MASK GENMASK(5, 2) +#define ERDMA_SQE_MR_ACCESS_MASK GENMASK(5, 1) #define ERDMA_SQE_MR_MTT_TYPE_MASK GENMASK(7, 6) #define ERDMA_SQE_MR_MTT_CNT_MASK GENMASK(31, 12) @@ -410,6 +421,16 @@ struct erdma_readreq_sqe { __le32 rsvd; }; +struct erdma_atomic_sqe { + __le64 hdr; + __le64 rsvd; + __le64 fetchadd_swap_data; + __le64 cmp_data; + + struct erdma_sge remote; + struct erdma_sge sgl; +}; + struct erdma_reg_mr_sqe { __le64 hdr; __le64 addr; @@ -469,7 +490,9 @@ enum erdma_opcode { ERDMA_OP_REG_MR = 14, ERDMA_OP_LOCAL_INV = 15, ERDMA_OP_READ_WITH_INV = 16, - ERDMA_NUM_OPCODES = 17, + ERDMA_OP_ATOMIC_CAS = 17, + ERDMA_OP_ATOMIC_FAD = 18, + ERDMA_NUM_OPCODES = 19, ERDMA_OP_INVALID = ERDMA_NUM_OPCODES + 1 }; diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c index 49778bb294ae4386095bddf08149454f70875d07..5dc31e5df5cba78ca1025120186fbe891e4551c3 100644 --- a/drivers/infiniband/hw/erdma/erdma_main.c +++ b/drivers/infiniband/hw/erdma/erdma_main.c @@ -374,6 +374,7 @@ 
static int erdma_dev_attrs_init(struct erdma_dev *dev) dev->attrs.max_qp = ERDMA_NQP_PER_QBLOCK * ERDMA_GET_CAP(QBLOCK, cap1); dev->attrs.max_mr = dev->attrs.max_qp << 1; dev->attrs.max_cq = dev->attrs.max_qp << 1; + dev->attrs.cap_flags = ERDMA_GET_CAP(FLAGS, cap0); dev->attrs.max_send_wr = ERDMA_MAX_SEND_WR; dev->attrs.max_ord = ERDMA_MAX_ORD; @@ -520,13 +521,22 @@ static int erdma_ib_device_add(struct pci_dev *pdev) u64_to_ether_addr(mac, dev->attrs.peer_addr); + dev->reflush_wq = alloc_workqueue("erdma-reflush-wq", WQ_UNBOUND, + WQ_UNBOUND_MAX_ACTIVE); + if (!dev->reflush_wq) { + ret = -ENOMEM; + goto err_alloc_workqueue; + } + ret = erdma_device_register(dev); if (ret) - goto err_out; + goto err_register; return 0; -err_out: +err_register: + destroy_workqueue(dev->reflush_wq); +err_alloc_workqueue: xa_destroy(&dev->qp_xa); xa_destroy(&dev->cq_xa); @@ -542,6 +552,7 @@ static void erdma_ib_device_remove(struct pci_dev *pdev) unregister_netdevice_notifier(&dev->netdev_nb); ib_unregister_device(&dev->ibdev); + destroy_workqueue(dev->reflush_wq); erdma_res_cb_free(dev); xa_destroy(&dev->qp_xa); xa_destroy(&dev->cq_xa); diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c index 5fe1a339a43543904f528dacbec6d748d70498e3..d088d6bef431afa8c6936219dd2f60a6ce46942e 100644 --- a/drivers/infiniband/hw/erdma/erdma_qp.c +++ b/drivers/infiniband/hw/erdma/erdma_qp.c @@ -120,6 +120,7 @@ static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp, int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs, enum erdma_qp_attr_mask mask) { + bool need_reflush = false; int drop_conn, ret = 0; if (!mask) @@ -135,6 +136,7 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs, ret = erdma_modify_qp_state_to_rts(qp, attrs, mask); } else if (attrs->state == ERDMA_QP_STATE_ERROR) { qp->attrs.state = ERDMA_QP_STATE_ERROR; + need_reflush = true; if (qp->cep) { erdma_cep_put(qp->cep); qp->cep = NULL; @@ -145,17 +147,12 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs, case ERDMA_QP_STATE_RTS: drop_conn = 0; - if (attrs->state == ERDMA_QP_STATE_CLOSING) { + if (attrs->state == ERDMA_QP_STATE_CLOSING || + attrs->state == ERDMA_QP_STATE_TERMINATE || + attrs->state == ERDMA_QP_STATE_ERROR) { ret = erdma_modify_qp_state_to_stop(qp, attrs, mask); drop_conn = 1; - } else if (attrs->state == ERDMA_QP_STATE_TERMINATE) { - qp->attrs.state = ERDMA_QP_STATE_TERMINATE; - ret = erdma_modify_qp_state_to_stop(qp, attrs, mask); - drop_conn = 1; - } else if (attrs->state == ERDMA_QP_STATE_ERROR) { - ret = erdma_modify_qp_state_to_stop(qp, attrs, mask); - qp->attrs.state = ERDMA_QP_STATE_ERROR; - drop_conn = 1; + need_reflush = true; } if (drop_conn) @@ -180,6 +177,12 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs, break; } + if (need_reflush && !ret && rdma_is_kernel_res(&qp->ibqp.res)) { + qp->flags |= ERDMA_QP_IN_FLUSHING; + mod_delayed_work(qp->dev->reflush_wq, &qp->reflush_dwork, + usecs_to_jiffies(100)); + } + return ret; } @@ -285,15 +288,16 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi, u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset; u32 idx = *pi & (qp->attrs.sq_size - 1); enum ib_wr_opcode op = send_wr->opcode; + struct erdma_atomic_sqe *atomic_sqe; struct erdma_readreq_sqe *read_sqe; struct erdma_reg_mr_sqe *regmr_sge; struct erdma_write_sqe *write_sqe; struct erdma_send_sqe *send_sqe; struct ib_rdma_wr *rdma_wr; - struct erdma_mr *mr; + struct 
erdma_sge *sge; __le32 *length_field; + struct erdma_mr *mr; u64 wqe_hdr, *entry; - struct ib_sge *sge; u32 attrs; int ret; @@ -360,9 +364,9 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi, sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1, qp->attrs.sq_size, SQEBB_SHIFT); - sge->addr = rdma_wr->remote_addr; - sge->lkey = rdma_wr->rkey; - sge->length = send_wr->sg_list[0].length; + sge->addr = cpu_to_le64(rdma_wr->remote_addr); + sge->key = cpu_to_le32(rdma_wr->rkey); + sge->length = cpu_to_le32(send_wr->sg_list[0].length); wqe_size = sizeof(struct erdma_readreq_sqe) + send_wr->num_sge * sizeof(struct ib_sge); @@ -397,8 +401,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi, regmr_sge->addr = cpu_to_le64(mr->ibmr.iova); regmr_sge->length = cpu_to_le32(mr->ibmr.length); regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key); - attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) | - FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) | + attrs = FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) | FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK, mr->mem.mtt_nents); @@ -424,6 +427,35 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi, regmr_sge->stag = cpu_to_le32(send_wr->ex.invalidate_rkey); wqe_size = sizeof(struct erdma_reg_mr_sqe); goto out; + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + atomic_sqe = (struct erdma_atomic_sqe *)entry; + if (op == IB_WR_ATOMIC_CMP_AND_SWP) { + wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, + ERDMA_OP_ATOMIC_CAS); + atomic_sqe->fetchadd_swap_data = + cpu_to_le64(atomic_wr(send_wr)->swap); + atomic_sqe->cmp_data = + cpu_to_le64(atomic_wr(send_wr)->compare_add); + } else { + wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, + ERDMA_OP_ATOMIC_FAD); + atomic_sqe->fetchadd_swap_data = + cpu_to_le64(atomic_wr(send_wr)->compare_add); + } + + sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1, + qp->attrs.sq_size, SQEBB_SHIFT); + sge->addr = cpu_to_le64(atomic_wr(send_wr)->remote_addr); + sge->key = cpu_to_le32(atomic_wr(send_wr)->rkey); + sge++; + + sge->addr = cpu_to_le64(send_wr->sg_list[0].addr); + sge->key = cpu_to_le32(send_wr->sg_list[0].lkey); + sge->length = cpu_to_le32(send_wr->sg_list[0].length); + + wqe_size = sizeof(*atomic_sqe); + goto out; default: return -EOPNOTSUPP; } @@ -498,6 +530,10 @@ int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr, } spin_unlock_irqrestore(&qp->lock, flags); + if (unlikely(qp->flags & ERDMA_QP_IN_FLUSHING)) + mod_delayed_work(qp->dev->reflush_wq, &qp->reflush_dwork, + usecs_to_jiffies(100)); + return ret; } @@ -551,5 +587,10 @@ int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr, } spin_unlock_irqrestore(&qp->lock, flags); + + if (unlikely(qp->flags & ERDMA_QP_IN_FLUSHING)) + mod_delayed_work(qp->dev->reflush_wq, &qp->reflush_dwork, + usecs_to_jiffies(100)); + return ret; } diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c index 62be98e2b94142f916f25fc9b7b602a59554deaa..5dab1e87975ba225c00f42a55a487c73bb2a5200 100644 --- a/drivers/infiniband/hw/erdma/erdma_verbs.c +++ b/drivers/infiniband/hw/erdma/erdma_verbs.c @@ -118,8 +118,7 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr) FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, mr->ibmr.lkey >> 8); req.cfg1 = FIELD_PREP(ERDMA_CMD_REGMR_PD_MASK, pd->pdn) | FIELD_PREP(ERDMA_CMD_REGMR_TYPE_MASK, mr->type) | - FIELD_PREP(ERDMA_CMD_REGMR_RIGHT_MASK, mr->access) | - FIELD_PREP(ERDMA_CMD_REGMR_ACC_MODE_MASK, 0); + 
FIELD_PREP(ERDMA_CMD_REGMR_RIGHT_MASK, mr->access); req.cfg2 = FIELD_PREP(ERDMA_CMD_REGMR_PAGESIZE_MASK, ilog2(mr->mem.page_size)) | FIELD_PREP(ERDMA_CMD_REGMR_MTT_TYPE_MASK, mr->mem.mtt_type) | @@ -289,6 +288,10 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr, attr->max_mw = dev->attrs.max_mw; attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA; attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT; + + if (dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_ATOMIC) + attr->atomic_cap = IB_ATOMIC_GLOB; + attr->fw_ver = dev->attrs.fw_version; if (dev->netdev) @@ -376,6 +379,21 @@ int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) return 0; } +static void erdma_flush_worker(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct erdma_qp *qp = + container_of(dwork, struct erdma_qp, reflush_dwork); + struct erdma_cmdq_reflush_req req; + + erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, + CMDQ_OPCODE_REFLUSH); + req.qpn = QP_ID(qp); + req.sq_pi = qp->kern_qp.sq_pi; + req.rq_pi = qp->kern_qp.rq_pi; + erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL); +} + static int erdma_qp_validate_cap(struct erdma_dev *dev, struct ib_qp_init_attr *attrs) { @@ -732,6 +750,7 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, qp->attrs.max_send_sge = attrs->cap.max_send_sge; qp->attrs.max_recv_sge = attrs->cap.max_recv_sge; qp->attrs.state = ERDMA_QP_STATE_IDLE; + INIT_DELAYED_WORK(&qp->reflush_dwork, erdma_flush_worker); ret = create_qp_cmd(dev, qp); if (ret) @@ -1025,6 +1044,8 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE); up_write(&qp->state_lock); + cancel_delayed_work_sync(&qp->reflush_dwork); + erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_DESTROY_QP); req.qpn = QP_ID(qp); diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h index ab6380635e9e64b35f6f757df671ed9bb19af13e..e0a993bc032a44aaa76e399ead33ccf8f6f0734e 100644 --- a/drivers/infiniband/hw/erdma/erdma_verbs.h +++ b/drivers/infiniband/hw/erdma/erdma_verbs.h @@ -71,16 +71,18 @@ struct erdma_pd { #define ERDMA_MR_INLINE_MTT 0 #define ERDMA_MR_INDIRECT_MTT 1 -#define ERDMA_MR_ACC_LR BIT(0) -#define ERDMA_MR_ACC_LW BIT(1) -#define ERDMA_MR_ACC_RR BIT(2) -#define ERDMA_MR_ACC_RW BIT(3) +#define ERDMA_MR_ACC_RA BIT(0) +#define ERDMA_MR_ACC_LR BIT(1) +#define ERDMA_MR_ACC_LW BIT(2) +#define ERDMA_MR_ACC_RR BIT(3) +#define ERDMA_MR_ACC_RW BIT(4) static inline u8 to_erdma_access_flags(int access) { return (access & IB_ACCESS_REMOTE_READ ? ERDMA_MR_ACC_RR : 0) | (access & IB_ACCESS_LOCAL_WRITE ? ERDMA_MR_ACC_LW : 0) | - (access & IB_ACCESS_REMOTE_WRITE ? ERDMA_MR_ACC_RW : 0); + (access & IB_ACCESS_REMOTE_WRITE ? ERDMA_MR_ACC_RW : 0) | + (access & IB_ACCESS_REMOTE_ATOMIC ? 
ERDMA_MR_ACC_RA : 0); } struct erdma_mem { @@ -171,6 +173,10 @@ enum erdma_qp_attr_mask { ERDMA_QP_ATTR_MPA = (1 << 7) }; +enum erdma_qp_flags { + ERDMA_QP_IN_FLUSHING = (1 << 0), +}; + struct erdma_qp_attrs { enum erdma_qp_state state; enum erdma_cc_alg cc; /* Congestion control algorithm */ @@ -195,6 +201,9 @@ struct erdma_qp { struct erdma_cep *cep; struct rw_semaphore state_lock; + unsigned long flags; + struct delayed_work reflush_dwork; + union { struct erdma_kqp kern_qp; struct erdma_uqp user_qp; diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 877f8e84a672a4ccd5278aef24b3582400f9e31a..77ee77d4000fbfdedde9978473a1865a998eec9b 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -177,6 +177,8 @@ out: for (node = 0; node < node_affinity.num_possible_nodes; node++) hfi1_per_node_cntr[node] = 1; + pci_dev_put(dev); + return 0; } diff --git a/drivers/infiniband/hw/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c index 8ceff7141bafeb871bc9d71cc232d11c83818bb3..1f44960321704a31df18d7b1fd033e15b5d5d1e4 100644 --- a/drivers/infiniband/hw/hfi1/device.c +++ b/drivers/infiniband/hw/hfi1/device.c @@ -72,7 +72,7 @@ const char *class_name(void) return hfi1_class_name; } -static char *hfi1_devnode(struct device *dev, umode_t *mode) +static char *hfi1_devnode(const struct device *dev, umode_t *mode) { if (mode) *mode = 0600; @@ -85,7 +85,7 @@ static const char *class_name_user(void) return hfi1_class_name_user; } -static char *hfi1_user_devnode(struct device *dev, umode_t *mode) +static char *hfi1_user_devnode(const struct device *dev, umode_t *mode) { if (mode) *mode = 0666; diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 8e71bef9d982652fb5018ef877ddfe048eab426e..bcc6bc0540f0330caed530b3f47a5b836e4387c7 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -112,7 +112,7 @@ static int hfi1_caps_get(char *buffer, const struct kernel_param *kp) cap_mask &= ~HFI1_CAP_LOCKED_SMASK; cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT); - return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask); + return sysfs_emit(buffer, "0x%lx\n", cap_mask); } struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi) diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c index 1d77514ebbee0dad6c3162c8b86fc45d5763be46..0c0cef5b1e0e555afdf9e5dc577e5ade38009d09 100644 --- a/drivers/infiniband/hw/hfi1/firmware.c +++ b/drivers/infiniband/hw/hfi1/firmware.c @@ -1743,6 +1743,7 @@ int parse_platform_config(struct hfi1_devdata *dd) if (!dd->platform_config.data) { dd_dev_err(dd, "%s: Missing config file\n", __func__); + ret = -EINVAL; goto bail; } ptr = (u32 *)dd->platform_config.data; @@ -1751,6 +1752,7 @@ int parse_platform_config(struct hfi1_devdata *dd) ptr++; if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) { dd_dev_err(dd, "%s: Bad config file\n", __func__); + ret = -EINVAL; goto bail; } @@ -1774,6 +1776,7 @@ int parse_platform_config(struct hfi1_devdata *dd) if (file_length > dd->platform_config.size) { dd_dev_info(dd, "%s:File claims to be larger than read size\n", __func__); + ret = -EINVAL; goto bail; } else if (file_length < dd->platform_config.size) { dd_dev_info(dd, @@ -1794,6 +1797,7 @@ int parse_platform_config(struct hfi1_devdata *dd) dd_dev_err(dd, "%s: Failed validation at offset %ld\n", __func__, (ptr - (u32 *) dd->platform_config.data)); + ret = -EINVAL; goto bail; } @@ -1837,6 +1841,7 @@ int 
parse_platform_config(struct hfi1_devdata *dd) __func__, table_type, (ptr - (u32 *) dd->platform_config.data)); + ret = -EINVAL; goto bail; /* We don't trust this file now */ } pcfgcache->config_tables[table_type].table = ptr; @@ -1856,6 +1861,7 @@ int parse_platform_config(struct hfi1_devdata *dd) __func__, table_type, (ptr - (u32 *)dd->platform_config.data)); + ret = -EINVAL; goto bail; /* We don't trust this file now */ } pcfgcache->config_tables[table_type].table_metadata = diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 4146a2113a9540ea0e7435364c6ee5b7ef158beb..e5e783c45810b2a0135e7eeb977f609f939bcb26 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -2437,9 +2437,9 @@ struct opa_port_data_counters_msg { __be64 port_vl_xmit_wait_data; __be64 port_vl_rcv_bubble; __be64 port_vl_mark_fecn; - } vls[0]; + } vls[]; /* array size defined by #bits set in vl_select_mask*/ - } port[1]; /* array size defined by #ports in attribute modifier */ + } port; }; struct opa_port_error_counters64_msg { @@ -2470,9 +2470,9 @@ struct opa_port_error_counters64_msg { u8 reserved3[7]; struct _vls_ectrs { __be64 port_vl_xmit_discards; - } vls[0]; + } vls[]; /* array size defined by #bits set in vl_select_mask */ - } port[1]; /* array size defined by #ports in attribute modifier */ + } port; }; struct opa_port_error_info_msg { @@ -2543,7 +2543,7 @@ struct opa_port_error_info_msg { u8 error_info; } __packed fm_config_ei; __u32 reserved9; - } port[1]; /* actual array size defined by #ports in attr modifier */ + } port; }; /* opa_port_error_info_msg error_info_select_mask bit definitions */ @@ -2966,7 +2966,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, } /* Sanity check */ - response_data_size = struct_size(req, port[0].vls, num_vls); + response_data_size = struct_size(req, port.vls, num_vls); if (response_data_size > sizeof(pmp->data)) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; @@ -2986,7 +2986,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, return reply((struct ib_mad_hdr *)pmp); } - rsp = &req->port[0]; + rsp = &req->port; memset(rsp, 0, sizeof(*rsp)); rsp->port_number = port; @@ -3182,7 +3182,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, return reply((struct ib_mad_hdr *)pmp); } - response_data_size = struct_size(req, port[0].vls, num_vls); + response_data_size = struct_size(req, port.vls, num_vls); if (response_data_size > sizeof(pmp->data)) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; @@ -3201,7 +3201,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, return reply((struct ib_mad_hdr *)pmp); } - rsp = &req->port[0]; + rsp = &req->port; ibp = to_iport(ibdev, port_num); ppd = ppd_from_ibp(ibp); @@ -3340,7 +3340,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp, u64 reg; req = (struct opa_port_error_info_msg *)pmp->data; - rsp = &req->port[0]; + rsp = &req->port; num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod)); num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3])); @@ -3590,7 +3590,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp, u32 error_info_select; req = (struct opa_port_error_info_msg *)pmp->data; - rsp = &req->port[0]; + rsp = &req->port; num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod)); num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3])); diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c index 
3dfa5aff251256624541eba3926b5f009c0f5632..720d4c85c9c93fd09b87143e53e394270725fb8a 100644 --- a/drivers/infiniband/hw/hfi1/netdev_rx.c +++ b/drivers/infiniband/hw/hfi1/netdev_rx.c @@ -216,7 +216,7 @@ static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx) * right now. */ set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state); - netif_napi_add_weight(dev, &rxq->napi, hfi1_netdev_rx_napi, 64); + netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi); rc = msix_netdev_request_rcd_irq(rxq->rcd); if (rc) goto bail_context_irq_failure; diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 723e55a7de8d9c8b9d7c57df08c6fcb9d72a2001..f701cc86896b38379dd5fdc180f813d9916a4176 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -202,6 +202,7 @@ struct hns_roce_ucontext { struct list_head page_list; struct mutex page_mutex; struct hns_user_mmap_entry *db_mmap_entry; + u32 config; }; struct hns_roce_pd { @@ -334,6 +335,7 @@ struct hns_roce_wq { u32 head; u32 tail; void __iomem *db_reg; + u32 ext_sge_cnt; }; struct hns_roce_sge { @@ -635,6 +637,7 @@ struct hns_roce_qp { struct list_head rq_node; /* all recv qps are on a list */ struct list_head sq_node; /* all send qps are on a list */ struct hns_user_mmap_entry *dwqe_mmap_entry; + u32 config; }; struct hns_roce_ib_iboe { diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 1435fe2ea176f31c8796f17bf71398b6cab8742d..b2421883993b104ad1e48c5bdd01a501d66eccb9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -192,7 +192,6 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, unsigned int *sge_idx, u32 msg_len) { struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev; - unsigned int ext_sge_sz = qp->sq.max_gs * HNS_ROCE_SGE_SIZE; unsigned int left_len_in_pg; unsigned int idx = *sge_idx; unsigned int i = 0; @@ -200,7 +199,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, void *addr; void *dseg; - if (msg_len > ext_sge_sz) { + if (msg_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE) { ibdev_err(ibdev, "no enough extended sge space for inline data.\n"); return -EINVAL; @@ -1274,6 +1273,30 @@ static void update_cmdq_status(struct hns_roce_dev *hr_dev) hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR; } +static int hns_roce_cmd_err_convert_errno(u16 desc_ret) +{ + struct hns_roce_cmd_errcode errcode_table[] = { + {CMD_EXEC_SUCCESS, 0}, + {CMD_NO_AUTH, -EPERM}, + {CMD_NOT_EXIST, -EOPNOTSUPP}, + {CMD_CRQ_FULL, -EXFULL}, + {CMD_NEXT_ERR, -ENOSR}, + {CMD_NOT_EXEC, -ENOTBLK}, + {CMD_PARA_ERR, -EINVAL}, + {CMD_RESULT_ERR, -ERANGE}, + {CMD_TIMEOUT, -ETIME}, + {CMD_HILINK_ERR, -ENOLINK}, + {CMD_INFO_ILLEGAL, -ENXIO}, + {CMD_INVALID, -EBADR}, + }; + u16 i; + + for (i = 0; i < ARRAY_SIZE(errcode_table); i++) + if (desc_ret == errcode_table[i].return_status) + return errcode_table[i].errno; + return -EIO; +} + static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, struct hns_roce_cmq_desc *desc, int num) { @@ -1319,7 +1342,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, dev_err_ratelimited(hr_dev->dev, "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n", desc->opcode, desc_ret); - ret = -EIO; + ret = hns_roce_cmd_err_convert_errno(desc_ret); } } else { /* FW/HW reset or incorrect number of desc */ @@ -2024,13 +2047,14 @@ static void set_default_caps(struct hns_roce_dev *hr_dev) caps->flags |= 
HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW | HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR | - HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL | HNS_ROCE_CAP_FLAG_XRC; + HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL; caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM; if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { caps->flags |= HNS_ROCE_CAP_FLAG_STASH | - HNS_ROCE_CAP_FLAG_DIRECT_WQE; + HNS_ROCE_CAP_FLAG_DIRECT_WQE | + HNS_ROCE_CAP_FLAG_XRC; caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE; } else { caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE; @@ -2342,6 +2366,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) caps->wqe_sge_hop_num = hr_reg_read(resp_d, PF_CAPS_D_EX_SGE_HOP_NUM); caps->wqe_rq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_RQWQE_HOP_NUM); + if (!(caps->page_size_cap & PAGE_SIZE)) + caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED; + return 0; } @@ -2631,31 +2658,124 @@ static void free_dip_list(struct hns_roce_dev *hr_dev) spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags); } -static void free_mr_exit(struct hns_roce_dev *hr_dev) +static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_pd *hr_pd; + struct ib_pd *pd; + + hr_pd = kzalloc(sizeof(*hr_pd), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(hr_pd)) + return NULL; + pd = &hr_pd->ibpd; + pd->device = ibdev; + + if (hns_roce_alloc_pd(pd, NULL)) { + ibdev_err(ibdev, "failed to create pd for free mr.\n"); + kfree(hr_pd); + return NULL; + } + free_mr->rsv_pd = to_hr_pd(pd); + free_mr->rsv_pd->ibpd.device = &hr_dev->ib_dev; + free_mr->rsv_pd->ibpd.uobject = NULL; + free_mr->rsv_pd->ibpd.__internal_mr = NULL; + atomic_set(&free_mr->rsv_pd->ibpd.usecnt, 0); + + return pd; +} + +static struct ib_cq *free_mr_init_cq(struct hns_roce_dev *hr_dev) { struct hns_roce_v2_priv *priv = hr_dev->priv; struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; + struct ib_device *ibdev = &hr_dev->ib_dev; + struct ib_cq_init_attr cq_init_attr = {}; + struct hns_roce_cq *hr_cq; + struct ib_cq *cq; + + cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM; + + hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(hr_cq)) + return NULL; + + cq = &hr_cq->ib_cq; + cq->device = ibdev; + + if (hns_roce_create_cq(cq, &cq_init_attr, NULL)) { + ibdev_err(ibdev, "failed to create cq for free mr.\n"); + kfree(hr_cq); + return NULL; + } + free_mr->rsv_cq = to_hr_cq(cq); + free_mr->rsv_cq->ib_cq.device = &hr_dev->ib_dev; + free_mr->rsv_cq->ib_cq.uobject = NULL; + free_mr->rsv_cq->ib_cq.comp_handler = NULL; + free_mr->rsv_cq->ib_cq.event_handler = NULL; + free_mr->rsv_cq->ib_cq.cq_context = NULL; + atomic_set(&free_mr->rsv_cq->ib_cq.usecnt, 0); + + return cq; +} + +static int free_mr_init_qp(struct hns_roce_dev *hr_dev, struct ib_cq *cq, + struct ib_qp_init_attr *init_attr, int i) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_qp *hr_qp; + struct ib_qp *qp; int ret; + + hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(hr_qp)) + return -ENOMEM; + + qp = &hr_qp->ibqp; + qp->device = ibdev; + + ret = hns_roce_create_qp(qp, init_attr, NULL); + if (ret) { + ibdev_err(ibdev, "failed to create qp for free mr.\n"); + kfree(hr_qp); + return ret; + } + + free_mr->rsv_qp[i] = hr_qp; + free_mr->rsv_qp[i]->ibqp.recv_cq = cq; + 
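/* every reserved loopback QP in the free_mr flow posts and polls on the same reserved CQ */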
free_mr->rsv_qp[i]->ibqp.send_cq = cq; + + return 0; +} + +static void free_mr_exit(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; + struct ib_qp *qp; int i; for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { if (free_mr->rsv_qp[i]) { - ret = ib_destroy_qp(free_mr->rsv_qp[i]); - if (ret) - ibdev_err(&hr_dev->ib_dev, - "failed to destroy qp in free mr.\n"); - + qp = &free_mr->rsv_qp[i]->ibqp; + hns_roce_v2_destroy_qp(qp, NULL); + kfree(free_mr->rsv_qp[i]); free_mr->rsv_qp[i] = NULL; } } if (free_mr->rsv_cq) { - ib_destroy_cq(free_mr->rsv_cq); + hns_roce_destroy_cq(&free_mr->rsv_cq->ib_cq, NULL); + kfree(free_mr->rsv_cq); free_mr->rsv_cq = NULL; } if (free_mr->rsv_pd) { - ib_dealloc_pd(free_mr->rsv_pd); + hns_roce_dealloc_pd(&free_mr->rsv_pd->ibpd, NULL); + kfree(free_mr->rsv_pd); free_mr->rsv_pd = NULL; } } @@ -2664,55 +2784,46 @@ static int free_mr_alloc_res(struct hns_roce_dev *hr_dev) { struct hns_roce_v2_priv *priv = hr_dev->priv; struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; - struct ib_device *ibdev = &hr_dev->ib_dev; - struct ib_cq_init_attr cq_init_attr = {}; struct ib_qp_init_attr qp_init_attr = {}; struct ib_pd *pd; struct ib_cq *cq; - struct ib_qp *qp; int ret; int i; - pd = ib_alloc_pd(ibdev, 0); - if (IS_ERR(pd)) { - ibdev_err(ibdev, "failed to create pd for free mr.\n"); - return PTR_ERR(pd); - } - free_mr->rsv_pd = pd; + pd = free_mr_init_pd(hr_dev); + if (!pd) + return -ENOMEM; - cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM; - cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_init_attr); - if (IS_ERR(cq)) { - ibdev_err(ibdev, "failed to create cq for free mr.\n"); - ret = PTR_ERR(cq); - goto create_failed; + cq = free_mr_init_cq(hr_dev); + if (!cq) { + ret = -ENOMEM; + goto create_failed_cq; } - free_mr->rsv_cq = cq; qp_init_attr.qp_type = IB_QPT_RC; qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; - qp_init_attr.send_cq = free_mr->rsv_cq; - qp_init_attr.recv_cq = free_mr->rsv_cq; + qp_init_attr.send_cq = cq; + qp_init_attr.recv_cq = cq; for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { qp_init_attr.cap.max_send_wr = HNS_ROCE_FREE_MR_USED_SQWQE_NUM; qp_init_attr.cap.max_send_sge = HNS_ROCE_FREE_MR_USED_SQSGE_NUM; qp_init_attr.cap.max_recv_wr = HNS_ROCE_FREE_MR_USED_RQWQE_NUM; qp_init_attr.cap.max_recv_sge = HNS_ROCE_FREE_MR_USED_RQSGE_NUM; - qp = ib_create_qp(free_mr->rsv_pd, &qp_init_attr); - if (IS_ERR(qp)) { - ibdev_err(ibdev, "failed to create qp for free mr.\n"); - ret = PTR_ERR(qp); - goto create_failed; - } - - free_mr->rsv_qp[i] = qp; + ret = free_mr_init_qp(hr_dev, cq, &qp_init_attr, i); + if (ret) + goto create_failed_qp; } return 0; -create_failed: - free_mr_exit(hr_dev); +create_failed_qp: + hns_roce_destroy_cq(cq, NULL); + kfree(cq); + +create_failed_cq: + hns_roce_dealloc_pd(pd, NULL); + kfree(pd); return ret; } @@ -2728,14 +2839,17 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev, int mask; int ret; - hr_qp = to_hr_qp(free_mr->rsv_qp[sl_num]); + hr_qp = to_hr_qp(&free_mr->rsv_qp[sl_num]->ibqp); hr_qp->free_mr_en = 1; + hr_qp->ibqp.device = ibdev; + hr_qp->ibqp.qp_type = IB_QPT_RC; mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS; attr->qp_state = IB_QPS_INIT; attr->port_num = 1; attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE; - ret = ib_modify_qp(&hr_qp->ibqp, attr, mask); + ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT, + IB_QPS_INIT); if (ret) { ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n", 
ret); @@ -2756,7 +2870,8 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev, rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num); - ret = ib_modify_qp(&hr_qp->ibqp, attr, mask); + ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT, + IB_QPS_RTR); hr_dev->loop_idc = loopback; if (ret) { ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n", @@ -2770,7 +2885,8 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev, attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN; attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT; attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT; - ret = ib_modify_qp(&hr_qp->ibqp, attr, mask); + ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_RTR, + IB_QPS_RTS); if (ret) ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n", ret); @@ -3186,7 +3302,8 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, int i, count; count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, - ARRAY_SIZE(pages), &pbl_ba); + min_t(int, ARRAY_SIZE(pages), mr->npages), + &pbl_ba); if (count < 1) { ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n", count); @@ -3414,7 +3531,7 @@ static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev) mutex_lock(&free_mr->mutex); for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { - hr_qp = to_hr_qp(free_mr->rsv_qp[i]); + hr_qp = free_mr->rsv_qp[i]; ret = free_mr_post_send_lp_wqe(hr_qp); if (ret) { @@ -3429,7 +3546,7 @@ static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev) end = msecs_to_jiffies(HNS_ROCE_V2_FREE_MR_TIMEOUT) + jiffies; while (cqe_cnt) { - npolled = hns_roce_v2_poll_cq(free_mr->rsv_cq, cqe_cnt, wc); + npolled = hns_roce_v2_poll_cq(&free_mr->rsv_cq->ib_cq, cqe_cnt, wc); if (npolled < 0) { ibdev_err(ibdev, "failed to poll cqe for free mr, remain %d cqe.\n", @@ -5375,6 +5492,8 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, rdma_ah_set_sl(&qp_attr->ah_attr, hr_reg_read(&context, QPC_SL)); + rdma_ah_set_port_num(&qp_attr->ah_attr, hr_qp->port + 1); + rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH); grh->flow_label = hr_reg_read(&context, QPC_FL); grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX); grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT); @@ -5468,7 +5587,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, return ret; } -static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) +int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index c7bf2d52c1cdb254ffe3dbbacfcf061944679909..b1b3e1e0b84e55d8774285e75a8760204e8cad6a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -272,6 +272,11 @@ enum hns_roce_cmd_return_status { CMD_OTHER_ERR = 0xff }; +struct hns_roce_cmd_errcode { + enum hns_roce_cmd_return_status return_status; + int errno; +}; + enum hns_roce_sgid_type { GID_TYPE_FLAG_ROCE_V1 = 0, GID_TYPE_FLAG_ROCE_V2_IPV4, @@ -1327,9 +1332,9 @@ struct hns_roce_link_table { #define HNS_ROCE_EXT_LLM_MIN_PAGES(que_num) ((que_num) * 4 + 2) struct hns_roce_v2_free_mr { - struct ib_qp *rsv_qp[HNS_ROCE_FREE_MR_USED_QP_NUM]; - struct ib_cq *rsv_cq; - struct ib_pd *rsv_pd; + struct hns_roce_qp *rsv_qp[HNS_ROCE_FREE_MR_USED_QP_NUM]; + struct hns_roce_cq *rsv_cq; + struct hns_roce_pd *rsv_pd; struct mutex mutex; }; @@ -1459,6 +1464,8 
@@ struct hns_roce_sccc_clr_done { __le32 rsv[5]; }; +int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); + static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2], void __iomem *dest) { diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index dcf89689a4c628d908af905131b01f54716caf37..8ba68ac12388de16e4fff6825a5965a79a8ff5b7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -354,10 +354,11 @@ static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx) static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) { - int ret; struct hns_roce_ucontext *context = to_hr_ucontext(uctx); - struct hns_roce_ib_alloc_ucontext_resp resp = {}; struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device); + struct hns_roce_ib_alloc_ucontext_resp resp = {}; + struct hns_roce_ib_alloc_ucontext ucmd = {}; + int ret; if (!hr_dev->active) return -EAGAIN; @@ -365,6 +366,19 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, resp.qp_tab_size = hr_dev->caps.num_qps; resp.srq_tab_size = hr_dev->caps.num_srqs; + ret = ib_copy_from_udata(&ucmd, udata, + min(udata->inlen, sizeof(ucmd))); + if (ret) + return ret; + + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) + context->config = ucmd.config & HNS_ROCE_EXSGE_FLAGS; + + if (context->config & HNS_ROCE_EXSGE_FLAGS) { + resp.config |= HNS_ROCE_RSP_EXSGE_FLAGS; + resp.max_inline_data = hr_dev->caps.max_sq_inline; + } + ret = hns_roce_uar_alloc(hr_dev, &context->uar); if (ret) goto error_fail_uar_alloc; diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 845ac7d3831f420d26f446ed7e36dca5f6201a03..37a5cf62f88b48b12293946e311bdd5d067edbaf 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -392,10 +392,10 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, return &mr->ibmr; -err_key: - free_mr_key(hr_dev, mr); err_pbl: free_mr_pbl(hr_dev, mr); +err_key: + free_mr_key(hr_dev, mr); err_free: kfree(mr); return ERR_PTR(ret); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index f0bd82a18069a69054251d9be85c57d4ba21e683..0ae335fb205cadb4df9a99d8ebfaa9fe1d7dbe20 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -476,38 +476,109 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, return 0; } -static u32 get_wqe_ext_sge_cnt(struct hns_roce_qp *qp) +static u32 get_max_inline_data(struct hns_roce_dev *hr_dev, + struct ib_qp_cap *cap) { - /* GSI/UD QP only has extended sge */ - if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD) - return qp->sq.max_gs; - - if (qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) - return qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE; + if (cap->max_inline_data) { + cap->max_inline_data = roundup_pow_of_two(cap->max_inline_data); + return min(cap->max_inline_data, + hr_dev->caps.max_sq_inline); + } return 0; } +static void update_inline_data(struct hns_roce_qp *hr_qp, + struct ib_qp_cap *cap) +{ + u32 sge_num = hr_qp->sq.ext_sge_cnt; + + if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) { + if (!(hr_qp->ibqp.qp_type == IB_QPT_GSI || + hr_qp->ibqp.qp_type == IB_QPT_UD)) + sge_num = max((u32)HNS_ROCE_SGE_IN_WQE, sge_num); + + cap->max_inline_data = max(cap->max_inline_data, + sge_num * HNS_ROCE_SGE_SIZE); + } + + hr_qp->max_inline_data = 
cap->max_inline_data; +} + +static u32 get_sge_num_from_max_send_sge(bool is_ud_or_gsi, + u32 max_send_sge) +{ + unsigned int std_sge_num; + unsigned int min_sge; + + std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE; + min_sge = is_ud_or_gsi ? 1 : 0; + return max_send_sge > std_sge_num ? (max_send_sge - std_sge_num) : + min_sge; +} + +static unsigned int get_sge_num_from_max_inl_data(bool is_ud_or_gsi, + u32 max_inline_data) +{ + unsigned int inline_sge; + + inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE; + + /* + * If max_inline_data is less than + * HNS_ROCE_SGE_IN_WQE * HNS_ROCE_SGE_SIZE, + * there is no need to extend the sge, except for UD/GSI QPs. + */ + if (!is_ud_or_gsi && inline_sge <= HNS_ROCE_SGE_IN_WQE) + inline_sge = 0; + + return inline_sge; +} + static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt, struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap) { + bool is_ud_or_gsi = (hr_qp->ibqp.qp_type == IB_QPT_GSI || + hr_qp->ibqp.qp_type == IB_QPT_UD); + unsigned int std_sge_num; + u32 inline_ext_sge = 0; + u32 ext_wqe_sge_cnt; u32 total_sge_cnt; - u32 wqe_sge_cnt; + + cap->max_inline_data = get_max_inline_data(hr_dev, cap); hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT; + std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE; + ext_wqe_sge_cnt = get_sge_num_from_max_send_sge(is_ud_or_gsi, + cap->max_send_sge); - hr_qp->sq.max_gs = max(1U, cap->max_send_sge); + if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) { + inline_ext_sge = max(ext_wqe_sge_cnt, + get_sge_num_from_max_inl_data(is_ud_or_gsi, + cap->max_inline_data)); + hr_qp->sq.ext_sge_cnt = inline_ext_sge ? + roundup_pow_of_two(inline_ext_sge) : 0; - wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp); + hr_qp->sq.max_gs = max(1U, (hr_qp->sq.ext_sge_cnt + std_sge_num)); + hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg); + + ext_wqe_sge_cnt = hr_qp->sq.ext_sge_cnt; + } else { + hr_qp->sq.max_gs = max(1U, cap->max_send_sge); + hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg); + hr_qp->sq.ext_sge_cnt = hr_qp->sq.max_gs; + } /* If the number of extended sge is not zero, they MUST use the * space of HNS_HW_PAGE_SIZE at least.
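 * (The computation below therefore clamps sge_cnt to no fewer than HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE entries.)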
*/ - if (wqe_sge_cnt) { - total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * wqe_sge_cnt); + if (ext_wqe_sge_cnt) { + total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * ext_wqe_sge_cnt); hr_qp->sge.sge_cnt = max(total_sge_cnt, (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE); } + + update_inline_data(hr_qp, cap); } static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, @@ -556,6 +627,7 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev, hr_qp->sq.wqe_shift = ucmd->log_sq_stride; hr_qp->sq.wqe_cnt = cnt; + cap->max_send_sge = hr_qp->sq.max_gs; return 0; } @@ -986,13 +1058,9 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct hns_roce_ib_create_qp *ucmd) { struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_ucontext *uctx; int ret; - if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline) - init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline; - - hr_qp->max_inline_data = init_attr->cap.max_inline_data; - if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; else @@ -1015,12 +1083,17 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, return ret; } + uctx = rdma_udata_to_drv_context(udata, struct hns_roce_ucontext, + ibucontext); + hr_qp->config = uctx->config; ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd); if (ret) ibdev_err(ibdev, "failed to set user SQ size, ret = %d.\n", ret); } else { + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) + hr_qp->config = HNS_ROCE_EXSGE_FLAGS; ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp); if (ret) ibdev_err(ibdev, diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c index a6e5d350a94ce8f1e3499fe736bd56cb4abf1fce..16183e894da77b1742291591e15321c56405b06e 100644 --- a/drivers/infiniband/hw/irdma/uk.c +++ b/drivers/infiniband/hw/irdma/uk.c @@ -566,21 +566,37 @@ static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe, /** * irdma_copy_inline_data_gen_1 - Copy inline data to wqe - * @dest: pointer to wqe - * @src: pointer to inline data - * @len: length of inline data to copy + * @wqe: pointer to wqe + * @sge_list: table of pointers to inline data + * @num_sges: number of SGEs + * @polarity: compatibility parameter */ -static void irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len, - u8 polarity) +static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list, + u32 num_sges, u8 polarity) { - if (len <= 16) { - memcpy(dest, src, len); - } else { - memcpy(dest, src, 16); - src += 16; - dest = dest + 32; - memcpy(dest, src, len - 16); + u32 quanta_bytes_remaining = 16; + int i; + + for (i = 0; i < num_sges; i++) { + u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr; + u32 sge_len = sge_list[i].length; + + while (sge_len) { + u32 bytes_copied; + + bytes_copied = min(sge_len, quanta_bytes_remaining); + memcpy(wqe, cur_sge, bytes_copied); + wqe += bytes_copied; + cur_sge += bytes_copied; + quanta_bytes_remaining -= bytes_copied; + sge_len -= bytes_copied; + + if (!quanta_bytes_remaining) { + /* Remaining inline bytes reside after hdr */ + wqe += 16; + quanta_bytes_remaining = 32; + } + } } } @@ -612,35 +628,51 @@ static void irdma_set_mw_bind_wqe(__le64 *wqe, /** * irdma_copy_inline_data - Copy inline data to wqe - * @dest: pointer to wqe - * @src: pointer to inline data - * @len: length of inline data to copy + * @wqe: pointer to wqe + * @sge_list: table of pointers to inline data + * @num_sges: number of SGEs + * @polarity: polarity of wqe valid bit
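 * Note: per the layout below, the first quanta holds 8 data bytes after the WQE header; each subsequent 32-byte quanta holds 31 data bytes plus a trailing valid byte.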
*/ -static void irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity) +static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list, + u32 num_sges, u8 polarity) { u8 inline_valid = polarity << IRDMA_INLINE_VALID_S; - u32 copy_size; - - dest += 8; - if (len <= 8) { - memcpy(dest, src, len); - return; - } - - *((u64 *)dest) = *((u64 *)src); - len -= 8; - src += 8; - dest += 24; /* point to additional 32 byte quanta */ - - while (len) { - copy_size = len < 31 ? len : 31; - memcpy(dest, src, copy_size); - *(dest + 31) = inline_valid; - len -= copy_size; - dest += 32; - src += copy_size; + u32 quanta_bytes_remaining = 8; + bool first_quanta = true; + int i; + + wqe += 8; + + for (i = 0; i < num_sges; i++) { + u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr; + u32 sge_len = sge_list[i].length; + + while (sge_len) { + u32 bytes_copied; + + bytes_copied = min(sge_len, quanta_bytes_remaining); + memcpy(wqe, cur_sge, bytes_copied); + wqe += bytes_copied; + cur_sge += bytes_copied; + quanta_bytes_remaining -= bytes_copied; + sge_len -= bytes_copied; + + if (!quanta_bytes_remaining) { + quanta_bytes_remaining = 31; + + /* Remaining inline bytes reside after hdr */ + if (first_quanta) { + first_quanta = false; + wqe += 16; + } else { + *wqe = inline_valid; + wqe++; + } + } + } } + if (!first_quanta && quanta_bytes_remaining < 31) + *(wqe + quanta_bytes_remaining) = inline_valid; } /** @@ -679,20 +711,27 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) { __le64 *wqe; - struct irdma_inline_rdma_write *op_info; + struct irdma_rdma_write *op_info; u64 hdr = 0; u32 wqe_idx; bool read_fence = false; + u32 i, total_size = 0; u16 quanta; info->push_wqe = qp->push_db ? true : false; - op_info = &info->op.inline_rdma_write; + op_info = &info->op.rdma_write; + + if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges)) + return -EINVAL; + + for (i = 0; i < op_info->num_lo_sges; i++) + total_size += op_info->lo_sg_list[i].length; - if (op_info->len > qp->max_inline_data) + if (unlikely(total_size > qp->max_inline_data)) return -EINVAL; - quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len); - wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len, + quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size); + wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); if (!wqe) return -ENOMEM; @@ -705,7 +744,7 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) | FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | - FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) | + FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) | FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) | FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) | FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 
1 : 0) | @@ -719,7 +758,8 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, set_64bit_val(wqe, 0, FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); - qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len, + qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list, + op_info->num_lo_sges, qp->swqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ @@ -745,20 +785,27 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) { __le64 *wqe; - struct irdma_post_inline_send *op_info; + struct irdma_post_send *op_info; u64 hdr; u32 wqe_idx; bool read_fence = false; + u32 i, total_size = 0; u16 quanta; info->push_wqe = qp->push_db ? true : false; - op_info = &info->op.inline_send; + op_info = &info->op.send; + + if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges)) + return -EINVAL; - if (op_info->len > qp->max_inline_data) + for (i = 0; i < op_info->num_sges; i++) + total_size += op_info->sg_list[i].length; + + if (unlikely(total_size > qp->max_inline_data)) return -EINVAL; - quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len); - wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len, + quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size); + wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); if (!wqe) return -ENOMEM; @@ -773,7 +820,7 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp, hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) | FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) | FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | - FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) | + FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) | FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, (info->imm_data_valid ? 1 : 0)) | FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 
1 : 0)) | @@ -789,8 +836,8 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp, if (info->imm_data_valid) set_64bit_val(wqe, 0, FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); - qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len, - qp->swqe_polarity); + qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list, + op_info->num_sges, qp->swqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ @@ -1002,11 +1049,10 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, __le64 *cqe; struct irdma_qp_uk *qp; struct irdma_ring *pring = NULL; - u32 wqe_idx, q_type; + u32 wqe_idx; int ret_code; bool move_cq_head = true; u8 polarity; - u8 op_type; bool ext_valid; __le64 *ext_cqe; @@ -1074,7 +1120,7 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, info->ud_vlan_valid = false; } - q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3); + info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3); info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3); info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3); info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3); @@ -1113,8 +1159,9 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, } wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3); info->qp_handle = (irdma_qp_handle)(unsigned long)qp; + info->op_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3); - if (q_type == IRDMA_CQE_QTYPE_RQ) { + if (info->q_type == IRDMA_CQE_QTYPE_RQ) { u32 array_idx; array_idx = wqe_idx / qp->rq_wqe_size_multiplier; @@ -1134,10 +1181,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0); - if (info->imm_valid) - info->op_type = IRDMA_OP_TYPE_REC_IMM; - else - info->op_type = IRDMA_OP_TYPE_REC; if (qword3 & IRDMACQ_STAG) { info->stag_invalid_set = true; info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2); @@ -1195,17 +1238,18 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, sw_wqe = qp->sq_base[tail].elem; get_64bit_val(sw_wqe, 24, &wqe_qword); - op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword); - info->op_type = op_type; + info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, + wqe_qword); IRDMA_RING_SET_TAIL(qp->sq_ring, tail + qp->sq_wrtrk_array[tail].quanta); - if (op_type != IRDMAQP_OP_NOP) { + if (info->op_type != IRDMAQP_OP_NOP) { info->wr_id = qp->sq_wrtrk_array[tail].wrid; info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len; break; } } while (1); - if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR) + if (info->op_type == IRDMA_OP_TYPE_BIND_MW && + info->minor_err == FLUSH_PROT_ERR) info->minor_err = FLUSH_MW_BIND_ERR; qp->sq_flush_seen = true; if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h index 2ef61923c92685a5164008eceff7fcb246b664df..d0cdf609f5e06aa71c24a5471b63b8f8aab2ba36 100644 --- a/drivers/infiniband/hw/irdma/user.h +++ b/drivers/infiniband/hw/irdma/user.h @@ -173,14 +173,6 @@ struct irdma_post_send { u32 ah_id; }; -struct irdma_post_inline_send { - void *data; - u32 len; - u32 qkey; - u32 dest_qp; - u32 ah_id; -}; - struct irdma_post_rq_info { u64 wr_id; struct ib_sge *sg_list; @@ -193,12 +185,6 @@ struct irdma_rdma_write { struct ib_sge rem_addr; }; -struct irdma_inline_rdma_write { - void *data; - u32 len; - struct ib_sge rem_addr; -}; - struct irdma_rdma_read { struct ib_sge *lo_sg_list; u32 num_lo_sges; @@ -241,8 +227,6 @@ struct irdma_post_sq_info { struct irdma_rdma_read rdma_read; struct irdma_bind_window bind_window; struct irdma_inv_local_stag 
inv_local_stag; - struct irdma_inline_rdma_write inline_rdma_write; - struct irdma_post_inline_send inline_send; } op; }; @@ -261,6 +245,7 @@ struct irdma_cq_poll_info { u16 ud_vlan; u8 ud_smac[6]; u8 op_type; + u8 q_type; bool stag_invalid_set:1; /* or L_R_Key set */ bool push_dropped:1; bool error:1; @@ -291,7 +276,8 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, bool post_sq); struct irdma_wqe_uk_ops { - void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity); + void (*iw_copy_inline_data)(u8 *dest, struct ib_sge *sge_list, + u32 num_sges, u8 polarity); u16 (*iw_inline_data_size_to_quanta)(u32 data_size); void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge, u8 valid); diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c index 8dfc9e154d733aba6dbfc47b82276dfdfe710c75..445e69e864097ef85adbf351387eccd08150ffcb 100644 --- a/drivers/infiniband/hw/irdma/utils.c +++ b/drivers/infiniband/hw/irdma/utils.c @@ -2591,6 +2591,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp) sw_wqe = qp->sq_base[wqe_idx].elem; get_64bit_val(sw_wqe, 24, &wqe_qword); cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, IRDMAQPSQ_OPCODE); + cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ; /* remove the SQ WR by moving SQ tail*/ IRDMA_RING_SET_TAIL(*sq_ring, sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta); @@ -2629,6 +2630,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp) cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx]; cmpl->cpi.op_type = IRDMA_OP_TYPE_REC; + cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ; /* remove the RQ WR by moving RQ tail */ IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1); ibdev_dbg(iwqp->iwrcq->ibcq.device, diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index a22afbb25bc58d3870e15f09cddbd3dc4880052a..f6973ea55eda7eefc518231b997d3a9811286bee 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -63,36 +63,6 @@ static int irdma_query_device(struct ib_device *ibdev, return 0; } -/** - * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed - * @link_speed: netdev phy link speed - * @active_speed: IB port speed - * @active_width: IB port width - */ -static void irdma_get_eth_speed_and_width(u32 link_speed, u16 *active_speed, - u8 *active_width) -{ - if (link_speed <= SPEED_1000) { - *active_width = IB_WIDTH_1X; - *active_speed = IB_SPEED_SDR; - } else if (link_speed <= SPEED_10000) { - *active_width = IB_WIDTH_1X; - *active_speed = IB_SPEED_FDR10; - } else if (link_speed <= SPEED_20000) { - *active_width = IB_WIDTH_4X; - *active_speed = IB_SPEED_DDR; - } else if (link_speed <= SPEED_25000) { - *active_width = IB_WIDTH_1X; - *active_speed = IB_SPEED_EDR; - } else if (link_speed <= SPEED_40000) { - *active_width = IB_WIDTH_4X; - *active_speed = IB_SPEED_FDR10; - } else { - *active_width = IB_WIDTH_4X; - *active_speed = IB_SPEED_EDR; - } -} - /** * irdma_query_port - get port attributes * @ibdev: device pointer from stack @@ -120,8 +90,9 @@ static int irdma_query_port(struct ib_device *ibdev, u32 port, props->state = IB_PORT_DOWN; props->phys_state = IB_PORT_PHYS_STATE_DISABLED; } - irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed, - &props->active_width); + + ib_get_eth_speed(ibdev, port, &props->active_speed, + &props->active_width); if (rdma_protocol_roce(ibdev, 1)) { props->gid_tbl_len = 32; @@ -1242,6 +1213,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, 
av->attrs = attr->ah_attr; rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid); rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid); + av->net_type = rdma_gid_attr_network_type(sgid_attr); if (av->net_type == RDMA_NETWORK_IPV6) { __be32 *daddr = av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32; @@ -2358,9 +2330,10 @@ static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc, * @rf: RDMA PCI function * @iwmr: mr pointer for this memory registration * @use_pbles: flag if to use pble's + * @lvl_1_only: request only level 1 pble if true */ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, - bool use_pbles) + bool use_pbles, bool lvl_1_only) { struct irdma_pbl *iwpbl = &iwmr->iwpbl; struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; @@ -2371,7 +2344,7 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, if (use_pbles) { status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt, - false); + lvl_1_only); if (status) return status; @@ -2414,16 +2387,10 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev, bool ret = true; pg_size = iwmr->page_size; - err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles); + err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, true); if (err) return err; - if (use_pbles && palloc->level != PBLE_LEVEL_1) { - irdma_free_pble(iwdev->rf->pble_rsrc, palloc); - iwpbl->pbl_allocated = false; - return -ENOMEM; - } - if (use_pbles) arr = palloc->level1.addr; @@ -2899,7 +2866,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, case IRDMA_MEMREG_TYPE_MEM: use_pbles = (iwmr->page_cnt != 1); - err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles); + err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false); if (err) goto error; @@ -3165,30 +3132,20 @@ static int irdma_post_send(struct ib_qp *ibqp, info.stag_to_inv = ib_wr->ex.invalidate_rkey; } - if (ib_wr->send_flags & IB_SEND_INLINE) { - info.op.inline_send.data = (void *)(unsigned long) - ib_wr->sg_list[0].addr; - info.op.inline_send.len = ib_wr->sg_list[0].length; - if (iwqp->ibqp.qp_type == IB_QPT_UD || - iwqp->ibqp.qp_type == IB_QPT_GSI) { - ah = to_iwah(ud_wr(ib_wr)->ah); - info.op.inline_send.ah_id = ah->sc_ah.ah_info.ah_idx; - info.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey; - info.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn; - } + info.op.send.num_sges = ib_wr->num_sge; + info.op.send.sg_list = ib_wr->sg_list; + if (iwqp->ibqp.qp_type == IB_QPT_UD || + iwqp->ibqp.qp_type == IB_QPT_GSI) { + ah = to_iwah(ud_wr(ib_wr)->ah); + info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx; + info.op.send.qkey = ud_wr(ib_wr)->remote_qkey; + info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn; + } + + if (ib_wr->send_flags & IB_SEND_INLINE) err = irdma_uk_inline_send(ukqp, &info, false); - } else { - info.op.send.num_sges = ib_wr->num_sge; - info.op.send.sg_list = ib_wr->sg_list; - if (iwqp->ibqp.qp_type == IB_QPT_UD || - iwqp->ibqp.qp_type == IB_QPT_GSI) { - ah = to_iwah(ud_wr(ib_wr)->ah); - info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx; - info.op.send.qkey = ud_wr(ib_wr)->remote_qkey; - info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn; - } + else err = irdma_uk_send(ukqp, &info, false); - } break; case IB_WR_RDMA_WRITE_WITH_IMM: if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) { @@ -3205,22 +3162,15 @@ static int irdma_post_send(struct ib_qp *ibqp, else info.op_type = IRDMA_OP_TYPE_RDMA_WRITE; - if (ib_wr->send_flags & IB_SEND_INLINE) { - info.op.inline_rdma_write.data = (void 
*)(uintptr_t)ib_wr->sg_list[0].addr; - info.op.inline_rdma_write.len = - ib_wr->sg_list[0].length; - info.op.inline_rdma_write.rem_addr.addr = - rdma_wr(ib_wr)->remote_addr; - info.op.inline_rdma_write.rem_addr.lkey = - rdma_wr(ib_wr)->rkey; + info.op.rdma_write.num_lo_sges = ib_wr->num_sge; + info.op.rdma_write.lo_sg_list = ib_wr->sg_list; + info.op.rdma_write.rem_addr.addr = + rdma_wr(ib_wr)->remote_addr; + info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey; + if (ib_wr->send_flags & IB_SEND_INLINE) err = irdma_uk_inline_rdma_write(ukqp, &info, false); - } else { - info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list; - info.op.rdma_write.num_lo_sges = ib_wr->num_sge; - info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr; - info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey; + else err = irdma_uk_rdma_write(ukqp, &info, false); - } break; case IB_WR_RDMA_READ_WITH_INV: inv_stag = true; @@ -3380,7 +3330,6 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode static void irdma_process_cqe(struct ib_wc *entry, struct irdma_cq_poll_info *cq_poll_info) { - struct irdma_qp *iwqp; struct irdma_sc_qp *qp; entry->wc_flags = 0; @@ -3388,7 +3337,6 @@ static void irdma_process_cqe(struct ib_wc *entry, entry->wr_id = cq_poll_info->wr_id; qp = cq_poll_info->qp_handle; - iwqp = qp->qp_uk.back_qp; entry->qp = qp->qp_uk.back_qp; if (cq_poll_info->error) { @@ -3421,42 +3369,17 @@ static void irdma_process_cqe(struct ib_wc *entry, } } - switch (cq_poll_info->op_type) { - case IRDMA_OP_TYPE_RDMA_WRITE: - case IRDMA_OP_TYPE_RDMA_WRITE_SOL: - entry->opcode = IB_WC_RDMA_WRITE; - break; - case IRDMA_OP_TYPE_RDMA_READ_INV_STAG: - case IRDMA_OP_TYPE_RDMA_READ: - entry->opcode = IB_WC_RDMA_READ; - break; - case IRDMA_OP_TYPE_SEND_INV: - case IRDMA_OP_TYPE_SEND_SOL: - case IRDMA_OP_TYPE_SEND_SOL_INV: - case IRDMA_OP_TYPE_SEND: - entry->opcode = IB_WC_SEND; - break; - case IRDMA_OP_TYPE_FAST_REG_NSMR: - entry->opcode = IB_WC_REG_MR; - break; - case IRDMA_OP_TYPE_INV_STAG: - entry->opcode = IB_WC_LOCAL_INV; - break; - case IRDMA_OP_TYPE_REC_IMM: - case IRDMA_OP_TYPE_REC: - entry->opcode = cq_poll_info->op_type == IRDMA_OP_TYPE_REC_IMM ? - IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV; + if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) { + set_ib_wc_op_sq(cq_poll_info, entry); + } else { + set_ib_wc_op_rq(cq_poll_info, entry, + qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ? 
+ true : false); if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD && cq_poll_info->stag_invalid_set) { entry->ex.invalidate_rkey = cq_poll_info->inv_stag; entry->wc_flags |= IB_WC_WITH_INVALIDATE; } - break; - default: - ibdev_err(&iwqp->iwdev->ibdev, - "Invalid opcode = %d in CQE\n", cq_poll_info->op_type); - entry->status = IB_WC_GENERAL_ERR; - return; } if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) { diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h index 4309b7159f42c8c634cb1e4293c3d5e5f64985c7..a536e9fa85ebf1b84a5865401e787dc3278b9e8e 100644 --- a/drivers/infiniband/hw/irdma/verbs.h +++ b/drivers/infiniband/hw/irdma/verbs.h @@ -232,6 +232,59 @@ static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev) return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]); } +static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info, + struct ib_wc *entry) +{ + switch (cq_poll_info->op_type) { + case IRDMA_OP_TYPE_RDMA_WRITE: + case IRDMA_OP_TYPE_RDMA_WRITE_SOL: + entry->opcode = IB_WC_RDMA_WRITE; + break; + case IRDMA_OP_TYPE_RDMA_READ_INV_STAG: + case IRDMA_OP_TYPE_RDMA_READ: + entry->opcode = IB_WC_RDMA_READ; + break; + case IRDMA_OP_TYPE_SEND_SOL: + case IRDMA_OP_TYPE_SEND_SOL_INV: + case IRDMA_OP_TYPE_SEND_INV: + case IRDMA_OP_TYPE_SEND: + entry->opcode = IB_WC_SEND; + break; + case IRDMA_OP_TYPE_FAST_REG_NSMR: + entry->opcode = IB_WC_REG_MR; + break; + case IRDMA_OP_TYPE_INV_STAG: + entry->opcode = IB_WC_LOCAL_INV; + break; + default: + entry->status = IB_WC_GENERAL_ERR; + } +} + +static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info, + struct ib_wc *entry, bool send_imm_support) +{ + /** + * iWARP does not support send with immediate, so the presence + * of immediate data must mean a write with immediate. + */ + if (!send_imm_support) { + entry->opcode = cq_poll_info->imm_valid ? + IB_WC_RECV_RDMA_WITH_IMM : + IB_WC_RECV; + return; + } + + switch (cq_poll_info->op_type) { + case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE: + case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE: + entry->opcode = IB_WC_RECV_RDMA_WITH_IMM; + break; + default: + entry->opcode = IB_WC_RECV; + } +} + void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4); int irdma_ib_register_device(struct irdma_device *iwdev); void irdma_ib_unregister_device(struct irdma_device *iwdev); diff --git a/drivers/infiniband/hw/mana/Kconfig b/drivers/infiniband/hw/mana/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..546640657bac28370d53baa73fb14b0039a66815 --- /dev/null +++ b/drivers/infiniband/hw/mana/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +config MANA_INFINIBAND + tristate "Microsoft Azure Network Adapter support" + depends on NETDEVICES && ETHERNET && PCI && MICROSOFT_MANA + help + This driver provides low-level RDMA support for the Microsoft Azure + Network Adapter (MANA). MANA supports RDMA features that can be used + by workloads (e.g. DPDK, MPI, etc.) that use RDMA verbs to directly + access hardware from user-mode processes in the Microsoft Azure cloud + environment.
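A minimal sketch of a .config fragment that would build this new driver as a module, assuming the dependencies named in the entry above are already selected (illustrative only, not part of the patch):

# Hypothetical .config fragment; option names come from the Kconfig entry above
CONFIG_NETDEVICES=y
CONFIG_ETHERNET=y
CONFIG_PCI=y
CONFIG_MICROSOFT_MANA=m
CONFIG_MANA_INFINIBAND=m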
diff --git a/drivers/infiniband/hw/mana/Makefile b/drivers/infiniband/hw/mana/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..88655fe5e398a8d18a7e61cfe4012a5ca674c49c --- /dev/null +++ b/drivers/infiniband/hw/mana/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_MANA_INFINIBAND) += mana_ib.o + +mana_ib-y := device.o main.o wq.o qp.o cq.o mr.o diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c new file mode 100644 index 0000000000000000000000000000000000000000..d141cab8a1e6986ac24a4dc51ea1b8050af078a1 --- /dev/null +++ b/drivers/infiniband/hw/mana/cq.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022, Microsoft Corporation. All rights reserved. + */ + +#include "mana_ib.h" + +int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata) +{ + struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq); + struct ib_device *ibdev = ibcq->device; + struct mana_ib_create_cq ucmd = {}; + struct mana_ib_dev *mdev; + int err; + + mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); + + if (udata->inlen < sizeof(ucmd)) + return -EINVAL; + + err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); + if (err) { + ibdev_dbg(ibdev, + "Failed to copy from udata for create cq, %d\n", err); + return err; + } + + if (attr->cqe > MAX_SEND_BUFFERS_PER_QUEUE) { + ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe); + return -EINVAL; + } + + cq->cqe = attr->cqe; + cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, + IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(cq->umem)) { + err = PTR_ERR(cq->umem); + ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n", + err); + return err; + } + + err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region); + if (err) { + ibdev_dbg(ibdev, + "Failed to create dma region for create cq, %d\n", + err); + goto err_release_umem; + } + + ibdev_dbg(ibdev, + "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n", + err, cq->gdma_region); + + /* + * The CQ ID is not known at this time. The ID is generated at create_qp + */ + + return 0; + +err_release_umem: + ib_umem_release(cq->umem); + return err; +} + +int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) +{ + struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq); + struct ib_device *ibdev = ibcq->device; + struct mana_ib_dev *mdev; + + mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); + + mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region); + ib_umem_release(cq->umem); + + return 0; +} diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c new file mode 100644 index 0000000000000000000000000000000000000000..d4541b8707e4c75a191a09dd2890447b45895ed1 --- /dev/null +++ b/drivers/infiniband/hw/mana/device.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022, Microsoft Corporation. All rights reserved. 
+ */ + +#include "mana_ib.h" +#include <net/mana/mana_auxiliary.h> + +MODULE_DESCRIPTION("Microsoft Azure Network Adapter IB driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(NET_MANA); + +static const struct ib_device_ops mana_ib_dev_ops = { + .owner = THIS_MODULE, + .driver_id = RDMA_DRIVER_MANA, + .uverbs_abi_ver = MANA_IB_UVERBS_ABI_VERSION, + + .alloc_pd = mana_ib_alloc_pd, + .alloc_ucontext = mana_ib_alloc_ucontext, + .create_cq = mana_ib_create_cq, + .create_qp = mana_ib_create_qp, + .create_rwq_ind_table = mana_ib_create_rwq_ind_table, + .create_wq = mana_ib_create_wq, + .dealloc_pd = mana_ib_dealloc_pd, + .dealloc_ucontext = mana_ib_dealloc_ucontext, + .dereg_mr = mana_ib_dereg_mr, + .destroy_cq = mana_ib_destroy_cq, + .destroy_qp = mana_ib_destroy_qp, + .destroy_rwq_ind_table = mana_ib_destroy_rwq_ind_table, + .destroy_wq = mana_ib_destroy_wq, + .disassociate_ucontext = mana_ib_disassociate_ucontext, + .get_port_immutable = mana_ib_get_port_immutable, + .mmap = mana_ib_mmap, + .modify_qp = mana_ib_modify_qp, + .modify_wq = mana_ib_modify_wq, + .query_device = mana_ib_query_device, + .query_gid = mana_ib_query_gid, + .query_port = mana_ib_query_port, + .reg_user_mr = mana_ib_reg_user_mr, + + INIT_RDMA_OBJ_SIZE(ib_cq, mana_ib_cq, ibcq), + INIT_RDMA_OBJ_SIZE(ib_pd, mana_ib_pd, ibpd), + INIT_RDMA_OBJ_SIZE(ib_qp, mana_ib_qp, ibqp), + INIT_RDMA_OBJ_SIZE(ib_ucontext, mana_ib_ucontext, ibucontext), + INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mana_ib_rwq_ind_table, + ib_ind_table), +}; + +static int mana_ib_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct mana_adev *madev = container_of(adev, struct mana_adev, adev); + struct gdma_dev *mdev = madev->mdev; + struct mana_context *mc; + struct mana_ib_dev *dev; + int ret; + + mc = mdev->driver_data; + + dev = ib_alloc_device(mana_ib_dev, ib_dev); + if (!dev) + return -ENOMEM; + + ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops); + + dev->ib_dev.phys_port_cnt = mc->num_ports; + + ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev, + mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt); + + dev->gdma_dev = mdev; + dev->ib_dev.node_type = RDMA_NODE_IB_CA; + + /* + * num_comp_vectors needs to be set to the max MSIX index + * when interrupts and event queues are implemented + */ + dev->ib_dev.num_comp_vectors = 1; + dev->ib_dev.dev.parent = mdev->gdma_context->dev; + + ret = ib_register_device(&dev->ib_dev, "mana_%d", + mdev->gdma_context->dev); + if (ret) { + ib_dealloc_device(&dev->ib_dev); + return ret; + } + + dev_set_drvdata(&adev->dev, dev); + + return 0; +} + +static void mana_ib_remove(struct auxiliary_device *adev) +{ + struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev); + + ib_unregister_device(&dev->ib_dev); + ib_dealloc_device(&dev->ib_dev); +} + +static const struct auxiliary_device_id mana_id_table[] = { + { + .name = "mana.rdma", + }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, mana_id_table); + +static struct auxiliary_driver mana_driver = { + .name = "rdma", + .probe = mana_ib_probe, + .remove = mana_ib_remove, + .id_table = mana_id_table, +}; + +module_auxiliary_driver(mana_driver); diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c new file mode 100644 index 0000000000000000000000000000000000000000..8b3bc302d6f3a3cd7984e7653e1636de98978dd2 --- /dev/null +++ b/drivers/infiniband/hw/mana/main.c @@ -0,0 +1,521 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
+ */ + +#include "mana_ib.h" + +void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd, + u32 port) +{ + struct gdma_dev *gd = dev->gdma_dev; + struct mana_port_context *mpc; + struct net_device *ndev; + struct mana_context *mc; + + mc = gd->driver_data; + ndev = mc->ports[port]; + mpc = netdev_priv(ndev); + + mutex_lock(&pd->vport_mutex); + + pd->vport_use_count--; + WARN_ON(pd->vport_use_count < 0); + + if (!pd->vport_use_count) + mana_uncfg_vport(mpc); + + mutex_unlock(&pd->vport_mutex); +} + +int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd, + u32 doorbell_id) +{ + struct gdma_dev *mdev = dev->gdma_dev; + struct mana_port_context *mpc; + struct mana_context *mc; + struct net_device *ndev; + int err; + + mc = mdev->driver_data; + ndev = mc->ports[port]; + mpc = netdev_priv(ndev); + + mutex_lock(&pd->vport_mutex); + + pd->vport_use_count++; + if (pd->vport_use_count > 1) { + ibdev_dbg(&dev->ib_dev, + "Skip as this PD is already configured vport\n"); + mutex_unlock(&pd->vport_mutex); + return 0; + } + + err = mana_cfg_vport(mpc, pd->pdn, doorbell_id); + if (err) { + pd->vport_use_count--; + mutex_unlock(&pd->vport_mutex); + + ibdev_dbg(&dev->ib_dev, "Failed to configure vPort %d\n", err); + return err; + } + + mutex_unlock(&pd->vport_mutex); + + pd->tx_shortform_allowed = mpc->tx_shortform_allowed; + pd->tx_vp_offset = mpc->tx_vp_offset; + + ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x doorbell_id %x\n", + mpc->port_handle, pd->pdn, doorbell_id); + + return 0; +} + +int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +{ + struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd); + struct ib_device *ibdev = ibpd->device; + struct gdma_create_pd_resp resp = {}; + struct gdma_create_pd_req req = {}; + enum gdma_pd_flags flags = 0; + struct mana_ib_dev *dev; + struct gdma_dev *mdev; + int err; + + dev = container_of(ibdev, struct mana_ib_dev, ib_dev); + mdev = dev->gdma_dev; + + mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req), + sizeof(resp)); + + req.flags = flags; + err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req, + sizeof(resp), &resp); + + if (err || resp.hdr.status) { + ibdev_dbg(&dev->ib_dev, + "Failed to get pd_id err %d status %u\n", err, + resp.hdr.status); + if (!err) + err = -EPROTO; + + return err; + } + + pd->pd_handle = resp.pd_handle; + pd->pdn = resp.pd_id; + ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n", + pd->pd_handle, pd->pdn); + + mutex_init(&pd->vport_mutex); + pd->vport_use_count = 0; + return 0; +} + +int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +{ + struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd); + struct ib_device *ibdev = ibpd->device; + struct gdma_destory_pd_resp resp = {}; + struct gdma_destroy_pd_req req = {}; + struct mana_ib_dev *dev; + struct gdma_dev *mdev; + int err; + + dev = container_of(ibdev, struct mana_ib_dev, ib_dev); + mdev = dev->gdma_dev; + + mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req), + sizeof(resp)); + + req.pd_handle = pd->pd_handle; + err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req, + sizeof(resp), &resp); + + if (err || resp.hdr.status) { + ibdev_dbg(&dev->ib_dev, + "Failed to destroy pd_handle 0x%llx err %d status %u", + pd->pd_handle, err, resp.hdr.status); + if (!err) + err = -EPROTO; + } + + return err; +} + +static int mana_gd_destroy_doorbell_page(struct gdma_context *gc, + int doorbell_page) +{ + struct gdma_destroy_resource_range_req req = {}; + 
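Every helper in this file talks to the device through the same mailbox idiom: build a request header, post it on the hardware channel, then fold a nonzero firmware status into -EPROTO when the transport call itself succeeded. A distilled sketch of that pattern; the wrapper structs are illustrative stand-ins for the real GDMA payload layouts:

#include <net/mana/gdma.h>

static int gdma_mailbox_pattern(struct gdma_context *gc, u64 pd_handle)
{
	struct {
		struct gdma_req_hdr hdr;
		u64 pd_handle;
	} req = {};
	struct {
		struct gdma_resp_hdr hdr;
	} resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
			     sizeof(resp));
	req.pd_handle = pd_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
				   &resp);
	if (err)		/* transport failure */
		return err;
	if (resp.hdr.status)	/* device rejected the command */
		return -EPROTO;

	return 0;
}
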
struct gdma_resp_hdr resp = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE, + sizeof(req), sizeof(resp)); + + req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE; + req.num_resources = 1; + req.allocated_resources = doorbell_page; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.status) { + dev_err(gc->dev, + "Failed to destroy doorbell page: ret %d, 0x%x\n", + err, resp.status); + return err ?: -EPROTO; + } + + return 0; +} + +static int mana_gd_allocate_doorbell_page(struct gdma_context *gc, + int *doorbell_page) +{ + struct gdma_allocate_resource_range_req req = {}; + struct gdma_allocate_resource_range_resp resp = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE, + sizeof(req), sizeof(resp)); + + req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE; + req.num_resources = 1; + req.alignment = 1; + + /* Have GDMA start searching from 0 */ + req.allocated_resources = 0; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, + "Failed to allocate doorbell page: ret %d, 0x%x\n", + err, resp.hdr.status); + return err ?: -EPROTO; + } + + *doorbell_page = resp.allocated_resources; + + return 0; +} + +int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext, + struct ib_udata *udata) +{ + struct mana_ib_ucontext *ucontext = + container_of(ibcontext, struct mana_ib_ucontext, ibucontext); + struct ib_device *ibdev = ibcontext->device; + struct mana_ib_dev *mdev; + struct gdma_context *gc; + struct gdma_dev *dev; + int doorbell_page; + int ret; + + mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); + dev = mdev->gdma_dev; + gc = dev->gdma_context; + + /* Allocate a doorbell page index */ + ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page); + if (ret) { + ibdev_dbg(ibdev, "Failed to allocate doorbell page %d\n", ret); + return ret; + } + + ibdev_dbg(ibdev, "Doorbell page allocated %d\n", doorbell_page); + + ucontext->doorbell = doorbell_page; + + return 0; +} + +void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) +{ + struct mana_ib_ucontext *mana_ucontext = + container_of(ibcontext, struct mana_ib_ucontext, ibucontext); + struct ib_device *ibdev = ibcontext->device; + struct mana_ib_dev *mdev; + struct gdma_context *gc; + int ret; + + mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); + gc = mdev->gdma_dev->gdma_context; + + ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell); + if (ret) + ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret); +} + +static int +mana_ib_gd_first_dma_region(struct mana_ib_dev *dev, + struct gdma_context *gc, + struct gdma_create_dma_region_req *create_req, + size_t num_pages, mana_handle_t *gdma_region) +{ + struct gdma_create_dma_region_resp create_resp = {}; + unsigned int create_req_msg_size; + int err; + + create_req_msg_size = + struct_size(create_req, page_addr_list, num_pages); + create_req->page_addr_list_len = num_pages; + + err = mana_gd_send_request(gc, create_req_msg_size, create_req, + sizeof(create_resp), &create_resp); + if (err || create_resp.hdr.status) { + ibdev_dbg(&dev->ib_dev, + "Failed to create DMA region: %d, 0x%x\n", + err, create_resp.hdr.status); + if (!err) + err = -EPROTO; + + return err; + } + + *gdma_region = create_resp.dma_region_handle; + ibdev_dbg(&dev->ib_dev, "Created DMA region handle 0x%llx\n", + *gdma_region); + + return 0; +} + +static int +mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context 
*gc, + struct gdma_dma_region_add_pages_req *add_req, + unsigned int num_pages, u32 expected_status) +{ + unsigned int add_req_msg_size = + struct_size(add_req, page_addr_list, num_pages); + struct gdma_general_resp add_resp = {}; + int err; + + mana_gd_init_req_hdr(&add_req->hdr, GDMA_DMA_REGION_ADD_PAGES, + add_req_msg_size, sizeof(add_resp)); + add_req->page_addr_list_len = num_pages; + + err = mana_gd_send_request(gc, add_req_msg_size, add_req, + sizeof(add_resp), &add_resp); + if (err || add_resp.hdr.status != expected_status) { + ibdev_dbg(&dev->ib_dev, + "Failed to create DMA region: %d, 0x%x\n", + err, add_resp.hdr.status); + + if (!err) + err = -EPROTO; + + return err; + } + + return 0; +} + +int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem, + mana_handle_t *gdma_region) +{ + struct gdma_dma_region_add_pages_req *add_req = NULL; + size_t num_pages_processed = 0, num_pages_to_handle; + struct gdma_create_dma_region_req *create_req; + unsigned int create_req_msg_size; + struct hw_channel_context *hwc; + struct ib_block_iter biter; + size_t max_pgs_add_cmd = 0; + size_t max_pgs_create_cmd; + struct gdma_context *gc; + size_t num_pages_total; + struct gdma_dev *mdev; + unsigned long page_sz; + unsigned int tail = 0; + u64 *page_addr_list; + void *request_buf; + int err; + + mdev = dev->gdma_dev; + gc = mdev->gdma_context; + hwc = gc->hwc.driver_data; + + /* Hardware requires dma region to align to chosen page size */ + page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, 0); + if (!page_sz) { + ibdev_dbg(&dev->ib_dev, "failed to find page size.\n"); + return -ENOMEM; + } + num_pages_total = ib_umem_num_dma_blocks(umem, page_sz); + + max_pgs_create_cmd = + (hwc->max_req_msg_size - sizeof(*create_req)) / sizeof(u64); + num_pages_to_handle = + min_t(size_t, num_pages_total, max_pgs_create_cmd); + create_req_msg_size = + struct_size(create_req, page_addr_list, num_pages_to_handle); + + request_buf = kzalloc(hwc->max_req_msg_size, GFP_KERNEL); + if (!request_buf) + return -ENOMEM; + + create_req = request_buf; + mana_gd_init_req_hdr(&create_req->hdr, GDMA_CREATE_DMA_REGION, + create_req_msg_size, + sizeof(struct gdma_create_dma_region_resp)); + + create_req->length = umem->length; + create_req->offset_in_page = umem->address & (page_sz - 1); + create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT; + create_req->page_count = num_pages_total; + + ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n", + umem->length, num_pages_total); + + ibdev_dbg(&dev->ib_dev, "page_sz %lu offset_in_page %u\n", + page_sz, create_req->offset_in_page); + + ibdev_dbg(&dev->ib_dev, "num_pages_to_handle %lu, gdma_page_type %u", + num_pages_to_handle, create_req->gdma_page_type); + + page_addr_list = create_req->page_addr_list; + rdma_umem_for_each_dma_block(umem, &biter, page_sz) { + page_addr_list[tail++] = rdma_block_iter_dma_address(&biter); + if (tail < num_pages_to_handle) + continue; + + if (!num_pages_processed) { + /* First create message */ + err = mana_ib_gd_first_dma_region(dev, gc, create_req, + tail, gdma_region); + if (err) + goto out; + + max_pgs_add_cmd = (hwc->max_req_msg_size - + sizeof(*add_req)) / sizeof(u64); + + add_req = request_buf; + add_req->dma_region_handle = *gdma_region; + add_req->reserved3 = 0; + page_addr_list = add_req->page_addr_list; + } else { + /* Subsequent create messages */ + u32 expected_s = 0; + + if (num_pages_processed + num_pages_to_handle < + num_pages_total) + expected_s = GDMA_STATUS_MORE_ENTRIES; + + err = 
mana_ib_gd_add_dma_region(dev, gc, add_req, tail, + expected_s); + if (err) + break; + } + + num_pages_processed += tail; + tail = 0; + + /* The remaining pages to create */ + num_pages_to_handle = + min_t(size_t, + num_pages_total - num_pages_processed, + max_pgs_add_cmd); + } + + if (err) + mana_ib_gd_destroy_dma_region(dev, *gdma_region); + +out: + kfree(request_buf); + return err; +} + +int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region) +{ + struct gdma_dev *mdev = dev->gdma_dev; + struct gdma_context *gc; + + gc = mdev->gdma_context; + ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region); + + return mana_gd_destroy_dma_region(gc, gdma_region); +} + +int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) +{ + struct mana_ib_ucontext *mana_ucontext = + container_of(ibcontext, struct mana_ib_ucontext, ibucontext); + struct ib_device *ibdev = ibcontext->device; + struct mana_ib_dev *mdev; + struct gdma_context *gc; + phys_addr_t pfn; + pgprot_t prot; + int ret; + + mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); + gc = mdev->gdma_dev->gdma_context; + + if (vma->vm_pgoff != 0) { + ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff); + return -EINVAL; + } + + /* Map to the page indexed by ucontext->doorbell */ + pfn = (gc->phys_db_page_base + + gc->db_page_size * mana_ucontext->doorbell) >> + PAGE_SHIFT; + prot = pgprot_writecombine(vma->vm_page_prot); + + ret = rdma_user_mmap_io(ibcontext, vma, pfn, gc->db_page_size, prot, + NULL); + if (ret) + ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret); + else + ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %u, ret %d\n", + pfn, gc->db_page_size, ret); + + return ret; +} + +int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num, + struct ib_port_immutable *immutable) +{ + /* + * This version only support RAW_PACKET + * other values need to be filled for other types + */ + immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; + + return 0; +} + +int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, + struct ib_udata *uhw) +{ + props->max_qp = MANA_MAX_NUM_QUEUES; + props->max_qp_wr = MAX_SEND_BUFFERS_PER_QUEUE; + + /* + * max_cqe could be potentially much bigger. + * As this version of driver only support RAW QP, set it to the same + * value as max_qp_wr + */ + props->max_cqe = MAX_SEND_BUFFERS_PER_QUEUE; + + props->max_mr_size = MANA_IB_MAX_MR_SIZE; + props->max_mr = MANA_IB_MAX_MR; + props->max_send_sge = MAX_TX_WQE_SGL_ENTRIES; + props->max_recv_sge = MAX_RX_WQE_SGL_ENTRIES; + + return 0; +} + +int mana_ib_query_port(struct ib_device *ibdev, u32 port, + struct ib_port_attr *props) +{ + /* This version doesn't return port properties */ + return 0; +} + +int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index, + union ib_gid *gid) +{ + /* This version doesn't return GID properties */ + return 0; +} + +void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) +{ +} diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h new file mode 100644 index 0000000000000000000000000000000000000000..502cc8672eefa2cf7711841fca93fa94d99d1df8 --- /dev/null +++ b/drivers/infiniband/hw/mana/mana_ib.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Microsoft Corporation. All rights reserved. 
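The loop above splits a large region across one CREATE message and a series of ADD_PAGES messages, where an ADD message expects GDMA_STATUS_MORE_ENTRIES unless it is the final one. A stand-alone model of just the chunking arithmetic, with invented per-message limits (the driver derives both from hwc->max_req_msg_size):

#include <stdio.h>

int main(void)
{
	size_t total = 1000, max_create = 100, max_add = 120;
	size_t done = 0;
	size_t batch = total < max_create ? total : max_create;

	printf("CREATE: %zu pages\n", batch);
	done += batch;

	while (done < total) {
		batch = (total - done < max_add) ? total - done : max_add;
		printf("ADD_PAGES: %zu pages, expect %s\n", batch,
		       done + batch < total ? "GDMA_STATUS_MORE_ENTRIES"
					    : "success");
		done += batch;
	}
	return 0;
}
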
+ */ + +#ifndef _MANA_IB_H_ +#define _MANA_IB_H_ + +#include +#include +#include +#include +#include + +#include + +#define PAGE_SZ_BM \ + (SZ_4K | SZ_8K | SZ_16K | SZ_32K | SZ_64K | SZ_128K | SZ_256K | \ + SZ_512K | SZ_1M | SZ_2M) + +/* MANA doesn't have any limit for MR size */ +#define MANA_IB_MAX_MR_SIZE U64_MAX + +/* + * The hardware limit of number of MRs is greater than maximum number of MRs + * that can possibly represent in 24 bits + */ +#define MANA_IB_MAX_MR 0xFFFFFFu + +struct mana_ib_dev { + struct ib_device ib_dev; + struct gdma_dev *gdma_dev; +}; + +struct mana_ib_wq { + struct ib_wq ibwq; + struct ib_umem *umem; + int wqe; + u32 wq_buf_size; + u64 gdma_region; + u64 id; + mana_handle_t rx_object; +}; + +struct mana_ib_pd { + struct ib_pd ibpd; + u32 pdn; + mana_handle_t pd_handle; + + /* Mutex for sharing access to vport_use_count */ + struct mutex vport_mutex; + int vport_use_count; + + bool tx_shortform_allowed; + u32 tx_vp_offset; +}; + +struct mana_ib_mr { + struct ib_mr ibmr; + struct ib_umem *umem; + mana_handle_t mr_handle; +}; + +struct mana_ib_cq { + struct ib_cq ibcq; + struct ib_umem *umem; + int cqe; + u64 gdma_region; + u64 id; +}; + +struct mana_ib_qp { + struct ib_qp ibqp; + + /* Work queue info */ + struct ib_umem *sq_umem; + int sqe; + u64 sq_gdma_region; + u64 sq_id; + mana_handle_t tx_object; + + /* The port on the IB device, starting with 1 */ + u32 port; +}; + +struct mana_ib_ucontext { + struct ib_ucontext ibucontext; + u32 doorbell; +}; + +struct mana_ib_rwq_ind_table { + struct ib_rwq_ind_table ib_ind_table; +}; + +int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem, + mana_handle_t *gdma_region); + +int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, + mana_handle_t gdma_region); + +struct ib_wq *mana_ib_create_wq(struct ib_pd *pd, + struct ib_wq_init_attr *init_attr, + struct ib_udata *udata); + +int mana_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, + u32 wq_attr_mask, struct ib_udata *udata); + +int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata); + +int mana_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table, + struct ib_rwq_ind_table_init_attr *init_attr, + struct ib_udata *udata); + +int mana_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl); + +struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags); + +struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 iova, int access_flags, + struct ib_udata *udata); + +int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); + +int mana_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr, + struct ib_udata *udata); + +int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); + +int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); + +int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port_id, + struct mana_ib_pd *pd, u32 doorbell_id); +void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd, + u32 port); + +int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata); + +int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); + +int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); +int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); + +int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext, + struct ib_udata *udata); +void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext); + 
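All of the structs in this header embed their ib_* counterpart, and the INIT_RDMA_OBJ_SIZE entries in device.c let the IB core allocate the outer driver struct; callbacks then recover it with container_of(). A small user-space demonstration of the idiom, with stand-in struct names for ib_cq/mana_ib_cq:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_cq_demo { int cqe; };

struct mana_cq_demo {
	struct ib_cq_demo ibcq;		/* embedded IB object */
	unsigned long long gdma_region;
};

int main(void)
{
	struct mana_cq_demo cq = { .ibcq = { .cqe = 64 }, .gdma_region = 7 };
	struct ib_cq_demo *ibcq = &cq.ibcq;	/* what the core passes */
	struct mana_cq_demo *outer =
		container_of(ibcq, struct mana_cq_demo, ibcq);

	printf("cqe=%d gdma_region=%llu\n", outer->ibcq.cqe,
	       outer->gdma_region);
	return 0;
}
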
+int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma); + +int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num, + struct ib_port_immutable *immutable); +int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, + struct ib_udata *uhw); +int mana_ib_query_port(struct ib_device *ibdev, u32 port, + struct ib_port_attr *props); +int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index, + union ib_gid *gid); + +void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext); + +#endif diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c new file mode 100644 index 0000000000000000000000000000000000000000..351207c60eb65da7505aeac271a310a79b346fc6 --- /dev/null +++ b/drivers/infiniband/hw/mana/mr.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022, Microsoft Corporation. All rights reserved. + */ + +#include "mana_ib.h" + +#define VALID_MR_FLAGS \ + (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ) + +static enum gdma_mr_access_flags +mana_ib_verbs_to_gdma_access_flags(int access_flags) +{ + enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ; + + if (access_flags & IB_ACCESS_LOCAL_WRITE) + flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE; + + if (access_flags & IB_ACCESS_REMOTE_WRITE) + flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE; + + if (access_flags & IB_ACCESS_REMOTE_READ) + flags |= GDMA_ACCESS_FLAG_REMOTE_READ; + + return flags; +} + +static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr, + struct gdma_create_mr_params *mr_params) +{ + struct gdma_create_mr_response resp = {}; + struct gdma_create_mr_request req = {}; + struct gdma_dev *mdev = dev->gdma_dev; + struct gdma_context *gc; + int err; + + gc = mdev->gdma_context; + + mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req), + sizeof(resp)); + req.pd_handle = mr_params->pd_handle; + req.mr_type = mr_params->mr_type; + + switch (mr_params->mr_type) { + case GDMA_MR_TYPE_GVA: + req.gva.dma_region_handle = mr_params->gva.dma_region_handle; + req.gva.virtual_address = mr_params->gva.virtual_address; + req.gva.access_flags = mr_params->gva.access_flags; + break; + + default: + ibdev_dbg(&dev->ib_dev, + "invalid param (GDMA_MR_TYPE) passed, type %d\n", + req.mr_type); + return -EINVAL; + } + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + + if (err || resp.hdr.status) { + ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u", err, + resp.hdr.status); + if (!err) + err = -EPROTO; + + return err; + } + + mr->ibmr.lkey = resp.lkey; + mr->ibmr.rkey = resp.rkey; + mr->mr_handle = resp.mr_handle; + + return 0; +} + +static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle) +{ + struct gdma_destroy_mr_response resp = {}; + struct gdma_destroy_mr_request req = {}; + struct gdma_dev *mdev = dev->gdma_dev; + struct gdma_context *gc; + int err; + + gc = mdev->gdma_context; + + mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req), + sizeof(resp)); + + req.mr_handle = mr_handle; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err, + resp.hdr.status); + if (!err) + err = -EPROTO; + return err; + } + + return 0; +} + +struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length, + u64 iova, int access_flags, + struct ib_udata *udata) +{ + struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, 
ibpd); + struct gdma_create_mr_params mr_params = {}; + struct ib_device *ibdev = ibpd->device; + struct mana_ib_dev *dev; + struct mana_ib_mr *mr; + u64 dma_region_handle; + int err; + + dev = container_of(ibdev, struct mana_ib_dev, ib_dev); + + ibdev_dbg(ibdev, + "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x", + start, iova, length, access_flags); + + if (access_flags & ~VALID_MR_FLAGS) + return ERR_PTR(-EINVAL); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + mr->umem = ib_umem_get(ibdev, start, length, access_flags); + if (IS_ERR(mr->umem)) { + err = PTR_ERR(mr->umem); + ibdev_dbg(ibdev, + "Failed to get umem for register user-mr, %d\n", err); + goto err_free; + } + + err = mana_ib_gd_create_dma_region(dev, mr->umem, &dma_region_handle); + if (err) { + ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n", + err); + goto err_umem; + } + + ibdev_dbg(ibdev, + "mana_ib_gd_create_dma_region ret %d gdma_region %llx\n", err, + dma_region_handle); + + mr_params.pd_handle = pd->pd_handle; + mr_params.mr_type = GDMA_MR_TYPE_GVA; + mr_params.gva.dma_region_handle = dma_region_handle; + mr_params.gva.virtual_address = iova; + mr_params.gva.access_flags = + mana_ib_verbs_to_gdma_access_flags(access_flags); + + err = mana_ib_gd_create_mr(dev, mr, &mr_params); + if (err) + goto err_dma_region; + + /* + * There is no need to keep track of dma_region_handle after MR is + * successfully created. The dma_region_handle is tracked in the PF + * as part of the lifecycle of this MR. + */ + + return &mr->ibmr; + +err_dma_region: + mana_gd_destroy_dma_region(dev->gdma_dev->gdma_context, + dma_region_handle); + +err_umem: + ib_umem_release(mr->umem); + +err_free: + kfree(mr); + return ERR_PTR(err); +} + +int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) +{ + struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr); + struct ib_device *ibdev = ibmr->device; + struct mana_ib_dev *dev; + int err; + + dev = container_of(ibdev, struct mana_ib_dev, ib_dev); + + err = mana_ib_gd_destroy_mr(dev, mr->mr_handle); + if (err) + return err; + + if (mr->umem) + ib_umem_release(mr->umem); + + kfree(mr); + + return 0; +} diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c new file mode 100644 index 0000000000000000000000000000000000000000..ea15ec77e3212a16db49f1a1476a07f1e2a7edd2 --- /dev/null +++ b/drivers/infiniband/hw/mana/qp.c @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022, Microsoft Corporation. All rights reserved. 
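The translation helper at the top of mr.c grants local read unconditionally and maps the three supported verbs bits onto their GDMA equivalents, after VALID_MR_FLAGS has rejected everything else (atomics, for instance). A stand-alone model of that mapping with invented bit values:

#include <stdio.h>

enum { IB_LOCAL_WRITE = 1, IB_REMOTE_WRITE = 2, IB_REMOTE_READ = 4 };
enum {
	GDMA_LOCAL_READ = 1, GDMA_LOCAL_WRITE = 2,
	GDMA_REMOTE_READ = 4, GDMA_REMOTE_WRITE = 8,
};
#define VALID_MR_FLAGS (IB_LOCAL_WRITE | IB_REMOTE_WRITE | IB_REMOTE_READ)

static int to_gdma(int access, unsigned int *out)
{
	unsigned int flags = GDMA_LOCAL_READ;	/* always granted */

	if (access & ~VALID_MR_FLAGS)
		return -1;			/* unsupported flag */
	if (access & IB_LOCAL_WRITE)
		flags |= GDMA_LOCAL_WRITE;
	if (access & IB_REMOTE_WRITE)
		flags |= GDMA_REMOTE_WRITE;
	if (access & IB_REMOTE_READ)
		flags |= GDMA_REMOTE_READ;
	*out = flags;
	return 0;
}

int main(void)
{
	unsigned int f;

	if (!to_gdma(IB_LOCAL_WRITE | IB_REMOTE_READ, &f))
		printf("gdma flags: 0x%x\n", f);
	return 0;
}
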
+ */ + +#include "mana_ib.h" + +static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev, + struct net_device *ndev, + mana_handle_t default_rxobj, + mana_handle_t ind_table[], + u32 log_ind_tbl_size, u32 rx_hash_key_len, + u8 *rx_hash_key) +{ + struct mana_port_context *mpc = netdev_priv(ndev); + struct mana_cfg_rx_steer_req *req = NULL; + struct mana_cfg_rx_steer_resp resp = {}; + mana_handle_t *req_indir_tab; + struct gdma_context *gc; + struct gdma_dev *mdev; + u32 req_buf_size; + int i, err; + + mdev = dev->gdma_dev; + gc = mdev->gdma_context; + + req_buf_size = + sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE; + req = kzalloc(req_buf_size, GFP_KERNEL); + if (!req) + return -ENOMEM; + + mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size, + sizeof(resp)); + + req->vport = mpc->port_handle; + req->rx_enable = 1; + req->update_default_rxobj = 1; + req->default_rxobj = default_rxobj; + req->hdr.dev_id = mdev->dev_id; + + /* If there are more than 1 entries in indirection table, enable RSS */ + if (log_ind_tbl_size) + req->rss_enable = true; + + req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE; + req->indir_tab_offset = sizeof(*req); + req->update_indir_tab = true; + + req_indir_tab = (mana_handle_t *)(req + 1); + /* The ind table passed to the hardware must have + * MANA_INDIRECT_TABLE_SIZE entries. Adjust the verb + * ind_table to MANA_INDIRECT_TABLE_SIZE if required + */ + ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size); + for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) { + req_indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)]; + ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i, + req_indir_tab[i]); + } + + req->update_hashkey = true; + if (rx_hash_key_len) + memcpy(req->hashkey, rx_hash_key, rx_hash_key_len); + else + netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE); + + ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n", + req->vport, default_rxobj); + + err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp); + if (err) { + netdev_err(ndev, "Failed to configure vPort RX: %d\n", err); + goto out; + } + + if (resp.hdr.status) { + netdev_err(ndev, "vPort RX configuration failed: 0x%x\n", + resp.hdr.status); + err = -EPROTO; + goto out; + } + + netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n", + mpc->port_handle, log_ind_tbl_size); + +out: + kfree(req); + return err; +} + +static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd, + struct ib_qp_init_attr *attr, + struct ib_udata *udata) +{ + struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp); + struct mana_ib_dev *mdev = + container_of(pd->device, struct mana_ib_dev, ib_dev); + struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl; + struct mana_ib_create_qp_rss_resp resp = {}; + struct mana_ib_create_qp_rss ucmd = {}; + struct gdma_dev *gd = mdev->gdma_dev; + mana_handle_t *mana_ind_table; + struct mana_port_context *mpc; + struct mana_context *mc; + struct net_device *ndev; + struct mana_ib_cq *cq; + struct mana_ib_wq *wq; + unsigned int ind_tbl_size; + struct ib_cq *ibcq; + struct ib_wq *ibwq; + int i = 0; + u32 port; + int ret; + + mc = gd->driver_data; + + if (!udata || udata->inlen < sizeof(ucmd)) + return -EINVAL; + + ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); + if (ret) { + ibdev_dbg(&mdev->ib_dev, + "Failed copy from udata for create rss-qp, err %d\n", + ret); + return ret; + } + + if (attr->cap.max_recv_wr > MAX_SEND_BUFFERS_PER_QUEUE) { + 
ibdev_dbg(&mdev->ib_dev, + "Requested max_recv_wr %d exceeding limit\n", + attr->cap.max_recv_wr); + return -EINVAL; + } + + if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) { + ibdev_dbg(&mdev->ib_dev, + "Requested max_recv_sge %d exceeding limit\n", + attr->cap.max_recv_sge); + return -EINVAL; + } + + ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size; + if (ind_tbl_size > MANA_INDIRECT_TABLE_SIZE) { + ibdev_dbg(&mdev->ib_dev, + "Indirect table size %d exceeding limit\n", + ind_tbl_size); + return -EINVAL; + } + + if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) { + ibdev_dbg(&mdev->ib_dev, + "RX Hash function is not supported, %d\n", + ucmd.rx_hash_function); + return -EINVAL; + } + + /* IB ports start with 1, MANA start with 0 */ + port = ucmd.port; + if (port < 1 || port > mc->num_ports) { + ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n", + port); + return -EINVAL; + } + ndev = mc->ports[port - 1]; + mpc = netdev_priv(ndev); + + ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n", + ucmd.rx_hash_function, port); + + mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t), + GFP_KERNEL); + if (!mana_ind_table) { + ret = -ENOMEM; + goto fail; + } + + qp->port = port; + + for (i = 0; i < ind_tbl_size; i++) { + struct mana_obj_spec wq_spec = {}; + struct mana_obj_spec cq_spec = {}; + + ibwq = ind_tbl->ind_tbl[i]; + wq = container_of(ibwq, struct mana_ib_wq, ibwq); + + ibcq = ibwq->cq; + cq = container_of(ibcq, struct mana_ib_cq, ibcq); + + wq_spec.gdma_region = wq->gdma_region; + wq_spec.queue_size = wq->wq_buf_size; + + cq_spec.gdma_region = cq->gdma_region; + cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE; + cq_spec.modr_ctx_id = 0; + cq_spec.attached_eq = GDMA_CQ_NO_EQ; + + ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ, + &wq_spec, &cq_spec, &wq->rx_object); + if (ret) + goto fail; + + /* The GDMA regions are now owned by the WQ object */ + wq->gdma_region = GDMA_INVALID_DMA_REGION; + cq->gdma_region = GDMA_INVALID_DMA_REGION; + + wq->id = wq_spec.queue_index; + cq->id = cq_spec.queue_index; + + ibdev_dbg(&mdev->ib_dev, + "ret %d rx_object 0x%llx wq id %llu cq id %llu\n", + ret, wq->rx_object, wq->id, cq->id); + + resp.entries[i].cqid = cq->id; + resp.entries[i].wqid = wq->id; + + mana_ind_table[i] = wq->rx_object; + } + resp.num_entries = i; + + ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object, + mana_ind_table, + ind_tbl->log_ind_tbl_size, + ucmd.rx_hash_key_len, + ucmd.rx_hash_key); + if (ret) + goto fail; + + ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (ret) { + ibdev_dbg(&mdev->ib_dev, + "Failed to copy to udata create rss-qp, %d\n", + ret); + goto fail; + } + + kfree(mana_ind_table); + + return 0; + +fail: + while (i-- > 0) { + ibwq = ind_tbl->ind_tbl[i]; + wq = container_of(ibwq, struct mana_ib_wq, ibwq); + mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object); + } + + kfree(mana_ind_table); + + return ret; +} + +static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd, + struct ib_qp_init_attr *attr, + struct ib_udata *udata) +{ + struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd); + struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp); + struct mana_ib_dev *mdev = + container_of(ibpd->device, struct mana_ib_dev, ib_dev); + struct mana_ib_cq *send_cq = + container_of(attr->send_cq, struct mana_ib_cq, ibcq); + struct mana_ib_ucontext *mana_ucontext = + rdma_udata_to_drv_context(udata, struct mana_ib_ucontext, + ibucontext); + struct mana_ib_create_qp_resp resp = {}; + 
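The RSS path above always programs a full hardware table: the verbs indirection table, which may be smaller, is tiled into it modulo its own size. A runnable model of that expansion, treating 64 as a stand-in for MANA_INDIRECT_TABLE_SIZE:

#include <stdio.h>

#define MANA_INDIRECT_TABLE_SIZE 64	/* stand-in for the real constant */

int main(void)
{
	unsigned int log_ind_tbl_size = 2;	/* verbs table: 4 WQs */
	unsigned int n = 1u << log_ind_tbl_size;
	unsigned long long ind_table[4] = { 100, 101, 102, 103 };
	int i;

	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
		printf("slot %2d -> rx_object %llu\n", i, ind_table[i % n]);
	return 0;
}
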
struct gdma_dev *gd = mdev->gdma_dev; + struct mana_ib_create_qp ucmd = {}; + struct mana_obj_spec wq_spec = {}; + struct mana_obj_spec cq_spec = {}; + struct mana_port_context *mpc; + struct mana_context *mc; + struct net_device *ndev; + struct ib_umem *umem; + int err; + u32 port; + + mc = gd->driver_data; + + if (!mana_ucontext || udata->inlen < sizeof(ucmd)) + return -EINVAL; + + err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); + if (err) { + ibdev_dbg(&mdev->ib_dev, + "Failed to copy from udata create qp-raw, %d\n", err); + return err; + } + + /* IB ports start with 1, MANA Ethernet ports start with 0 */ + port = ucmd.port; + if (ucmd.port > mc->num_ports) + return -EINVAL; + + if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) { + ibdev_dbg(&mdev->ib_dev, + "Requested max_send_wr %d exceeding limit\n", + attr->cap.max_send_wr); + return -EINVAL; + } + + if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) { + ibdev_dbg(&mdev->ib_dev, + "Requested max_send_sge %d exceeding limit\n", + attr->cap.max_send_sge); + return -EINVAL; + } + + ndev = mc->ports[port - 1]; + mpc = netdev_priv(ndev); + ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc); + + err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext->doorbell); + if (err) + return -ENODEV; + + qp->port = port; + + ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n", + ucmd.sq_buf_addr, ucmd.port); + + umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size, + IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(umem)) { + err = PTR_ERR(umem); + ibdev_dbg(&mdev->ib_dev, + "Failed to get umem for create qp-raw, err %d\n", + err); + goto err_free_vport; + } + qp->sq_umem = umem; + + err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem, + &qp->sq_gdma_region); + if (err) { + ibdev_dbg(&mdev->ib_dev, + "Failed to create dma region for create qp-raw, %d\n", + err); + goto err_release_umem; + } + + ibdev_dbg(&mdev->ib_dev, + "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n", + err, qp->sq_gdma_region); + + /* Create a WQ on the same port handle used by the Ethernet */ + wq_spec.gdma_region = qp->sq_gdma_region; + wq_spec.queue_size = ucmd.sq_buf_size; + + cq_spec.gdma_region = send_cq->gdma_region; + cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE; + cq_spec.modr_ctx_id = 0; + cq_spec.attached_eq = GDMA_CQ_NO_EQ; + + err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec, + &cq_spec, &qp->tx_object); + if (err) { + ibdev_dbg(&mdev->ib_dev, + "Failed to create wq for create raw-qp, err %d\n", + err); + goto err_destroy_dma_region; + } + + /* The GDMA regions are now owned by the WQ object */ + qp->sq_gdma_region = GDMA_INVALID_DMA_REGION; + send_cq->gdma_region = GDMA_INVALID_DMA_REGION; + + qp->sq_id = wq_spec.queue_index; + send_cq->id = cq_spec.queue_index; + + ibdev_dbg(&mdev->ib_dev, + "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err, + qp->tx_object, qp->sq_id, send_cq->id); + + resp.sqid = qp->sq_id; + resp.cqid = send_cq->id; + resp.tx_vp_offset = pd->tx_vp_offset; + + err = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (err) { + ibdev_dbg(&mdev->ib_dev, + "Failed copy udata for create qp-raw, %d\n", + err); + goto err_destroy_wq_obj; + } + + return 0; + +err_destroy_wq_obj: + mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object); + +err_destroy_dma_region: + mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region); + +err_release_umem: + ib_umem_release(umem); + +err_free_vport: + mana_ib_uncfg_vport(mdev, pd, port - 1); + + return err; +} + +int 
mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr, + struct ib_udata *udata) +{ + switch (attr->qp_type) { + case IB_QPT_RAW_PACKET: + /* When rwq_ind_tbl is used, it's for creating WQs for RSS */ + if (attr->rwq_ind_tbl) + return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr, + udata); + + return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata); + default: + /* Creating QP other than IB_QPT_RAW_PACKET is not supported */ + ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n", + attr->qp_type); + } + + return -EINVAL; +} + +int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + /* modify_qp is not supported by this version of the driver */ + return -EOPNOTSUPP; +} + +static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp, + struct ib_rwq_ind_table *ind_tbl, + struct ib_udata *udata) +{ + struct mana_ib_dev *mdev = + container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev); + struct gdma_dev *gd = mdev->gdma_dev; + struct mana_port_context *mpc; + struct mana_context *mc; + struct net_device *ndev; + struct mana_ib_wq *wq; + struct ib_wq *ibwq; + int i; + + mc = gd->driver_data; + ndev = mc->ports[qp->port - 1]; + mpc = netdev_priv(ndev); + + for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) { + ibwq = ind_tbl->ind_tbl[i]; + wq = container_of(ibwq, struct mana_ib_wq, ibwq); + ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n", + wq->rx_object); + mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object); + } + + return 0; +} + +static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata) +{ + struct mana_ib_dev *mdev = + container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev); + struct gdma_dev *gd = mdev->gdma_dev; + struct ib_pd *ibpd = qp->ibqp.pd; + struct mana_port_context *mpc; + struct mana_context *mc; + struct net_device *ndev; + struct mana_ib_pd *pd; + + mc = gd->driver_data; + ndev = mc->ports[qp->port - 1]; + mpc = netdev_priv(ndev); + pd = container_of(ibpd, struct mana_ib_pd, ibpd); + + mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object); + + if (qp->sq_umem) { + mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region); + ib_umem_release(qp->sq_umem); + } + + mana_ib_uncfg_vport(mdev, pd, qp->port - 1); + + return 0; +} + +int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) +{ + struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp); + + switch (ibqp->qp_type) { + case IB_QPT_RAW_PACKET: + if (ibqp->rwq_ind_tbl) + return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl, + udata); + + return mana_ib_destroy_qp_raw(qp, udata); + + default: + ibdev_dbg(ibqp->device, "Unexpected QP type %u\n", + ibqp->qp_type); + } + + return -ENOENT; +} diff --git a/drivers/infiniband/hw/mana/wq.c b/drivers/infiniband/hw/mana/wq.c new file mode 100644 index 0000000000000000000000000000000000000000..372d361510e0ca0ea9060f5b53d1a517a72cb178 --- /dev/null +++ b/drivers/infiniband/hw/mana/wq.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022, Microsoft Corporation. All rights reserved. 
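Since raw packet is the only QP type the dispatch above accepts, a verbs consumer would reach mana_ib_create_qp_raw() roughly as follows. Device and CQ setup are elided, the capability values are only examples, and creating a raw-packet QP requires CAP_NET_RAW:

#include <infiniband/verbs.h>
#include <stdio.h>

static struct ibv_qp *create_raw_qp(struct ibv_pd *pd, struct ibv_cq *cq)
{
	struct ibv_qp_init_attr attr = {
		.qp_type = IBV_QPT_RAW_PACKET,
		.send_cq = cq,
		.recv_cq = cq,
		.cap = {
			.max_send_wr = 64,	/* <= device max_qp_wr */
			.max_recv_wr = 64,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};
	struct ibv_qp *qp = ibv_create_qp(pd, &attr);

	if (!qp)
		perror("ibv_create_qp");
	return qp;
}
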
+ */ + +#include "mana_ib.h" + +struct ib_wq *mana_ib_create_wq(struct ib_pd *pd, + struct ib_wq_init_attr *init_attr, + struct ib_udata *udata) +{ + struct mana_ib_dev *mdev = + container_of(pd->device, struct mana_ib_dev, ib_dev); + struct mana_ib_create_wq ucmd = {}; + struct mana_ib_wq *wq; + struct ib_umem *umem; + int err; + + if (udata->inlen < sizeof(ucmd)) + return ERR_PTR(-EINVAL); + + err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); + if (err) { + ibdev_dbg(&mdev->ib_dev, + "Failed to copy from udata for create wq, %d\n", err); + return ERR_PTR(err); + } + + wq = kzalloc(sizeof(*wq), GFP_KERNEL); + if (!wq) + return ERR_PTR(-ENOMEM); + + ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr); + + umem = ib_umem_get(pd->device, ucmd.wq_buf_addr, ucmd.wq_buf_size, + IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(umem)) { + err = PTR_ERR(umem); + ibdev_dbg(&mdev->ib_dev, + "Failed to get umem for create wq, err %d\n", err); + goto err_free_wq; + } + + wq->umem = umem; + wq->wqe = init_attr->max_wr; + wq->wq_buf_size = ucmd.wq_buf_size; + wq->rx_object = INVALID_MANA_HANDLE; + + err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq->gdma_region); + if (err) { + ibdev_dbg(&mdev->ib_dev, + "Failed to create dma region for create wq, %d\n", + err); + goto err_release_umem; + } + + ibdev_dbg(&mdev->ib_dev, + "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n", + err, wq->gdma_region); + + /* WQ ID is returned at wq_create time, doesn't know the value yet */ + + return &wq->ibwq; + +err_release_umem: + ib_umem_release(umem); + +err_free_wq: + kfree(wq); + + return ERR_PTR(err); +} + +int mana_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, + u32 wq_attr_mask, struct ib_udata *udata) +{ + /* modify_wq is not supported by this version of the driver */ + return -EOPNOTSUPP; +} + +int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata) +{ + struct mana_ib_wq *wq = container_of(ibwq, struct mana_ib_wq, ibwq); + struct ib_device *ib_dev = ibwq->device; + struct mana_ib_dev *mdev; + + mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev); + + mana_ib_gd_destroy_dma_region(mdev, wq->gdma_region); + ib_umem_release(wq->umem); + + kfree(wq); + + return 0; +} + +int mana_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table, + struct ib_rwq_ind_table_init_attr *init_attr, + struct ib_udata *udata) +{ + /* + * There is no additional data in ind_table to be maintained by this + * driver, do nothing + */ + return 0; +} + +int mana_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl) +{ + /* + * There is no additional data in ind_table to be maintained by this + * driver, do nothing + */ + return 0; +} diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ba47874f90d381695f06d732bc1e520400c7c647..dceebcd885bbd05c228e79408565a7b466f56e02 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -144,8 +144,7 @@ static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, } } } - if (dev) - dev_hold(dev); + dev_hold(dev); rcu_read_unlock(); return dev; @@ -1307,8 +1306,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, spin_lock_bh(&mdev->iboe.lock); ndev = mdev->iboe.netdevs[mqp->port - 1]; - if (ndev) - dev_hold(ndev); + dev_hold(ndev); spin_unlock_bh(&mdev->iboe.lock); if (ndev) { @@ -1955,11 +1953,9 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) if (ge) { 
spin_lock_bh(&mdev->iboe.lock); ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL; - if (ndev) - dev_hold(ndev); + dev_hold(ndev); spin_unlock_bh(&mdev->iboe.lock); - if (ndev) - dev_put(ndev); + dev_put(ndev); list_del(&ge->list); kfree(ge); } else diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index be189e0525de6c33549f68b727c5c95317455d80..efc9e4a6df04a2b623440c4b7d00d1da08d230d2 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -267,17 +267,20 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; } -static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe) +static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe, + struct ib_wc *wc, const char *level) { - mlx5_ib_warn(dev, "dump error cqe\n"); - mlx5_dump_err_cqe(dev->mdev, cqe); + mlx5_ib_log(level, dev, "WC error: %d, Message: %s\n", wc->status, + ib_wc_status_msg(wc->status)); + print_hex_dump(level, "cqe_dump: ", DUMP_PREFIX_OFFSET, 16, 1, + cqe, sizeof(*cqe), false); } static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe, struct ib_wc *wc) { - int dump = 1; + const char *dump = KERN_WARNING; switch (cqe->syndrome) { case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR: @@ -287,10 +290,11 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, wc->status = IB_WC_LOC_QP_OP_ERR; break; case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR: + dump = KERN_DEBUG; wc->status = IB_WC_LOC_PROT_ERR; break; case MLX5_CQE_SYNDROME_WR_FLUSH_ERR: - dump = 0; + dump = NULL; wc->status = IB_WC_WR_FLUSH_ERR; break; case MLX5_CQE_SYNDROME_MW_BIND_ERR: @@ -306,18 +310,20 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, wc->status = IB_WC_REM_INV_REQ_ERR; break; case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR: + dump = KERN_DEBUG; wc->status = IB_WC_REM_ACCESS_ERR; break; case MLX5_CQE_SYNDROME_REMOTE_OP_ERR: + dump = KERN_DEBUG; wc->status = IB_WC_REM_OP_ERR; break; case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR: + dump = NULL; wc->status = IB_WC_RETRY_EXC_ERR; - dump = 0; break; case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR: + dump = NULL; wc->status = IB_WC_RNR_RETRY_EXC_ERR; - dump = 0; break; case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR: wc->status = IB_WC_REM_ABORT_ERR; @@ -328,11 +334,8 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, } wc->vendor_err = cqe->vendor_err_synd; - if (dump) { - mlx5_ib_warn(dev, "WC error: %d, Message: %s\n", wc->status, - ib_wc_status_msg(wc->status)); - dump_cqe(dev, cqe); - } + if (dump) + dump_cqe(dev, cqe, wc, dump); } static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c index 490ec308e3098f094c2cbb85ab42c30ba0b142fb..3008632a6c2064a81c5c8d5ded5e2f98385319de 100644 --- a/drivers/infiniband/hw/mlx5/fs.c +++ b/drivers/infiniband/hw/mlx5/fs.c @@ -127,7 +127,6 @@ static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask) } #define LAST_ETH_FIELD vlan_tag -#define LAST_IB_FIELD sl #define LAST_IPV4_FIELD tos #define LAST_IPV6_FIELD traffic_class #define LAST_TCP_UDP_FIELD src_port diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 4a7f7064bd0eb5221f49113cd8e52ed1b18af734..8b91babdd4c084f5328cddd6c2d26ff081c0b9a5 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -38,6 +38,10 @@ dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid 
%d): " format, __func__, \ __LINE__, current->pid, ##arg) +#define mlx5_ib_log(lvl, _dev, format, arg...) \ + dev_printk(lvl, &(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##arg) + #define MLX5_IB_DEFAULT_UIDX 0xffffff #define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 410cc5fd25239db3a784360720048080ecc7d88b..053fe946e45aef44d62a8fe137a04beb258493cf 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1929,10 +1929,8 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4); in = kzalloc(inlen, GFP_KERNEL); - if (!in) { - err = -ENOMEM; - goto free; - } + if (!in) + return -ENOMEM; mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index ba0c3e4c07d85cda8c517afbb9304b262da26989..ecdfeff3d44f364fc728b018fdb0b1b3da6d7984 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -479,7 +479,7 @@ static irqreturn_t qedr_irq_handler(int irq, void *handle) /* The CQ's CNQ notification counter is checked before * destroying the CQ in a busy-wait loop that waits for all of * the CQ's CNQ interrupts to be processed. It is increased - * here, only after the completion handler, to ensure that the + * here, only after the completion handler, to ensure that * the handler is not running when the CQ is destroyed. */ cq->cnq_notif++; diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index 07386117f21ad8ffb822471eaa90e4fe6d35b37a..277769fa97452c5dd5760d378a54126a0d3441c1 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -799,12 +799,9 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg, hwerrs &= ~TXE_PIO_PARITY; } - if (!hwerrs) { - static u32 freeze_cnt; - - freeze_cnt++; + if (!hwerrs) qib_6120_clear_freeze(dd); - } else + else isfatal = 1; } diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c index 6a8148851f21d32fbaf9b78a9b15fa0cc4694595..1325110237cd9ae4cea2c752dc6999dfdd3f1cbe 100644 --- a/drivers/infiniband/hw/qib/qib_tx.c +++ b/drivers/infiniband/hw/qib/qib_tx.c @@ -82,7 +82,6 @@ int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd) struct qib_devdata *dd = rcd->dd; unsigned i; unsigned last; - unsigned n = 0; last = rcd->pio_base + rcd->piocnt; /* @@ -102,10 +101,8 @@ int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd) } spin_lock_irq(&dd->pioavail_lock); for (i = rcd->pio_base; i < last; i++) { - if (__test_and_clear_bit(i, dd->pio_need_disarm)) { - n++; + if (__test_and_clear_bit(i, dd->pio_need_disarm)) dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i)); - } } spin_unlock_irq(&dd->pioavail_lock); return 0; diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index f4b5f05058e4c6713a9bfb8a2afff4e106bf4497..f693bc753b6b1de8a45cd1e379ef98ea9342d44e 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -110,7 +110,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages, for (got = 0; got < num_pages; got += ret) { ret = pin_user_pages(start_page + got * PAGE_SIZE, num_pages - got, - FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE, + FOLL_LONGTERM | FOLL_WRITE, 
p + got, NULL); if (ret < 0) { mmap_read_unlock(current->mm); diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c index bf2f30d67949dc386404a632c1ec28834d7269ff..9fe03d6ffac1a7890a34a2827d485516ef29637b 100644 --- a/drivers/infiniband/hw/qib/qib_user_sdma.c +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c @@ -851,7 +851,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd, } /* - * This assignment is a bit strange. it's because the + * This assignment is a bit strange. it's because * the pbc counts the number of 32 bit words in the full * packet _except_ the first word of the pbc itself... */ diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 67923ced6e2d193426d1ca720d1b5c55161df555..c301b3be9f303dde94229ebf09e3d5fc96976fc6 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -85,6 +85,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, int dmasync, struct usnic_uiom_reg *uiomr) { struct list_head *chunk_list = &uiomr->chunk_list; + unsigned int gup_flags = FOLL_LONGTERM; struct page **page_list; struct scatterlist *sg; struct usnic_uiom_chunk *chunk; @@ -96,7 +97,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, int off; int i; dma_addr_t pa; - unsigned int gup_flags; struct mm_struct *mm; /* @@ -131,8 +131,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, goto out; } - gup_flags = FOLL_WRITE; - gup_flags |= (writable) ? 0 : FOLL_FORCE; + if (writable) + gup_flags |= FOLL_WRITE; cur_base = addr & PAGE_MASK; ret = 0; @@ -140,8 +140,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, ret = pin_user_pages(cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof(struct page *)), - gup_flags | FOLL_LONGTERM, - page_list, NULL); + gup_flags, page_list, NULL); if (ret < 0) goto out; diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 51daac5c4feb75b847bb06f8a32818ebd941ac73..136c2efe34660a4fe7eb432c2b46e28aa3de6e4a 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -187,14 +187,14 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev) exists = rxe_get_dev_from_net(ndev); if (exists) { ib_device_put(&exists->ib_dev); - pr_err("already configured on %s\n", ndev->name); + rxe_dbg(exists, "already configured on %s\n", ndev->name); err = -EEXIST; goto err; } err = rxe_net_add(ibdev_name, ndev); if (err) { - pr_err("failed to add %s\n", ndev->name); + rxe_dbg(exists, "failed to add %s\n", ndev->name); goto err; } err: diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index 30fbdf3bc76a34dc3cfa2e21279ebb88226adbee..ab334900fcc3d7fc9bda4f7cd05fddbbc2ecfb82 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h @@ -38,6 +38,25 @@ #define RXE_ROCE_V2_SPORT (0xc000) +#define rxe_dbg(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \ + "%s: " fmt, __func__, ##__VA_ARGS__) +#define rxe_dbg_uc(uc, fmt, ...) ibdev_dbg((uc)->ibuc.device, \ + "uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_dbg_pd(pd, fmt, ...) ibdev_dbg((pd)->ibpd.device, \ + "pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_dbg_ah(ah, fmt, ...) 
ibdev_dbg((ah)->ibah.device, \ + "ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_dbg_srq(srq, fmt, ...) ibdev_dbg((srq)->ibsrq.device, \ + "srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_dbg_qp(qp, fmt, ...) ibdev_dbg((qp)->ibqp.device, \ + "qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_dbg_cq(cq, fmt, ...) ibdev_dbg((cq)->ibcq.device, \ + "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_dbg_mr(mr, fmt, ...) ibdev_dbg((mr)->ibmr.device, \ + "mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \ + "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__) + void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name); diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c index 3b05314ca739ec91c070829ab6123bd01a7ba986..889d7adbd45504af87543b41c545aa4e6423b92d 100644 --- a/drivers/infiniband/sw/rxe/rxe_av.c +++ b/drivers/infiniband/sw/rxe/rxe_av.c @@ -14,26 +14,45 @@ void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av) memcpy(av->dmac, attr->roce.dmac, ETH_ALEN); } -int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr) +static int chk_attr(void *obj, struct rdma_ah_attr *attr, bool obj_is_ah) { const struct ib_global_route *grh = rdma_ah_read_grh(attr); struct rxe_port *port; + struct rxe_dev *rxe; + struct rxe_qp *qp; + struct rxe_ah *ah; int type; + if (obj_is_ah) { + ah = obj; + rxe = to_rdev(ah->ibah.device); + } else { + qp = obj; + rxe = to_rdev(qp->ibqp.device); + } + port = &rxe->port; if (rdma_ah_get_ah_flags(attr) & IB_AH_GRH) { if (grh->sgid_index > port->attr.gid_tbl_len) { - pr_warn("invalid sgid index = %d\n", - grh->sgid_index); + if (obj_is_ah) + rxe_dbg_ah(ah, "invalid sgid index = %d\n", + grh->sgid_index); + else + rxe_dbg_qp(qp, "invalid sgid index = %d\n", + grh->sgid_index); return -EINVAL; } type = rdma_gid_attr_network_type(grh->sgid_attr); if (type < RDMA_NETWORK_IPV4 || type > RDMA_NETWORK_IPV6) { - pr_warn("invalid network type for rdma_rxe = %d\n", - type); + if (obj_is_ah) + rxe_dbg_ah(ah, "invalid network type for rdma_rxe = %d\n", + type); + else + rxe_dbg_qp(qp, "invalid network type for rdma_rxe = %d\n", + type); return -EINVAL; } } @@ -41,6 +60,16 @@ int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr) return 0; } +int rxe_av_chk_attr(struct rxe_qp *qp, struct rdma_ah_attr *attr) +{ + return chk_attr(qp, attr, false); +} + +int rxe_ah_chk_attr(struct rxe_ah *ah, struct rdma_ah_attr *attr) +{ + return chk_attr(ah, attr, true); +} + void rxe_av_from_attr(u8 port_num, struct rxe_av *av, struct rdma_ah_attr *attr) { @@ -121,12 +150,12 @@ struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp) /* only new user provider or kernel client */ ah = rxe_pool_get_index(&pkt->rxe->ah_pool, ah_num); if (!ah) { - pr_warn("Unable to find AH matching ah_num\n"); + rxe_dbg_qp(pkt->qp, "Unable to find AH matching ah_num\n"); return NULL; } if (rxe_ah_pd(ah) != pkt->qp->pd) { - pr_warn("PDs don't match for AH and QP\n"); + rxe_dbg_qp(pkt->qp, "PDs don't match for AH and QP\n"); rxe_put(ah); return NULL; } diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index fb0c008af78cc2db16808165d3548567405df035..20737fec392bf7efae659316c4afbbfaddfab998 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ 
b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -104,6 +104,8 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode) case IB_WR_LOCAL_INV: return IB_WC_LOCAL_INV; case IB_WR_REG_MR: return IB_WC_REG_MR; case IB_WR_BIND_MW: return IB_WC_BIND_MW; + case IB_WR_ATOMIC_WRITE: return IB_WC_ATOMIC_WRITE; + case IB_WR_FLUSH: return IB_WC_FLUSH; default: return 0xff; @@ -114,11 +116,11 @@ void retransmit_timer(struct timer_list *t) { struct rxe_qp *qp = from_timer(qp, t, retrans_timer); - pr_debug("%s: fired for qp#%d\n", __func__, qp->elem.index); + rxe_dbg_qp(qp, "retransmit timer fired\n"); if (qp->valid) { qp->comp.timeout = 1; - rxe_run_task(&qp->comp.task, 1); + rxe_sched_task(&qp->comp.task); } } @@ -132,7 +134,10 @@ void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) if (must_sched != 0) rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED); - rxe_run_task(&qp->comp.task, must_sched); + if (must_sched) + rxe_sched_task(&qp->comp.task); + else + rxe_run_task(&qp->comp.task); } static inline enum comp_state get_wqe(struct rxe_qp *qp, @@ -200,6 +205,10 @@ static inline enum comp_state check_psn(struct rxe_qp *qp, */ if (pkt->psn == wqe->last_psn) return COMPST_COMP_ACK; + else if (pkt->opcode == IB_OPCODE_RC_ACKNOWLEDGE && + (qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST || + qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE)) + return COMPST_CHECK_ACK; else return COMPST_DONE; } else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) { @@ -228,6 +237,10 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST: case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE: + /* Check NAK code to handle a remote error */ + if (pkt->opcode == IB_OPCODE_RC_ACKNOWLEDGE) + break; + if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE && pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) { /* read retries of partial data may restart from @@ -258,12 +271,16 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, if ((syn & AETH_TYPE_MASK) != AETH_ACK) return COMPST_ERROR; + if (wqe->wr.opcode == IB_WR_ATOMIC_WRITE) + return COMPST_WRITE_SEND; + fallthrough; /* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an AETH) */ case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE: if (wqe->wr.opcode != IB_WR_RDMA_READ && - wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) { + wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV && + wqe->wr.opcode != IB_WR_FLUSH) { wqe->status = IB_WC_FATAL_ERR; return COMPST_ERROR; } @@ -305,7 +322,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, qp->comp.psn = pkt->psn; if (qp->req.wait_psn) { qp->req.wait_psn = 0; - rxe_run_task(&qp->req.task, 0); + rxe_run_task(&qp->req.task); } } return COMPST_ERROR_RETRY; @@ -323,7 +340,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, return COMPST_ERROR; default: - pr_warn("unexpected nak %x\n", syn); + rxe_dbg_qp(qp, "unexpected nak %x\n", syn); wqe->status = IB_WC_REM_OP_ERR; return COMPST_ERROR; } @@ -334,7 +351,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, break; default: - pr_warn("unexpected opcode\n"); + rxe_dbg_qp(qp, "unexpected opcode\n"); } return COMPST_ERROR; @@ -452,7 +469,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe) */ if (qp->req.wait_fence) { qp->req.wait_fence = 0; - rxe_run_task(&qp->req.task, 0); + rxe_run_task(&qp->req.task); } } @@ -466,7 +483,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, if (qp->req.need_rd_atomic) { qp->comp.timeout_retry = 
0; qp->req.need_rd_atomic = 0; - rxe_run_task(&qp->req.task, 0); + rxe_run_task(&qp->req.task); } } @@ -512,7 +529,7 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp, if (qp->req.wait_psn) { qp->req.wait_psn = 0; - rxe_run_task(&qp->req.task, 1); + rxe_sched_task(&qp->req.task); } } @@ -587,8 +604,7 @@ int rxe_completer(void *arg) state = COMPST_GET_ACK; while (1) { - pr_debug("qp#%d state = %s\n", qp_num(qp), - comp_state_name[state]); + rxe_dbg_qp(qp, "state = %s\n", comp_state_name[state]); switch (state) { case COMPST_GET_ACK: skb = skb_dequeue(&qp->resp_pkts); @@ -646,7 +662,7 @@ int rxe_completer(void *arg) if (qp->req.wait_psn) { qp->req.wait_psn = 0; - rxe_run_task(&qp->req.task, 1); + rxe_sched_task(&qp->req.task); } state = COMPST_DONE; @@ -714,7 +730,7 @@ int rxe_completer(void *arg) RXE_CNT_COMP_RETRY); qp->req.need_retry = 1; qp->comp.started_retry = 1; - rxe_run_task(&qp->req.task, 0); + rxe_run_task(&qp->req.task); } goto done; @@ -735,8 +751,7 @@ int rxe_completer(void *arg) * rnr timer has fired */ qp->req.wait_for_rnr_timer = 1; - pr_debug("qp#%d set rnr nak timer\n", - qp_num(qp)); + rxe_dbg_qp(qp, "set rnr nak timer\n"); mod_timer(&qp->rnr_nak_timer, jiffies + rnrnak_jiffies(aeth_syn(pkt) & ~AETH_TYPE_MASK)); diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c index b1a0ab3cd4bd1864a151cd5add6aee422ec3d13e..1df186534639aa507f100801fca835ed6ec59852 100644 --- a/drivers/infiniband/sw/rxe/rxe_cq.c +++ b/drivers/infiniband/sw/rxe/rxe_cq.c @@ -14,12 +14,12 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, int count; if (cqe <= 0) { - pr_warn("cqe(%d) <= 0\n", cqe); + rxe_dbg(rxe, "cqe(%d) <= 0\n", cqe); goto err1; } if (cqe > rxe->attr.max_cqe) { - pr_debug("cqe(%d) > max_cqe(%d)\n", + rxe_dbg(rxe, "cqe(%d) > max_cqe(%d)\n", cqe, rxe->attr.max_cqe); goto err1; } @@ -27,7 +27,7 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, if (cq) { count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT); if (cqe < count) { - pr_debug("cqe(%d) < current # elements in queue (%d)", + rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)", cqe, count); goto err1; } @@ -65,7 +65,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, cq->queue = rxe_queue_init(rxe, &cqe, sizeof(struct rxe_cqe), type); if (!cq->queue) { - pr_warn("unable to create cq\n"); + rxe_dbg(rxe, "unable to create cq\n"); return -ENOMEM; } diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h index e432f9e37795ef703206d091b19bbf78cf1b5cc1..46f82b27fcd2f540ed32d7495f530164b2d2e452 100644 --- a/drivers/infiniband/sw/rxe/rxe_hdr.h +++ b/drivers/infiniband/sw/rxe/rxe_hdr.h @@ -607,6 +607,52 @@ static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len) rxe_opcode[pkt->opcode].offset[RXE_RETH], len); } +/****************************************************************************** + * FLUSH Extended Transport Header + ******************************************************************************/ + +struct rxe_feth { + __be32 bits; +}; + +#define FETH_PLT_MASK (0x0000000f) /* bits 3-0 */ +#define FETH_SEL_MASK (0x00000030) /* bits 5-4 */ +#define FETH_SEL_SHIFT (4U) + +static inline u32 __feth_plt(void *arg) +{ + struct rxe_feth *feth = arg; + + return be32_to_cpu(feth->bits) & FETH_PLT_MASK; +} + +static inline u32 __feth_sel(void *arg) +{ + struct rxe_feth *feth = arg; + + return (be32_to_cpu(feth->bits) & FETH_SEL_MASK) >> FETH_SEL_SHIFT; +} + +static inline u32 feth_plt(struct 
rxe_pkt_info *pkt) +{ + return __feth_plt(pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_FETH]); +} + +static inline u32 feth_sel(struct rxe_pkt_info *pkt) +{ + return __feth_sel(pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_FETH]); +} + +static inline void feth_init(struct rxe_pkt_info *pkt, u8 type, u8 level) +{ + struct rxe_feth *feth = (struct rxe_feth *) + (pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_FETH]); + u32 bits = ((level << FETH_SEL_SHIFT) & FETH_SEL_MASK) | + (type & FETH_PLT_MASK); + + feth->bits = cpu_to_be32(bits); +} + /****************************************************************************** * Atomic Extended Transport Header ******************************************************************************/ @@ -742,7 +788,6 @@ enum aeth_syndrome { AETH_NAK_INVALID_REQ = 0x61, AETH_NAK_REM_ACC_ERR = 0x62, AETH_NAK_REM_OP_ERR = 0x63, - AETH_NAK_INV_RD_REQ = 0x64, }; static inline u8 __aeth_syn(void *arg) @@ -910,6 +955,7 @@ enum rxe_hdr_length { RXE_ATMETH_BYTES = sizeof(struct rxe_atmeth), RXE_IETH_BYTES = sizeof(struct rxe_ieth), RXE_RDETH_BYTES = sizeof(struct rxe_rdeth), + RXE_FETH_BYTES = sizeof(struct rxe_feth), }; static inline size_t header_size(struct rxe_pkt_info *pkt) diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c index 46bb07c5c4df2532736a724e818c70534c748450..71bc2c1895888f8fce3a53c8da38a698987cd7ae 100644 --- a/drivers/infiniband/sw/rxe/rxe_icrc.c +++ b/drivers/infiniband/sw/rxe/rxe_icrc.c @@ -21,7 +21,7 @@ int rxe_icrc_init(struct rxe_dev *rxe) tfm = crypto_alloc_shash("crc32", 0, 0); if (IS_ERR(tfm)) { - pr_warn("failed to init crc32 algorithm err:%ld\n", + rxe_dbg(rxe, "failed to init crc32 algorithm err: %ld\n", PTR_ERR(tfm)); return PTR_ERR(tfm); } @@ -51,7 +51,7 @@ static __be32 rxe_crc32(struct rxe_dev *rxe, __be32 crc, void *next, size_t len) *(__be32 *)shash_desc_ctx(shash) = crc; err = crypto_shash_update(shash, next, len); if (unlikely(err)) { - pr_warn_ratelimited("failed crc calculation, err: %d\n", err); + rxe_dbg(rxe, "failed crc calculation, err: %d\n", err); return (__force __be32)crc32_le((__force u32)crc, next, len); } diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index c2a5c8814a48bbefac18316fe81014a9cdad649f..948ce4902b10fe38bb2aedd88849f08f8d6b5159 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -9,16 +9,12 @@ /* rxe_av.c */ void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av); - -int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr); - +int rxe_av_chk_attr(struct rxe_qp *qp, struct rdma_ah_attr *attr); +int rxe_ah_chk_attr(struct rxe_ah *ah, struct rdma_ah_attr *attr); void rxe_av_from_attr(u8 port_num, struct rxe_av *av, struct rdma_ah_attr *attr); - void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr); - void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr); - struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp); /* rxe_cq.c */ @@ -68,6 +64,7 @@ void rxe_mr_init_dma(int access, struct rxe_mr *mr); int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, int access, struct rxe_mr *mr); int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr); +int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, int length); int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, enum rxe_mr_copy_dir dir); int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma, diff --git 
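
The FETH defined above is one big-endian word carrying the placement type (PLT, bits 3:0) and selectivity level (SEL, bits 5:4). A userspace round-trip of that packing, a sketch only, with htonl()/ntohl() standing in for the kernel's cpu_to_be32()/be32_to_cpu():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define FETH_PLT_MASK  0x0000000fu  /* placement type, bits 3:0 */
#define FETH_SEL_MASK  0x00000030u  /* selectivity level, bits 5:4 */
#define FETH_SEL_SHIFT 4u

int main(void)
{
    uint32_t type = 0x2, level = 0x1;   /* sample PLT and SEL values */
    uint32_t bits = ((level << FETH_SEL_SHIFT) & FETH_SEL_MASK) |
                    (type & FETH_PLT_MASK);
    uint32_t wire = htonl(bits);        /* the FETH travels big-endian */

    printf("plt=%u sel=%u\n",
           ntohl(wire) & FETH_PLT_MASK,
           (ntohl(wire) & FETH_SEL_MASK) >> FETH_SEL_SHIFT);
    return 0;
}
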
a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c index 9149b60954296004de0b1babb97e03fc0e6c1f47..a47d72dbc5376dd9239057ca659ce3f177549353 100644 --- a/drivers/infiniband/sw/rxe/rxe_mmap.c +++ b/drivers/infiniband/sw/rxe/rxe_mmap.c @@ -79,7 +79,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) /* Don't allow a mmap larger than the object. */ if (size > ip->info.size) { - pr_err("mmap region is larger than the object!\n"); + rxe_dbg(rxe, "mmap region is larger than the object!\n"); spin_unlock_bh(&rxe->pending_lock); ret = -EINVAL; goto done; @@ -87,7 +87,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) goto found_it; } - pr_warn("unable to find pending mmap info\n"); + rxe_dbg(rxe, "unable to find pending mmap info\n"); spin_unlock_bh(&rxe->pending_lock); ret = -EINVAL; goto done; @@ -98,7 +98,7 @@ found_it: ret = remap_vmalloc_range(vma, ip->obj, 0); if (ret) { - pr_err("err %d from remap_vmalloc_range\n", ret); + rxe_dbg(rxe, "err %d from remap_vmalloc_range\n", ret); goto done; } diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index 502e9ada99b307a09291a5c38a6d02d155e2187e..072eac4b65d2966874ab3ab2958238727baa0472 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -4,6 +4,8 @@ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. */ +#include <linux/libnvdimm.h> + #include "rxe.h" #include "rxe_loc.h" @@ -26,7 +28,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length) { - switch (mr->type) { + switch (mr->ibmr.type) { case IB_MR_TYPE_DMA: return 0; @@ -38,8 +40,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length) return 0; default: - pr_warn("%s: mr type (%d) not supported\n", - __func__, mr->type); + rxe_dbg_mr(mr, "type (%d) not supported\n", mr->ibmr.type); return -EFAULT; } } @@ -62,7 +63,6 @@ static void rxe_mr_init(int access, struct rxe_mr *mr) mr->rkey = mr->ibmr.rkey = rkey; mr->state = RXE_MR_STATE_INVALID; - mr->map_shift = ilog2(RXE_BUF_PER_MAP); } static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf) @@ -99,6 +99,7 @@ err2: kfree(mr->map[i]); kfree(mr->map); + mr->map = NULL; err1: return -ENOMEM; } @@ -109,7 +110,16 @@ void rxe_mr_init_dma(int access, struct rxe_mr *mr) mr->access = access; mr->state = RXE_MR_STATE_VALID; - mr->type = IB_MR_TYPE_DMA; + mr->ibmr.type = IB_MR_TYPE_DMA; +} + +static bool is_pmem_page(struct page *pg) +{ + unsigned long paddr = page_to_phys(pg); + + return REGION_INTERSECTS == + region_intersects(paddr, PAGE_SIZE, IORESOURCE_MEM, + IORES_DESC_PERSISTENT_MEMORY); } int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, @@ -122,12 +132,11 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, int num_buf; void *vaddr; int err; - int i; umem = ib_umem_get(&rxe->ib_dev, start, length, access); if (IS_ERR(umem)) { - pr_warn("%s: Unable to pin memory region err = %d\n", - __func__, (int)PTR_ERR(umem)); + rxe_dbg_mr(mr, "Unable to pin memory region err = %d\n", + (int)PTR_ERR(umem)); err = PTR_ERR(umem); goto err_out; } @@ -138,8 +147,7 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, err = rxe_mr_alloc(mr, num_buf); if (err) { - pr_warn("%s: Unable to allocate memory for map\n", - __func__); + rxe_dbg_mr(mr, "Unable to allocate memory for map\n"); goto err_release_umem; } @@ -149,23 +157,30 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, num_buf = 0; map = mr->map; if 
(length > 0) { - buf = map[0]->buf; + bool persistent_access = access & IB_ACCESS_FLUSH_PERSISTENT; + buf = map[0]->buf; for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) { + struct page *pg = sg_page_iter_page(&sg_iter); + + if (persistent_access && !is_pmem_page(pg)) { + rxe_dbg_mr(mr, "Unable to register persistent access to non-pmem device\n"); + err = -EINVAL; + goto err_release_umem; + } + if (num_buf >= RXE_BUF_PER_MAP) { map++; buf = map[0]->buf; num_buf = 0; } - vaddr = page_address(sg_page_iter_page(&sg_iter)); + vaddr = page_address(pg); if (!vaddr) { - pr_warn("%s: Unable to get virtual address\n", - __func__); + rxe_dbg_mr(mr, "Unable to get virtual address\n"); err = -ENOMEM; - goto err_cleanup_map; + goto err_release_umem; } - buf->addr = (uintptr_t)vaddr; buf->size = PAGE_SIZE; num_buf++; @@ -178,14 +193,11 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, mr->access = access; mr->offset = ib_umem_offset(umem); mr->state = RXE_MR_STATE_VALID; - mr->type = IB_MR_TYPE_USER; + mr->ibmr.type = IB_MR_TYPE_USER; + mr->ibmr.page_size = PAGE_SIZE; return 0; -err_cleanup_map: - for (i = 0; i < mr->num_map; i++) - kfree(mr->map[i]); - kfree(mr->map); err_release_umem: ib_umem_release(umem); err_out: @@ -205,7 +217,7 @@ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr) mr->max_buf = max_pages; mr->state = RXE_MR_STATE_FREE; - mr->type = IB_MR_TYPE_MEM_REG; + mr->ibmr.type = IB_MR_TYPE_MEM_REG; return 0; @@ -256,7 +268,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length) void *addr; if (mr->state != RXE_MR_STATE_VALID) { - pr_warn("mr not in valid state\n"); + rxe_dbg_mr(mr, "Not in valid state\n"); addr = NULL; goto out; } @@ -267,7 +279,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length) } if (mr_check_range(mr, iova, length)) { - pr_warn("range violation\n"); + rxe_dbg_mr(mr, "Range violation\n"); addr = NULL; goto out; } @@ -275,7 +287,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length) lookup_iova(mr, iova, &m, &n, &offset); if (offset + length > mr->map[m]->buf[n].size) { - pr_warn("crosses page boundary\n"); + rxe_dbg_mr(mr, "Crosses page boundary\n"); addr = NULL; goto out; } @@ -286,6 +298,39 @@ out: return addr; } +int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, int length) +{ + size_t offset; + + if (length == 0) + return 0; + + if (mr->ibmr.type == IB_MR_TYPE_DMA) + return -EFAULT; + + offset = (iova - mr->ibmr.iova + mr->offset) & mr->page_mask; + while (length > 0) { + u8 *va; + int bytes; + + bytes = mr->ibmr.page_size - offset; + if (bytes > length) + bytes = length; + + va = iova_to_vaddr(mr, iova, length); + if (!va) + return -EFAULT; + + arch_wb_cache_pmem(va, bytes); + + length -= bytes; + iova += bytes; + offset = 0; + } + + return 0; +} + /* copy data from a range (vaddr, vaddr+length-1) to or from * a mr object starting at iova. */ @@ -304,7 +349,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, if (length == 0) return 0; - if (mr->type == IB_MR_TYPE_DMA) { + if (mr->ibmr.type == IB_MR_TYPE_DMA) { u8 *src, *dest; src = (dir == RXE_TO_MR_OBJ) ? 
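
rxe_flush_pmem_iova() above walks the requested range one page at a time, clamping the first chunk to the end of its page. A sketch of just that splitting arithmetic, assuming a 4 KiB page and with arch_wb_cache_pmem() replaced by a printf:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ 4096u

static void flush_range(uint64_t iova, unsigned int length)
{
    unsigned int offset = iova & (PAGE_SZ - 1);  /* first chunk may start mid-page */

    while (length > 0) {
        unsigned int bytes = PAGE_SZ - offset;

        if (bytes > length)
            bytes = length;
        printf("writeback %u bytes at 0x%llx\n",
               bytes, (unsigned long long)iova);
        length -= bytes;
        iova += bytes;
        offset = 0;  /* all later chunks are page aligned */
    }
}

int main(void)
{
    flush_range(0x1ff0, 8200);  /* 16 + 4096 + 4088 bytes over three pages */
    return 0;
}
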
addr : ((void *)(uintptr_t)iova); @@ -511,7 +556,7 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key, if (unlikely((type == RXE_LOOKUP_LOCAL && mr->lkey != key) || (type == RXE_LOOKUP_REMOTE && mr->rkey != key) || - mr_pd(mr) != pd || (access && !(access & mr->access)) || + mr_pd(mr) != pd || ((access & mr->access) != access) || mr->state != RXE_MR_STATE_VALID)) { rxe_put(mr); mr = NULL; @@ -528,27 +573,26 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 key) mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8); if (!mr) { - pr_err("%s: No MR for key %#x\n", __func__, key); + rxe_dbg_qp(qp, "No MR for key %#x\n", key); ret = -EINVAL; goto err; } if (mr->rkey ? (key != mr->rkey) : (key != mr->lkey)) { - pr_err("%s: wr key (%#x) doesn't match mr key (%#x)\n", - __func__, key, (mr->rkey ? mr->rkey : mr->lkey)); + rxe_dbg_mr(mr, "wr key (%#x) doesn't match mr key (%#x)\n", + key, (mr->rkey ? mr->rkey : mr->lkey)); ret = -EINVAL; goto err_drop_ref; } if (atomic_read(&mr->num_mw) > 0) { - pr_warn("%s: Attempt to invalidate an MR while bound to MWs\n", - __func__); + rxe_dbg_mr(mr, "Attempt to invalidate an MR while bound to MWs\n"); ret = -EINVAL; goto err_drop_ref; } - if (unlikely(mr->type != IB_MR_TYPE_MEM_REG)) { - pr_warn("%s: mr->type (%d) is wrong type\n", __func__, mr->type); + if (unlikely(mr->ibmr.type != IB_MR_TYPE_MEM_REG)) { + rxe_dbg_mr(mr, "Type (%d) is wrong\n", mr->ibmr.type); ret = -EINVAL; goto err_drop_ref; } @@ -577,22 +621,20 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe) /* user can only register MR in free state */ if (unlikely(mr->state != RXE_MR_STATE_FREE)) { - pr_warn("%s: mr->lkey = 0x%x not free\n", - __func__, mr->lkey); + rxe_dbg_mr(mr, "mr->lkey = 0x%x not free\n", mr->lkey); return -EINVAL; } /* user can only register mr with qp in same protection domain */ if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) { - pr_warn("%s: qp->pd and mr->pd don't match\n", - __func__); + rxe_dbg_mr(mr, "qp->pd and mr->pd don't match\n"); return -EINVAL; } /* user is only allowed to change key portion of l/rkey */ if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) { - pr_warn("%s: key = 0x%x has wrong index mr->lkey = 0x%x\n", - __func__, key, mr->lkey); + rxe_dbg_mr(mr, "key = 0x%x has wrong index mr->lkey = 0x%x\n", + key, mr->lkey); return -EINVAL; } diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c index 902b7df7aaedb626ce33075da22e2cd3ebdbc685..afa5ce1a711667e90bc501f908ad49c00013564e 100644 --- a/drivers/infiniband/sw/rxe/rxe_mw.c +++ b/drivers/infiniband/sw/rxe/rxe_mw.c @@ -52,14 +52,14 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, { if (mw->ibmw.type == IB_MW_TYPE_1) { if (unlikely(mw->state != RXE_MW_STATE_VALID)) { - pr_err_once( + rxe_dbg_mw(mw, "attempt to bind a type 1 MW not in the valid state\n"); return -EINVAL; } /* o10-36.2.2 */ if (unlikely((mw->access & IB_ZERO_BASED))) { - pr_err_once("attempt to bind a zero based type 1 MW\n"); + rxe_dbg_mw(mw, "attempt to bind a zero based type 1 MW\n"); return -EINVAL; } } @@ -67,21 +67,21 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, if (mw->ibmw.type == IB_MW_TYPE_2) { /* o10-37.2.30 */ if (unlikely(mw->state != RXE_MW_STATE_FREE)) { - pr_err_once( + rxe_dbg_mw(mw, "attempt to bind a type 2 MW not in the free state\n"); return -EINVAL; } /* C10-72 */ if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) { - pr_err_once( + rxe_dbg_mw(mw, "attempt to bind type 2 MW with qp with different PD\n"); return -EINVAL; 
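
The lookup_mr() hunk above (and the matching rxe_lookup_mw() change below) tightens the access test from any-bit to all-bits: ((access & mr->access) != access) fails unless every requested right was granted, which matters once a single FLUSH can require both the GLOBAL and PERSISTENT rights. A two-case demonstration with invented bit values:

#include <stdio.h>

#define ACC_FLUSH_GLOBAL     (1 << 0)  /* illustrative bits, not the ABI values */
#define ACC_FLUSH_PERSISTENT (1 << 1)

int main(void)
{
    int granted = ACC_FLUSH_GLOBAL;                       /* MR grants GLOBAL only */
    int wanted = ACC_FLUSH_GLOBAL | ACC_FLUSH_PERSISTENT; /* op needs both */

    /* old test: passes because at least one wanted bit is granted */
    printf("any-bit:  %s\n", (wanted && !(wanted & granted)) ? "reject" : "allow");

    /* new test: rejects because PERSISTENT was never granted */
    printf("all-bits: %s\n", ((wanted & granted) != wanted) ? "reject" : "allow");
    return 0;
}
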
} /* o10-37.2.40 */ if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) { - pr_err_once( + rxe_dbg_mw(mw, "attempt to invalidate type 2 MW by binding with NULL or zero length MR\n"); return -EINVAL; } @@ -92,13 +92,13 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, return 0; if (unlikely(mr->access & IB_ZERO_BASED)) { - pr_err_once("attempt to bind MW to zero based MR\n"); + rxe_dbg_mw(mw, "attempt to bind MW to zero based MR\n"); return -EINVAL; } /* C10-73 */ if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) { - pr_err_once( + rxe_dbg_mw(mw, "attempt to bind an MW to an MR without bind access\n"); return -EINVAL; } @@ -107,7 +107,7 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, if (unlikely((mw->access & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) && !(mr->access & IB_ACCESS_LOCAL_WRITE))) { - pr_err_once( + rxe_dbg_mw(mw, "attempt to bind an Writable MW to an MR without local write access\n"); return -EINVAL; } @@ -115,7 +115,7 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, /* C10-75 */ if (mw->access & IB_ZERO_BASED) { if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) { - pr_err_once( + rxe_dbg_mw(mw, "attempt to bind a ZB MW outside of the MR\n"); return -EINVAL; } @@ -123,7 +123,7 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) || ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) > (mr->ibmr.iova + mr->ibmr.length)))) { - pr_err_once( + rxe_dbg_mw(mw, "attempt to bind a VA MW outside of the MR\n"); return -EINVAL; } @@ -293,8 +293,7 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey) if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd || (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) || - (mw->length == 0) || - (access && !(access & mw->access)) || + (mw->length == 0) || ((access & mw->access) != access) || mw->state != RXE_MW_STATE_VALID)) { rxe_put(mw); return NULL; diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 35f327b9d4b8ec28e3435a73139149742f8128c7..e02e1624bcf4db4a4fb11c62d396f0bc35a63e8b 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -20,9 +20,10 @@ static struct rxe_recv_sockets recv_sockets; -static struct dst_entry *rxe_find_route4(struct net_device *ndev, - struct in_addr *saddr, - struct in_addr *daddr) +static struct dst_entry *rxe_find_route4(struct rxe_qp *qp, + struct net_device *ndev, + struct in_addr *saddr, + struct in_addr *daddr) { struct rtable *rt; struct flowi4 fl = { { 0 } }; @@ -35,7 +36,7 @@ static struct dst_entry *rxe_find_route4(struct net_device *ndev, rt = ip_route_output_key(&init_net, &fl); if (IS_ERR(rt)) { - pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr); + rxe_dbg_qp(qp, "no route to %pI4\n", &daddr->s_addr); return NULL; } @@ -43,7 +44,8 @@ static struct dst_entry *rxe_find_route4(struct net_device *ndev, } #if IS_ENABLED(CONFIG_IPV6) -static struct dst_entry *rxe_find_route6(struct net_device *ndev, +static struct dst_entry *rxe_find_route6(struct rxe_qp *qp, + struct net_device *ndev, struct in6_addr *saddr, struct in6_addr *daddr) { @@ -60,12 +62,12 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev, recv_sockets.sk6->sk, &fl6, NULL); if (IS_ERR(ndst)) { - pr_err_ratelimited("no route to %pI6\n", daddr); + rxe_dbg_qp(qp, "no route to %pI6\n", daddr); return NULL; } if (unlikely(ndst->error)) { - pr_err("no route to %pI6\n", daddr); + 
rxe_dbg_qp(qp, "no route to %pI6\n", daddr); goto put; } @@ -77,7 +79,8 @@ put: #else -static struct dst_entry *rxe_find_route6(struct net_device *ndev, +static struct dst_entry *rxe_find_route6(struct rxe_qp *qp, + struct net_device *ndev, struct in6_addr *saddr, struct in6_addr *daddr) { @@ -105,14 +108,14 @@ static struct dst_entry *rxe_find_route(struct net_device *ndev, saddr = &av->sgid_addr._sockaddr_in.sin_addr; daddr = &av->dgid_addr._sockaddr_in.sin_addr; - dst = rxe_find_route4(ndev, saddr, daddr); + dst = rxe_find_route4(qp, ndev, saddr, daddr); } else if (av->network_type == RXE_NETWORK_TYPE_IPV6) { struct in6_addr *saddr6; struct in6_addr *daddr6; saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr; daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr; - dst = rxe_find_route6(ndev, saddr6, daddr6); + dst = rxe_find_route6(qp, ndev, saddr6, daddr6); #if IS_ENABLED(CONFIG_IPV6) if (dst) qp->dst_cookie = @@ -282,7 +285,7 @@ static int prepare4(struct rxe_av *av, struct rxe_pkt_info *pkt, dst = rxe_find_route(skb->dev, qp, av); if (!dst) { - pr_err("Host not reachable\n"); + rxe_dbg_qp(qp, "Host not reachable\n"); return -EHOSTUNREACH; } @@ -306,7 +309,7 @@ static int prepare6(struct rxe_av *av, struct rxe_pkt_info *pkt, dst = rxe_find_route(skb->dev, qp, av); if (!dst) { - pr_err("Host not reachable\n"); + rxe_dbg_qp(qp, "Host not reachable\n"); return -EHOSTUNREACH; } @@ -345,7 +348,7 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb) if (unlikely(qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)) - rxe_run_task(&qp->req.task, 1); + rxe_sched_task(&qp->req.task); rxe_put(qp); } @@ -365,7 +368,8 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt) } else if (skb->protocol == htons(ETH_P_IPV6)) { err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); } else { - pr_err("Unknown layer 3 protocol: %d\n", skb->protocol); + rxe_dbg_qp(pkt->qp, "Unknown layer 3 protocol: %d\n", + skb->protocol); atomic_dec(&pkt->qp->skb_out); rxe_put(pkt->qp); kfree_skb(skb); @@ -373,7 +377,7 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt) } if (unlikely(net_xmit_eval(err))) { - pr_debug("error sending packet: %d\n", err); + rxe_dbg_qp(pkt->qp, "error sending packet: %d\n", err); return -EAGAIN; } @@ -411,7 +415,7 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, if ((is_request && (qp->req.state != QP_STATE_READY)) || (!is_request && (qp->resp.state != QP_STATE_READY))) { - pr_info("Packet dropped. QP is not in ready state\n"); + rxe_dbg_qp(qp, "Packet dropped. 
QP is not in ready state\n"); goto drop; } @@ -429,7 +433,7 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, if ((qp_type(qp) != IB_QPT_RC) && (pkt->mask & RXE_END_MASK)) { pkt->wqe->state = wqe_state_done; - rxe_run_task(&qp->comp.task, 1); + rxe_sched_task(&qp->comp.task); } rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS); @@ -592,7 +596,7 @@ static int rxe_notify(struct notifier_block *not_blk, rxe_port_down(rxe); break; case NETDEV_CHANGEMTU: - pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu); + rxe_dbg(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu); rxe_set_mtu(rxe, ndev->mtu); break; case NETDEV_CHANGE: @@ -604,7 +608,7 @@ static int rxe_notify(struct notifier_block *not_blk, case NETDEV_CHANGENAME: case NETDEV_FEAT_CHANGE: default: - pr_info("ignoring netdev event = %ld for %s\n", + rxe_dbg(rxe, "ignoring netdev event = %ld for %s\n", event, ndev->name); break; } diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c index d4ba4d506f1760b9a04bcb6f35d6d5c2180c1b93..5c0d5c6ffda4f05949891058cf42eef1acfb15bf 100644 --- a/drivers/infiniband/sw/rxe/rxe_opcode.c +++ b/drivers/infiniband/sw/rxe/rxe_opcode.c @@ -101,6 +101,18 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = { [IB_QPT_UC] = WR_LOCAL_OP_MASK, }, }, + [IB_WR_FLUSH] = { + .name = "IB_WR_FLUSH", + .mask = { + [IB_QPT_RC] = WR_FLUSH_MASK, + }, + }, + [IB_WR_ATOMIC_WRITE] = { + .name = "IB_WR_ATOMIC_WRITE", + .mask = { + [IB_QPT_RC] = WR_ATOMIC_WRITE_MASK, + }, + }, }; struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { @@ -378,6 +390,29 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { RXE_IETH_BYTES, } }, + [IB_OPCODE_RC_FLUSH] = { + .name = "IB_OPCODE_RC_FLUSH", + .mask = RXE_FETH_MASK | RXE_RETH_MASK | RXE_FLUSH_MASK | + RXE_START_MASK | RXE_END_MASK | RXE_REQ_MASK, + .length = RXE_BTH_BYTES + RXE_FETH_BYTES + RXE_RETH_BYTES, + .offset = { + [RXE_BTH] = 0, + [RXE_FETH] = RXE_BTH_BYTES, + [RXE_RETH] = RXE_BTH_BYTES + RXE_FETH_BYTES, + } + }, + [IB_OPCODE_RC_ATOMIC_WRITE] = { + .name = "IB_OPCODE_RC_ATOMIC_WRITE", + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_ATOMIC_WRITE_MASK | RXE_START_MASK | + RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_RETH_BYTES, + .offset = { + [RXE_BTH] = 0, + [RXE_RETH] = RXE_BTH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + RXE_RETH_BYTES, + } + }, /* UC */ [IB_OPCODE_UC_SEND_FIRST] = { diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.h b/drivers/infiniband/sw/rxe/rxe_opcode.h index 8f9aaaf260f293c0fab54ba08f74cd0e839a608d..cea4e0a639199cd9d2aa829aa36f2415d845e359 100644 --- a/drivers/infiniband/sw/rxe/rxe_opcode.h +++ b/drivers/infiniband/sw/rxe/rxe_opcode.h @@ -20,6 +20,8 @@ enum rxe_wr_mask { WR_READ_MASK = BIT(3), WR_WRITE_MASK = BIT(4), WR_LOCAL_OP_MASK = BIT(5), + WR_FLUSH_MASK = BIT(6), + WR_ATOMIC_WRITE_MASK = BIT(7), WR_READ_OR_WRITE_MASK = WR_READ_MASK | WR_WRITE_MASK, WR_WRITE_OR_SEND_MASK = WR_WRITE_MASK | WR_SEND_MASK, @@ -47,6 +49,7 @@ enum rxe_hdr_type { RXE_RDETH, RXE_DETH, RXE_IMMDT, + RXE_FETH, RXE_PAYLOAD, NUM_HDR_TYPES }; @@ -63,6 +66,7 @@ enum rxe_hdr_mask { RXE_IETH_MASK = BIT(RXE_IETH), RXE_RDETH_MASK = BIT(RXE_RDETH), RXE_DETH_MASK = BIT(RXE_DETH), + RXE_FETH_MASK = BIT(RXE_FETH), RXE_PAYLOAD_MASK = BIT(RXE_PAYLOAD), RXE_REQ_MASK = BIT(NUM_HDR_TYPES + 0), @@ -71,16 +75,19 @@ enum rxe_hdr_mask { RXE_WRITE_MASK = BIT(NUM_HDR_TYPES + 3), RXE_READ_MASK = BIT(NUM_HDR_TYPES + 4), RXE_ATOMIC_MASK = BIT(NUM_HDR_TYPES + 5), + RXE_FLUSH_MASK = BIT(NUM_HDR_TYPES + 6), - RXE_RWR_MASK = 
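
The new rxe_opcode[] entries above lay the RC FLUSH out as BTH | FETH | RETH with no payload, and RC ATOMIC WRITE as BTH | RETH | payload. The offsets follow from the fixed header sizes (BTH is 12 bytes and RETH 16 in the IBA encoding; FETH is the 4-byte word defined earlier), as this small check shows:

#include <stdio.h>

enum { BTH_BYTES = 12, FETH_BYTES = 4, RETH_BYTES = 16 };

int main(void)
{
    /* IB_OPCODE_RC_FLUSH: BTH | FETH | RETH */
    printf("FLUSH: FETH at %d, RETH at %d, header length %d\n",
           BTH_BYTES, BTH_BYTES + FETH_BYTES,
           BTH_BYTES + FETH_BYTES + RETH_BYTES);

    /* IB_OPCODE_RC_ATOMIC_WRITE: BTH | RETH | 8-byte payload */
    printf("ATOMIC_WRITE: payload at %d\n", BTH_BYTES + RETH_BYTES);
    return 0;
}
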
BIT(NUM_HDR_TYPES + 6), - RXE_COMP_MASK = BIT(NUM_HDR_TYPES + 7), + RXE_RWR_MASK = BIT(NUM_HDR_TYPES + 7), + RXE_COMP_MASK = BIT(NUM_HDR_TYPES + 8), - RXE_START_MASK = BIT(NUM_HDR_TYPES + 8), - RXE_MIDDLE_MASK = BIT(NUM_HDR_TYPES + 9), - RXE_END_MASK = BIT(NUM_HDR_TYPES + 10), + RXE_START_MASK = BIT(NUM_HDR_TYPES + 9), + RXE_MIDDLE_MASK = BIT(NUM_HDR_TYPES + 10), + RXE_END_MASK = BIT(NUM_HDR_TYPES + 11), RXE_LOOPBACK_MASK = BIT(NUM_HDR_TYPES + 12), + RXE_ATOMIC_WRITE_MASK = BIT(NUM_HDR_TYPES + 14), + RXE_READ_OR_ATOMIC_MASK = (RXE_READ_MASK | RXE_ATOMIC_MASK), RXE_WRITE_OR_SEND_MASK = (RXE_WRITE_MASK | RXE_SEND_MASK), RXE_READ_OR_WRITE_MASK = (RXE_READ_MASK | RXE_WRITE_MASK), diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h index 86c7a8bf3cbbd8c8dc0f61e0c0afaf9b6f7978b8..a754fc902e3d190c1fbdf7ab3ecdeb59fef266de 100644 --- a/drivers/infiniband/sw/rxe/rxe_param.h +++ b/drivers/infiniband/sw/rxe/rxe_param.h @@ -51,7 +51,14 @@ enum rxe_device_param { | IB_DEVICE_SRQ_RESIZE | IB_DEVICE_MEM_MGT_EXTENSIONS | IB_DEVICE_MEM_WINDOW + | IB_DEVICE_FLUSH_GLOBAL + | IB_DEVICE_FLUSH_PERSISTENT +#ifdef CONFIG_64BIT + | IB_DEVICE_MEM_WINDOW_TYPE_2B + | IB_DEVICE_ATOMIC_WRITE, +#else | IB_DEVICE_MEM_WINDOW_TYPE_2B, +#endif /* CONFIG_64BIT */ RXE_MAX_SGE = 32, RXE_MAX_WQE_SIZE = sizeof(struct rxe_send_wqe) + sizeof(struct ib_sge) * RXE_MAX_SGE, diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index a62bab88415cbff117ee50e8fdd921594278e8b9..ab72db68b58f69cb65a5b69d8e7d932d965fc554 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -19,33 +19,33 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap, int has_srq) { if (cap->max_send_wr > rxe->attr.max_qp_wr) { - pr_debug("invalid send wr = %u > %d\n", + rxe_dbg(rxe, "invalid send wr = %u > %d\n", cap->max_send_wr, rxe->attr.max_qp_wr); goto err1; } if (cap->max_send_sge > rxe->attr.max_send_sge) { - pr_debug("invalid send sge = %u > %d\n", + rxe_dbg(rxe, "invalid send sge = %u > %d\n", cap->max_send_sge, rxe->attr.max_send_sge); goto err1; } if (!has_srq) { if (cap->max_recv_wr > rxe->attr.max_qp_wr) { - pr_debug("invalid recv wr = %u > %d\n", + rxe_dbg(rxe, "invalid recv wr = %u > %d\n", cap->max_recv_wr, rxe->attr.max_qp_wr); goto err1; } if (cap->max_recv_sge > rxe->attr.max_recv_sge) { - pr_debug("invalid recv sge = %u > %d\n", + rxe_dbg(rxe, "invalid recv sge = %u > %d\n", cap->max_recv_sge, rxe->attr.max_recv_sge); goto err1; } } if (cap->max_inline_data > rxe->max_inline_data) { - pr_debug("invalid max inline data = %u > %d\n", + rxe_dbg(rxe, "invalid max inline data = %u > %d\n", cap->max_inline_data, rxe->max_inline_data); goto err1; } @@ -73,7 +73,7 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init) } if (!init->recv_cq || !init->send_cq) { - pr_debug("missing cq\n"); + rxe_dbg(rxe, "missing cq\n"); goto err1; } @@ -82,14 +82,14 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init) if (init->qp_type == IB_QPT_GSI) { if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) { - pr_debug("invalid port = %d\n", port_num); + rxe_dbg(rxe, "invalid port = %d\n", port_num); goto err1; } port = &rxe->port; if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) { - pr_debug("GSI QP exists for port %d\n", port_num); + rxe_dbg(rxe, "GSI QP exists for port %d\n", port_num); goto err1; } } @@ -172,10 +172,6 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, 
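
The CONFIG_64BIT guard above exists because the responder must complete an ATOMIC WRITE with one single-copy 8-byte store; a 32-bit kernel would need two stores and could expose a torn value. A userspace analogue, assuming C11 atomics, where atomic_store_explicit with release ordering models smp_store_release() (it is not the kernel primitive):

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

int main(void)
{
    _Alignas(8) _Atomic uint64_t target = 0;
    uint64_t payload = 0x1122334455667788ull;

    /* one release-ordered 8-byte store: readers see old or new, never a mix */
    atomic_store_explicit(&target, payload, memory_order_release);
    printf("target = 0x%llx\n", (unsigned long long)atomic_load(&target));
    return 0;
}
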
spin_lock_init(&qp->state_lock); - spin_lock_init(&qp->req.task.state_lock); - spin_lock_init(&qp->resp.task.state_lock); - spin_lock_init(&qp->comp.task.state_lock); - spin_lock_init(&qp->sq.sq_lock); spin_lock_init(&qp->rq.producer_lock); spin_lock_init(&qp->rq.consumer_lock); @@ -242,10 +238,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, skb_queue_head_init(&qp->req_pkts); - rxe_init_task(&qp->req.task, qp, - rxe_requester, "req"); - rxe_init_task(&qp->comp.task, qp, - rxe_completer, "comp"); + rxe_init_task(&qp->req.task, qp, rxe_requester); + rxe_init_task(&qp->comp.task, qp, rxe_completer); qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */ if (init->qp_type == IB_QPT_RC) { @@ -270,9 +264,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, wqe_size = rcv_wqe_size(qp->rq.max_sge); - pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n", - qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size); - type = QUEUE_TYPE_FROM_CLIENT; qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size, type); @@ -292,8 +283,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, skb_queue_head_init(&qp->resp_pkts); - rxe_init_task(&qp->resp.task, qp, - rxe_responder, "resp"); + rxe_init_task(&qp->resp.task, qp, rxe_responder); qp->resp.opcode = OPCODE_NONE; qp->resp.msn = 0; @@ -402,7 +392,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, attr->qp_state : cur_state; if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) { - pr_debug("invalid mask or state for qp\n"); + rxe_dbg_qp(qp, "invalid mask or state\n"); goto err1; } @@ -416,7 +406,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, if (mask & IB_QP_PORT) { if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) { - pr_debug("invalid port %d\n", attr->port_num); + rxe_dbg_qp(qp, "invalid port %d\n", attr->port_num); goto err1; } } @@ -424,18 +414,18 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq)) goto err1; - if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr)) + if (mask & IB_QP_AV && rxe_av_chk_attr(qp, &attr->ah_attr)) goto err1; if (mask & IB_QP_ALT_PATH) { - if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr)) + if (rxe_av_chk_attr(qp, &attr->alt_ah_attr)) goto err1; if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) { - pr_debug("invalid alt port %d\n", attr->alt_port_num); + rxe_dbg_qp(qp, "invalid alt port %d\n", attr->alt_port_num); goto err1; } if (attr->alt_timeout > 31) { - pr_debug("invalid QP alt timeout %d > 31\n", + rxe_dbg_qp(qp, "invalid alt timeout %d > 31\n", attr->alt_timeout); goto err1; } @@ -448,7 +438,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, enum ib_mtu mtu = attr->path_mtu; if (mtu > max_mtu) { - pr_debug("invalid mtu (%d) > (%d)\n", + rxe_dbg_qp(qp, "invalid mtu (%d) > (%d)\n", ib_mtu_enum_to_int(mtu), ib_mtu_enum_to_int(max_mtu)); goto err1; @@ -457,7 +447,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, if (mask & IB_QP_MAX_QP_RD_ATOMIC) { if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) { - pr_debug("invalid max_rd_atomic %d > %d\n", + rxe_dbg_qp(qp, "invalid max_rd_atomic %d > %d\n", attr->max_rd_atomic, rxe->attr.max_qp_rd_atom); goto err1; @@ -466,7 +456,8 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, if (mask & IB_QP_TIMEOUT) { if (attr->timeout > 31) { - pr_debug("invalid QP timeout %d > 31\n", attr->timeout); + rxe_dbg_qp(qp, "invalid 
timeout %d > 31\n", + attr->timeout); goto err1; } } @@ -543,10 +534,10 @@ static void rxe_qp_drain(struct rxe_qp *qp) if (qp->req.state != QP_STATE_DRAINED) { qp->req.state = QP_STATE_DRAIN; if (qp_type(qp) == IB_QPT_RC) - rxe_run_task(&qp->comp.task, 1); + rxe_sched_task(&qp->comp.task); else __rxe_do_task(&qp->comp.task); - rxe_run_task(&qp->req.task, 1); + rxe_sched_task(&qp->req.task); } } } @@ -560,13 +551,13 @@ void rxe_qp_error(struct rxe_qp *qp) qp->attr.qp_state = IB_QPS_ERR; /* drain work and packet queues */ - rxe_run_task(&qp->resp.task, 1); + rxe_sched_task(&qp->resp.task); if (qp_type(qp) == IB_QPT_RC) - rxe_run_task(&qp->comp.task, 1); + rxe_sched_task(&qp->comp.task); else __rxe_do_task(&qp->comp.task); - rxe_run_task(&qp->req.task, 1); + rxe_sched_task(&qp->req.task); } /* called by the modify qp verb */ @@ -644,27 +635,24 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, if (mask & IB_QP_RETRY_CNT) { qp->attr.retry_cnt = attr->retry_cnt; qp->comp.retry_cnt = attr->retry_cnt; - pr_debug("qp#%d set retry count = %d\n", qp_num(qp), - attr->retry_cnt); + rxe_dbg_qp(qp, "set retry count = %d\n", attr->retry_cnt); } if (mask & IB_QP_RNR_RETRY) { qp->attr.rnr_retry = attr->rnr_retry; qp->comp.rnr_retry = attr->rnr_retry; - pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp), - attr->rnr_retry); + rxe_dbg_qp(qp, "set rnr retry count = %d\n", attr->rnr_retry); } if (mask & IB_QP_RQ_PSN) { qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK); qp->resp.psn = qp->attr.rq_psn; - pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp), - qp->resp.psn); + rxe_dbg_qp(qp, "set resp psn = 0x%x\n", qp->resp.psn); } if (mask & IB_QP_MIN_RNR_TIMER) { qp->attr.min_rnr_timer = attr->min_rnr_timer; - pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp), + rxe_dbg_qp(qp, "set min rnr timer = 0x%x\n", attr->min_rnr_timer); } @@ -672,7 +660,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK); qp->req.psn = qp->attr.sq_psn; qp->comp.psn = qp->attr.sq_psn; - pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn); + rxe_dbg_qp(qp, "set req psn = 0x%x\n", qp->req.psn); } if (mask & IB_QP_PATH_MIG_STATE) @@ -686,40 +674,40 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, switch (attr->qp_state) { case IB_QPS_RESET: - pr_debug("qp#%d state -> RESET\n", qp_num(qp)); + rxe_dbg_qp(qp, "state -> RESET\n"); rxe_qp_reset(qp); break; case IB_QPS_INIT: - pr_debug("qp#%d state -> INIT\n", qp_num(qp)); + rxe_dbg_qp(qp, "state -> INIT\n"); qp->req.state = QP_STATE_INIT; qp->resp.state = QP_STATE_INIT; qp->comp.state = QP_STATE_INIT; break; case IB_QPS_RTR: - pr_debug("qp#%d state -> RTR\n", qp_num(qp)); + rxe_dbg_qp(qp, "state -> RTR\n"); qp->resp.state = QP_STATE_READY; break; case IB_QPS_RTS: - pr_debug("qp#%d state -> RTS\n", qp_num(qp)); + rxe_dbg_qp(qp, "state -> RTS\n"); qp->req.state = QP_STATE_READY; qp->comp.state = QP_STATE_READY; break; case IB_QPS_SQD: - pr_debug("qp#%d state -> SQD\n", qp_num(qp)); + rxe_dbg_qp(qp, "state -> SQD\n"); rxe_qp_drain(qp); break; case IB_QPS_SQE: - pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp)); + rxe_dbg_qp(qp, "state -> SQE !!?\n"); /* Not possible from modify_qp. 
*/ break; case IB_QPS_ERR: - pr_debug("qp#%d state -> ERR\n", qp_num(qp)); + rxe_dbg_qp(qp, "state -> ERR\n"); rxe_qp_error(qp); break; } @@ -759,7 +747,7 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) attr->sq_draining = 0; } - pr_debug("attr->sq_draining = %d\n", attr->sq_draining); + rxe_dbg_qp(qp, "attr->sq_draining = %d\n", attr->sq_draining); return 0; } @@ -771,7 +759,7 @@ int rxe_qp_chk_destroy(struct rxe_qp *qp) * will fail immediately. */ if (atomic_read(&qp->mcg_num)) { - pr_debug("Attempt to destroy QP while attached to multicast group\n"); + rxe_dbg_qp(qp, "Attempt to destroy while attached to multicast group\n"); return -EBUSY; } @@ -829,12 +817,12 @@ static void rxe_qp_do_cleanup(struct work_struct *work) if (qp->resp.mr) rxe_put(qp->resp.mr); - if (qp_type(qp) == IB_QPT_RC) - sk_dst_reset(qp->sk->sk); - free_rd_atomic_resources(qp); if (qp->sk) { + if (qp_type(qp) == IB_QPT_RC) + sk_dst_reset(qp->sk->sk); + kernel_sock_shutdown(qp->sk, SHUT_RDWR); sock_release(qp->sk); } diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index f637712079705f85f7f4c3547c90b5284ce81cfe..899c8779f8001cd21bb16f8726e8b61cc611d50b 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -100,12 +100,12 @@ void rnr_nak_timer(struct timer_list *t) { struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer); - pr_debug("%s: fired for qp#%d\n", __func__, qp_num(qp)); + rxe_dbg_qp(qp, "nak timer fired\n"); /* request a send queue retry */ qp->req.need_retry = 1; qp->req.wait_for_rnr_timer = 0; - rxe_run_task(&qp->req.task, 1); + rxe_sched_task(&qp->req.task); } static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) @@ -241,6 +241,9 @@ static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits) IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE : IB_OPCODE_RC_SEND_FIRST; + case IB_WR_FLUSH: + return IB_OPCODE_RC_FLUSH; + case IB_WR_RDMA_READ: return IB_OPCODE_RC_RDMA_READ_REQUEST; @@ -258,6 +261,10 @@ static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits) else return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE : IB_OPCODE_RC_SEND_FIRST; + + case IB_WR_ATOMIC_WRITE: + return IB_OPCODE_RC_ATOMIC_WRITE; + case IB_WR_REG_MR: case IB_WR_LOCAL_INV: return opcode; @@ -421,11 +428,18 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, /* init optional headers */ if (pkt->mask & RXE_RETH_MASK) { - reth_set_rkey(pkt, ibwr->wr.rdma.rkey); + if (pkt->mask & RXE_FETH_MASK) + reth_set_rkey(pkt, ibwr->wr.flush.rkey); + else + reth_set_rkey(pkt, ibwr->wr.rdma.rkey); reth_set_va(pkt, wqe->iova); reth_set_len(pkt, wqe->dma.resid); } + /* Fill Flush Extension Transport Header */ + if (pkt->mask & RXE_FETH_MASK) + feth_init(pkt, ibwr->wr.flush.type, ibwr->wr.flush.level); + if (pkt->mask & RXE_IMMDT_MASK) immdt_set_imm(pkt, ibwr->ex.imm_data); @@ -484,6 +498,14 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_av *av, memset(pad, 0, bth_pad(pkt)); } + } else if (pkt->mask & RXE_FLUSH_MASK) { + /* oA19-2: shall have no payload. 
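
For a FLUSH work request, init_req_packet() above fills the RETH from wr.flush.rkey plus the WQE's iova and residual length, then packs the requested placement type and selectivity level into the FETH; per oA19-2 the packet carries no payload. A sketch of the resulting header fields for a hypothetical request (all values invented for illustration):

#include <stdio.h>

struct flush_wr {
    unsigned int rkey;
    unsigned long long va;
    unsigned int len;
    unsigned int type, level;  /* FETH PLT and SEL */
};

int main(void)
{
    struct flush_wr wr = { 0x1234, 0x7f00deadb000ull, 64, 1, 0 };

    printf("RETH: rkey=%#x va=%#llx len=%u\n", wr.rkey, wr.va, wr.len);
    printf("FETH: plt=%u sel=%u, payload length 0\n", wr.type, wr.level);
    return 0;
}
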
*/ + wqe->dma.resid = 0; + } + + if (pkt->mask & RXE_ATOMIC_WRITE_MASK) { + memcpy(payload_addr(pkt), wqe->dma.atomic_wr, payload); + wqe->dma.resid -= payload; } return 0; @@ -595,7 +617,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe) } break; default: - pr_err("Unexpected send wqe opcode %d\n", opcode); + rxe_dbg_qp(qp, "Unexpected send wqe opcode %d\n", opcode); wqe->status = IB_WC_LOC_QP_OP_ERR; return -EINVAL; } @@ -608,7 +630,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe) * which can lead to a deadlock. So go ahead and complete * it now. */ - rxe_run_task(&qp->comp.task, 1); + rxe_sched_task(&qp->comp.task); return 0; } @@ -709,13 +731,15 @@ int rxe_requester(void *arg) } mask = rxe_opcode[opcode].mask; - if (unlikely(mask & RXE_READ_OR_ATOMIC_MASK)) { + if (unlikely(mask & (RXE_READ_OR_ATOMIC_MASK | + RXE_ATOMIC_WRITE_MASK))) { if (check_init_depth(qp, wqe)) goto exit; } mtu = get_mtu(qp); - payload = (mask & RXE_WRITE_OR_SEND_MASK) ? wqe->dma.resid : 0; + payload = (mask & (RXE_WRITE_OR_SEND_MASK | RXE_ATOMIC_WRITE_MASK)) ? + wqe->dma.resid : 0; if (payload > mtu) { if (qp_type(qp) == IB_QPT_UD) { /* C10-93.1.1: If the total sum of all the buffer lengths specified for a @@ -733,7 +757,7 @@ int rxe_requester(void *arg) qp->req.wqe_index); wqe->state = wqe_state_done; wqe->status = IB_WC_SUCCESS; - rxe_run_task(&qp->comp.task, 0); + rxe_run_task(&qp->comp.task); goto done; } payload = mtu; @@ -748,14 +772,14 @@ int rxe_requester(void *arg) av = rxe_get_av(&pkt, &ah); if (unlikely(!av)) { - pr_err("qp#%d Failed no address vector\n", qp_num(qp)); + rxe_dbg_qp(qp, "Failed no address vector\n"); wqe->status = IB_WC_LOC_QP_OP_ERR; goto err; } skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt); if (unlikely(!skb)) { - pr_err("qp#%d Failed allocating skb\n", qp_num(qp)); + rxe_dbg_qp(qp, "Failed allocating skb\n"); wqe->status = IB_WC_LOC_QP_OP_ERR; if (ah) rxe_put(ah); @@ -764,7 +788,7 @@ int rxe_requester(void *arg) err = finish_packet(qp, av, wqe, &pkt, skb, payload); if (unlikely(err)) { - pr_debug("qp#%d Error during finish packet\n", qp_num(qp)); + rxe_dbg_qp(qp, "Error during finish packet\n"); if (err == -EFAULT) wqe->status = IB_WC_LOC_PROT_ERR; else @@ -795,7 +819,7 @@ int rxe_requester(void *arg) rollback_state(wqe, qp, &rollback_wqe, rollback_psn); if (err == -EAGAIN) { - rxe_run_task(&qp->req.task, 1); + rxe_sched_task(&qp->req.task); goto exit; } @@ -817,7 +841,7 @@ err: qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index); wqe->state = wqe_state_error; qp->req.state = QP_STATE_ERROR; - rxe_run_task(&qp->comp.task, 0); + rxe_run_task(&qp->comp.task); exit: ret = -EAGAIN; out: diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 693081e813ec08c8d37619da75c84a82a4d5acfa..c74972244f08f5004c9430876d9c64c57a8d1d82 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -22,6 +22,8 @@ enum resp_states { RESPST_EXECUTE, RESPST_READ_REPLY, RESPST_ATOMIC_REPLY, + RESPST_ATOMIC_WRITE_REPLY, + RESPST_PROCESS_FLUSH, RESPST_COMPLETE, RESPST_ACKNOWLEDGE, RESPST_CLEANUP, @@ -57,6 +59,8 @@ static char *resp_state_name[] = { [RESPST_EXECUTE] = "EXECUTE", [RESPST_READ_REPLY] = "READ_REPLY", [RESPST_ATOMIC_REPLY] = "ATOMIC_REPLY", + [RESPST_ATOMIC_WRITE_REPLY] = "ATOMIC_WRITE_REPLY", + [RESPST_PROCESS_FLUSH] = "PROCESS_FLUSH", [RESPST_COMPLETE] = "COMPLETE", [RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE", [RESPST_CLEANUP] = "CLEANUP", @@ 
-91,7 +95,10 @@ void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) || (skb_queue_len(&qp->req_pkts) > 1); - rxe_run_task(&qp->resp.task, must_sched); + if (must_sched) + rxe_sched_task(&qp->resp.task); + else + rxe_run_task(&qp->resp.task); } static inline enum resp_states get_req(struct rxe_qp *qp, @@ -253,19 +260,37 @@ static enum resp_states check_op_seq(struct rxe_qp *qp, } } +static bool check_qp_attr_access(struct rxe_qp *qp, + struct rxe_pkt_info *pkt) +{ + if (((pkt->mask & RXE_READ_MASK) && + !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) || + ((pkt->mask & (RXE_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) && + !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) || + ((pkt->mask & RXE_ATOMIC_MASK) && + !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) + return false; + + if (pkt->mask & RXE_FLUSH_MASK) { + u32 flush_type = feth_plt(pkt); + + if ((flush_type & IB_FLUSH_GLOBAL && + !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_GLOBAL)) || + (flush_type & IB_FLUSH_PERSISTENT && + !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_PERSISTENT))) + return false; + } + + return true; +} + static enum resp_states check_op_valid(struct rxe_qp *qp, struct rxe_pkt_info *pkt) { switch (qp_type(qp)) { case IB_QPT_RC: - if (((pkt->mask & RXE_READ_MASK) && - !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) || - ((pkt->mask & RXE_WRITE_MASK) && - !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) || - ((pkt->mask & RXE_ATOMIC_MASK) && - !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) { + if (!check_qp_attr_access(qp, pkt)) return RESPST_ERR_UNSUPPORTED_OPCODE; - } break; @@ -314,7 +339,7 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp) /* don't trust user space data */ if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) { spin_unlock_irqrestore(&srq->rq.consumer_lock, flags); - pr_warn("%s: invalid num_sge in SRQ entry\n", __func__); + rxe_dbg_qp(qp, "invalid num_sge in SRQ entry\n"); return RESPST_ERR_MALFORMED_WQE; } size = sizeof(*wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge); @@ -364,7 +389,7 @@ static enum resp_states check_resource(struct rxe_qp *qp, } } - if (pkt->mask & RXE_READ_OR_ATOMIC_MASK) { + if (pkt->mask & (RXE_READ_OR_ATOMIC_MASK | RXE_ATOMIC_WRITE_MASK)) { /* it is the requesters job to not send * too many read/atomic ops, we just * recycle the responder resource queue @@ -387,19 +412,66 @@ static enum resp_states check_resource(struct rxe_qp *qp, return RESPST_CHK_LENGTH; } -static enum resp_states check_length(struct rxe_qp *qp, - struct rxe_pkt_info *pkt) +static enum resp_states rxe_resp_check_length(struct rxe_qp *qp, + struct rxe_pkt_info *pkt) { - switch (qp_type(qp)) { - case IB_QPT_RC: - return RESPST_CHK_RKEY; - - case IB_QPT_UC: - return RESPST_CHK_RKEY; + /* + * See IBA C9-92 + * For UD QPs we only check if the packet will fit in the + * receive buffer later. For rdma operations additional + * length checks are performed in check_rkey. 
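
check_qp_attr_access() above extends the RC permission test: each placement type named in the packet's FETH must be matched by the corresponding FLUSH access flag on the QP. A condensed restatement as a standalone predicate, with invented bit values (the real IB_FLUSH_* and IB_ACCESS_* encodings differ):

#include <stdio.h>
#include <stdbool.h>

#define PLT_GLOBAL     (1 << 0)
#define PLT_PERSISTENT (1 << 1)
#define QP_ACC_FLUSH_GLOBAL     (1 << 4)
#define QP_ACC_FLUSH_PERSISTENT (1 << 5)

static bool flush_allowed(unsigned int plt, unsigned int qp_access)
{
    if ((plt & PLT_GLOBAL) && !(qp_access & QP_ACC_FLUSH_GLOBAL))
        return false;
    if ((plt & PLT_PERSISTENT) && !(qp_access & QP_ACC_FLUSH_PERSISTENT))
        return false;
    return true;
}

int main(void)
{
    printf("%d\n", flush_allowed(PLT_PERSISTENT, QP_ACC_FLUSH_GLOBAL)); /* 0 */
    printf("%d\n", flush_allowed(PLT_GLOBAL, QP_ACC_FLUSH_GLOBAL));     /* 1 */
    return 0;
}
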
+ */ + if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) || + (qp_type(qp) == IB_QPT_UC))) { + unsigned int mtu = qp->mtu; + unsigned int payload = payload_size(pkt); + + if ((pkt->mask & RXE_START_MASK) && + (pkt->mask & RXE_END_MASK)) { + if (unlikely(payload > mtu)) { + rxe_dbg_qp(qp, "only packet too long"); + return RESPST_ERR_LENGTH; + } + } else if ((pkt->mask & RXE_START_MASK) || + (pkt->mask & RXE_MIDDLE_MASK)) { + if (unlikely(payload != mtu)) { + rxe_dbg_qp(qp, "first or middle packet not mtu"); + return RESPST_ERR_LENGTH; + } + } else if (pkt->mask & RXE_END_MASK) { + if (unlikely((payload == 0) || (payload > mtu))) { + rxe_dbg_qp(qp, "last packet zero or too long"); + return RESPST_ERR_LENGTH; + } + } + } - default: - return RESPST_CHK_RKEY; + /* See IBA C9-94 */ + if (pkt->mask & RXE_RETH_MASK) { + if (reth_len(pkt) > (1U << 31)) { + rxe_dbg_qp(qp, "dma length too long"); + return RESPST_ERR_LENGTH; + } } + + return RESPST_CHK_RKEY; +} + +static void qp_resp_from_reth(struct rxe_qp *qp, struct rxe_pkt_info *pkt) +{ + qp->resp.va = reth_va(pkt); + qp->resp.offset = 0; + qp->resp.rkey = reth_rkey(pkt); + qp->resp.resid = reth_len(pkt); + qp->resp.length = reth_len(pkt); +} + +static void qp_resp_from_atmeth(struct rxe_qp *qp, struct rxe_pkt_info *pkt) +{ + qp->resp.va = atmeth_va(pkt); + qp->resp.offset = 0; + qp->resp.rkey = atmeth_rkey(pkt); + qp->resp.resid = sizeof(u64); } static enum resp_states check_rkey(struct rxe_qp *qp, @@ -413,29 +485,32 @@ static enum resp_states check_rkey(struct rxe_qp *qp, u32 pktlen; int mtu = qp->mtu; enum resp_states state; - int access; - - if (pkt->mask & RXE_READ_OR_WRITE_MASK) { - if (pkt->mask & RXE_RETH_MASK) { - qp->resp.va = reth_va(pkt); - qp->resp.offset = 0; - qp->resp.rkey = reth_rkey(pkt); - qp->resp.resid = reth_len(pkt); - qp->resp.length = reth_len(pkt); - } + int access = 0; + + if (pkt->mask & (RXE_READ_OR_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) { + if (pkt->mask & RXE_RETH_MASK) + qp_resp_from_reth(qp, pkt); + access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ : IB_ACCESS_REMOTE_WRITE; + } else if (pkt->mask & RXE_FLUSH_MASK) { + u32 flush_type = feth_plt(pkt); + + if (pkt->mask & RXE_RETH_MASK) + qp_resp_from_reth(qp, pkt); + + if (flush_type & IB_FLUSH_GLOBAL) + access |= IB_ACCESS_FLUSH_GLOBAL; + if (flush_type & IB_FLUSH_PERSISTENT) + access |= IB_ACCESS_FLUSH_PERSISTENT; } else if (pkt->mask & RXE_ATOMIC_MASK) { - qp->resp.va = atmeth_va(pkt); - qp->resp.offset = 0; - qp->resp.rkey = atmeth_rkey(pkt); - qp->resp.resid = sizeof(u64); + qp_resp_from_atmeth(qp, pkt); access = IB_ACCESS_REMOTE_ATOMIC; } else { return RESPST_EXECUTE; } - /* A zero-byte op is not required to set an addr or rkey. */ + /* A zero-byte op is not required to set an addr or rkey. 
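
rxe_resp_check_length() encodes the IBA C9-92 payload rules for RC and UC: a first or middle packet must carry exactly one MTU, a last packet anything in (0, mtu], and an only packet at most one MTU, with the RETH DMA length additionally capped at 2^31 (C9-94). The payload rule as a standalone predicate:

#include <stdio.h>
#include <stdbool.h>

enum pkt_pos { PKT_ONLY, PKT_FIRST, PKT_MIDDLE, PKT_LAST };

static bool payload_ok(enum pkt_pos pos, unsigned int payload, unsigned int mtu)
{
    switch (pos) {
    case PKT_ONLY:
        return payload <= mtu;
    case PKT_FIRST:
    case PKT_MIDDLE:
        return payload == mtu;      /* partial packets must fill the MTU */
    case PKT_LAST:
        return payload > 0 && payload <= mtu;
    }
    return false;
}

int main(void)
{
    printf("%d %d %d\n",
           payload_ok(PKT_FIRST, 1024, 1024),   /* 1 */
           payload_ok(PKT_MIDDLE, 512, 1024),   /* 0 */
           payload_ok(PKT_LAST, 0, 1024));      /* 0 */
    return 0;
}
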
See C9-88 */ if ((pkt->mask & RXE_READ_OR_WRITE_MASK) && (pkt->mask & RXE_RETH_MASK) && reth_len(pkt) == 0) { @@ -450,15 +525,14 @@ static enum resp_states check_rkey(struct rxe_qp *qp, if (rkey_is_mw(rkey)) { mw = rxe_lookup_mw(qp, access, rkey); if (!mw) { - pr_debug("%s: no MW matches rkey %#x\n", - __func__, rkey); + rxe_dbg_qp(qp, "no MW matches rkey %#x\n", rkey); state = RESPST_ERR_RKEY_VIOLATION; goto err; } mr = mw->mr; if (!mr) { - pr_err("%s: MW doesn't have an MR\n", __func__); + rxe_dbg_qp(qp, "MW doesn't have an MR\n"); state = RESPST_ERR_RKEY_VIOLATION; goto err; } @@ -471,19 +545,27 @@ static enum resp_states check_rkey(struct rxe_qp *qp, } else { mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE); if (!mr) { - pr_debug("%s: no MR matches rkey %#x\n", - __func__, rkey); + rxe_dbg_qp(qp, "no MR matches rkey %#x\n", rkey); state = RESPST_ERR_RKEY_VIOLATION; goto err; } } + if (pkt->mask & RXE_FLUSH_MASK) { + /* FLUSH MR may not set va or resid + * no need to check range since we will flush whole mr + */ + if (feth_sel(pkt) == IB_FLUSH_MR) + goto skip_check_range; + } + if (mr_check_range(mr, va + qp->resp.offset, resid)) { state = RESPST_ERR_RKEY_VIOLATION; goto err; } - if (pkt->mask & RXE_WRITE_MASK) { +skip_check_range: + if (pkt->mask & (RXE_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) { if (resid > mtu) { if (pktlen != mtu || bth_pad(pkt)) { state = RESPST_ERR_LENGTH; @@ -583,15 +665,66 @@ static struct resp_res *rxe_prepare_res(struct rxe_qp *qp, res->state = rdatm_res_state_new; break; case RXE_ATOMIC_MASK: + case RXE_ATOMIC_WRITE_MASK: res->first_psn = pkt->psn; res->last_psn = pkt->psn; res->cur_psn = pkt->psn; break; + case RXE_FLUSH_MASK: + res->flush.va = qp->resp.va + qp->resp.offset; + res->flush.length = qp->resp.length; + res->flush.type = feth_plt(pkt); + res->flush.level = feth_sel(pkt); } return res; } +static enum resp_states process_flush(struct rxe_qp *qp, + struct rxe_pkt_info *pkt) +{ + u64 length, start; + struct rxe_mr *mr = qp->resp.mr; + struct resp_res *res = qp->resp.res; + + /* oA19-14, oA19-15 */ + if (res && res->replay) + return RESPST_ACKNOWLEDGE; + else if (!res) { + res = rxe_prepare_res(qp, pkt, RXE_FLUSH_MASK); + qp->resp.res = res; + } + + if (res->flush.level == IB_FLUSH_RANGE) { + start = res->flush.va; + length = res->flush.length; + } else { /* level == IB_FLUSH_MR */ + start = mr->ibmr.iova; + length = mr->ibmr.length; + } + + if (res->flush.type & IB_FLUSH_PERSISTENT) { + if (rxe_flush_pmem_iova(mr, start, length)) + return RESPST_ERR_RKEY_VIOLATION; + /* Make data persistent. */ + wmb(); + } else if (res->flush.type & IB_FLUSH_GLOBAL) { + /* Make data globally visible. */ + wmb(); + } + + qp->resp.msn++; + + /* next expected psn, read handles this separately */ + qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; + qp->resp.ack_psn = qp->resp.psn; + + qp->resp.opcode = pkt->opcode; + qp->resp.status = IB_WC_SUCCESS; + + return RESPST_ACKNOWLEDGE; +} + /* Guarantee atomicity of atomic operations at the machine level. */ static DEFINE_SPINLOCK(atomic_ops_lock); @@ -652,6 +785,63 @@ out: return ret; } +#ifdef CONFIG_64BIT +static enum resp_states do_atomic_write(struct rxe_qp *qp, + struct rxe_pkt_info *pkt) +{ + struct rxe_mr *mr = qp->resp.mr; + int payload = payload_size(pkt); + u64 src, *dst; + + if (mr->state != RXE_MR_STATE_VALID) + return RESPST_ERR_RKEY_VIOLATION; + + memcpy(&src, payload_addr(pkt), payload); + + dst = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, payload); + /* check vaddr is 8-byte aligned. 
*/ + if (!dst || (uintptr_t)dst & 7) + return RESPST_ERR_MISALIGNED_ATOMIC; + + /* Do atomic write after all prior operations have completed */ + smp_store_release(dst, src); + + /* decrease resp.resid to zero */ + qp->resp.resid -= sizeof(payload); + + qp->resp.msn++; + + /* next expected psn, read handles this separately */ + qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; + qp->resp.ack_psn = qp->resp.psn; + + qp->resp.opcode = pkt->opcode; + qp->resp.status = IB_WC_SUCCESS; + return RESPST_ACKNOWLEDGE; +} +#else +static enum resp_states do_atomic_write(struct rxe_qp *qp, + struct rxe_pkt_info *pkt) +{ + return RESPST_ERR_UNSUPPORTED_OPCODE; +} +#endif /* CONFIG_64BIT */ + +static enum resp_states atomic_write_reply(struct rxe_qp *qp, + struct rxe_pkt_info *pkt) +{ + struct resp_res *res = qp->resp.res; + + if (!res) { + res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK); + qp->resp.res = res; + } + + if (res->replay) + return RESPST_ACKNOWLEDGE; + return do_atomic_write(qp, pkt); +} + static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp, struct rxe_pkt_info *ack, int opcode, @@ -807,14 +997,19 @@ static enum resp_states read_reply(struct rxe_qp *qp, skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload, res->cur_psn, AETH_ACK_UNLIMITED); if (!skb) { - rxe_put(mr); + if (mr) + rxe_put(mr); return RESPST_ERR_RNR; } - rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt), - payload, RXE_FROM_MR_OBJ); + err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt), + payload, RXE_FROM_MR_OBJ); if (mr) rxe_put(mr); + if (err) { + kfree_skb(skb); + return RESPST_ERR_RKEY_VIOLATION; + } if (bth_pad(&ack_pkt)) { u8 *pad = payload_addr(&ack_pkt) + payload; @@ -890,6 +1085,10 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) return RESPST_READ_REPLY; } else if (pkt->mask & RXE_ATOMIC_MASK) { return RESPST_ATOMIC_REPLY; + } else if (pkt->mask & RXE_ATOMIC_WRITE_MASK) { + return RESPST_ATOMIC_WRITE_REPLY; + } else if (pkt->mask & RXE_FLUSH_MASK) { + return RESPST_PROCESS_FLUSH; } else { /* Unreachable */ WARN_ON_ONCE(1); @@ -1040,7 +1239,7 @@ static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn, err = rxe_xmit_packet(qp, &ack_pkt, skb); if (err) - pr_err_ratelimited("Failed sending %s\n", msg); + rxe_dbg_qp(qp, "Failed sending %s\n", msg); return err; } @@ -1063,6 +1262,19 @@ static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) return ret; } +static int send_read_response_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) +{ + int ret = send_common_ack(qp, syndrome, psn, + IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY, + "RDMA READ response of length zero ACK"); + + /* have to clear this since it is used to trigger + * long read replies + */ + qp->resp.res = NULL; + return ret; +} + static enum resp_states acknowledge(struct rxe_qp *qp, struct rxe_pkt_info *pkt) { @@ -1073,6 +1285,8 @@ static enum resp_states acknowledge(struct rxe_qp *qp, send_ack(qp, qp->resp.aeth_syndrome, pkt->psn); else if (pkt->mask & RXE_ATOMIC_MASK) send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn); + else if (pkt->mask & (RXE_FLUSH_MASK | RXE_ATOMIC_WRITE_MASK)) + send_read_response_ack(qp, AETH_ACK_UNLIMITED, pkt->psn); else if (bth_ack(pkt)) send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn); @@ -1129,6 +1343,22 @@ static enum resp_states duplicate_request(struct rxe_qp *qp, /* SEND. Ack again and cleanup. C9-105. 
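
The test above rejects any destination that cannot take the value in one naturally aligned 8-byte store; (uintptr_t)dst & 7 is the usual power-of-two alignment check. A two-line demonstration:

#include <stdio.h>
#include <stdint.h>

static int aligned8(const void *p)
{
    return ((uintptr_t)p & 7) == 0;  /* true only on an 8-byte boundary */
}

int main(void)
{
    _Alignas(8) char buf[16];

    printf("%d %d\n", aligned8(&buf[0]), aligned8(&buf[4]));  /* 1 0 */
    return 0;
}
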
*/ send_ack(qp, AETH_ACK_UNLIMITED, prev_psn); return RESPST_CLEANUP; + } else if (pkt->mask & RXE_FLUSH_MASK) { + struct resp_res *res; + + /* Find the operation in our list of responder resources. */ + res = find_resource(qp, pkt->psn); + if (res) { + res->replay = 1; + res->cur_psn = pkt->psn; + qp->resp.res = res; + rc = RESPST_PROCESS_FLUSH; + goto out; + } + + /* Resource not found. Class D error. Drop the request. */ + rc = RESPST_CLEANUP; + goto out; } else if (pkt->mask & RXE_READ_MASK) { struct resp_res *res; @@ -1184,7 +1414,9 @@ static enum resp_states duplicate_request(struct rxe_qp *qp, res->replay = 1; res->cur_psn = pkt->psn; qp->resp.res = res; - rc = RESPST_ATOMIC_REPLY; + rc = pkt->mask & RXE_ATOMIC_MASK ? + RESPST_ATOMIC_REPLY : + RESPST_ATOMIC_WRITE_REPLY; goto out; } @@ -1286,8 +1518,7 @@ int rxe_responder(void *arg) } while (1) { - pr_debug("qp#%d state = %s\n", qp_num(qp), - resp_state_name[state]); + rxe_dbg_qp(qp, "state = %s\n", resp_state_name[state]); switch (state) { case RESPST_GET_REQ: state = get_req(qp, &pkt); @@ -1305,7 +1536,7 @@ int rxe_responder(void *arg) state = check_resource(qp, pkt); break; case RESPST_CHK_LENGTH: - state = check_length(qp, pkt); + state = rxe_resp_check_length(qp, pkt); break; case RESPST_CHK_RKEY: state = check_rkey(qp, pkt); @@ -1322,6 +1553,12 @@ int rxe_responder(void *arg) case RESPST_ATOMIC_REPLY: state = atomic_reply(qp, pkt); break; + case RESPST_ATOMIC_WRITE_REPLY: + state = atomic_write_reply(qp, pkt); + break; + case RESPST_PROCESS_FLUSH: + state = process_flush(qp, pkt); + break; case RESPST_ACKNOWLEDGE: state = acknowledge(qp, pkt); break; @@ -1444,7 +1681,7 @@ int rxe_responder(void *arg) case RESPST_ERROR: qp->resp.goto_error = 0; - pr_debug("qp#%d moved to error state\n", qp_num(qp)); + rxe_dbg_qp(qp, "moved to error state\n"); rxe_qp_error(qp); goto exit; diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c index 02b39498c370d24e53bb61a8a2b79cf7b01c9432..82e37a41ced40def8626a5ff1560afb472ed14d3 100644 --- a/drivers/infiniband/sw/rxe/rxe_srq.c +++ b/drivers/infiniband/sw/rxe/rxe_srq.c @@ -13,13 +13,13 @@ int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init) struct ib_srq_attr *attr = &init->attr; if (attr->max_wr > rxe->attr.max_srq_wr) { - pr_warn("max_wr(%d) > max_srq_wr(%d)\n", + rxe_dbg(rxe, "max_wr(%d) > max_srq_wr(%d)\n", attr->max_wr, rxe->attr.max_srq_wr); goto err1; } if (attr->max_wr <= 0) { - pr_warn("max_wr(%d) <= 0\n", attr->max_wr); + rxe_dbg(rxe, "max_wr(%d) <= 0\n", attr->max_wr); goto err1; } @@ -27,7 +27,7 @@ int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init) attr->max_wr = RXE_MIN_SRQ_WR; if (attr->max_sge > rxe->attr.max_srq_sge) { - pr_warn("max_sge(%d) > max_srq_sge(%d)\n", + rxe_dbg(rxe, "max_sge(%d) > max_srq_sge(%d)\n", attr->max_sge, rxe->attr.max_srq_sge); goto err1; } @@ -65,7 +65,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, type = QUEUE_TYPE_FROM_CLIENT; q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size, type); if (!q) { - pr_warn("unable to allocate queue for srq\n"); + rxe_dbg_srq(srq, "Unable to allocate queue\n"); return -ENOMEM; } @@ -94,24 +94,24 @@ int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask) { if (srq->error) { - pr_warn("srq in error state\n"); + rxe_dbg_srq(srq, "in error state\n"); goto err1; } if (mask & IB_SRQ_MAX_WR) { if (attr->max_wr > rxe->attr.max_srq_wr) { - pr_warn("max_wr(%d) > 
max_srq_wr(%d)\n", + rxe_dbg_srq(srq, "max_wr(%d) > max_srq_wr(%d)\n", attr->max_wr, rxe->attr.max_srq_wr); goto err1; } if (attr->max_wr <= 0) { - pr_warn("max_wr(%d) <= 0\n", attr->max_wr); + rxe_dbg_srq(srq, "max_wr(%d) <= 0\n", attr->max_wr); goto err1; } if (srq->limit && (attr->max_wr < srq->limit)) { - pr_warn("max_wr (%d) < srq->limit (%d)\n", + rxe_dbg_srq(srq, "max_wr (%d) < srq->limit (%d)\n", attr->max_wr, srq->limit); goto err1; } @@ -122,13 +122,13 @@ int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq, if (mask & IB_SRQ_LIMIT) { if (attr->srq_limit > rxe->attr.max_srq_wr) { - pr_warn("srq_limit(%d) > max_srq_wr(%d)\n", + rxe_dbg_srq(srq, "srq_limit(%d) > max_srq_wr(%d)\n", attr->srq_limit, rxe->attr.max_srq_wr); goto err1; } if (attr->srq_limit > srq->rq.queue->buf->index_mask) { - pr_warn("srq_limit (%d) > cur limit(%d)\n", + rxe_dbg_srq(srq, "srq_limit (%d) > cur limit(%d)\n", attr->srq_limit, srq->rq.queue->buf->index_mask); goto err1; diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c index ec2b7de1c4972105e1552b2bf3c8190869ce86a8..60b90e33a88496a4bbaf2e97dd0563c862ffd081 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.c +++ b/drivers/infiniband/sw/rxe/rxe_task.c @@ -4,10 +4,6 @@ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. */ -#include -#include -#include - #include "rxe.h" int __rxe_do_task(struct rxe_task *task) @@ -28,30 +24,31 @@ int __rxe_do_task(struct rxe_task *task) * a second caller finds the task already running * but looks just after the last call to func */ -void rxe_do_task(struct tasklet_struct *t) +static void do_task(struct tasklet_struct *t) { int cont; int ret; struct rxe_task *task = from_tasklet(task, t, tasklet); + struct rxe_qp *qp = (struct rxe_qp *)task->arg; unsigned int iterations = RXE_MAX_ITERATIONS; - spin_lock_bh(&task->state_lock); + spin_lock_bh(&task->lock); switch (task->state) { case TASK_STATE_START: task->state = TASK_STATE_BUSY; - spin_unlock_bh(&task->state_lock); + spin_unlock_bh(&task->lock); break; case TASK_STATE_BUSY: task->state = TASK_STATE_ARMED; fallthrough; case TASK_STATE_ARMED: - spin_unlock_bh(&task->state_lock); + spin_unlock_bh(&task->lock); return; default: - spin_unlock_bh(&task->state_lock); - pr_warn("%s failed with bad state %d\n", __func__, task->state); + spin_unlock_bh(&task->lock); + rxe_dbg_qp(qp, "failed with bad state %d\n", task->state); return; } @@ -59,7 +56,7 @@ void rxe_do_task(struct tasklet_struct *t) cont = 0; ret = task->func(task->arg); - spin_lock_bh(&task->state_lock); + spin_lock_bh(&task->lock); switch (task->state) { case TASK_STATE_BUSY: if (ret) { @@ -85,27 +82,25 @@ void rxe_do_task(struct tasklet_struct *t) break; default: - pr_warn("%s failed with bad state %d\n", __func__, - task->state); + rxe_dbg_qp(qp, "failed with bad state %d\n", + task->state); } - spin_unlock_bh(&task->state_lock); + spin_unlock_bh(&task->lock); } while (cont); task->ret = ret; } -int rxe_init_task(struct rxe_task *task, - void *arg, int (*func)(void *), char *name) +int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *)) { task->arg = arg; task->func = func; - snprintf(task->name, sizeof(task->name), "%s", name); task->destroyed = false; - tasklet_setup(&task->tasklet, rxe_do_task); + tasklet_setup(&task->tasklet, do_task); task->state = TASK_STATE_START; - spin_lock_init(&task->state_lock); + spin_lock_init(&task->lock); return 0; } @@ -121,23 +116,28 @@ void rxe_cleanup_task(struct rxe_task *task) task->destroyed = true; 
do { - spin_lock_bh(&task->state_lock); + spin_lock_bh(&task->lock); idle = (task->state == TASK_STATE_START); - spin_unlock_bh(&task->state_lock); + spin_unlock_bh(&task->lock); } while (!idle); tasklet_kill(&task->tasklet); } -void rxe_run_task(struct rxe_task *task, int sched) +void rxe_run_task(struct rxe_task *task) +{ + if (task->destroyed) + return; + + do_task(&task->tasklet); +} + +void rxe_sched_task(struct rxe_task *task) { if (task->destroyed) return; - if (sched) - tasklet_schedule(&task->tasklet); - else - rxe_do_task(&task->tasklet); + tasklet_schedule(&task->tasklet); } void rxe_disable_task(struct rxe_task *task) diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h index 7f612a1c68a7ba3c76d02aaa1fd5ee04c384a2bd..7b88129702ac6dae833368ec3f7c7c8413ebb943 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.h +++ b/drivers/infiniband/sw/rxe/rxe_task.h @@ -21,11 +21,10 @@ enum { struct rxe_task { struct tasklet_struct tasklet; int state; - spinlock_t state_lock; /* spinlock for task state */ + spinlock_t lock; void *arg; int (*func)(void *arg); int ret; - char name[16]; bool destroyed; }; @@ -34,8 +33,7 @@ struct rxe_task { * arg => parameter to pass to fcn * func => function to call until it returns != 0 */ -int rxe_init_task(struct rxe_task *task, - void *arg, int (*func)(void *), char *name); +int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *)); /* cleanup task */ void rxe_cleanup_task(struct rxe_task *task); @@ -46,18 +44,9 @@ void rxe_cleanup_task(struct rxe_task *task); */ int __rxe_do_task(struct rxe_task *task); -/* - * common function called by any of the main tasklets - * If there is any chance that there is additional - * work to do someone must reschedule the task before - * leaving - */ -void rxe_do_task(struct tasklet_struct *t); +void rxe_run_task(struct rxe_task *task); -/* run a task, else schedule it to run as a tasklet, The decision - * to run or schedule tasklet is based on the parameter sched. 
- */ -void rxe_run_task(struct rxe_task *task, int sched); +void rxe_sched_task(struct rxe_task *task); /* keep a task from scheduling */ void rxe_disable_task(struct rxe_task *task); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 88825edc7dce19c0ac4a2806586fe68202e7d4aa..025b35bf014e2a24f292ac838a2ac66e7087b17a 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -172,10 +172,6 @@ static int rxe_create_ah(struct ib_ah *ibah, ah->is_user = false; } - err = rxe_av_chk_attr(rxe, init_attr->ah_attr); - if (err) - return err; - err = rxe_add_to_pool_ah(&rxe->ah_pool, ah, init_attr->flags & RDMA_CREATE_AH_SLEEPABLE); if (err) @@ -184,6 +180,12 @@ static int rxe_create_ah(struct ib_ah *ibah, /* create index > 0 */ ah->ah_num = ah->elem.index; + err = rxe_ah_chk_attr(ah, init_attr->ah_attr); + if (err) { + rxe_cleanup(ah); + return err; + } + if (uresp) { /* only if new user provider */ err = copy_to_user(&uresp->ah_num, &ah->ah_num, @@ -206,10 +208,9 @@ static int rxe_create_ah(struct ib_ah *ibah, static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr) { int err; - struct rxe_dev *rxe = to_rdev(ibah->device); struct rxe_ah *ah = to_rah(ibah); - err = rxe_av_chk_attr(rxe, attr); + err = rxe_ah_chk_attr(ah, attr); if (err) return err; @@ -238,7 +239,6 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags) static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr) { - int err; int i; u32 length; struct rxe_recv_wqe *recv_wqe; @@ -246,15 +246,11 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr) int full; full = queue_full(rq->queue, QUEUE_TYPE_TO_DRIVER); - if (unlikely(full)) { - err = -ENOMEM; - goto err1; - } + if (unlikely(full)) + return -ENOMEM; - if (unlikely(num_sge > rq->max_sge)) { - err = -EINVAL; - goto err1; - } + if (unlikely(num_sge > rq->max_sge)) + return -EINVAL; length = 0; for (i = 0; i < num_sge; i++) @@ -275,9 +271,6 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr) queue_advance_producer(rq->queue, QUEUE_TYPE_TO_DRIVER); return 0; - -err1: - return err; } static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init, @@ -343,10 +336,7 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, if (err) return err; - err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata); - if (err) - return err; - return 0; + return rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata); } static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) @@ -453,11 +443,11 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, err = rxe_qp_chk_attr(rxe, qp, attr, mask); if (err) - goto err1; + return err; err = rxe_qp_from_attr(qp, attr, mask, udata); if (err) - goto err1; + return err; if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH)) qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label, @@ -465,9 +455,6 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, qp->attr.dest_qp_num); return 0; - -err1: - return err; } static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, @@ -501,24 +488,21 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr, struct rxe_sq *sq = &qp->sq; if (unlikely(num_sge > sq->max_sge)) - goto err1; + return -EINVAL; if (unlikely(mask & WR_ATOMIC_MASK)) { if (length < 8) - goto err1; + return -EINVAL; if (atomic_wr(ibwr)->remote_addr & 0x7) - goto 
err1; + return -EINVAL; } if (unlikely((ibwr->send_flags & IB_SEND_INLINE) && (length > sq->max_inline))) - goto err1; + return -EINVAL; return 0; - -err1: - return -EINVAL; } static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, @@ -695,9 +679,9 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr, wr = next; } - rxe_run_task(&qp->req.task, 1); + rxe_sched_task(&qp->req.task); if (unlikely(qp->req.state == QP_STATE_ERROR)) - rxe_run_task(&qp->comp.task, 1); + rxe_sched_task(&qp->comp.task); return err; } @@ -719,7 +703,7 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, if (qp->is_user) { /* Utilize process context to do protocol processing */ - rxe_run_task(&qp->req.task, 0); + rxe_run_task(&qp->req.task); return 0; } else return rxe_post_send_kernel(qp, wr, bad_wr); @@ -735,14 +719,12 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) { *bad_wr = wr; - err = -EINVAL; - goto err1; + return -EINVAL; } if (unlikely(qp->srq)) { *bad_wr = wr; - err = -EINVAL; - goto err1; + return -EINVAL; } spin_lock_irqsave(&rq->producer_lock, flags); @@ -759,9 +741,8 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, spin_unlock_irqrestore(&rq->producer_lock, flags); if (qp->resp.state == QP_STATE_ERROR) - rxe_run_task(&qp->resp.task, 1); + rxe_sched_task(&qp->resp.task); -err1: return err; } @@ -826,16 +807,9 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) err = rxe_cq_chk_attr(rxe, cq, cqe, 0); if (err) - goto err1; - - err = rxe_cq_resize_queue(cq, cqe, uresp, udata); - if (err) - goto err1; - - return 0; + return err; -err1: - return err; + return rxe_cq_resize_queue(cq, cqe, uresp, udata); } static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) @@ -902,6 +876,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access) rxe_get(pd); mr->ibmr.pd = ibpd; + mr->ibmr.device = ibpd->device; rxe_mr_init_dma(access, mr); rxe_finalize(mr); @@ -921,26 +896,23 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, struct rxe_mr *mr; mr = rxe_alloc(&rxe->mr_pool); - if (!mr) { - err = -ENOMEM; - goto err2; - } - + if (!mr) + return ERR_PTR(-ENOMEM); rxe_get(pd); mr->ibmr.pd = ibpd; + mr->ibmr.device = ibpd->device; err = rxe_mr_init_user(rxe, start, length, iova, access, mr); if (err) - goto err3; + goto err1; rxe_finalize(mr); return &mr->ibmr; -err3: +err1: rxe_cleanup(mr); -err2: return ERR_PTR(err); } @@ -956,25 +928,23 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, return ERR_PTR(-EINVAL); mr = rxe_alloc(&rxe->mr_pool); - if (!mr) { - err = -ENOMEM; - goto err1; - } + if (!mr) + return ERR_PTR(-ENOMEM); rxe_get(pd); mr->ibmr.pd = ibpd; + mr->ibmr.device = ibpd->device; err = rxe_mr_init_fast(max_num_sg, mr); if (err) - goto err2; + goto err1; rxe_finalize(mr); return &mr->ibmr; -err2: - rxe_cleanup(mr); err1: + rxe_cleanup(mr); return ERR_PTR(err); } @@ -1134,7 +1104,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name) err = ib_register_device(dev, ibdev_name, NULL); if (err) - pr_warn("%s failed with error %d\n", __func__, err); + rxe_dbg(rxe, "failed with error %d\n", err); /* * Note that rxe may be invalid at this point if another thread diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h index 
5f5cbfcb35695b930168df9f9f96c3ff2122bef9..19ddfa89048035d64ca551b711961aa4529da36a 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h @@ -165,6 +165,12 @@ struct resp_res { u64 va; u32 resid; } read; + struct { + u32 length; + u64 va; + u8 type; + u8 level; + } flush; }; }; @@ -304,7 +310,6 @@ struct rxe_mr { u32 lkey; u32 rkey; enum rxe_mr_state state; - enum ib_mr_type type; u32 offset; int access; diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c index d68e37859e73bf37f0fd64a8a8e120479d93418b..403029de6b92d45050191c31c3feef64a9bd68e1 100644 --- a/drivers/infiniband/sw/siw/siw_cq.c +++ b/drivers/infiniband/sw/siw/siw_cq.c @@ -56,8 +56,6 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) { memset(wc, 0, sizeof(*wc)); wc->wr_id = cqe->id; - wc->status = map_cqe_status[cqe->status].ib; - wc->opcode = map_wc_opcode[cqe->opcode]; wc->byte_len = cqe->bytes; /* @@ -71,10 +69,32 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) wc->wc_flags = IB_WC_WITH_INVALIDATE; } wc->qp = cqe->base_qp; + wc->opcode = map_wc_opcode[cqe->opcode]; + wc->status = map_cqe_status[cqe->status].ib; siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%pK\n", cq->cq_get % cq->num_cqe, cqe->opcode, cqe->flags, (void *)(uintptr_t)cqe->id); + } else { + /* + * A malicious user may set invalid opcode or + * status in the user mmapped CQE array. + * Sanity check and correct values in that case + * to avoid out-of-bounds access to global arrays + * for opcode and status mapping. + */ + u8 opcode = cqe->opcode; + u16 status = cqe->status; + + if (opcode >= SIW_NUM_OPCODES) { + opcode = 0; + status = SIW_WC_GENERAL_ERR; + } else if (status >= SIW_NUM_WC_STATUS) { + status = SIW_WC_GENERAL_ERR; + } + wc->opcode = map_wc_opcode[opcode]; + wc->status = map_cqe_status[status].ib; + } WRITE_ONCE(cqe->flags, 0); cq->cq_get++; diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c index 61c17db70d658a00cb4d041528924dc46ee71d21..b2b33dd3b4fa1e63943837639174e31cefc9328c 100644 --- a/drivers/infiniband/sw/siw/siw_mem.c +++ b/drivers/infiniband/sw/siw/siw_mem.c @@ -368,7 +368,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable) struct mm_struct *mm_s; u64 first_page_va; unsigned long mlock_limit; - unsigned int foll_flags = FOLL_WRITE; + unsigned int foll_flags = FOLL_LONGTERM; int num_pages, num_chunks, i, rv = 0; if (!can_do_mlock()) @@ -391,8 +391,8 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable) mmgrab(mm_s); - if (!writable) - foll_flags |= FOLL_FORCE; + if (writable) + foll_flags |= FOLL_WRITE; mmap_read_lock(mm_s); @@ -423,8 +423,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable) while (nents) { struct page **plist = &umem->page_chunk[i].plist[got]; - rv = pin_user_pages(first_page_va, nents, - foll_flags | FOLL_LONGTERM, + rv = pin_user_pages(first_page_va, nents, foll_flags, plist, NULL); if (rv < 0) goto out_sem_up; diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 7d47b521070b1b73d69a8822d45a6e879489e669..05052b49107f27a46e6374ef486886f071ad92fd 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx) dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); if (paddr) - return virt_to_page((void *)paddr); + return 
virt_to_page((void *)(uintptr_t)paddr); return NULL; } diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 3e814cfb298cf8342734fa6917eca138b2ee38b4..906fde1a2a0de2d9ebea9f2e340aacbe2b816fd5 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -676,13 +676,45 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr, static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { - struct siw_sqe sqe = {}; int rv = 0; while (wr) { - sqe.id = wr->wr_id; - sqe.opcode = wr->opcode; - rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR); + struct siw_sqe sqe = {}; + + switch (wr->opcode) { + case IB_WR_RDMA_WRITE: + sqe.opcode = SIW_OP_WRITE; + break; + case IB_WR_RDMA_READ: + sqe.opcode = SIW_OP_READ; + break; + case IB_WR_RDMA_READ_WITH_INV: + sqe.opcode = SIW_OP_READ_LOCAL_INV; + break; + case IB_WR_SEND: + sqe.opcode = SIW_OP_SEND; + break; + case IB_WR_SEND_WITH_IMM: + sqe.opcode = SIW_OP_SEND_WITH_IMM; + break; + case IB_WR_SEND_WITH_INV: + sqe.opcode = SIW_OP_SEND_REMOTE_INV; + break; + case IB_WR_LOCAL_INV: + sqe.opcode = SIW_OP_INVAL_STAG; + break; + case IB_WR_REG_MR: + sqe.opcode = SIW_OP_REG_MR; + break; + default: + rv = -EINVAL; + break; + } + if (!rv) { + sqe.id = wr->wr_id; + rv = siw_sqe_complete(qp, &sqe, 0, + SIW_WC_WR_FLUSH_ERR); + } if (rv) { if (bad_wr) *bad_wr = wr; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c index ea16ba5d8da6c8c0fc31dc4bd2c64cf83fe66f71..9ad8d985627524118150ddcdbcd583d4d576257e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c @@ -41,6 +41,11 @@ static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = { [IFLA_IPOIB_UMCAST] = { .type = NLA_U16 }, }; +static unsigned int ipoib_get_max_num_queues(void) +{ + return min_t(unsigned int, num_possible_cpus(), 128); +} + static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); @@ -172,6 +177,8 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = { .changelink = ipoib_changelink, .get_size = ipoib_get_size, .fill_info = ipoib_fill_info, + .get_num_rx_queues = ipoib_get_max_num_queues, + .get_num_tx_queues = ipoib_get_max_num_queues, }; struct rtnl_link_ops *ipoib_get_link_ops(void) diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index a00ca117303a98cc8ca00c5e7e5aa933c617d64d..1b8eda0dae4e0cae4bb97d48ec0893f65584ca63 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -347,22 +347,6 @@ static void iser_device_try_release(struct iser_device *device) mutex_unlock(&ig.device_list_mutex); } -/* - * Called with state mutex held - */ -static int iser_conn_state_comp_exch(struct iser_conn *iser_conn, - enum iser_conn_state comp, - enum iser_conn_state exch) -{ - int ret; - - ret = (iser_conn->state == comp); - if (ret) - iser_conn->state = exch; - - return ret; -} - void iser_release_work(struct work_struct *work) { struct iser_conn *iser_conn; @@ -464,11 +448,13 @@ int iser_conn_terminate(struct iser_conn *iser_conn) struct ib_conn *ib_conn = &iser_conn->ib_conn; int err = 0; + lockdep_assert_held(&iser_conn->state_mutex); + /* terminate the iser conn only if the conn state is UP */ - if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP, - ISER_CONN_TERMINATING)) + if 
(iser_conn->state != ISER_CONN_UP) return 0; + iser_conn->state = ISER_CONN_TERMINATING; iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state); /* suspend queuing of new iscsi commands */ @@ -498,9 +484,10 @@ int iser_conn_terminate(struct iser_conn *iser_conn) */ static void iser_connect_error(struct rdma_cm_id *cma_id) { - struct iser_conn *iser_conn; + struct iser_conn *iser_conn = cma_id->context; + + lockdep_assert_held(&iser_conn->state_mutex); - iser_conn = cma_id->context; iser_conn->state = ISER_CONN_TERMINATING; } @@ -542,12 +529,13 @@ static void iser_calc_scsi_params(struct iser_conn *iser_conn, */ static void iser_addr_handler(struct rdma_cm_id *cma_id) { + struct iser_conn *iser_conn = cma_id->context; struct iser_device *device; - struct iser_conn *iser_conn; struct ib_conn *ib_conn; int ret; - iser_conn = cma_id->context; + lockdep_assert_held(&iser_conn->state_mutex); + if (iser_conn->state != ISER_CONN_PENDING) /* bailout */ return; @@ -597,6 +585,8 @@ static void iser_route_handler(struct rdma_cm_id *cma_id) struct ib_conn *ib_conn = &iser_conn->ib_conn; struct ib_device *ib_dev = ib_conn->device->ib_device; + lockdep_assert_held(&iser_conn->state_mutex); + if (iser_conn->state != ISER_CONN_PENDING) /* bailout */ return; @@ -629,14 +619,18 @@ failure: iser_connect_error(cma_id); } +/* + * Called with state mutex held + */ static void iser_connected_handler(struct rdma_cm_id *cma_id, const void *private_data) { - struct iser_conn *iser_conn; + struct iser_conn *iser_conn = cma_id->context; struct ib_qp_attr attr; struct ib_qp_init_attr init_attr; - iser_conn = cma_id->context; + lockdep_assert_held(&iser_conn->state_mutex); + if (iser_conn->state != ISER_CONN_PENDING) /* bailout */ return; @@ -657,30 +651,27 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id, complete(&iser_conn->up_completion); } -static void iser_disconnected_handler(struct rdma_cm_id *cma_id) -{ - struct iser_conn *iser_conn = cma_id->context; - - if (iser_conn_terminate(iser_conn)) { - if (iser_conn->iscsi_conn) - iscsi_conn_failure(iser_conn->iscsi_conn, - ISCSI_ERR_CONN_FAILED); - else - iser_err("iscsi_iser connection isn't bound\n"); - } -} - +/* + * Called with state mutex held + */ static void iser_cleanup_handler(struct rdma_cm_id *cma_id, bool destroy) { struct iser_conn *iser_conn = cma_id->context; + lockdep_assert_held(&iser_conn->state_mutex); /* * We are not guaranteed that we visited disconnected_handler * by now, call it here to be safe that we handle CM drep * and flush errors. */ - iser_disconnected_handler(cma_id); + if (iser_conn_terminate(iser_conn)) { + if (iser_conn->iscsi_conn) + iscsi_conn_failure(iser_conn->iscsi_conn, + ISCSI_ERR_CONN_FAILED); + else + iser_err("iscsi_iser connection isn't bound\n"); + } iser_free_ib_conn_res(iser_conn, destroy); complete(&iser_conn->ib_completion); } diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index b360a1527cd1028679da02e801c3b44f4d34429c..75404885cf98102ae6c15fe6020c71107a1e1531 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -993,9 +993,8 @@ isert_rx_login_req(struct isert_conn *isert_conn) * login request PDU. */ login->leading_connection = (!login_req->tsih) ? 
1 : 0; - login->current_stage = - (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) - >> 2; + login->current_stage = ISCSI_LOGIN_CURRENT_STAGE( + login_req->flags); login->version_min = login_req->min_version; login->version_max = login_req->max_version; memcpy(login->isid, login_req->isid, 6); diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 205fd44a4727a817695e62ab9aa5e1190443ffff..80abf45a197ac079f6304da86d9ca5b5c8d41abb 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -1064,10 +1064,8 @@ static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count) /* Align the MR to a 4K page size to match the block virt boundary */ nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K); - if (nr < 0) - return nr; - if (nr < req->sg_cnt) - return -EINVAL; + if (nr != count) + return nr < 0 ? nr : -EINVAL; ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); return nr; diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h index a2420eecaf5a1080c8e9f5f49ba9b70123a5b806..ab25619261d28b98d90488822fae4971bf0a1a55 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h +++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h @@ -68,10 +68,7 @@ enum { struct rtrs_ib_dev; struct rtrs_rdma_dev_pd_ops { - struct rtrs_ib_dev *(*alloc)(void); - void (*free)(struct rtrs_ib_dev *dev); int (*init)(struct rtrs_ib_dev *dev); - void (*deinit)(struct rtrs_ib_dev *dev); }; struct rtrs_rdma_dev_pd { diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c index 2a3c9ac64a42e289c9e71dde10f03d4e5756a47f..c76ba29da1e206297fc6017c9984489f1b364dc2 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c @@ -203,7 +203,6 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_path *srv_path) mutex_lock(&srv->paths_mutex); if (!--srv->dev_ref) { - kobject_del(srv->kobj_paths); kobject_put(srv->kobj_paths); mutex_unlock(&srv->paths_mutex); device_del(&srv->dev); @@ -304,12 +303,18 @@ destroy_root: void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path) { - if (srv_path->kobj.state_in_sysfs) { + if (srv_path->stats->kobj_stats.state_in_sysfs) { + sysfs_remove_group(&srv_path->stats->kobj_stats, + &rtrs_srv_stats_attr_group); kobject_del(&srv_path->stats->kobj_stats); kobject_put(&srv_path->stats->kobj_stats); + } + + if (srv_path->kobj.state_in_sysfs) { sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group); + kobject_del(&srv_path->kobj); kobject_put(&srv_path->kobj); - - rtrs_srv_destroy_once_sysfs_root_folders(srv_path); } + + rtrs_srv_destroy_once_sysfs_root_folders(srv_path); } diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c index 22d7ba05e9fe838b1fab5bde3e43b037d9b9bf0a..d1703e2c0b82fcc746154132fffafa5cbc61a77a 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c @@ -561,9 +561,11 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path) { struct rtrs_srv_sess *srv = srv_path->srv; struct rtrs_path *ss = &srv_path->s; - int i, mri, err, mrs_num; + int i, err, mrs_num; unsigned int chunk_bits; int chunks_per_mr = 1; + struct ib_mr *mr; + struct sg_table *sgt; /* * Here we map queue_depth chunks to MR. 
Firstly we have to @@ -586,16 +588,14 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path) if (!srv_path->mrs) return -ENOMEM; - srv_path->mrs_num = mrs_num; - - for (mri = 0; mri < mrs_num; mri++) { - struct rtrs_srv_mr *srv_mr = &srv_path->mrs[mri]; - struct sg_table *sgt = &srv_mr->sgt; + for (srv_path->mrs_num = 0; srv_path->mrs_num < mrs_num; + srv_path->mrs_num++) { + struct rtrs_srv_mr *srv_mr = &srv_path->mrs[srv_path->mrs_num]; struct scatterlist *s; - struct ib_mr *mr; int nr, nr_sgt, chunks; - chunks = chunks_per_mr * mri; + sgt = &srv_mr->sgt; + chunks = chunks_per_mr * srv_path->mrs_num; if (!always_invalidate) chunks_per_mr = min_t(int, chunks_per_mr, srv->queue_depth - chunks); @@ -622,7 +622,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path) } nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt, NULL, max_chunk_size); - if (nr < 0 || nr < sgt->nents) { + if (nr != nr_sgt) { err = nr < 0 ? nr : -EINVAL; goto dereg_mr; } @@ -644,31 +644,24 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path) ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); srv_mr->mr = mr; - - continue; -err: - while (mri--) { - srv_mr = &srv_path->mrs[mri]; - sgt = &srv_mr->sgt; - mr = srv_mr->mr; - rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1); -dereg_mr: - ib_dereg_mr(mr); -unmap_sg: - ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl, - sgt->nents, DMA_BIDIRECTIONAL); -free_sg: - sg_free_table(sgt); - } - kfree(srv_path->mrs); - - return err; } chunk_bits = ilog2(srv->queue_depth - 1) + 1; srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits); return 0; + +dereg_mr: + ib_dereg_mr(mr); +unmap_sg: + ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl, + sgt->nents, DMA_BIDIRECTIONAL); +free_sg: + sg_free_table(sgt); +err: + unmap_cont_bufs(srv_path); + + return err; } static void rtrs_srv_hb_err_handler(struct rtrs_con *c) @@ -1678,12 +1671,6 @@ static int create_con(struct rtrs_srv_path *srv_path, srv->queue_depth * (1 + 2) + 1); max_recv_wr = srv->queue_depth + 1; - /* - * If we have all receive requests posted and - * all write requests posted and each read request - * requires an invalidate request + drain - * and qp gets into error state. 
- */ } cq_num = max_send_wr + max_recv_wr; atomic_set(&con->c.sq_wr_avail, max_send_wr); @@ -1950,22 +1937,21 @@ static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id, { struct rtrs_srv_path *srv_path = NULL; struct rtrs_path *s = NULL; + struct rtrs_con *c = NULL; - if (ev->event != RDMA_CM_EVENT_CONNECT_REQUEST) { - struct rtrs_con *c = cm_id->context; - - s = c->path; - srv_path = to_srv_path(s); - } - - switch (ev->event) { - case RDMA_CM_EVENT_CONNECT_REQUEST: + if (ev->event == RDMA_CM_EVENT_CONNECT_REQUEST) /* * In case of error cma.c will destroy cm_id, * see cma_process_remove() */ return rtrs_rdma_connect(cm_id, ev->param.conn.private_data, ev->param.conn.private_data_len); + + c = cm_id->context; + s = c->path; + srv_path = to_srv_path(s); + + switch (ev->event) { case RDMA_CM_EVENT_ESTABLISHED: /* Nothing here */ break; diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c index ed324b47d93ae48017c34a80ab70d0b609adfbc7..4bf9d868cc522bd8b7631f0f28aba9199385800c 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs.c +++ b/drivers/infiniband/ulp/rtrs/rtrs.c @@ -557,7 +557,6 @@ EXPORT_SYMBOL(rtrs_addr_to_sockaddr); void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags, struct rtrs_rdma_dev_pd *pool) { - WARN_ON(pool->ops && (!pool->ops->alloc ^ !pool->ops->free)); INIT_LIST_HEAD(&pool->list); mutex_init(&pool->mutex); pool->pd_flags = pd_flags; @@ -583,15 +582,8 @@ static void dev_free(struct kref *ref) list_del(&dev->entry); mutex_unlock(&pool->mutex); - if (pool->ops && pool->ops->deinit) - pool->ops->deinit(dev); - ib_dealloc_pd(dev->ib_pd); - - if (pool->ops && pool->ops->free) - pool->ops->free(dev); - else - kfree(dev); + kfree(dev); } int rtrs_ib_dev_put(struct rtrs_ib_dev *dev) @@ -618,11 +610,8 @@ rtrs_ib_dev_find_or_add(struct ib_device *ib_dev, goto out_unlock; } mutex_unlock(&pool->mutex); - if (pool->ops && pool->ops->alloc) - dev = pool->ops->alloc(); - else - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (IS_ERR_OR_NULL(dev)) + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) goto out_err; kref_init(&dev->ref); @@ -644,10 +633,7 @@ out_unlock: out_free_pd: ib_dealloc_pd(dev->ib_pd); out_free_dev: - if (pool->ops && pool->ops->free) - pool->ops->free(dev); - else - kfree(dev); + kfree(dev); out_err: return NULL; } diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 1075c2ac8fe209deb703a97f38e5593791cfefe9..b4d6a4a5ae81e6e33e6b87b0a250c923c8b00b9e 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -3410,7 +3410,8 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_PKEY: - if (match_hex(args, &token)) { + ret = match_hex(args, &token); + if (ret) { pr_warn("bad P_Key parameter '%s'\n", p); goto out; } @@ -3470,7 +3471,8 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_MAX_SECT: - if (match_int(args, &token)) { + ret = match_int(args, &token); + if (ret) { pr_warn("bad max sect parameter '%s'\n", p); goto out; } @@ -3478,8 +3480,15 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_QUEUE_SIZE: - if (match_int(args, &token) || token < 1) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for queue_size parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1) { pr_warn("bad queue_size parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->scsi_host->can_queue = token; @@ -3490,25 +3499,40 @@ static int 
srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_MAX_CMD_PER_LUN: - if (match_int(args, &token) || token < 1) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max cmd_per_lun parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1) { pr_warn("bad max cmd_per_lun parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->scsi_host->cmd_per_lun = token; break; case SRP_OPT_TARGET_CAN_QUEUE: - if (match_int(args, &token) || token < 1) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max target_can_queue parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1) { pr_warn("bad max target_can_queue parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->target_can_queue = token; break; case SRP_OPT_IO_CLASS: - if (match_hex(args, &token)) { + ret = match_hex(args, &token); + if (ret) { pr_warn("bad IO class parameter '%s'\n", p); goto out; } @@ -3517,6 +3541,7 @@ static int srp_parse_options(struct net *net, const char *buf, pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n", token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS); + ret = -EINVAL; goto out; } target->io_class = token; @@ -3539,16 +3564,24 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_CMD_SG_ENTRIES: - if (match_int(args, &token) || token < 1 || token > 255) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max cmd_sg_entries parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1 || token > 255) { pr_warn("bad max cmd_sg_entries parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->cmd_sg_cnt = token; break; case SRP_OPT_ALLOW_EXT_SG: - if (match_int(args, &token)) { + ret = match_int(args, &token); + if (ret) { pr_warn("bad allow_ext_sg parameter '%s'\n", p); goto out; } @@ -3556,43 +3589,77 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_SG_TABLESIZE: - if (match_int(args, &token) || token < 1 || - token > SG_MAX_SEGMENTS) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max sg_tablesize parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1 || token > SG_MAX_SEGMENTS) { pr_warn("bad max sg_tablesize parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->sg_tablesize = token; break; case SRP_OPT_COMP_VECTOR: - if (match_int(args, &token) || token < 0) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for comp_vector parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 0) { pr_warn("bad comp_vector parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->comp_vector = token; break; case SRP_OPT_TL_RETRY_COUNT: - if (match_int(args, &token) || token < 2 || token > 7) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for tl_retry_count parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 2 || token > 7) { pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n", p); + ret = -EINVAL; goto out; } target->tl_retry_count = token; break; case SRP_OPT_MAX_IT_IU_SIZE: - if (match_int(args, &token) || token < 0) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max it_iu_size parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 0) { pr_warn("bad maximum initiator to target IU size '%s'\n", p); + ret = -EINVAL; goto out; } target->max_it_iu_size = 
token; break; case SRP_OPT_CH_COUNT: - if (match_int(args, &token) || token < 1) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for channel count parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1) { pr_warn("bad channel count %s\n", p); + ret = -EINVAL; goto out; } target->ch_count = token; @@ -3601,6 +3668,7 @@ static int srp_parse_options(struct net *net, const char *buf, default: pr_warn("unknown parameter or missing value '%s' in target creation request\n", p); + ret = -EINVAL; goto out; } } diff --git a/drivers/input/input.c b/drivers/input/input.c index 783961df362624bbf6be56a2a1e178115847e2a4..ca2e3dd7188bb9461b7916ce159902efacef1851 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1914,7 +1914,7 @@ static const struct device_type input_dev_type = { #endif }; -static char *input_devnode(struct device *dev, umode_t *mode) +static char *input_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev)); } diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c index 39e43b9575998d1dc07ce881a66affb9120b4e5a..ba6781f54ab73a6d37de76272a694161710e1ac2 100644 --- a/drivers/interconnect/qcom/icc-rpm.c +++ b/drivers/interconnect/qcom/icc-rpm.c @@ -477,11 +477,8 @@ int qnoc_probe(struct platform_device *pdev) } mmio = devm_ioremap_resource(dev, res); - - if (IS_ERR(mmio)) { - dev_err(dev, "Cannot ioremap interconnect bus resource\n"); + if (IS_ERR(mmio)) return PTR_ERR(mmio); - } qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg); if (IS_ERR(qp->regmap)) { diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c index ddbdf0943f94e7181deb7af0a41d36690a8d9439..5fa1710874258926ac8c31a7b8b70e0880a538d8 100644 --- a/drivers/interconnect/qcom/osm-l3.c +++ b/drivers/interconnect/qcom/osm-l3.c @@ -34,6 +34,7 @@ /* EPSS Register offsets */ #define EPSS_LUT_ROW_SIZE 4 +#define EPSS_REG_L3_VOTE 0x90 #define EPSS_REG_FREQ_LUT 0x100 #define EPSS_REG_PERF_STATE 0x320 @@ -74,6 +75,11 @@ struct qcom_osm_l3_desc { unsigned int reg_perf_state; }; +enum { + OSM_L3_MASTER_NODE = 10000, + OSM_L3_SLAVE_NODE, +}; + #define DEFINE_QNODE(_name, _id, _buswidth, ...) 
\ static const struct qcom_osm_l3_node _name = { \ .name = #_name, \ @@ -83,100 +89,44 @@ struct qcom_osm_l3_desc { .links = { __VA_ARGS__ }, \ } -DEFINE_QNODE(sdm845_osm_apps_l3, SDM845_MASTER_OSM_L3_APPS, 16, SDM845_SLAVE_OSM_L3); -DEFINE_QNODE(sdm845_osm_l3, SDM845_SLAVE_OSM_L3, 16); - -static const struct qcom_osm_l3_node * const sdm845_osm_l3_nodes[] = { - [MASTER_OSM_L3_APPS] = &sdm845_osm_apps_l3, - [SLAVE_OSM_L3] = &sdm845_osm_l3, -}; +DEFINE_QNODE(osm_l3_master, OSM_L3_MASTER_NODE, 16, OSM_L3_SLAVE_NODE); +DEFINE_QNODE(osm_l3_slave, OSM_L3_SLAVE_NODE, 16); -static const struct qcom_osm_l3_desc sdm845_icc_osm_l3 = { - .nodes = sdm845_osm_l3_nodes, - .num_nodes = ARRAY_SIZE(sdm845_osm_l3_nodes), - .lut_row_size = OSM_LUT_ROW_SIZE, - .reg_freq_lut = OSM_REG_FREQ_LUT, - .reg_perf_state = OSM_REG_PERF_STATE, +static const struct qcom_osm_l3_node * const osm_l3_nodes[] = { + [MASTER_OSM_L3_APPS] = &osm_l3_master, + [SLAVE_OSM_L3] = &osm_l3_slave, }; -DEFINE_QNODE(sc7180_osm_apps_l3, SC7180_MASTER_OSM_L3_APPS, 16, SC7180_SLAVE_OSM_L3); -DEFINE_QNODE(sc7180_osm_l3, SC7180_SLAVE_OSM_L3, 16); +DEFINE_QNODE(epss_l3_master, OSM_L3_MASTER_NODE, 32, OSM_L3_SLAVE_NODE); +DEFINE_QNODE(epss_l3_slave, OSM_L3_SLAVE_NODE, 32); -static const struct qcom_osm_l3_node * const sc7180_osm_l3_nodes[] = { - [MASTER_OSM_L3_APPS] = &sc7180_osm_apps_l3, - [SLAVE_OSM_L3] = &sc7180_osm_l3, +static const struct qcom_osm_l3_node * const epss_l3_nodes[] = { + [MASTER_EPSS_L3_APPS] = &epss_l3_master, + [SLAVE_EPSS_L3_SHARED] = &epss_l3_slave, }; -static const struct qcom_osm_l3_desc sc7180_icc_osm_l3 = { - .nodes = sc7180_osm_l3_nodes, - .num_nodes = ARRAY_SIZE(sc7180_osm_l3_nodes), +static const struct qcom_osm_l3_desc osm_l3 = { + .nodes = osm_l3_nodes, + .num_nodes = ARRAY_SIZE(osm_l3_nodes), .lut_row_size = OSM_LUT_ROW_SIZE, .reg_freq_lut = OSM_REG_FREQ_LUT, .reg_perf_state = OSM_REG_PERF_STATE, }; -DEFINE_QNODE(sc7280_epss_apps_l3, SC7280_MASTER_EPSS_L3_APPS, 32, SC7280_SLAVE_EPSS_L3); -DEFINE_QNODE(sc7280_epss_l3, SC7280_SLAVE_EPSS_L3, 32); - -static const struct qcom_osm_l3_node * const sc7280_epss_l3_nodes[] = { - [MASTER_EPSS_L3_APPS] = &sc7280_epss_apps_l3, - [SLAVE_EPSS_L3_SHARED] = &sc7280_epss_l3, -}; - -static const struct qcom_osm_l3_desc sc7280_icc_epss_l3 = { - .nodes = sc7280_epss_l3_nodes, - .num_nodes = ARRAY_SIZE(sc7280_epss_l3_nodes), +static const struct qcom_osm_l3_desc epss_l3_perf_state = { + .nodes = epss_l3_nodes, + .num_nodes = ARRAY_SIZE(epss_l3_nodes), .lut_row_size = EPSS_LUT_ROW_SIZE, .reg_freq_lut = EPSS_REG_FREQ_LUT, .reg_perf_state = EPSS_REG_PERF_STATE, }; -DEFINE_QNODE(sc8180x_osm_apps_l3, SC8180X_MASTER_OSM_L3_APPS, 32, SC8180X_SLAVE_OSM_L3); -DEFINE_QNODE(sc8180x_osm_l3, SC8180X_SLAVE_OSM_L3, 32); - -static const struct qcom_osm_l3_node * const sc8180x_osm_l3_nodes[] = { - [MASTER_OSM_L3_APPS] = &sc8180x_osm_apps_l3, - [SLAVE_OSM_L3] = &sc8180x_osm_l3, -}; - -static const struct qcom_osm_l3_desc sc8180x_icc_osm_l3 = { - .nodes = sc8180x_osm_l3_nodes, - .num_nodes = ARRAY_SIZE(sc8180x_osm_l3_nodes), - .lut_row_size = OSM_LUT_ROW_SIZE, - .reg_freq_lut = OSM_REG_FREQ_LUT, - .reg_perf_state = OSM_REG_PERF_STATE, -}; - -DEFINE_QNODE(sm8150_osm_apps_l3, SM8150_MASTER_OSM_L3_APPS, 32, SM8150_SLAVE_OSM_L3); -DEFINE_QNODE(sm8150_osm_l3, SM8150_SLAVE_OSM_L3, 32); - -static const struct qcom_osm_l3_node * const sm8150_osm_l3_nodes[] = { - [MASTER_OSM_L3_APPS] = &sm8150_osm_apps_l3, - [SLAVE_OSM_L3] = &sm8150_osm_l3, -}; - -static const struct qcom_osm_l3_desc sm8150_icc_osm_l3 = { - 
.nodes = sm8150_osm_l3_nodes, - .num_nodes = ARRAY_SIZE(sm8150_osm_l3_nodes), - .lut_row_size = OSM_LUT_ROW_SIZE, - .reg_freq_lut = OSM_REG_FREQ_LUT, - .reg_perf_state = OSM_REG_PERF_STATE, -}; - -DEFINE_QNODE(sm8250_epss_apps_l3, SM8250_MASTER_EPSS_L3_APPS, 32, SM8250_SLAVE_EPSS_L3); -DEFINE_QNODE(sm8250_epss_l3, SM8250_SLAVE_EPSS_L3, 32); - -static const struct qcom_osm_l3_node * const sm8250_epss_l3_nodes[] = { - [MASTER_EPSS_L3_APPS] = &sm8250_epss_apps_l3, - [SLAVE_EPSS_L3_SHARED] = &sm8250_epss_l3, -}; - -static const struct qcom_osm_l3_desc sm8250_icc_epss_l3 = { - .nodes = sm8250_epss_l3_nodes, - .num_nodes = ARRAY_SIZE(sm8250_epss_l3_nodes), +static const struct qcom_osm_l3_desc epss_l3_l3_vote = { + .nodes = epss_l3_nodes, + .num_nodes = ARRAY_SIZE(epss_l3_nodes), .lut_row_size = EPSS_LUT_ROW_SIZE, .reg_freq_lut = EPSS_REG_FREQ_LUT, - .reg_perf_state = EPSS_REG_PERF_STATE, + .reg_perf_state = EPSS_REG_L3_VOTE, }; static int qcom_osm_l3_set(struct icc_node *src, struct icc_node *dst) @@ -184,22 +134,14 @@ static int qcom_osm_l3_set(struct icc_node *src, struct icc_node *dst) struct qcom_osm_l3_icc_provider *qp; struct icc_provider *provider; const struct qcom_osm_l3_node *qn; - struct icc_node *n; unsigned int index; - u32 agg_peak = 0; - u32 agg_avg = 0; u64 rate; qn = src->data; provider = src->provider; qp = to_osm_l3_provider(provider); - list_for_each_entry(n, &provider->nodes, node_list) - provider->aggregate(n, 0, n->avg_bw, n->peak_bw, - &agg_avg, &agg_peak); - - rate = max(agg_avg, agg_peak); - rate = icc_units_to_bps(rate); + rate = icc_units_to_bps(dst->peak_bw); do_div(rate, qn->buswidth); for (index = 0; index < qp->max_state - 1; index++) { @@ -344,12 +286,14 @@ err: } static const struct of_device_id osm_l3_of_match[] = { - { .compatible = "qcom,sc7180-osm-l3", .data = &sc7180_icc_osm_l3 }, - { .compatible = "qcom,sc7280-epss-l3", .data = &sc7280_icc_epss_l3 }, - { .compatible = "qcom,sdm845-osm-l3", .data = &sdm845_icc_osm_l3 }, - { .compatible = "qcom,sm8150-osm-l3", .data = &sm8150_icc_osm_l3 }, - { .compatible = "qcom,sc8180x-osm-l3", .data = &sc8180x_icc_osm_l3 }, - { .compatible = "qcom,sm8250-epss-l3", .data = &sm8250_icc_epss_l3 }, + { .compatible = "qcom,epss-l3", .data = &epss_l3_l3_vote }, + { .compatible = "qcom,osm-l3", .data = &osm_l3 }, + { .compatible = "qcom,sc7180-osm-l3", .data = &osm_l3 }, + { .compatible = "qcom,sc7280-epss-l3", .data = &epss_l3_perf_state }, + { .compatible = "qcom,sdm845-osm-l3", .data = &osm_l3 }, + { .compatible = "qcom,sm8150-osm-l3", .data = &osm_l3 }, + { .compatible = "qcom,sc8180x-osm-l3", .data = &osm_l3 }, + { .compatible = "qcom,sm8250-epss-l3", .data = &epss_l3_perf_state }, { } }; MODULE_DEVICE_TABLE(of, osm_l3_of_match); diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c index 35cd448efdfbec401a7c7bf92332096ec894f796..ef4e13fb49831df20aef4b589333a1c4686df0cb 100644 --- a/drivers/interconnect/qcom/sc7180.c +++ b/drivers/interconnect/qcom/sc7180.c @@ -369,7 +369,7 @@ static const struct qcom_icc_desc sc7180_gem_noc = { .num_bcms = ARRAY_SIZE(gem_noc_bcms), }; -static struct qcom_icc_bcm *mc_virt_bcms[] = { +static struct qcom_icc_bcm * const mc_virt_bcms[] = { &bcm_acv, &bcm_mc0, }; @@ -443,7 +443,7 @@ static struct qcom_icc_node * const qup_virt_nodes[] = { [SLAVE_QUP_CORE_1] = &qup_core_slave_2, }; -static const struct qcom_icc_desc sc7180_qup_virt = { +static const struct qcom_icc_desc sc7180_qup_virt = { .nodes = qup_virt_nodes, .num_nodes = ARRAY_SIZE(qup_virt_nodes), .bcms = 
qup_virt_bcms, diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c index 8e32ca958824c70e6e15c1e140be7488fa068a98..0f515bf10bd71e4b9e3a62580c8ed85d82561bfb 100644 --- a/drivers/interconnect/qcom/sc8180x.c +++ b/drivers/interconnect/qcom/sc8180x.c @@ -1889,7 +1889,7 @@ static struct qcom_icc_bcm * const qup_virt_bcms[] = { &bcm_qup0, }; -static struct qcom_icc_node *qup_virt_nodes[] = { +static struct qcom_icc_node * const qup_virt_nodes[] = { [MASTER_QUP_CORE_0] = &mas_qup_core_0, [MASTER_QUP_CORE_1] = &mas_qup_core_1, [MASTER_QUP_CORE_2] = &mas_qup_core_2, diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 453da65262af40769131a8f801f5a56f2a4d94b5..79707685d54a4f8f038f04d6d267d27c199e7ab9 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -188,6 +188,7 @@ config MSM_IOMMU source "drivers/iommu/amd/Kconfig" source "drivers/iommu/intel/Kconfig" +source "drivers/iommu/iommufd/Kconfig" config IRQ_REMAP bool "Support for Interrupt Remapping" diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index cc9f381013c35fd1fcc44a9f280dad8cdf48730b..f461d065138564e4bacad8bd882a148742067cd3 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += amd/ intel/ arm/ +obj-y += amd/ intel/ arm/ iommufd/ obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_IOMMU_API) += iommu-traces.o obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o @@ -28,6 +28,6 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o obj-$(CONFIG_S390_IOMMU) += s390-iommu.o obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o -obj-$(CONFIG_IOMMU_SVA) += iommu-sva-lib.o io-pgfault.o +obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o io-pgfault.o obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o obj-$(CONFIG_APPLE_DART) += apple-dart.o diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 1a2d425bf5687fffb44e419502ae7b8025687278..467b194975b30fe4a33a7adce186e5f4a04824e6 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -85,7 +85,7 @@ #define LOOP_TIMEOUT 2000000 -#define IVRS_GET_SBDF_ID(seg, bus, dev, fd) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \ +#define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \ | ((dev & 0x1f) << 3) | (fn & 0x7)) /* @@ -3402,18 +3402,24 @@ static int __init parse_amd_iommu_options(char *str) static int __init parse_ivrs_ioapic(char *str) { u32 seg = 0, bus, dev, fn; - int ret, id, i; + int id, i; u32 devid; - ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); - if (ret != 4) { - ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn); - if (ret != 5) { - pr_err("Invalid command line: ivrs_ioapic%s\n", str); - return 1; - } + if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 || + sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) + goto found; + + if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 || + sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) { + pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n", + str, id, seg, bus, dev, fn); + goto found; } + pr_err("Invalid command line: ivrs_ioapic%s\n", str); + return 1; + +found: if (early_ioapic_map_size == EARLY_MAP_SIZE) { pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", str); @@ -3434,18 +3440,24 @@ static int __init parse_ivrs_ioapic(char *str) static int __init parse_ivrs_hpet(char *str) { u32 seg = 0, bus, dev, fn; - 
int ret, id, i; + int id, i; u32 devid; - ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); - if (ret != 4) { - ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn); - if (ret != 5) { - pr_err("Invalid command line: ivrs_hpet%s\n", str); - return 1; - } + if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 || + sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) + goto found; + + if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 || + sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) { + pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n", + str, id, seg, bus, dev, fn); + goto found; } + pr_err("Invalid command line: ivrs_hpet%s\n", str); + return 1; + +found: if (early_hpet_map_size == EARLY_MAP_SIZE) { pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n", str); @@ -3466,19 +3478,36 @@ static int __init parse_ivrs_hpet(char *str) static int __init parse_ivrs_acpihid(char *str) { u32 seg = 0, bus, dev, fn; - char *hid, *uid, *p; + char *hid, *uid, *p, *addr; char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0}; - int ret, i; - - ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid); - if (ret != 4) { - ret = sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid); - if (ret != 5) { - pr_err("Invalid command line: ivrs_acpihid(%s)\n", str); - return 1; + int i; + + addr = strchr(str, '@'); + if (!addr) { + if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 || + sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) { + pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n", + str, acpiid, seg, bus, dev, fn); + goto found; } + goto not_found; } + /* We have the '@', make it the terminator to get just the acpiid */ + *addr++ = 0; + + if (sscanf(str, "=%s", acpiid) != 1) + goto not_found; + + if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 || + sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4) + goto found; + +not_found: + pr_err("Invalid command line: ivrs_acpihid%s\n", str); + return 1; + +found: p = acpiid; hid = strsep(&p, ":"); uid = p; @@ -3488,6 +3517,13 @@ static int __init parse_ivrs_acpihid(char *str) return 1; } + /* + * Ignore leading zeroes after ':', so e.g., AMDI0095:00 + * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match + */ + while (*uid == '0' && *(uid + 1)) + uid++; + i = early_acpihid_map_size++; memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 4d28967f910d96d40236c0d76f7e658d9146e2ca..cbeaab55c0dbccb3e3c4e0ad86839f6cd52a137d 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -767,7 +767,7 @@ EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier); static void iommu_poll_ga_log(struct amd_iommu *iommu) { - u32 head, tail, cnt = 0; + u32 head, tail; if (iommu->ga_log == NULL) return; @@ -780,7 +780,6 @@ static void iommu_poll_ga_log(struct amd_iommu *iommu) u64 log_entry; raw = (u64 *)(iommu->ga_log + head); - cnt++; /* Avoid memcpy function-call overhead */ log_entry = *raw; @@ -2155,21 +2154,13 @@ static void amd_iommu_detach_device(struct iommu_domain *dom, static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev) { + struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); struct protection_domain *domain = to_pdomain(dom); - struct iommu_dev_data *dev_data; - struct amd_iommu 
*iommu; + struct amd_iommu *iommu = rlookup_amd_iommu(dev); int ret; - if (!check_device(dev)) - return -EINVAL; - - dev_data = dev_iommu_priv_get(dev); dev_data->defer_attach = false; - iommu = rlookup_amd_iommu(dev); - if (!iommu) - return -EINVAL; - if (dev_data->domain) detach_device(dev); @@ -2286,6 +2277,8 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap) return false; case IOMMU_CAP_PRE_BOOT_PROTECTION: return amdr_ivrs_remap_support; + case IOMMU_CAP_ENFORCE_CACHE_COHERENCY: + return true; default: break; } diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index 6a1f02c62dffccd32cdd435f96ba322a7da42a44..864e4ffb6aa94ecb154b76c861c4eda3fdd11ec4 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -587,6 +587,7 @@ out_drop_state: put_device_state(dev_state); out: + pci_dev_put(pdev); return ret; } @@ -639,7 +640,9 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid, if (pasid_state->mm == NULL) goto out_free; - mmu_notifier_register(&pasid_state->mn, mm); + ret = mmu_notifier_register(&pasid_state->mn, mm); + if (ret) + goto out_free; ret = set_pasid_state(dev_state, pasid_state, pasid); if (ret) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 5968a568aae2afea4d8358c8ecfbb7c69dfaa296..a5a63b1c947eb1daaec72db4ba7c6ca6f5d5c6a4 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -10,7 +10,7 @@ #include #include "arm-smmu-v3.h" -#include "../../iommu-sva-lib.h" +#include "../../iommu-sva.h" #include "../../io-pgtable-arm.h" struct arm_smmu_mmu_notifier { @@ -344,11 +344,6 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm) if (!bond) return ERR_PTR(-ENOMEM); - /* Allocate a PASID for this mm if necessary */ - ret = iommu_sva_alloc_pasid(mm, 1, (1U << master->ssid_bits) - 1); - if (ret) - goto err_free_bond; - bond->mm = mm; bond->sva.dev = dev; refcount_set(&bond->refs, 1); @@ -367,42 +362,6 @@ err_free_bond: return ERR_PTR(ret); } -struct iommu_sva * -arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata) -{ - struct iommu_sva *handle; - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - - if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1) - return ERR_PTR(-EINVAL); - - mutex_lock(&sva_lock); - handle = __arm_smmu_sva_bind(dev, mm); - mutex_unlock(&sva_lock); - return handle; -} - -void arm_smmu_sva_unbind(struct iommu_sva *handle) -{ - struct arm_smmu_bond *bond = sva_to_bond(handle); - - mutex_lock(&sva_lock); - if (refcount_dec_and_test(&bond->refs)) { - list_del(&bond->list); - arm_smmu_mmu_notifier_put(bond->smmu_mn); - kfree(bond); - } - mutex_unlock(&sva_lock); -} - -u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle) -{ - struct arm_smmu_bond *bond = sva_to_bond(handle); - - return bond->mm->pasid; -} - bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) { unsigned long reg, fld; @@ -550,3 +509,64 @@ void arm_smmu_sva_notifier_synchronize(void) */ mmu_notifier_synchronize(); } + +void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t id) +{ + struct mm_struct *mm = domain->mm; + struct arm_smmu_bond *bond = NULL, *t; + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + + mutex_lock(&sva_lock); + list_for_each_entry(t, &master->bonds, list) { + if (t->mm == mm) { + bond = t; + break; + } + } + + if 
(!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) { + list_del(&bond->list); + arm_smmu_mmu_notifier_put(bond->smmu_mn); + kfree(bond); + } + mutex_unlock(&sva_lock); +} + +static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t id) +{ + int ret = 0; + struct iommu_sva *handle; + struct mm_struct *mm = domain->mm; + + mutex_lock(&sva_lock); + handle = __arm_smmu_sva_bind(dev, mm); + if (IS_ERR(handle)) + ret = PTR_ERR(handle); + mutex_unlock(&sva_lock); + + return ret; +} + +static void arm_smmu_sva_domain_free(struct iommu_domain *domain) +{ + kfree(domain); +} + +static const struct iommu_domain_ops arm_smmu_sva_domain_ops = { + .set_dev_pasid = arm_smmu_sva_set_dev_pasid, + .free = arm_smmu_sva_domain_free +}; + +struct iommu_domain *arm_smmu_sva_domain_alloc(void) +{ + struct iommu_domain *domain; + + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if (!domain) + return NULL; + domain->ops = &arm_smmu_sva_domain_ops; + + return domain; +} diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 6d5df91c5c465a4314f6a8bc41ec969d3d84c910..ab160198edd6b1d861af9459632d7da020d18ca9 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -29,7 +29,7 @@ #include "arm-smmu-v3.h" #include "../../dma-iommu.h" -#include "../../iommu-sva-lib.h" +#include "../../iommu-sva.h" static bool disable_bypass = true; module_param(disable_bypass, bool, 0444); @@ -2009,6 +2009,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) { struct arm_smmu_domain *smmu_domain; + if (type == IOMMU_DOMAIN_SVA) + return arm_smmu_sva_domain_alloc(); + if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_DMA_FQ && @@ -2430,23 +2433,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) goto out_unlock; } } else if (smmu_domain->smmu != smmu) { - dev_err(dev, - "cannot attach to SMMU %s (upstream of %s)\n", - dev_name(smmu_domain->smmu->dev), - dev_name(smmu->dev)); - ret = -ENXIO; + ret = -EINVAL; goto out_unlock; } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) { - dev_err(dev, - "cannot attach to incompatible domain (%u SSID bits != %u)\n", - smmu_domain->s1_cfg.s1cdmax, master->ssid_bits); ret = -EINVAL; goto out_unlock; } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && smmu_domain->stall_enabled != master->stall_enabled) { - dev_err(dev, "cannot attach to stall-%s domain\n", - smmu_domain->stall_enabled ? 
"enabled" : "disabled"); ret = -EINVAL; goto out_unlock; } @@ -2838,6 +2832,17 @@ static int arm_smmu_def_domain_type(struct device *dev) return 0; } +static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid) +{ + struct iommu_domain *domain; + + domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA); + if (WARN_ON(IS_ERR(domain)) || !domain) + return; + + arm_smmu_sva_remove_dev_pasid(domain, dev, pasid); +} + static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, @@ -2846,11 +2851,9 @@ static struct iommu_ops arm_smmu_ops = { .device_group = arm_smmu_device_group, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, + .remove_dev_pasid = arm_smmu_remove_dev_pasid, .dev_enable_feat = arm_smmu_dev_enable_feature, .dev_disable_feat = arm_smmu_dev_disable_feature, - .sva_bind = arm_smmu_sva_bind, - .sva_unbind = arm_smmu_sva_unbind, - .sva_get_pasid = arm_smmu_sva_get_pasid, .page_response = arm_smmu_page_response, .def_domain_type = arm_smmu_def_domain_type, .pgsize_bitmap = -1UL, /* Restricted during device attach */ @@ -3543,6 +3546,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) /* SID/SSID sizes */ smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg); smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg); + smmu->iommu.max_pasids = 1UL << smmu->ssid_bits; /* * If the SMMU supports fewer bits than would fill a single L2 stream diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index cd48590ada3039ad70eadbc1edaa8bf877ce74ff..8d772ea8a58367b690e91b2a7fbea251b11b93c2 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -754,11 +754,10 @@ bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master); int arm_smmu_master_enable_sva(struct arm_smmu_master *master); int arm_smmu_master_disable_sva(struct arm_smmu_master *master); bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master); -struct iommu_sva *arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, - void *drvdata); -void arm_smmu_sva_unbind(struct iommu_sva *handle); -u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle); void arm_smmu_sva_notifier_synchronize(void); +struct iommu_domain *arm_smmu_sva_domain_alloc(void); +void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t id); #else /* CONFIG_ARM_SMMU_V3_SVA */ static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) { @@ -790,19 +789,17 @@ static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master return false; } -static inline struct iommu_sva * -arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata) +static inline void arm_smmu_sva_notifier_synchronize(void) {} + +static inline struct iommu_domain *arm_smmu_sva_domain_alloc(void) { - return ERR_PTR(-ENODEV); + return NULL; } -static inline void arm_smmu_sva_unbind(struct iommu_sva *handle) {} - -static inline u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle) +static inline void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain, + struct device *dev, + ioasid_t id) { - return IOMMU_PASID_INVALID; } - -static inline void arm_smmu_sva_notifier_synchronize(void) {} #endif /* CONFIG_ARM_SMMU_V3_SVA */ #endif /* _ARM_SMMU_V3_H */ diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c index 
658f3cc832781b0d7134dd1471568e3f4e7c732e..9dc772f2cbb27c414fe1d83e91eb950fc5dcc82f 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c @@ -136,6 +136,9 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu) reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR); reg &= ~ARM_MMU500_ACTLR_CPRE; arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg); + reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR); + if (reg & ARM_MMU500_ACTLR_CPRE) + dev_warn_once(smmu->dev, "Failed to disable prefetcher [errata #841119 and #826419], check ACR.CACHE_LOCK\n"); } return 0; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c index 6eed8e67a0ca834d4a7c6ac3f2c1598f24a2a296..74e9ef2fd5804fcf933e918983ad4ca7d1698aac 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c @@ -10,16 +10,6 @@ #include "arm-smmu.h" #include "arm-smmu-qcom.h" -enum qcom_smmu_impl_reg_offset { - QCOM_SMMU_TBU_PWR_STATUS, - QCOM_SMMU_STATS_SYNC_INV_TBU_ACK, - QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR, -}; - -struct qcom_smmu_config { - const u32 *reg_offset; -}; - void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { int ret; @@ -59,84 +49,3 @@ void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) tbu_pwr_status, sync_inv_ack, sync_inv_progress); } } - -/* Implementation Defined Register Space 0 register offsets */ -static const u32 qcom_smmu_impl0_reg_offset[] = { - [QCOM_SMMU_TBU_PWR_STATUS] = 0x2204, - [QCOM_SMMU_STATS_SYNC_INV_TBU_ACK] = 0x25dc, - [QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR] = 0x2670, -}; - -static const struct qcom_smmu_config qcm2290_smmu_cfg = { - .reg_offset = qcom_smmu_impl0_reg_offset, -}; - -static const struct qcom_smmu_config sc7180_smmu_cfg = { - .reg_offset = qcom_smmu_impl0_reg_offset, -}; - -static const struct qcom_smmu_config sc7280_smmu_cfg = { - .reg_offset = qcom_smmu_impl0_reg_offset, -}; - -static const struct qcom_smmu_config sc8180x_smmu_cfg = { - .reg_offset = qcom_smmu_impl0_reg_offset, -}; - -static const struct qcom_smmu_config sc8280xp_smmu_cfg = { - .reg_offset = qcom_smmu_impl0_reg_offset, -}; - -static const struct qcom_smmu_config sm6125_smmu_cfg = { - .reg_offset = qcom_smmu_impl0_reg_offset, -}; - -static const struct qcom_smmu_config sm6350_smmu_cfg = { - .reg_offset = qcom_smmu_impl0_reg_offset, -}; - -static const struct qcom_smmu_config sm8150_smmu_cfg = { - .reg_offset = qcom_smmu_impl0_reg_offset, -}; - -static const struct qcom_smmu_config sm8250_smmu_cfg = { - .reg_offset = qcom_smmu_impl0_reg_offset, -}; - -static const struct qcom_smmu_config sm8350_smmu_cfg = { - .reg_offset = qcom_smmu_impl0_reg_offset, -}; - -static const struct qcom_smmu_config sm8450_smmu_cfg = { - .reg_offset = qcom_smmu_impl0_reg_offset, -}; - -static const struct of_device_id __maybe_unused qcom_smmu_impl_debug_match[] = { - { .compatible = "qcom,msm8998-smmu-v2" }, - { .compatible = "qcom,qcm2290-smmu-500", .data = &qcm2290_smmu_cfg }, - { .compatible = "qcom,sc7180-smmu-500", .data = &sc7180_smmu_cfg }, - { .compatible = "qcom,sc7280-smmu-500", .data = &sc7280_smmu_cfg}, - { .compatible = "qcom,sc8180x-smmu-500", .data = &sc8180x_smmu_cfg }, - { .compatible = "qcom,sc8280xp-smmu-500", .data = &sc8280xp_smmu_cfg }, - { .compatible = "qcom,sdm630-smmu-v2" }, - { .compatible = "qcom,sdm845-smmu-500" }, - { .compatible = "qcom,sm6125-smmu-500", .data = &sm6125_smmu_cfg}, - { .compatible = "qcom,sm6350-smmu-500", .data = 
&sm6350_smmu_cfg}, - { .compatible = "qcom,sm8150-smmu-500", .data = &sm8150_smmu_cfg }, - { .compatible = "qcom,sm8250-smmu-500", .data = &sm8250_smmu_cfg }, - { .compatible = "qcom,sm8350-smmu-500", .data = &sm8350_smmu_cfg }, - { .compatible = "qcom,sm8450-smmu-500", .data = &sm8450_smmu_cfg }, - { } -}; - -const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu) -{ - const struct of_device_id *match; - const struct device_node *np = smmu->dev->of_node; - - match = of_match_node(qcom_smmu_impl_debug_match, np); - if (!match) - return NULL; - - return match->data; -} diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index b2708de25ea345e017f441f3d1f0495264bf41d1..91d404deb1155880c4667e72cca527bcbbeb2008 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -361,6 +361,8 @@ static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu) { int ret; + arm_mmu500_reset(smmu); + /* * To address performance degradation in non-real time clients, * such as USB and UFS, turn off wait-for-safe on sdm845 based boards, @@ -374,41 +376,67 @@ static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu) return ret; } -static int qcom_smmu500_reset(struct arm_smmu_device *smmu) -{ - const struct device_node *np = smmu->dev->of_node; - - arm_mmu500_reset(smmu); - - if (of_device_is_compatible(np, "qcom,sdm845-smmu-500")) - return qcom_sdm845_smmu500_reset(smmu); +static const struct arm_smmu_impl qcom_smmu_v2_impl = { + .init_context = qcom_smmu_init_context, + .cfg_probe = qcom_smmu_cfg_probe, + .def_domain_type = qcom_smmu_def_domain_type, + .write_s2cr = qcom_smmu_write_s2cr, + .tlb_sync = qcom_smmu_tlb_sync, +}; - return 0; -} +static const struct arm_smmu_impl qcom_smmu_500_impl = { + .init_context = qcom_smmu_init_context, + .cfg_probe = qcom_smmu_cfg_probe, + .def_domain_type = qcom_smmu_def_domain_type, + .reset = arm_mmu500_reset, + .write_s2cr = qcom_smmu_write_s2cr, + .tlb_sync = qcom_smmu_tlb_sync, +}; -static const struct arm_smmu_impl qcom_smmu_impl = { +static const struct arm_smmu_impl sdm845_smmu_500_impl = { .init_context = qcom_smmu_init_context, .cfg_probe = qcom_smmu_cfg_probe, .def_domain_type = qcom_smmu_def_domain_type, - .reset = qcom_smmu500_reset, + .reset = qcom_sdm845_smmu500_reset, .write_s2cr = qcom_smmu_write_s2cr, .tlb_sync = qcom_smmu_tlb_sync, }; -static const struct arm_smmu_impl qcom_adreno_smmu_impl = { +static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = { + .init_context = qcom_adreno_smmu_init_context, + .def_domain_type = qcom_smmu_def_domain_type, + .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank, + .write_sctlr = qcom_adreno_smmu_write_sctlr, + .tlb_sync = qcom_smmu_tlb_sync, +}; + +static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = { .init_context = qcom_adreno_smmu_init_context, .def_domain_type = qcom_smmu_def_domain_type, - .reset = qcom_smmu500_reset, + .reset = arm_mmu500_reset, .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank, .write_sctlr = qcom_adreno_smmu_write_sctlr, .tlb_sync = qcom_smmu_tlb_sync, }; static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu, - const struct arm_smmu_impl *impl) + const struct qcom_smmu_match_data *data) { + const struct device_node *np = smmu->dev->of_node; + const struct arm_smmu_impl *impl; struct qcom_smmu *qsmmu; + if (!data) + return ERR_PTR(-EINVAL); + + if (np && of_device_is_compatible(np, "qcom,adreno-smmu")) + impl = 
data->adreno_impl; + else + impl = data->impl; + + if (!impl) + return smmu; + /* Check to make sure qcom_scm has finished probing */ if (!qcom_scm_is_available()) return ERR_PTR(-EPROBE_DEFER); @@ -418,27 +446,77 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu, return ERR_PTR(-ENOMEM); qsmmu->smmu.impl = impl; - qsmmu->cfg = qcom_smmu_impl_data(smmu); + qsmmu->cfg = data->cfg; return &qsmmu->smmu; } +/* Implementation Defined Register Space 0 register offsets */ +static const u32 qcom_smmu_impl0_reg_offset[] = { + [QCOM_SMMU_TBU_PWR_STATUS] = 0x2204, + [QCOM_SMMU_STATS_SYNC_INV_TBU_ACK] = 0x25dc, + [QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR] = 0x2670, +}; + +static const struct qcom_smmu_config qcom_smmu_impl0_cfg = { + .reg_offset = qcom_smmu_impl0_reg_offset, +}; + +/* + * It is not yet possible to use MDP SMMU with the bypass quirk on the msm8996; + * there are not enough context banks. + */ +static const struct qcom_smmu_match_data msm8996_smmu_data = { + .impl = NULL, + .adreno_impl = &qcom_adreno_smmu_v2_impl, +}; + +static const struct qcom_smmu_match_data qcom_smmu_v2_data = { + .impl = &qcom_smmu_v2_impl, + .adreno_impl = &qcom_adreno_smmu_v2_impl, +}; + +static const struct qcom_smmu_match_data sdm845_smmu_500_data = { + .impl = &sdm845_smmu_500_impl, + /* + * No need for adreno impl here. On sdm845 the Adreno SMMU is handled + * by the separate sdm845-smmu-v2 device. + */ + /* Also no debug configuration. */ +}; + +static const struct qcom_smmu_match_data qcom_smmu_500_impl0_data = { + .impl = &qcom_smmu_500_impl, + .adreno_impl = &qcom_adreno_smmu_500_impl, + .cfg = &qcom_smmu_impl0_cfg, +}; + +/* + * Do not add any more qcom,SOC-smmu-500 entries to this list, unless they need + * special handling and cannot be covered by the qcom,smmu-500 entry.
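For illustration, the selection logic that qcom_smmu_create() now applies to its match data can be reduced to the following self-contained sketch; struct impl and struct match_data are placeholders for arm_smmu_impl and qcom_smmu_match_data, and pick_impl() is a hypothetical helper:

/* Sketch only: per-SoC match data selected via of_match_node(), as above. */
#include <linux/of.h>

struct impl { int dummy; };     /* stands in for struct arm_smmu_impl */

struct match_data {             /* stands in for struct qcom_smmu_match_data */
	const struct impl *impl;
	const struct impl *adreno_impl;
};

static const struct impl v2_impl, v2_adreno_impl;

static const struct match_data v2_data = {
	.impl		= &v2_impl,
	.adreno_impl	= &v2_adreno_impl,
};

static const struct of_device_id example_match[] = {
	{ .compatible = "qcom,msm8998-smmu-v2", .data = &v2_data },
	{ /* sentinel */ }
};

static const struct impl *pick_impl(const struct device_node *np)
{
	const struct of_device_id *match = of_match_node(example_match, np);
	const struct match_data *data;

	if (!match)
		return NULL;		/* not one of ours */

	data = match->data;
	/* GPU SMMU instances carry an extra compatible, as in the patch. */
	if (of_device_is_compatible(np, "qcom,adreno-smmu"))
		return data->adreno_impl;
	return data->impl;
}

One .data pointer per compatible replaces the long chain of of_device_is_compatible() checks, which is why the debug-side lookup table could be deleted wholesale above.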
+ */ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = { - { .compatible = "qcom,msm8998-smmu-v2" }, - { .compatible = "qcom,qcm2290-smmu-500" }, - { .compatible = "qcom,sc7180-smmu-500" }, - { .compatible = "qcom,sc7280-smmu-500" }, - { .compatible = "qcom,sc8180x-smmu-500" }, - { .compatible = "qcom,sc8280xp-smmu-500" }, - { .compatible = "qcom,sdm630-smmu-v2" }, - { .compatible = "qcom,sdm845-smmu-500" }, - { .compatible = "qcom,sm6125-smmu-500" }, - { .compatible = "qcom,sm6350-smmu-500" }, - { .compatible = "qcom,sm6375-smmu-500" }, - { .compatible = "qcom,sm8150-smmu-500" }, - { .compatible = "qcom,sm8250-smmu-500" }, - { .compatible = "qcom,sm8350-smmu-500" }, - { .compatible = "qcom,sm8450-smmu-500" }, + { .compatible = "qcom,msm8996-smmu-v2", .data = &msm8996_smmu_data }, + { .compatible = "qcom,msm8998-smmu-v2", .data = &qcom_smmu_v2_data }, + { .compatible = "qcom,qcm2290-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,qdu1000-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,sc7180-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,sc7280-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,sdm630-smmu-v2", .data = &qcom_smmu_v2_data }, + { .compatible = "qcom,sdm845-smmu-v2", .data = &qcom_smmu_v2_data }, + { .compatible = "qcom,sdm845-smmu-500", .data = &sdm845_smmu_500_data }, + { .compatible = "qcom,sm6115-smmu-500", .data = &qcom_smmu_500_impl0_data}, + { .compatible = "qcom,sm6125-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,sm6350-smmu-v2", .data = &qcom_smmu_v2_data }, + { .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,sm8350-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,sm8450-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,smmu-500", .data = &qcom_smmu_500_impl0_data }, { } }; @@ -453,26 +531,19 @@ static struct acpi_platform_list qcom_acpi_platlist[] = { struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu) { const struct device_node *np = smmu->dev->of_node; + const struct of_device_id *match; #ifdef CONFIG_ACPI if (np == NULL) { /* Match platform for ACPI boot */ if (acpi_match_platform_list(qcom_acpi_platlist) >= 0) - return qcom_smmu_create(smmu, &qcom_smmu_impl); + return qcom_smmu_create(smmu, &qcom_smmu_500_impl0_data); } #endif - /* - * Do not change this order of implementation, i.e., first adreno - * smmu impl and then apss smmu since we can have both implementing - * arm,mmu-500 in which case we will miss setting adreno smmu specific - * features if the order is changed. 
- */ - if (of_device_is_compatible(np, "qcom,adreno-smmu")) - return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl); - - if (of_match_node(qcom_smmu_impl_of_match, np)) - return qcom_smmu_create(smmu, &qcom_smmu_impl); + match = of_match_node(qcom_smmu_impl_of_match, np); + if (match) + return qcom_smmu_create(smmu, match->data); return smmu; } diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h index 99ec8f8629a0d92f45b7f51db37670208753e727..593910567b8842c5522351aa1fcf74df46be87a6 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h @@ -14,15 +14,26 @@ struct qcom_smmu { u32 stall_enabled; }; +enum qcom_smmu_impl_reg_offset { + QCOM_SMMU_TBU_PWR_STATUS, + QCOM_SMMU_STATS_SYNC_INV_TBU_ACK, + QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR, +}; + +struct qcom_smmu_config { + const u32 *reg_offset; +}; + +struct qcom_smmu_match_data { + const struct qcom_smmu_config *cfg; + const struct arm_smmu_impl *impl; + const struct arm_smmu_impl *adreno_impl; +}; + #ifdef CONFIG_ARM_SMMU_QCOM_DEBUG void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu); -const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu); #else static inline void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { } -static inline const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu) -{ - return NULL; -} #endif #endif /* _ARM_SMMU_QCOM_H */ diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 30dab1418e3ff09c0bc62d5c5058d1f37a3a197c..719fbca1fe52a0b329ccac1c3f8f1001b11194c7 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1150,9 +1150,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) * different SMMUs. */ if (smmu_domain->smmu != smmu) { - dev_err(dev, - "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", - dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); ret = -EINVAL; goto rpm_put; } diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index 3869c3ecda8cd1193cfdbeec0ac2d235ed3d04de..270c3d9128bab874585f9e90a46ad09c524acc41 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -381,13 +381,8 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev * Sanity check the domain. We don't support domains across * different IOMMUs. 
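Several attach_dev paths in this series (arm-smmu above, qcom_iommu just below, intel later on) stop printing dev_err() messages and simply return a bare -EINVAL when a device and a domain are incompatible. The convention this implies, sketched here from the caller's side with a hypothetical attach_with_fallback() helper, is that -EINVAL is an ordinary "does not fit" answer rather than a driver error worth logging:

/* Sketch only: caller-side handling of the bare -EINVAL convention. */
#include <linux/errno.h>
#include <linux/iommu.h>

static int attach_with_fallback(struct iommu_domain *preferred,
				struct iommu_domain *fallback,
				struct device *dev)
{
	int ret = iommu_attach_device(preferred, dev);

	/* -EINVAL now just means "incompatible", not a driver bug. */
	if (ret != -EINVAL)
		return ret;

	/* The device was left unattached, so quietly try the fallback. */
	return iommu_attach_device(fallback, dev);
}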
*/ - if (qcom_domain->iommu != qcom_iommu) { - dev_err(dev, "cannot attach to IOMMU %s while already " - "attached to domain on IOMMU %s\n", - dev_name(qcom_domain->iommu->dev), - dev_name(qcom_iommu->dev)); + if (qcom_domain->iommu != qcom_iommu) return -EINVAL; - } return 0; } @@ -415,7 +410,8 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de } static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp) + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped) { int ret; unsigned long flags; @@ -426,13 +422,14 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, return -ENODEV; spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); - ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC); + ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, GFP_ATOMIC, mapped); spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); return ret; } static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size, struct iommu_iotlb_gather *gather) + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *gather) { size_t ret; unsigned long flags; @@ -449,7 +446,7 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova, */ pm_runtime_get_sync(qcom_domain->iommu->dev); spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); - ret = ops->unmap(ops, iova, size, gather); + ret = ops->unmap_pages(ops, iova, pgsize, pgcount, gather); spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); pm_runtime_put_sync(qcom_domain->iommu->dev); @@ -587,8 +584,8 @@ static const struct iommu_ops qcom_iommu_ops = { .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = qcom_iommu_attach_dev, .detach_dev = qcom_iommu_detach_dev, - .map = qcom_iommu_map, - .unmap = qcom_iommu_unmap, + .map_pages = qcom_iommu_map, + .unmap_pages = qcom_iommu_unmap, .flush_iotlb_all = qcom_iommu_flush_iotlb_all, .iotlb_sync = qcom_iommu_iotlb_sync, .iova_to_phys = qcom_iommu_iova_to_phys, diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 45fd4850bacbdbc53d58faa0f87da6ed50563236..b0cde22119875eadcfb36cbc637d2a48133a70b6 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -708,10 +708,6 @@ static int exynos_sysmmu_probe(struct platform_device *pdev) if (ret) return ret; - ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev); - if (ret) - goto err_iommu_register; - platform_set_drvdata(pdev, data); if (PG_ENT_SHIFT < 0) { @@ -743,11 +739,13 @@ static int exynos_sysmmu_probe(struct platform_device *pdev) pm_runtime_enable(dev); + ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev); + if (ret) + goto err_dma_set_mask; + return 0; err_dma_set_mask: - iommu_device_unregister(&data->iommu); -err_iommu_register: iommu_device_sysfs_remove(&data->iommu); return ret; } @@ -1432,12 +1430,6 @@ static int __init exynos_iommu_init(void) return -ENOMEM; } - ret = platform_driver_register(&exynos_sysmmu_driver); - if (ret) { - pr_err("%s: Failed to register driver\n", __func__); - goto err_reg_driver; - } - zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL); if (zero_lv2_table == NULL) { pr_err("%s: Failed to allocate zero level2 page table\n", @@ -1446,10 +1438,16 @@ static int __init exynos_iommu_init(void) goto err_zero_lv2; } + ret = platform_driver_register(&exynos_sysmmu_driver); + if (ret) { + pr_err("%s: Failed to 
register driver\n", __func__); + goto err_reg_driver; + } + return 0; -err_zero_lv2: - platform_driver_unregister(&exynos_sysmmu_driver); err_reg_driver: + platform_driver_unregister(&exynos_sysmmu_driver); +err_zero_lv2: kmem_cache_destroy(lv2table_kmem_cache); return ret; } diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index 0d03f837a5d4e7ff820a223dd4a6349424a42647..05d820fb1d0bf56c5ea50ccdc22988b2ad474dc0 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -211,7 +211,7 @@ int pamu_config_ppaace(int liodn, u32 omi, u32 stashid, int prot) ppaace->op_encode.index_ot.omi = omi; } else if (~omi != 0) { pr_debug("bad operation mapping index: %d\n", omi); - return -EINVAL; + return -ENODEV; } /* configure stash id */ @@ -779,7 +779,7 @@ static int fsl_pamu_probe(struct platform_device *pdev) of_get_address(dev->of_node, 0, &size, NULL); irq = irq_of_parse_and_map(dev->of_node, 0); - if (irq == NO_IRQ) { + if (!irq) { dev_warn(dev, "no interrupts listed in PAMU node\n"); goto error; } @@ -868,7 +868,7 @@ static int fsl_pamu_probe(struct platform_device *pdev) ret = create_csd(ppaact_phys, mem_size, csd_port_id); if (ret) { dev_err(dev, "could not create coherence subdomain\n"); - return ret; + goto error; } } @@ -903,7 +903,7 @@ static int fsl_pamu_probe(struct platform_device *pdev) return 0; error: - if (irq != NO_IRQ) + if (irq) free_irq(irq, data); kfree_sensitive(data); diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index fa20f4b03e12db026b41e8017f7859d4b9e227bd..4408ac3c49b610e13a3b7158b61f7e117a917394 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -258,7 +258,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain, liodn = of_get_property(dev->of_node, "fsl,liodn", &len); if (!liodn) { pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node); - return -EINVAL; + return -ENODEV; } spin_lock_irqsave(&dma_domain->domain_lock, flags); @@ -267,7 +267,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain, if (liodn[i] >= PAACE_NUMBER_ENTRIES) { pr_debug("Invalid liodn %d, attach device failed for %pOF\n", liodn[i], dev->of_node); - ret = -EINVAL; + ret = -ENODEV; break; } diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index bc94059a5b870abbd28b8e92fe7116170eb53259..b00a0ceb2d13701841b06b44c946bd48e07e87a1 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -1105,6 +1105,13 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) raw_spin_lock_init(&iommu->register_lock); + /* + * A value of N in PSS field of eCap register indicates hardware + * supports PASID field of N+1 bits. + */ + if (pasid_supported(iommu)) + iommu->iommu.max_pasids = 2UL << ecap_pss(iommu->ecap); + /* * This is only for hotplug; at boot time intel_iommu_enabled won't * be set yet. 
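On the max_pasids computation added to alloc_iommu() above: PSS = N encodes a PASID field of N + 1 bits, so the capacity is 2^(N+1), which is what the slightly unusual "2UL <<" shift expresses. A small sketch of the arithmetic, with a hypothetical helper name:

/* Sketch only: the PSS-to-PASID-count arithmetic used in alloc_iommu(). */
static inline unsigned long pss_to_max_pasids(unsigned long pss)
{
	/*
	 * PSS = N  =>  PASID field of N + 1 bits  =>  2^(N+1) values.
	 * 2UL << N is the same as 1UL << (N + 1); e.g. PSS = 19 gives
	 * 20-bit PASIDs, i.e. 1048576 distinct values.
	 */
	return 2UL << pss;
}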
When intel_iommu_init() runs, it registers the units diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 644ca49e8cf80a92ba344455c509115853058ad2..59df7e42fd533cae18444d83e5da9d0083c7a8aa 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -27,7 +27,7 @@ #include "iommu.h" #include "../dma-iommu.h" #include "../irq_remapping.h" -#include "../iommu-sva-lib.h" +#include "../iommu-sva.h" #include "pasid.h" #include "cap_audit.h" @@ -277,7 +277,8 @@ static LIST_HEAD(dmar_satc_units); #define for_each_rmrr_units(rmrr) \ list_for_each_entry(rmrr, &dmar_rmrr_units, list) -static void dmar_remove_one_dev_info(struct device *dev); +static void device_block_translation(struct device *dev); +static void intel_iommu_domain_free(struct iommu_domain *domain); int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON); int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON); @@ -382,11 +383,6 @@ static inline int domain_type_is_si(struct dmar_domain *domain) return domain->domain.type == IOMMU_DOMAIN_IDENTITY; } -static inline bool domain_use_first_level(struct dmar_domain *domain) -{ - return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL; -} - static inline int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn) { @@ -500,7 +496,7 @@ static int domain_update_iommu_superpage(struct dmar_domain *domain, rcu_read_lock(); for_each_active_iommu(iommu, drhd) { if (iommu != skip) { - if (domain && domain_use_first_level(domain)) { + if (domain && domain->use_first_level) { if (!cap_fl1gp_support(iommu->cap)) mask = 0x1; } else { @@ -578,7 +574,7 @@ static void domain_update_iommu_cap(struct dmar_domain *domain) * paging and 57-bits with 5-level paging). Hence, skip bit * [N-1]. */ - if (domain_use_first_level(domain)) + if (domain->use_first_level) domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1); else domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw); @@ -779,19 +775,6 @@ static void domain_flush_cache(struct dmar_domain *domain, clflush_cache_range(addr, size); } -static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) -{ - struct context_entry *context; - int ret = 0; - - spin_lock(&iommu->lock); - context = iommu_context_addr(iommu, bus, devfn, 0); - if (context) - ret = context_present(context); - spin_unlock(&iommu->lock); - return ret; -} - static void free_context_table(struct intel_iommu *iommu) { struct context_entry *context; @@ -959,7 +942,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; - if (domain_use_first_level(domain)) + if (domain->use_first_level) pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS; if (cmpxchg64(&pte->val, 0ULL, pteval)) @@ -1418,7 +1401,7 @@ static void iommu_enable_pci_caps(struct device_domain_info *info) { struct pci_dev *pdev; - if (!info || !dev_is_pci(info->dev)) + if (!dev_is_pci(info->dev)) return; pdev = to_pci_dev(info->dev); @@ -1458,7 +1441,7 @@ static void iommu_enable_pci_caps(struct device_domain_info *info) } } -static void iommu_disable_dev_iotlb(struct device_domain_info *info) +static void iommu_disable_pci_caps(struct device_domain_info *info) { struct pci_dev *pdev; @@ -1529,7 +1512,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, if (ih) ih = 1 << 6; - if (domain_use_first_level(domain)) { + if 
(domain->use_first_level) { qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih); } else { unsigned long bitmask = aligned_pages - 1; @@ -1583,7 +1566,7 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu, * It's a non-present to present mapping. Only flush if caching mode * and second level. */ - if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain)) + if (cap_caching_mode(iommu->cap) && !domain->use_first_level) iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); else iommu_flush_write_buffer(iommu); @@ -1599,7 +1582,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain) struct intel_iommu *iommu = info->iommu; u16 did = domain_id_iommu(dmar_domain, iommu); - if (domain_use_first_level(dmar_domain)) + if (dmar_domain->use_first_level) qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0); else iommu->flush.flush_iotlb(iommu, did, 0, 0, @@ -1772,7 +1755,7 @@ static struct dmar_domain *alloc_domain(unsigned int type) domain->nid = NUMA_NO_NODE; if (first_level_by_default(type)) - domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL; + domain->use_first_level = true; domain->has_iotlb_device = false; INIT_LIST_HEAD(&domain->devices); spin_lock_init(&domain->lock); @@ -2064,7 +2047,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, } else { iommu_flush_write_buffer(iommu); } - iommu_enable_pci_caps(info); ret = 0; @@ -2116,30 +2098,6 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev) &domain_context_mapping_cb, &data); } -static int domain_context_mapped_cb(struct pci_dev *pdev, - u16 alias, void *opaque) -{ - struct intel_iommu *iommu = opaque; - - return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff); -} - -static int domain_context_mapped(struct device *dev) -{ - struct intel_iommu *iommu; - u8 bus, devfn; - - iommu = device_to_iommu(dev, &bus, &devfn); - if (!iommu) - return -ENODEV; - - if (!dev_is_pci(dev)) - return device_context_mapped(iommu, bus, devfn); - - return !pci_for_each_dma_alias(to_pci_dev(dev), - domain_context_mapped_cb, iommu); -} - /* Returns a number of VTD pages, but aligned to MM page size */ static inline unsigned long aligned_nrpages(unsigned long host_addr, size_t size) @@ -2229,7 +2187,7 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP); attr |= DMA_FL_PTE_PRESENT; - if (domain_use_first_level(domain)) { + if (domain->use_first_level) { attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS; if (prot & DMA_PTE_WRITE) attr |= DMA_FL_PTE_DIRTY; @@ -2472,7 +2430,8 @@ static int __init si_domain_init(int hw) return 0; } -static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev) +static int dmar_domain_attach_device(struct dmar_domain *domain, + struct device *dev) { struct device_domain_info *info = dev_iommu_priv_get(dev); struct intel_iommu *iommu; @@ -2494,18 +2453,11 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev) /* PASID table is mandatory for a PCI device in scalable mode. 
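The domain_use_first_level() conversions in this hunk replace a mask test on dmar_domain->flags with a self-describing one-bit field (the struct change itself lands in iommu.h later in this diff). A side-by-side sketch with placeholder struct names:

/* Sketch only: flags-mask test vs. the new single-bit field. */
#include <linux/bits.h>
#include <linux/types.h>

#define DOMAIN_FLAG_USE_FIRST_LEVEL	BIT(1)	/* old scheme */

struct old_domain { int flags; };
struct new_domain { u8 use_first_level:1; };	/* new scheme */

static bool old_uses_first_level(const struct old_domain *d)
{
	return d->flags & DOMAIN_FLAG_USE_FIRST_LEVEL;
}

static bool new_uses_first_level(const struct new_domain *d)
{
	return d->use_first_level;
}

The bitfield needs no flag namespace and reads the same way the code talks about it, which is why the flags word could be dropped from dmar_domain entirely.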
*/ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { - ret = intel_pasid_alloc_table(dev); - if (ret) { - dev_err(dev, "PASID table allocation failed\n"); - dmar_remove_one_dev_info(dev); - return ret; - } - /* Setup the PASID entry for requests without PASID: */ if (hw_pass_through && domain_type_is_si(domain)) ret = intel_pasid_setup_pass_through(iommu, domain, dev, PASID_RID2PASID); - else if (domain_use_first_level(domain)) + else if (domain->use_first_level) ret = domain_setup_first_level(iommu, domain, dev, PASID_RID2PASID); else @@ -2513,7 +2465,7 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev) dev, PASID_RID2PASID); if (ret) { dev_err(dev, "Setup RID2PASID failed\n"); - dmar_remove_one_dev_info(dev); + device_block_translation(dev); return ret; } } @@ -2521,10 +2473,12 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev) ret = domain_context_mapping(domain, dev); if (ret) { dev_err(dev, "Domain context map failed\n"); - dmar_remove_one_dev_info(dev); + device_block_translation(dev); return ret; } + iommu_enable_pci_caps(info); + return 0; } @@ -4125,9 +4079,8 @@ static void dmar_remove_one_dev_info(struct device *dev) intel_pasid_tear_down_entry(iommu, info->dev, PASID_RID2PASID, false); - iommu_disable_dev_iotlb(info); + iommu_disable_pci_caps(info); domain_context_clear(info); - intel_pasid_free_table(info->dev); } spin_lock_irqsave(&domain->lock, flags); @@ -4138,6 +4091,37 @@ static void dmar_remove_one_dev_info(struct device *dev) info->domain = NULL; } +/* + * Clear the page table pointer in context or pasid table entries so that + * all DMA requests without PASID from the device are blocked. If the page + * table has been set, clean up the data structures. + */ +static void device_block_translation(struct device *dev) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct intel_iommu *iommu = info->iommu; + unsigned long flags; + + iommu_disable_pci_caps(info); + if (!dev_is_real_dma_subdevice(dev)) { + if (sm_supported(iommu)) + intel_pasid_tear_down_entry(iommu, dev, + PASID_RID2PASID, false); + else + domain_context_clear(info); + } + + if (!info->domain) + return; + + spin_lock_irqsave(&info->domain->lock, flags); + list_del(&info->link); + spin_unlock_irqrestore(&info->domain->lock, flags); + + domain_detach_iommu(info->domain, iommu); + info->domain = NULL; +} + static int md_domain_init(struct dmar_domain *domain, int guest_width) { int adjust_width; @@ -4159,12 +4143,28 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) return 0; } +static int blocking_domain_attach_dev(struct iommu_domain *domain, + struct device *dev) +{ + device_block_translation(dev); + return 0; +} + +static struct iommu_domain blocking_domain = { + .ops = &(const struct iommu_domain_ops) { + .attach_dev = blocking_domain_attach_dev, + .free = intel_iommu_domain_free + } +}; + static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) { struct dmar_domain *dmar_domain; struct iommu_domain *domain; switch (type) { + case IOMMU_DOMAIN_BLOCKED: + return &blocking_domain; case IOMMU_DOMAIN_DMA: case IOMMU_DOMAIN_DMA_FQ: case IOMMU_DOMAIN_UNMANAGED: @@ -4188,6 +4188,8 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) return domain; case IOMMU_DOMAIN_IDENTITY: return &si_domain->domain; + case IOMMU_DOMAIN_SVA: + return intel_svm_domain_alloc(); default: return NULL; } @@ -4197,7 +4199,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) 
static void intel_iommu_domain_free(struct iommu_domain *domain) { - if (domain != &si_domain->domain) + if (domain != &si_domain->domain && domain != &blocking_domain) domain_exit(to_dmar_domain(domain)); } @@ -4213,19 +4215,15 @@ static int prepare_domain_attach_device(struct iommu_domain *domain, return -ENODEV; if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) - return -EOPNOTSUPP; + return -EINVAL; /* check if this iommu agaw is sufficient for max mapped address */ addr_width = agaw_to_width(iommu->agaw); if (addr_width > cap_mgaw(iommu->cap)) addr_width = cap_mgaw(iommu->cap); - if (dmar_domain->max_addr > (1LL << addr_width)) { - dev_err(dev, "%s: iommu width (%d) is not " - "sufficient for the mapped address (%llx)\n", - __func__, addr_width, dmar_domain->max_addr); - return -EFAULT; - } + if (dmar_domain->max_addr > (1LL << addr_width)) + return -EINVAL; dmar_domain->gaw = addr_width; /* @@ -4248,6 +4246,7 @@ static int prepare_domain_attach_device(struct iommu_domain *domain, static int intel_iommu_attach_device(struct iommu_domain *domain, struct device *dev) { + struct device_domain_info *info = dev_iommu_priv_get(dev); int ret; if (domain->type == IOMMU_DOMAIN_UNMANAGED && @@ -4256,25 +4255,14 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, return -EPERM; } - /* normally dev is not mapped */ - if (unlikely(domain_context_mapped(dev))) { - struct device_domain_info *info = dev_iommu_priv_get(dev); - - if (info->domain) - dmar_remove_one_dev_info(dev); - } + if (info->domain) + device_block_translation(dev); ret = prepare_domain_attach_device(domain, dev); if (ret) return ret; - return domain_add_dev_info(to_dmar_domain(domain), dev); -} - -static void intel_iommu_detach_device(struct iommu_domain *domain, - struct device *dev) -{ - dmar_remove_one_dev_info(dev); + return dmar_domain_attach_device(to_dmar_domain(domain), dev); } static int intel_iommu_map(struct iommu_domain *domain, @@ -4438,7 +4426,7 @@ static void domain_set_force_snooping(struct dmar_domain *domain) * Second level page table supports per-PTE snoop control. The * iommu_map() interface will handle this by setting SNP bit. 
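The capable() rework a little further below reports IOMMU_CAP_ENFORCE_CACHE_COHERENCY for Intel (AMD does the same earlier in this diff). A hedged sketch of how a consumer could pair the new capability bit with the existing enforce_cache_coherency() domain op; make_domain_coherent() is hypothetical, while device_iommu_capable() is the core helper:

/* Sketch only: query the new capability, then flip the domain to enforced. */
#include <linux/iommu.h>

static bool make_domain_coherent(struct device *dev,
				 struct iommu_domain *domain)
{
	if (!device_iommu_capable(dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY))
		return false;

	/* The op was introduced for VFIO-style users and may be absent. */
	if (!domain->ops->enforce_cache_coherency)
		return false;

	return domain->ops->enforce_cache_coherency(domain);
}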
*/ - if (!domain_use_first_level(domain)) { + if (!domain->use_first_level) { domain->set_pte_snp = true; return; } @@ -4471,14 +4459,20 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain) static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap) { - if (cap == IOMMU_CAP_CACHE_COHERENCY) + struct device_domain_info *info = dev_iommu_priv_get(dev); + + switch (cap) { + case IOMMU_CAP_CACHE_COHERENCY: return true; - if (cap == IOMMU_CAP_INTR_REMAP) + case IOMMU_CAP_INTR_REMAP: return irq_remapping_enabled == 1; - if (cap == IOMMU_CAP_PRE_BOOT_PROTECTION) + case IOMMU_CAP_PRE_BOOT_PROTECTION: return dmar_platform_optin(); - - return false; + case IOMMU_CAP_ENFORCE_CACHE_COHERENCY: + return ecap_sc_support(info->iommu->ecap); + default: + return false; + } } static struct iommu_device *intel_iommu_probe_device(struct device *dev) @@ -4487,6 +4481,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) struct device_domain_info *info; struct intel_iommu *iommu; u8 bus, devfn; + int ret; iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu || !iommu->iommu.ops) @@ -4531,6 +4526,16 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) dev_iommu_priv_set(dev, info); + if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { + ret = intel_pasid_alloc_table(dev); + if (ret) { + dev_err(dev, "PASID table allocation failed\n"); + dev_iommu_priv_set(dev, NULL); + kfree(info); + return ERR_PTR(ret); + } + } + return &iommu->iommu; } @@ -4539,6 +4544,7 @@ static void intel_iommu_release_device(struct device *dev) struct device_domain_info *info = dev_iommu_priv_get(dev); dmar_remove_one_dev_info(dev); + intel_pasid_free_table(dev); dev_iommu_priv_set(dev, NULL); kfree(info); set_dma_ops(dev, NULL); @@ -4732,6 +4738,28 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain, __mapping_notify_one(info->iommu, dmar_domain, pfn, pages); } +static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid) +{ + struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); + struct iommu_domain *domain; + + /* Domain type specific cleanup: */ + domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0); + if (domain) { + switch (domain->type) { + case IOMMU_DOMAIN_SVA: + intel_svm_remove_dev_pasid(dev, pasid); + break; + default: + /* should never reach here */ + WARN_ON(1); + break; + } + } + + intel_pasid_tear_down_entry(iommu, dev, pasid, false); +} + const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, @@ -4744,16 +4772,13 @@ const struct iommu_ops intel_iommu_ops = { .dev_disable_feat = intel_iommu_dev_disable_feat, .is_attach_deferred = intel_iommu_is_attach_deferred, .def_domain_type = device_def_domain_type, + .remove_dev_pasid = intel_iommu_remove_dev_pasid, .pgsize_bitmap = SZ_4K, #ifdef CONFIG_INTEL_IOMMU_SVM - .sva_bind = intel_svm_bind, - .sva_unbind = intel_svm_unbind, - .sva_get_pasid = intel_svm_get_pasid, .page_response = intel_svm_page_response, #endif .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = intel_iommu_attach_device, - .detach_dev = intel_iommu_detach_device, .map_pages = intel_iommu_map_pages, .unmap_pages = intel_iommu_unmap_pages, .iotlb_sync_map = intel_iommu_iotlb_sync_map, diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index b3d82d7093e6b4aa458a2c1d85cc804d4807a335..06e61e4748567a69cb891e07a7e077eee9dad2e1 100644 --- a/drivers/iommu/intel/iommu.h +++ 
b/drivers/iommu/intel/iommu.h @@ -480,8 +480,6 @@ enum { #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1) #define VTD_FLAG_SVM_CAPABLE (1 << 2) -extern int intel_iommu_sm; - #define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap)) #define pasid_supported(iommu) (sm_supported(iommu) && \ ecap_pasid((iommu)->ecap)) @@ -517,14 +515,6 @@ struct context_entry { u64 hi; }; -/* - * When VT-d works in the scalable mode, it allows DMA translation to - * happen through either first level or second level page table. This - * bit marks that the DMA translation for the domain goes through the - * first level page table, otherwise, it goes through the second level. - */ -#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1) - struct iommu_domain_info { struct intel_iommu *iommu; unsigned int refcnt; /* Refcount of devices per iommu */ @@ -541,6 +531,11 @@ struct dmar_domain { u8 iommu_coherency: 1; /* indicate coherency of iommu access */ u8 force_snooping : 1; /* Create IOPTEs with snoop control */ u8 set_pte_snp:1; + u8 use_first_level:1; /* DMA translation for the domain goes + * through the first level page table, + * otherwise, goes through the second + * level. + */ spinlock_t lock; /* Protect device tracking lists */ struct list_head devices; /* all devices' list */ @@ -550,8 +545,6 @@ struct dmar_domain { /* adjusted guest address width, 0 is level 2 30-bit */ int agaw; - - int flags; /* flags to find out type of domain */ int iommu_superpage;/* Level of superpages supported: 0 == 4KiB (no superpages), 1 == 2MiB, 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ @@ -753,12 +746,10 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); extern void intel_svm_check(struct intel_iommu *iommu); extern int intel_svm_enable_prq(struct intel_iommu *iommu); extern int intel_svm_finish_prq(struct intel_iommu *iommu); -struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, - void *drvdata); -void intel_svm_unbind(struct iommu_sva *handle); -u32 intel_svm_get_pasid(struct iommu_sva *handle); int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt, struct iommu_page_response *msg); +struct iommu_domain *intel_svm_domain_alloc(void); +void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid); struct intel_svm_dev { struct list_head list; @@ -783,6 +774,14 @@ struct intel_svm { }; #else static inline void intel_svm_check(struct intel_iommu *iommu) {} +static inline struct iommu_domain *intel_svm_domain_alloc(void) +{ + return NULL; +} + +static inline void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid) +{ +} #endif #ifdef CONFIG_INTEL_IOMMU_DEBUGFS @@ -798,6 +797,7 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, extern const struct iommu_ops intel_iommu_ops; #ifdef CONFIG_INTEL_IOMMU +extern int intel_iommu_sm; extern int iommu_calculate_agaw(struct intel_iommu *iommu); extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); extern int dmar_disabled; @@ -813,6 +813,7 @@ static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) } #define dmar_disabled (1) #define intel_iommu_enabled (0) +#define intel_iommu_sm (0) #endif static inline const char *decode_prq_descriptor(char *str, size_t size, diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index a723f53ba472f967833536db39fecd6c1a966f12..f58f5f57af782ba8be7c4c598526763a2d347081 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -174,7 +174,6 @@ 
static int modify_irte(struct irq_2_iommu *irq_iommu, index = irq_iommu->irte_index + irq_iommu->sub_handle; irte = &iommu->ir_table->base[index]; -#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) if ((irte->pst == 1) || (irte_modified->pst == 1)) { bool ret; @@ -188,11 +187,9 @@ static int modify_irte(struct irq_2_iommu *irq_iommu, * same as the old value. */ WARN_ON(!ret); - } else -#endif - { - set_64bit(&irte->low, irte_modified->low); - set_64bit(&irte->high, irte_modified->high); + } else { + WRITE_ONCE(irte->low, irte_modified->low); + WRITE_ONCE(irte->high, irte_modified->high); } __iommu_flush_cache(iommu, irte, sizeof(*irte)); @@ -250,8 +247,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu) end = start + (1 << irq_iommu->irte_mask); for (entry = start; entry < end; entry++) { - set_64bit(&entry->low, 0); - set_64bit(&entry->high, 0); + WRITE_ONCE(entry->low, 0); + WRITE_ONCE(entry->high, 0); } bitmap_release_region(iommu->ir_table->bitmap, index, irq_iommu->irte_mask); diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index e13d7e5273e195cc0bc84019e650e4c7e7a5523c..fb3c7020028d07cc4e8b598f25b4b4f185c67376 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -101,8 +101,10 @@ int intel_pasid_alloc_table(struct device *dev) might_sleep(); info = dev_iommu_priv_get(dev); - if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table)) - return -EINVAL; + if (WARN_ON(!info || !dev_is_pci(dev))) + return -ENODEV; + if (WARN_ON(info->pasid_table)) + return -EEXIST; pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL); if (!pasid_table) diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 03b25358946c4fa00d101f69a8178effc61d7664..c76b66263467aefa12df8aee8ca1d09963933465 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -24,7 +24,7 @@ #include "iommu.h" #include "pasid.h" #include "perf.h" -#include "../iommu-sva-lib.h" +#include "../iommu-sva.h" #include "trace.h" static irqreturn_t prq_event_thread(int irq, void *d); @@ -299,19 +299,9 @@ out: return 0; } -static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm, - unsigned int flags) -{ - ioasid_t max_pasid = dev_is_pci(dev) ? - pci_max_pasids(to_pci_dev(dev)) : intel_pasid_max_id; - - return iommu_sva_alloc_pasid(mm, PASID_MIN, max_pasid - 1); -} - static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev, - struct mm_struct *mm, - unsigned int flags) + struct mm_struct *mm) { struct device_domain_info *info = dev_iommu_priv_get(dev); struct intel_svm_dev *sdev; @@ -327,22 +317,18 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu, svm->pasid = mm->pasid; svm->mm = mm; - svm->flags = flags; INIT_LIST_HEAD_RCU(&svm->devs); - if (!(flags & SVM_FLAG_SUPERVISOR_MODE)) { - svm->notifier.ops = &intel_mmuops; - ret = mmu_notifier_register(&svm->notifier, mm); - if (ret) { - kfree(svm); - return ERR_PTR(ret); - } + svm->notifier.ops = &intel_mmuops; + ret = mmu_notifier_register(&svm->notifier, mm); + if (ret) { + kfree(svm); + return ERR_PTR(ret); } ret = pasid_private_add(svm->pasid, svm); if (ret) { - if (svm->notifier.ops) - mmu_notifier_unregister(&svm->notifier, mm); + mmu_notifier_unregister(&svm->notifier, mm); kfree(svm); return ERR_PTR(ret); } @@ -377,9 +363,7 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu, } /* Setup the pasid table: */ - sflags = (flags & SVM_FLAG_SUPERVISOR_MODE) ? 
- PASID_FLAG_SUPERVISOR_MODE : 0; - sflags |= cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0; + sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0; ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid, FLPT_DEFAULT_DID, sflags); if (ret) @@ -393,8 +377,7 @@ free_sdev: kfree(sdev); free_svm: if (list_empty(&svm->devs)) { - if (svm->notifier.ops) - mmu_notifier_unregister(&svm->notifier, mm); + mmu_notifier_unregister(&svm->notifier, mm); pasid_private_remove(mm->pasid); kfree(svm); } @@ -787,67 +770,6 @@ prq_advance: return IRQ_RETVAL(handled); } -struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata) -{ - struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); - unsigned int flags = 0; - struct iommu_sva *sva; - int ret; - - if (drvdata) - flags = *(unsigned int *)drvdata; - - if (flags & SVM_FLAG_SUPERVISOR_MODE) { - if (!ecap_srs(iommu->ecap)) { - dev_err(dev, "%s: Supervisor PASID not supported\n", - iommu->name); - return ERR_PTR(-EOPNOTSUPP); - } - - if (mm) { - dev_err(dev, "%s: Supervisor PASID with user provided mm\n", - iommu->name); - return ERR_PTR(-EINVAL); - } - - mm = &init_mm; - } - - mutex_lock(&pasid_mutex); - ret = intel_svm_alloc_pasid(dev, mm, flags); - if (ret) { - mutex_unlock(&pasid_mutex); - return ERR_PTR(ret); - } - - sva = intel_svm_bind_mm(iommu, dev, mm, flags); - mutex_unlock(&pasid_mutex); - - return sva; -} - -void intel_svm_unbind(struct iommu_sva *sva) -{ - struct intel_svm_dev *sdev = to_intel_svm_dev(sva); - - mutex_lock(&pasid_mutex); - intel_svm_unbind_mm(sdev->dev, sdev->pasid); - mutex_unlock(&pasid_mutex); -} - -u32 intel_svm_get_pasid(struct iommu_sva *sva) -{ - struct intel_svm_dev *sdev; - u32 pasid; - - mutex_lock(&pasid_mutex); - sdev = to_intel_svm_dev(sva); - pasid = sdev->pasid; - mutex_unlock(&pasid_mutex); - - return pasid; -} - int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt, struct iommu_page_response *msg) @@ -918,3 +840,50 @@ int intel_svm_page_response(struct device *dev, out: return ret; } + +void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid) +{ + mutex_lock(&pasid_mutex); + intel_svm_unbind_mm(dev, pasid); + mutex_unlock(&pasid_mutex); +} + +static int intel_svm_set_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct intel_iommu *iommu = info->iommu; + struct mm_struct *mm = domain->mm; + struct iommu_sva *sva; + int ret = 0; + + mutex_lock(&pasid_mutex); + sva = intel_svm_bind_mm(iommu, dev, mm); + if (IS_ERR(sva)) + ret = PTR_ERR(sva); + mutex_unlock(&pasid_mutex); + + return ret; +} + +static void intel_svm_domain_free(struct iommu_domain *domain) +{ + kfree(to_dmar_domain(domain)); +} + +static const struct iommu_domain_ops intel_svm_domain_ops = { + .set_dev_pasid = intel_svm_set_dev_pasid, + .free = intel_svm_domain_free +}; + +struct iommu_domain *intel_svm_domain_alloc(void) +{ + struct dmar_domain *domain; + + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if (!domain) + return NULL; + domain->domain.ops = &intel_svm_domain_ops; + + return &domain->domain; +} diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c index 1df8c1dcae7761feb78238ba8bfcfd78a6ed3beb..e5b8b9110c132aa88444e52fd75585aba1945bab 100644 --- a/drivers/iommu/io-pgfault.c +++ b/drivers/iommu/io-pgfault.c @@ -11,7 +11,7 @@ #include #include -#include "iommu-sva-lib.h" +#include "iommu-sva.h" /** * struct 
iopf_queue - IO Page Fault queue @@ -69,69 +69,18 @@ static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf, return iommu_page_response(dev, &resp); } -static enum iommu_page_response_code -iopf_handle_single(struct iopf_fault *iopf) -{ - vm_fault_t ret; - struct mm_struct *mm; - struct vm_area_struct *vma; - unsigned int access_flags = 0; - unsigned int fault_flags = FAULT_FLAG_REMOTE; - struct iommu_fault_page_request *prm = &iopf->fault.prm; - enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID; - - if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID)) - return status; - - mm = iommu_sva_find(prm->pasid); - if (IS_ERR_OR_NULL(mm)) - return status; - - mmap_read_lock(mm); - - vma = find_extend_vma(mm, prm->addr); - if (!vma) - /* Unmapped area */ - goto out_put_mm; - - if (prm->perm & IOMMU_FAULT_PERM_READ) - access_flags |= VM_READ; - - if (prm->perm & IOMMU_FAULT_PERM_WRITE) { - access_flags |= VM_WRITE; - fault_flags |= FAULT_FLAG_WRITE; - } - - if (prm->perm & IOMMU_FAULT_PERM_EXEC) { - access_flags |= VM_EXEC; - fault_flags |= FAULT_FLAG_INSTRUCTION; - } - - if (!(prm->perm & IOMMU_FAULT_PERM_PRIV)) - fault_flags |= FAULT_FLAG_USER; - - if (access_flags & ~vma->vm_flags) - /* Access fault */ - goto out_put_mm; - - ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL); - status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID : - IOMMU_PAGE_RESP_SUCCESS; - -out_put_mm: - mmap_read_unlock(mm); - mmput(mm); - - return status; -} - -static void iopf_handle_group(struct work_struct *work) +static void iopf_handler(struct work_struct *work) { struct iopf_group *group; + struct iommu_domain *domain; struct iopf_fault *iopf, *next; enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS; group = container_of(work, struct iopf_group, work); + domain = iommu_get_domain_for_dev_pasid(group->dev, + group->last_fault.fault.prm.pasid, 0); + if (!domain || !domain->iopf_handler) + status = IOMMU_PAGE_RESP_INVALID; list_for_each_entry_safe(iopf, next, &group->faults, list) { /* @@ -139,7 +88,8 @@ static void iopf_handle_group(struct work_struct *work) * faults in the group if there is an error. */ if (status == IOMMU_PAGE_RESP_SUCCESS) - status = iopf_handle_single(iopf); + status = domain->iopf_handler(&iopf->fault, + domain->fault_data); if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) @@ -181,6 +131,13 @@ static void iopf_handle_group(struct work_struct *work) * request completes, outstanding faults will have been dealt with by the time * the PASID is freed. * + * Any valid page fault will eventually be routed to an iommu domain, and the + * page fault handler installed there will be called. Users of this handling + * framework must guarantee that the iommu domain is only freed after the + * device has stopped generating page faults (or the iommu hardware has been + * set to block the page faults) and all pending page faults have been + * flushed. + * + * Return: 0 on success and <0 on error.
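With iopf_handle_single() gone, fault dispatch is purely per-domain: the group worker looks up the domain attached for the faulting PASID and calls its iopf_handler with fault_data. The handler signature below is implied by that call site; sva_iopf_handler() and install_iopf_handler() are hypothetical names in a sketch of how such a handler gets installed:

/* Sketch only: installing a per-domain fault handler for the path above. */
#include <linux/iommu.h>

static enum iommu_page_response_code
sva_iopf_handler(struct iommu_fault *fault, void *data)
{
	/* Resolve fault->prm against the mm bound to this domain here. */
	return IOMMU_PAGE_RESP_SUCCESS;
}

static void install_iopf_handler(struct iommu_domain *domain, void *data)
{
	/* Must be set before faults can be taken on this domain. */
	domain->iopf_handler = sva_iopf_handler;
	domain->fault_data = data;
}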
*/ int iommu_queue_iopf(struct iommu_fault *fault, void *cookie) @@ -235,7 +192,7 @@ int iommu_queue_iopf(struct iommu_fault *fault, void *cookie) group->last_fault.fault = *fault; INIT_LIST_HEAD(&group->faults); list_add(&group->last_fault.list, &group->faults); - INIT_WORK(&group->work, iopf_handle_group); + INIT_WORK(&group->work, iopf_handler); /* See if we have partial faults for this group */ list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index ba3115fd0f86a82c2fc0fee1566f58d5cfd29c99..75f244a3e12df6fe9a4b7eda88671a82b2369480 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -564,8 +564,7 @@ static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova, iova += pgsize; paddr += pgsize; - if (mapped) - *mapped += pgsize; + *mapped += pgsize; } /* * Synchronise all PTE updates for the new mapping before there's @@ -576,12 +575,6 @@ static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova, return ret; } -static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp) -{ - return arm_v7s_map_pages(ops, iova, paddr, size, 1, prot, gfp, NULL); -} - static void arm_v7s_free_pgtable(struct io_pgtable *iop) { struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop); @@ -764,12 +757,6 @@ static size_t arm_v7s_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova return unmapped; } -static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, - size_t size, struct iommu_iotlb_gather *gather) -{ - return arm_v7s_unmap_pages(ops, iova, size, 1, gather); -} - static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova) { @@ -842,9 +829,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, goto out_free_data; data->iop.ops = (struct io_pgtable_ops) { - .map = arm_v7s_map, .map_pages = arm_v7s_map_pages, - .unmap = arm_v7s_unmap, .unmap_pages = arm_v7s_unmap_pages, .iova_to_phys = arm_v7s_iova_to_phys, }; @@ -954,6 +939,7 @@ static int __init arm_v7s_do_selftests(void) }; unsigned int iova, size, iova_start; unsigned int i, loopnr = 0; + size_t mapped; selftest_running = true; @@ -984,15 +970,16 @@ static int __init arm_v7s_do_selftests(void) iova = 0; for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) { size = 1UL << i; - if (ops->map(ops, iova, iova, size, IOMMU_READ | - IOMMU_WRITE | - IOMMU_NOEXEC | - IOMMU_CACHE, GFP_KERNEL)) + if (ops->map_pages(ops, iova, iova, size, 1, + IOMMU_READ | IOMMU_WRITE | + IOMMU_NOEXEC | IOMMU_CACHE, + GFP_KERNEL, &mapped)) return __FAIL(ops); /* Overlapping mappings */ - if (!ops->map(ops, iova, iova + size, size, - IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL)) + if (!ops->map_pages(ops, iova, iova + size, size, 1, + IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL, + &mapped)) return __FAIL(ops); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) @@ -1007,11 +994,12 @@ static int __init arm_v7s_do_selftests(void) size = 1UL << __ffs(cfg.pgsize_bitmap); while (i < loopnr) { iova_start = i * SZ_16M; - if (ops->unmap(ops, iova_start + size, size, NULL) != size) + if (ops->unmap_pages(ops, iova_start + size, size, 1, NULL) != size) return __FAIL(ops); /* Remap of partial unmap */ - if (ops->map(ops, iova_start + size, size, size, IOMMU_READ, GFP_KERNEL)) + if (ops->map_pages(ops, iova_start + size, size, size, 1, + IOMMU_READ, GFP_KERNEL, &mapped)) return 
__FAIL(ops); if (ops->iova_to_phys(ops, iova_start + size + 42) @@ -1025,14 +1013,15 @@ static int __init arm_v7s_do_selftests(void) for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) { size = 1UL << i; - if (ops->unmap(ops, iova, size, NULL) != size) + if (ops->unmap_pages(ops, iova, size, 1, NULL) != size) return __FAIL(ops); if (ops->iova_to_phys(ops, iova + 42)) return __FAIL(ops); /* Remap full block */ - if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL)) + if (ops->map_pages(ops, iova, iova, size, 1, IOMMU_WRITE, + GFP_KERNEL, &mapped)) return __FAIL(ops); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 0ba817e863465b8babf33d025c3309d85e329c87..72dcdd468cf30d6ec32eaf0795463549076309b0 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -360,7 +360,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start; num_entries = min_t(int, pgcount, max_entries); ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep); - if (!ret && mapped) + if (!ret) *mapped += num_entries * size; return ret; @@ -496,13 +496,6 @@ static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova, return ret; } -static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp) -{ - return arm_lpae_map_pages(ops, iova, paddr, size, 1, iommu_prot, gfp, - NULL); -} - static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, arm_lpae_iopte *ptep) { @@ -682,12 +675,6 @@ static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iov data->start_level, ptep); } -static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, - size_t size, struct iommu_iotlb_gather *gather) -{ - return arm_lpae_unmap_pages(ops, iova, size, 1, gather); -} - static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova) { @@ -799,9 +786,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1)); data->iop.ops = (struct io_pgtable_ops) { - .map = arm_lpae_map, .map_pages = arm_lpae_map_pages, - .unmap = arm_lpae_unmap, .unmap_pages = arm_lpae_unmap_pages, .iova_to_phys = arm_lpae_iova_to_phys, }; @@ -1176,7 +1161,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) int i, j; unsigned long iova; - size_t size; + size_t size, mapped; struct io_pgtable_ops *ops; selftest_running = true; @@ -1209,15 +1194,16 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) { size = 1UL << j; - if (ops->map(ops, iova, iova, size, IOMMU_READ | - IOMMU_WRITE | - IOMMU_NOEXEC | - IOMMU_CACHE, GFP_KERNEL)) + if (ops->map_pages(ops, iova, iova, size, 1, + IOMMU_READ | IOMMU_WRITE | + IOMMU_NOEXEC | IOMMU_CACHE, + GFP_KERNEL, &mapped)) return __FAIL(ops, i); /* Overlapping mappings */ - if (!ops->map(ops, iova, iova + size, size, - IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL)) + if (!ops->map_pages(ops, iova, iova + size, size, 1, + IOMMU_READ | IOMMU_NOEXEC, + GFP_KERNEL, &mapped)) return __FAIL(ops, i); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) @@ -1228,11 +1214,12 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) /* Partial unmap */ size = 1UL << __ffs(cfg->pgsize_bitmap); - if (ops->unmap(ops, 
SZ_1G + size, size, NULL) != size) + if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size) return __FAIL(ops, i); /* Remap of partial unmap */ - if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL)) + if (ops->map_pages(ops, SZ_1G + size, size, size, 1, + IOMMU_READ, GFP_KERNEL, &mapped)) return __FAIL(ops, i); if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) @@ -1243,14 +1230,15 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) { size = 1UL << j; - if (ops->unmap(ops, iova, size, NULL) != size) + if (ops->unmap_pages(ops, iova, size, 1, NULL) != size) return __FAIL(ops, i); if (ops->iova_to_phys(ops, iova + 42)) return __FAIL(ops, i); /* Remap full block */ - if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL)) + if (ops->map_pages(ops, iova, iova, size, 1, + IOMMU_WRITE, GFP_KERNEL, &mapped)) return __FAIL(ops, i); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c deleted file mode 100644 index 10650614389605b6442b471af36546fe8d29903f..0000000000000000000000000000000000000000 --- a/drivers/iommu/iommu-sva-lib.c +++ /dev/null @@ -1,71 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Helpers for IOMMU drivers implementing SVA - */ -#include -#include - -#include "iommu-sva-lib.h" - -static DEFINE_MUTEX(iommu_sva_lock); -static DECLARE_IOASID_SET(iommu_sva_pasid); - -/** - * iommu_sva_alloc_pasid - Allocate a PASID for the mm - * @mm: the mm - * @min: minimum PASID value (inclusive) - * @max: maximum PASID value (inclusive) - * - * Try to allocate a PASID for this mm, or take a reference to the existing one - * provided it fits within the [@min, @max] range. On success the PASID is - * available in mm->pasid and will be available for the lifetime of the mm. - * - * Returns 0 on success and < 0 on error. - */ -int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max) -{ - int ret = 0; - ioasid_t pasid; - - if (min == INVALID_IOASID || max == INVALID_IOASID || - min == 0 || max < min) - return -EINVAL; - - mutex_lock(&iommu_sva_lock); - /* Is a PASID already associated with this mm? */ - if (pasid_valid(mm->pasid)) { - if (mm->pasid < min || mm->pasid >= max) - ret = -EOVERFLOW; - goto out; - } - - pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm); - if (!pasid_valid(pasid)) - ret = -ENOMEM; - else - mm_pasid_set(mm, pasid); -out: - mutex_unlock(&iommu_sva_lock); - return ret; -} -EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid); - -/* ioasid_find getter() requires a void * argument */ -static bool __mmget_not_zero(void *mm) -{ - return mmget_not_zero(mm); -} - -/** - * iommu_sva_find() - Find mm associated to the given PASID - * @pasid: Process Address Space ID assigned to the mm - * - * On success a reference to the mm is taken, and must be released with mmput(). - * - * Returns the mm corresponding to this PASID, or an error if not found. 
- */ -struct mm_struct *iommu_sva_find(ioasid_t pasid) -{ - return ioasid_find(&iommu_sva_pasid, pasid, __mmget_not_zero); -} -EXPORT_SYMBOL_GPL(iommu_sva_find); diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c new file mode 100644 index 0000000000000000000000000000000000000000..24bf9b2b58aa6d2c2b84f390290b1d9253994d8e --- /dev/null +++ b/drivers/iommu/iommu-sva.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Helpers for IOMMU drivers implementing SVA + */ +#include +#include +#include + +#include "iommu-sva.h" + +static DEFINE_MUTEX(iommu_sva_lock); +static DECLARE_IOASID_SET(iommu_sva_pasid); + +/** + * iommu_sva_alloc_pasid - Allocate a PASID for the mm + * @mm: the mm + * @min: minimum PASID value (inclusive) + * @max: maximum PASID value (inclusive) + * + * Try to allocate a PASID for this mm, or take a reference to the existing one + * provided it fits within the [@min, @max] range. On success the PASID is + * available in mm->pasid and will be available for the lifetime of the mm. + * + * Returns 0 on success and < 0 on error. + */ +int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max) +{ + int ret = 0; + ioasid_t pasid; + + if (min == INVALID_IOASID || max == INVALID_IOASID || + min == 0 || max < min) + return -EINVAL; + + mutex_lock(&iommu_sva_lock); + /* Is a PASID already associated with this mm? */ + if (pasid_valid(mm->pasid)) { + if (mm->pasid < min || mm->pasid >= max) + ret = -EOVERFLOW; + goto out; + } + + pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm); + if (!pasid_valid(pasid)) + ret = -ENOMEM; + else + mm_pasid_set(mm, pasid); +out: + mutex_unlock(&iommu_sva_lock); + return ret; +} +EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid); + +/* ioasid_find getter() requires a void * argument */ +static bool __mmget_not_zero(void *mm) +{ + return mmget_not_zero(mm); +} + +/** + * iommu_sva_find() - Find mm associated to the given PASID + * @pasid: Process Address Space ID assigned to the mm + * + * On success a reference to the mm is taken, and must be released with mmput(). + * + * Returns the mm corresponding to this PASID, or an error if not found. + */ +struct mm_struct *iommu_sva_find(ioasid_t pasid) +{ + return ioasid_find(&iommu_sva_pasid, pasid, __mmget_not_zero); +} +EXPORT_SYMBOL_GPL(iommu_sva_find); + +/** + * iommu_sva_bind_device() - Bind a process address space to a device + * @dev: the device + * @mm: the mm to bind, caller must hold a reference to mm_users + * + * Create a bond between device and address space, allowing the device to + * access the mm using the PASID returned by iommu_sva_get_pasid(). If a + * bond already exists between @device and @mm, an additional internal + * reference is taken. Caller must call iommu_sva_unbind_device() + * to release each reference. + * + * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to + * initialize the required SVA features. + * + * On error, returns an ERR_PTR value. + */ +struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm) +{ + struct iommu_domain *domain; + struct iommu_sva *handle; + ioasid_t max_pasids; + int ret; + + max_pasids = dev->iommu->max_pasids; + if (!max_pasids) + return ERR_PTR(-EOPNOTSUPP); + + /* Allocate mm->pasid if necessary. */ + ret = iommu_sva_alloc_pasid(mm, 1, max_pasids - 1); + if (ret) + return ERR_PTR(ret); + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return ERR_PTR(-ENOMEM); + + mutex_lock(&iommu_sva_lock); + /* Search for an existing domain. 
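+	 * If one is already attached to mm->pasid, the code below only
+	 * takes an extra reference on it; otherwise a new SVA domain is
+	 * allocated and attached.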
*/ + domain = iommu_get_domain_for_dev_pasid(dev, mm->pasid, + IOMMU_DOMAIN_SVA); + if (IS_ERR(domain)) { + ret = PTR_ERR(domain); + goto out_unlock; + } + + if (domain) { + domain->users++; + goto out; + } + + /* Allocate a new domain and set it on device pasid. */ + domain = iommu_sva_domain_alloc(dev, mm); + if (!domain) { + ret = -ENOMEM; + goto out_unlock; + } + + ret = iommu_attach_device_pasid(domain, dev, mm->pasid); + if (ret) + goto out_free_domain; + domain->users = 1; +out: + mutex_unlock(&iommu_sva_lock); + handle->dev = dev; + handle->domain = domain; + + return handle; + +out_free_domain: + iommu_domain_free(domain); +out_unlock: + mutex_unlock(&iommu_sva_lock); + kfree(handle); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(iommu_sva_bind_device); + +/** + * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device + * @handle: the handle returned by iommu_sva_bind_device() + * + * Put reference to a bond between device and address space. The device should + * not be issuing any more transaction for this PASID. All outstanding page + * requests for this PASID must have been flushed to the IOMMU. + */ +void iommu_sva_unbind_device(struct iommu_sva *handle) +{ + struct iommu_domain *domain = handle->domain; + ioasid_t pasid = domain->mm->pasid; + struct device *dev = handle->dev; + + mutex_lock(&iommu_sva_lock); + if (--domain->users == 0) { + iommu_detach_device_pasid(domain, dev, pasid); + iommu_domain_free(domain); + } + mutex_unlock(&iommu_sva_lock); + kfree(handle); +} +EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); + +u32 iommu_sva_get_pasid(struct iommu_sva *handle) +{ + struct iommu_domain *domain = handle->domain; + + return domain->mm->pasid; +} +EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); + +/* + * I/O page fault handler for SVA + */ +enum iommu_page_response_code +iommu_sva_handle_iopf(struct iommu_fault *fault, void *data) +{ + vm_fault_t ret; + struct vm_area_struct *vma; + struct mm_struct *mm = data; + unsigned int access_flags = 0; + unsigned int fault_flags = FAULT_FLAG_REMOTE; + struct iommu_fault_page_request *prm = &fault->prm; + enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID; + + if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID)) + return status; + + if (!mmget_not_zero(mm)) + return status; + + mmap_read_lock(mm); + + vma = find_extend_vma(mm, prm->addr); + if (!vma) + /* Unmapped area */ + goto out_put_mm; + + if (prm->perm & IOMMU_FAULT_PERM_READ) + access_flags |= VM_READ; + + if (prm->perm & IOMMU_FAULT_PERM_WRITE) { + access_flags |= VM_WRITE; + fault_flags |= FAULT_FLAG_WRITE; + } + + if (prm->perm & IOMMU_FAULT_PERM_EXEC) { + access_flags |= VM_EXEC; + fault_flags |= FAULT_FLAG_INSTRUCTION; + } + + if (!(prm->perm & IOMMU_FAULT_PERM_PRIV)) + fault_flags |= FAULT_FLAG_USER; + + if (access_flags & ~vma->vm_flags) + /* Access fault */ + goto out_put_mm; + + ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL); + status = ret & VM_FAULT_ERROR ? 
IOMMU_PAGE_RESP_INVALID : + IOMMU_PAGE_RESP_SUCCESS; + +out_put_mm: + mmap_read_unlock(mm); + mmput(mm); + + return status; +} diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva.h similarity index 83% rename from drivers/iommu/iommu-sva-lib.h rename to drivers/iommu/iommu-sva.h index 8909ea1094e3a842a92e289a8573cae7859c47fa..7215a761b96282c4e22e2e0b74b3d635472d0a3e 100644 --- a/drivers/iommu/iommu-sva-lib.h +++ b/drivers/iommu/iommu-sva.h @@ -2,8 +2,8 @@ /* * SVA library for IOMMU drivers */ -#ifndef _IOMMU_SVA_LIB_H -#define _IOMMU_SVA_LIB_H +#ifndef _IOMMU_SVA_H +#define _IOMMU_SVA_H #include #include @@ -26,6 +26,8 @@ int iopf_queue_flush_dev(struct device *dev); struct iopf_queue *iopf_queue_alloc(const char *name); void iopf_queue_free(struct iopf_queue *queue); int iopf_queue_discard_partial(struct iopf_queue *queue); +enum iommu_page_response_code +iommu_sva_handle_iopf(struct iommu_fault *fault, void *data); #else /* CONFIG_IOMMU_SVA */ static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie) @@ -63,5 +65,11 @@ static inline int iopf_queue_discard_partial(struct iopf_queue *queue) { return -ENODEV; } + +static inline enum iommu_page_response_code +iommu_sva_handle_iopf(struct iommu_fault *fault, void *data) +{ + return IOMMU_PAGE_RESP_INVALID; +} #endif /* CONFIG_IOMMU_SVA */ -#endif /* _IOMMU_SVA_LIB_H */ +#endif /* _IOMMU_SVA_H */ diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 65a3b3d886dc0092ddf2a73735d7bd06de7d6f66..de91dd88705bd3dc108ac4d68ed52445d7faa9ff 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -28,9 +29,12 @@ #include #include #include +#include #include "dma-iommu.h" +#include "iommu-sva.h" + static struct kset *iommu_group_kset; static DEFINE_IDA(iommu_group_ida); @@ -42,6 +46,7 @@ struct iommu_group { struct kobject kobj; struct kobject *devices_kobj; struct list_head devices; + struct xarray pasid_array; struct mutex mutex; void *iommu_data; void (*iommu_data_release)(void *iommu_data); @@ -278,18 +283,46 @@ static void dev_iommu_free(struct device *dev) kfree(param); } +static u32 dev_iommu_get_max_pasids(struct device *dev) +{ + u32 max_pasids = 0, bits = 0; + int ret; + + if (dev_is_pci(dev)) { + ret = pci_max_pasids(to_pci_dev(dev)); + if (ret > 0) + max_pasids = ret; + } else { + ret = device_property_read_u32(dev, "pasid-num-bits", &bits); + if (!ret) + max_pasids = 1UL << bits; + } + + return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids); +} + static int __iommu_probe_device(struct device *dev, struct list_head *group_list) { const struct iommu_ops *ops = dev->bus->iommu_ops; struct iommu_device *iommu_dev; struct iommu_group *group; + static DEFINE_MUTEX(iommu_probe_device_lock); int ret; if (!ops) return -ENODEV; - - if (!dev_iommu_get(dev)) - return -ENOMEM; + /* + * Serialise to avoid races between IOMMU drivers registering in + * parallel and/or the "replay" calls from ACPI/OF code via client + * driver probe. Once the latter have been cleaned up we should + * probably be able to use device_lock() here to minimise the scope, + * but for now enforcing a simple global ordering is fine. 
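+	 *
+	 * (Note: the mutex below is function-local; only this probe path
+	 * ever takes it.)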
+ */ + mutex_lock(&iommu_probe_device_lock); + if (!dev_iommu_get(dev)) { + ret = -ENOMEM; + goto err_unlock; + } if (!try_module_get(ops->owner)) { ret = -EINVAL; @@ -303,17 +336,21 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list } dev->iommu->iommu_dev = iommu_dev; + dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev); group = iommu_group_get_for_dev(dev); if (IS_ERR(group)) { ret = PTR_ERR(group); goto out_release; } - iommu_group_put(group); + mutex_lock(&group->mutex); if (group_list && !group->default_domain && list_empty(&group->entry)) list_add_tail(&group->entry, group_list); + mutex_unlock(&group->mutex); + iommu_group_put(group); + mutex_unlock(&iommu_probe_device_lock); iommu_device_link(iommu_dev, dev); return 0; @@ -328,6 +365,9 @@ out_module_put: err_free: dev_iommu_free(dev); +err_unlock: + mutex_unlock(&iommu_probe_device_lock); + return ret; } @@ -703,6 +743,7 @@ struct iommu_group *iommu_group_alloc(void) mutex_init(&group->mutex); INIT_LIST_HEAD(&group->devices); INIT_LIST_HEAD(&group->entry); + xa_init(&group->pasid_array); ret = ida_alloc(&iommu_group_ida, GFP_KERNEL); if (ret < 0) { @@ -1799,11 +1840,11 @@ int bus_iommu_probe(struct bus_type *bus) return ret; list_for_each_entry_safe(group, next, &group_list, entry) { + mutex_lock(&group->mutex); + /* Remove item from the list */ list_del_init(&group->entry); - mutex_lock(&group->mutex); - /* Try to allocate default domain */ probe_alloc_default_domain(bus, group); @@ -1912,6 +1953,8 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc); void iommu_domain_free(struct iommu_domain *domain) { + if (domain->type == IOMMU_DOMAIN_SVA) + mmdrop(domain->mm); iommu_put_dma_cookie(domain); domain->ops->free(domain); } @@ -1949,6 +1992,18 @@ static int __iommu_attach_device(struct iommu_domain *domain, return ret; } +/** + * iommu_attach_device - Attach an IOMMU domain to a device + * @domain: IOMMU domain to attach + * @dev: Device that will be attached + * + * Returns 0 on success and error code on failure + * + * Note that EINVAL can be treated as a soft failure, indicating + * that certain configuration of the domain is incompatible with + * the device. In this case attaching a different domain to the + * device may succeed. + */ int iommu_attach_device(struct iommu_domain *domain, struct device *dev) { struct iommu_group *group; @@ -2075,6 +2130,18 @@ static int __iommu_attach_group(struct iommu_domain *domain, return ret; } +/** + * iommu_attach_group - Attach an IOMMU domain to an IOMMU group + * @domain: IOMMU domain to attach + * @group: IOMMU group that will be attached + * + * Returns 0 on success and error code on failure + * + * Note that EINVAL can be treated as a soft failure, indicating + * that certain configuration of the domain is incompatible with + * the group. In this case attaching a different domain to the + * group may succeed. + */ int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) { int ret; @@ -2726,98 +2793,6 @@ int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) } EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); -/** - * iommu_sva_bind_device() - Bind a process address space to a device - * @dev: the device - * @mm: the mm to bind, caller must hold a reference to it - * @drvdata: opaque data pointer to pass to bind callback - * - * Create a bond between device and address space, allowing the device to access - * the mm using the returned PASID. 
If a bond already exists between @device and - * @mm, it is returned and an additional reference is taken. Caller must call - * iommu_sva_unbind_device() to release each reference. - * - * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to - * initialize the required SVA features. - * - * On error, returns an ERR_PTR value. - */ -struct iommu_sva * -iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) -{ - struct iommu_group *group; - struct iommu_sva *handle = ERR_PTR(-EINVAL); - const struct iommu_ops *ops = dev_iommu_ops(dev); - - if (!ops->sva_bind) - return ERR_PTR(-ENODEV); - - group = iommu_group_get(dev); - if (!group) - return ERR_PTR(-ENODEV); - - /* Ensure device count and domain don't change while we're binding */ - mutex_lock(&group->mutex); - - /* - * To keep things simple, SVA currently doesn't support IOMMU groups - * with more than one device. Existing SVA-capable systems are not - * affected by the problems that required IOMMU groups (lack of ACS - * isolation, device ID aliasing and other hardware issues). - */ - if (iommu_group_device_count(group) != 1) - goto out_unlock; - - handle = ops->sva_bind(dev, mm, drvdata); - -out_unlock: - mutex_unlock(&group->mutex); - iommu_group_put(group); - - return handle; -} -EXPORT_SYMBOL_GPL(iommu_sva_bind_device); - -/** - * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device - * @handle: the handle returned by iommu_sva_bind_device() - * - * Put reference to a bond between device and address space. The device should - * not be issuing any more transaction for this PASID. All outstanding page - * requests for this PASID must have been flushed to the IOMMU. - */ -void iommu_sva_unbind_device(struct iommu_sva *handle) -{ - struct iommu_group *group; - struct device *dev = handle->dev; - const struct iommu_ops *ops = dev_iommu_ops(dev); - - if (!ops->sva_unbind) - return; - - group = iommu_group_get(dev); - if (!group) - return; - - mutex_lock(&group->mutex); - ops->sva_unbind(handle); - mutex_unlock(&group->mutex); - - iommu_group_put(group); -} -EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); - -u32 iommu_sva_get_pasid(struct iommu_sva *handle) -{ - const struct iommu_ops *ops = dev_iommu_ops(handle->dev); - - if (!ops->sva_get_pasid) - return IOMMU_PASID_INVALID; - - return ops->sva_get_pasid(handle); -} -EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); - /* * Changes the default domain of an iommu group that has *only* one device * @@ -3087,7 +3062,8 @@ int iommu_device_use_default_domain(struct device *dev) mutex_lock(&group->mutex); if (group->owner_cnt) { - if (group->owner || !iommu_is_default_domain(group)) { + if (group->owner || !iommu_is_default_domain(group) || + !xa_empty(&group->pasid_array)) { ret = -EBUSY; goto unlock_out; } @@ -3118,7 +3094,7 @@ void iommu_device_unuse_default_domain(struct device *dev) return; mutex_lock(&group->mutex); - if (!WARN_ON(!group->owner_cnt)) + if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) group->owner_cnt--; mutex_unlock(&group->mutex); @@ -3148,40 +3124,49 @@ static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) return 0; } +static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) +{ + int ret; + + if ((group->domain && group->domain != group->default_domain) || + !xa_empty(&group->pasid_array)) + return -EBUSY; + + ret = __iommu_group_alloc_blocking_domain(group); + if (ret) + return ret; + ret = __iommu_group_set_domain(group, group->blocking_domain); + if (ret) 
+		return ret;
+
+	group->owner = owner;
+	group->owner_cnt++;
+	return 0;
+}
+
 /**
  * iommu_group_claim_dma_owner() - Set DMA ownership of a group
  * @group: The group.
  * @owner: Caller specified pointer. Used for exclusive ownership.
  *
- * This is to support backward compatibility for vfio which manages
- * the dma ownership in iommu_group level. New invocations on this
- * interface should be prohibited.
+ * This is to support backward compatibility for vfio which manages the dma
+ * ownership at the iommu_group level. New invocations on this interface
+ * should be prohibited. Only a single owner may exist for a group.
  */
 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
 {
 	int ret = 0;
 
+	if (WARN_ON(!owner))
+		return -EINVAL;
+
 	mutex_lock(&group->mutex);
 	if (group->owner_cnt) {
 		ret = -EPERM;
 		goto unlock_out;
-	} else {
-		if (group->domain && group->domain != group->default_domain) {
-			ret = -EBUSY;
-			goto unlock_out;
-		}
-
-		ret = __iommu_group_alloc_blocking_domain(group);
-		if (ret)
-			goto unlock_out;
-
-		ret = __iommu_group_set_domain(group, group->blocking_domain);
-		if (ret)
-			goto unlock_out;
-		group->owner = owner;
 	}
-	group->owner_cnt++;
+	ret = __iommu_take_dma_ownership(group, owner);
 
 unlock_out:
 	mutex_unlock(&group->mutex);
@@ -3190,29 +3175,91 @@ unlock_out:
 EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
 
 /**
- * iommu_group_release_dma_owner() - Release DMA ownership of a group
- * @group: The group.
+ * iommu_device_claim_dma_owner() - Set DMA ownership of a device
+ * @dev: The device.
+ * @owner: Caller specified pointer. Used for exclusive ownership.
  *
- * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
+ * Claim the DMA ownership of a device. Multiple devices in the same group may
+ * concurrently claim ownership if they present the same owner value. Returns 0
+ * on success and error code on failure.
  */
-void iommu_group_release_dma_owner(struct iommu_group *group)
+int iommu_device_claim_dma_owner(struct device *dev, void *owner)
 {
-	int ret;
+	struct iommu_group *group = iommu_group_get(dev);
+	int ret = 0;
+
+	if (!group)
+		return -ENODEV;
+	if (WARN_ON(!owner))
+		return -EINVAL;
 
 	mutex_lock(&group->mutex);
-	if (WARN_ON(!group->owner_cnt || !group->owner))
+	if (group->owner_cnt) {
+		if (group->owner != owner) {
+			ret = -EPERM;
+			goto unlock_out;
+		}
+		group->owner_cnt++;
 		goto unlock_out;
+	}
+
+	ret = __iommu_take_dma_ownership(group, owner);
+unlock_out:
+	mutex_unlock(&group->mutex);
+	iommu_group_put(group);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner);
+
+static void __iommu_release_dma_ownership(struct iommu_group *group)
+{
+	int ret;
+
+	if (WARN_ON(!group->owner_cnt || !group->owner ||
+		    !xa_empty(&group->pasid_array)))
+		return;
 
 	group->owner_cnt = 0;
 	group->owner = NULL;
 	ret = __iommu_group_set_domain(group, group->default_domain);
 	WARN(ret, "iommu driver failed to attach the default domain");
+}
 
-unlock_out:
+/**
+ * iommu_group_release_dma_owner() - Release DMA ownership of a group
+ * @group: The group.
+ *
+ * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
+ */
+void iommu_group_release_dma_owner(struct iommu_group *group)
+{
+	mutex_lock(&group->mutex);
+	__iommu_release_dma_ownership(group);
 	mutex_unlock(&group->mutex);
 }
 EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
 
+/**
+ * iommu_device_release_dma_owner() - Release DMA ownership of a device
+ * @dev: The device.
+ *
+ * Release the DMA ownership claimed by iommu_device_claim_dma_owner().
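+ *
+ * Pairs with iommu_device_claim_dma_owner(). As a sketch, iommufd later in
+ * this patch uses the pair like this:
+ *
+ *	rc = iommu_device_claim_dma_owner(dev, ictx);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	iommu_device_release_dma_owner(dev);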
+ */ +void iommu_device_release_dma_owner(struct device *dev) +{ + struct iommu_group *group = iommu_group_get(dev); + + mutex_lock(&group->mutex); + if (group->owner_cnt > 1) + group->owner_cnt--; + else + __iommu_release_dma_ownership(group); + mutex_unlock(&group->mutex); + iommu_group_put(group); +} +EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner); + /** * iommu_group_dma_owner_claimed() - Query group dma ownership status * @group: The group. @@ -3231,3 +3278,150 @@ bool iommu_group_dma_owner_claimed(struct iommu_group *group) return user; } EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); + +static int __iommu_set_group_pasid(struct iommu_domain *domain, + struct iommu_group *group, ioasid_t pasid) +{ + struct group_device *device; + int ret = 0; + + list_for_each_entry(device, &group->devices, list) { + ret = domain->ops->set_dev_pasid(domain, device->dev, pasid); + if (ret) + break; + } + + return ret; +} + +static void __iommu_remove_group_pasid(struct iommu_group *group, + ioasid_t pasid) +{ + struct group_device *device; + const struct iommu_ops *ops; + + list_for_each_entry(device, &group->devices, list) { + ops = dev_iommu_ops(device->dev); + ops->remove_dev_pasid(device->dev, pasid); + } +} + +/* + * iommu_attach_device_pasid() - Attach a domain to pasid of device + * @domain: the iommu domain. + * @dev: the attached device. + * @pasid: the pasid of the device. + * + * Return: 0 on success, or an error. + */ +int iommu_attach_device_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid) +{ + struct iommu_group *group; + void *curr; + int ret; + + if (!domain->ops->set_dev_pasid) + return -EOPNOTSUPP; + + group = iommu_group_get(dev); + if (!group) + return -ENODEV; + + mutex_lock(&group->mutex); + curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL); + if (curr) { + ret = xa_err(curr) ? : -EBUSY; + goto out_unlock; + } + + ret = __iommu_set_group_pasid(domain, group, pasid); + if (ret) { + __iommu_remove_group_pasid(group, pasid); + xa_erase(&group->pasid_array, pasid); + } +out_unlock: + mutex_unlock(&group->mutex); + iommu_group_put(group); + + return ret; +} +EXPORT_SYMBOL_GPL(iommu_attach_device_pasid); + +/* + * iommu_detach_device_pasid() - Detach the domain from pasid of device + * @domain: the iommu domain. + * @dev: the attached device. + * @pasid: the pasid of the device. + * + * The @domain must have been attached to @pasid of the @dev with + * iommu_attach_device_pasid(). + */ +void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, + ioasid_t pasid) +{ + struct iommu_group *group = iommu_group_get(dev); + + mutex_lock(&group->mutex); + __iommu_remove_group_pasid(group, pasid); + WARN_ON(xa_erase(&group->pasid_array, pasid) != domain); + mutex_unlock(&group->mutex); + + iommu_group_put(group); +} +EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); + +/* + * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev + * @dev: the queried device + * @pasid: the pasid of the device + * @type: matched domain type, 0 for any match + * + * This is a variant of iommu_get_domain_for_dev(). It returns the existing + * domain attached to pasid of a device. Callers must hold a lock around this + * function, and both iommu_attach/detach_dev_pasid() whenever a domain of + * type is being manipulated. This API does not internally resolve races with + * attach/detach. + * + * Return: attached domain on success, NULL otherwise. 
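+ *
+ * For example, the I/O page fault path added by this patch looks the domain
+ * up by PASID before invoking its fault handler:
+ *
+ *	domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
+ *	if (!domain || !domain->iopf_handler)
+ *		status = IOMMU_PAGE_RESP_INVALID;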
+ */ +struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev, + ioasid_t pasid, + unsigned int type) +{ + struct iommu_domain *domain; + struct iommu_group *group; + + group = iommu_group_get(dev); + if (!group) + return NULL; + + xa_lock(&group->pasid_array); + domain = xa_load(&group->pasid_array, pasid); + if (type && domain && domain->type != type) + domain = ERR_PTR(-EBUSY); + xa_unlock(&group->pasid_array); + iommu_group_put(group); + + return domain; +} +EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid); + +struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, + struct mm_struct *mm) +{ + const struct iommu_ops *ops = dev_iommu_ops(dev); + struct iommu_domain *domain; + + domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); + if (!domain) + return NULL; + + domain->type = IOMMU_DOMAIN_SVA; + mmgrab(mm); + domain->mm = mm; + domain->iopf_handler = iommu_sva_handle_iopf; + domain->fault_data = mm; + + return domain; +} diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..8306616b6d819802f1c8f8c1807d35506316bca6 --- /dev/null +++ b/drivers/iommu/iommufd/Kconfig @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: GPL-2.0-only +config IOMMUFD + tristate "IOMMU Userspace API" + select INTERVAL_TREE + select INTERVAL_TREE_SPAN_ITER + select IOMMU_API + default n + help + Provides /dev/iommu, the user API to control the IOMMU subsystem as + it relates to managing IO page tables that point at user space memory. + + If you don't know what to do here, say N. + +if IOMMUFD +config IOMMUFD_VFIO_CONTAINER + bool "IOMMUFD provides the VFIO container /dev/vfio/vfio" + depends on VFIO && !VFIO_CONTAINER + default VFIO && !VFIO_CONTAINER + help + IOMMUFD will provide /dev/vfio/vfio instead of VFIO. This relies on + IOMMUFD providing compatibility emulation to give the same ioctls. + It provides an option to build a kernel with legacy VFIO components + removed. + + IOMMUFD VFIO container emulation is known to lack certain features + of the native VFIO container, such as no-IOMMU support, peer-to-peer + DMA mapping, PPC IOMMU support, as well as other potentially + undiscovered gaps. This option is currently intended for the + purpose of testing IOMMUFD with unmodified userspace supporting VFIO + and making use of the Type1 VFIO IOMMU backend. General purpose + enabling of this option is currently discouraged. + + Unless testing IOMMUFD, say N here. 
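+# An illustrative .config fragment for testing the VFIO compatibility mode
+# (not part of this patch; IOMMUFD_VFIO_CONTAINER additionally requires
+# CONFIG_VFIO=y and CONFIG_VFIO_CONTAINER=n):
+#
+#   CONFIG_IOMMUFD=y
+#   CONFIG_IOMMUFD_VFIO_CONTAINER=y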
+ +config IOMMUFD_TEST + bool "IOMMU Userspace API Test support" + depends on DEBUG_KERNEL + depends on FAULT_INJECTION + depends on RUNTIME_TESTING_MENU + default n + help + This is dangerous, do not enable unless running + tools/testing/selftests/iommu +endif diff --git a/drivers/iommu/iommufd/Makefile b/drivers/iommu/iommufd/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..8aeba81800c512dc9e9eb1c32b3240080b503db1 --- /dev/null +++ b/drivers/iommu/iommufd/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only +iommufd-y := \ + device.o \ + hw_pagetable.o \ + io_pagetable.o \ + ioas.o \ + main.o \ + pages.o \ + vfio_compat.o + +iommufd-$(CONFIG_IOMMUFD_TEST) += selftest.o + +obj-$(CONFIG_IOMMUFD) += iommufd.o diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c new file mode 100644 index 0000000000000000000000000000000000000000..d81f93a321afcb7a79e5e69cb755ff008ab96f6b --- /dev/null +++ b/drivers/iommu/iommufd/device.c @@ -0,0 +1,772 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES + */ +#include +#include +#include +#include + +#include "io_pagetable.h" +#include "iommufd_private.h" + +static bool allow_unsafe_interrupts; +module_param(allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC( + allow_unsafe_interrupts, + "Allow IOMMUFD to bind to devices even if the platform cannot isolate " + "the MSI interrupt window. Enabling this is a security weakness."); + +/* + * A iommufd_device object represents the binding relationship between a + * consuming driver and the iommufd. These objects are created/destroyed by + * external drivers, not by userspace. + */ +struct iommufd_device { + struct iommufd_object obj; + struct iommufd_ctx *ictx; + struct iommufd_hw_pagetable *hwpt; + /* Head at iommufd_hw_pagetable::devices */ + struct list_head devices_item; + /* always the physical device */ + struct device *dev; + struct iommu_group *group; + bool enforce_cache_coherency; +}; + +void iommufd_device_destroy(struct iommufd_object *obj) +{ + struct iommufd_device *idev = + container_of(obj, struct iommufd_device, obj); + + iommu_device_release_dma_owner(idev->dev); + iommu_group_put(idev->group); + iommufd_ctx_put(idev->ictx); +} + +/** + * iommufd_device_bind - Bind a physical device to an iommu fd + * @ictx: iommufd file descriptor + * @dev: Pointer to a physical device struct + * @id: Output ID number to return to userspace for this device + * + * A successful bind establishes an ownership over the device and returns + * struct iommufd_device pointer, otherwise returns error pointer. + * + * A driver using this API must set driver_managed_dma and must not touch + * the device until this routine succeeds and establishes ownership. + * + * Binding a PCI device places the entire RID under iommufd control. + * + * The caller must undo this with iommufd_device_unbind() + */ +struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx, + struct device *dev, u32 *id) +{ + struct iommufd_device *idev; + struct iommu_group *group; + int rc; + + /* + * iommufd always sets IOMMU_CACHE because we offer no way for userspace + * to restore cache coherency. 
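+	 * Binding therefore fails below with -EINVAL for devices that
+	 * cannot provide coherent DMA.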
+ */ + if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) + return ERR_PTR(-EINVAL); + + group = iommu_group_get(dev); + if (!group) + return ERR_PTR(-ENODEV); + + rc = iommu_device_claim_dma_owner(dev, ictx); + if (rc) + goto out_group_put; + + idev = iommufd_object_alloc(ictx, idev, IOMMUFD_OBJ_DEVICE); + if (IS_ERR(idev)) { + rc = PTR_ERR(idev); + goto out_release_owner; + } + idev->ictx = ictx; + iommufd_ctx_get(ictx); + idev->dev = dev; + idev->enforce_cache_coherency = + device_iommu_capable(dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY); + /* The calling driver is a user until iommufd_device_unbind() */ + refcount_inc(&idev->obj.users); + /* group refcount moves into iommufd_device */ + idev->group = group; + + /* + * If the caller fails after this success it must call + * iommufd_unbind_device() which is safe since we hold this refcount. + * This also means the device is a leaf in the graph and no other object + * can take a reference on it. + */ + iommufd_object_finalize(ictx, &idev->obj); + *id = idev->obj.id; + return idev; + +out_release_owner: + iommu_device_release_dma_owner(dev); +out_group_put: + iommu_group_put(group); + return ERR_PTR(rc); +} +EXPORT_SYMBOL_NS_GPL(iommufd_device_bind, IOMMUFD); + +/** + * iommufd_device_unbind - Undo iommufd_device_bind() + * @idev: Device returned by iommufd_device_bind() + * + * Release the device from iommufd control. The DMA ownership will return back + * to unowned with DMA controlled by the DMA API. This invalidates the + * iommufd_device pointer, other APIs that consume it must not be called + * concurrently. + */ +void iommufd_device_unbind(struct iommufd_device *idev) +{ + bool was_destroyed; + + was_destroyed = iommufd_object_destroy_user(idev->ictx, &idev->obj); + WARN_ON(!was_destroyed); +} +EXPORT_SYMBOL_NS_GPL(iommufd_device_unbind, IOMMUFD); + +static int iommufd_device_setup_msi(struct iommufd_device *idev, + struct iommufd_hw_pagetable *hwpt, + phys_addr_t sw_msi_start) +{ + int rc; + + /* + * If the IOMMU driver gives a IOMMU_RESV_SW_MSI then it is asking us to + * call iommu_get_msi_cookie() on its behalf. This is necessary to setup + * the MSI window so iommu_dma_prepare_msi() can install pages into our + * domain after request_irq(). If it is not done interrupts will not + * work on this domain. + * + * FIXME: This is conceptually broken for iommufd since we want to allow + * userspace to change the domains, eg switch from an identity IOAS to a + * DMA IOAS. There is currently no way to create a MSI window that + * matches what the IRQ layer actually expects in a newly created + * domain. + */ + if (sw_msi_start != PHYS_ADDR_MAX && !hwpt->msi_cookie) { + rc = iommu_get_msi_cookie(hwpt->domain, sw_msi_start); + if (rc) + return rc; + + /* + * iommu_get_msi_cookie() can only be called once per domain, + * it returns -EBUSY on later calls. + */ + hwpt->msi_cookie = true; + } + + /* + * For historical compat with VFIO the insecure interrupt path is + * allowed if the module parameter is set. Insecure means that a MemWr + * operation from the device (eg a simple DMA) cannot trigger an + * interrupt outside this iommufd context. + */ + if (!device_iommu_capable(idev->dev, IOMMU_CAP_INTR_REMAP) && + !irq_domain_check_msi_remap()) { + if (!allow_unsafe_interrupts) + return -EPERM; + + dev_warn( + idev->dev, + "MSI interrupts are not secure, they cannot be isolated by the platform. " + "Check that platform features like interrupt remapping are enabled. 
" + "Use the \"allow_unsafe_interrupts\" module parameter to override\n"); + } + return 0; +} + +static bool iommufd_hw_pagetable_has_group(struct iommufd_hw_pagetable *hwpt, + struct iommu_group *group) +{ + struct iommufd_device *cur_dev; + + list_for_each_entry(cur_dev, &hwpt->devices, devices_item) + if (cur_dev->group == group) + return true; + return false; +} + +static int iommufd_device_do_attach(struct iommufd_device *idev, + struct iommufd_hw_pagetable *hwpt) +{ + phys_addr_t sw_msi_start = PHYS_ADDR_MAX; + int rc; + + mutex_lock(&hwpt->devices_lock); + + /* + * Try to upgrade the domain we have, it is an iommu driver bug to + * report IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail + * enforce_cache_coherency when there are no devices attached to the + * domain. + */ + if (idev->enforce_cache_coherency && !hwpt->enforce_cache_coherency) { + if (hwpt->domain->ops->enforce_cache_coherency) + hwpt->enforce_cache_coherency = + hwpt->domain->ops->enforce_cache_coherency( + hwpt->domain); + if (!hwpt->enforce_cache_coherency) { + WARN_ON(list_empty(&hwpt->devices)); + rc = -EINVAL; + goto out_unlock; + } + } + + rc = iopt_table_enforce_group_resv_regions(&hwpt->ioas->iopt, idev->dev, + idev->group, &sw_msi_start); + if (rc) + goto out_unlock; + + rc = iommufd_device_setup_msi(idev, hwpt, sw_msi_start); + if (rc) + goto out_iova; + + /* + * FIXME: Hack around missing a device-centric iommu api, only attach to + * the group once for the first device that is in the group. + */ + if (!iommufd_hw_pagetable_has_group(hwpt, idev->group)) { + rc = iommu_attach_group(hwpt->domain, idev->group); + if (rc) + goto out_iova; + + if (list_empty(&hwpt->devices)) { + rc = iopt_table_add_domain(&hwpt->ioas->iopt, + hwpt->domain); + if (rc) + goto out_detach; + } + } + + idev->hwpt = hwpt; + refcount_inc(&hwpt->obj.users); + list_add(&idev->devices_item, &hwpt->devices); + mutex_unlock(&hwpt->devices_lock); + return 0; + +out_detach: + iommu_detach_group(hwpt->domain, idev->group); +out_iova: + iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev); +out_unlock: + mutex_unlock(&hwpt->devices_lock); + return rc; +} + +/* + * When automatically managing the domains we search for a compatible domain in + * the iopt and if one is found use it, otherwise create a new domain. + * Automatic domain selection will never pick a manually created domain. + */ +static int iommufd_device_auto_get_domain(struct iommufd_device *idev, + struct iommufd_ioas *ioas) +{ + struct iommufd_hw_pagetable *hwpt; + int rc; + + /* + * There is no differentiation when domains are allocated, so any domain + * that is willing to attach to the device is interchangeable with any + * other. + */ + mutex_lock(&ioas->mutex); + list_for_each_entry(hwpt, &ioas->hwpt_list, hwpt_item) { + if (!hwpt->auto_domain) + continue; + + rc = iommufd_device_do_attach(idev, hwpt); + + /* + * -EINVAL means the domain is incompatible with the device. + * Other error codes should propagate to userspace as failure. + * Success means the domain is attached. 
+		 */
+		if (rc == -EINVAL)
+			continue;
+		goto out_unlock;
+	}
+
+	hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev->dev);
+	if (IS_ERR(hwpt)) {
+		rc = PTR_ERR(hwpt);
+		goto out_unlock;
+	}
+	hwpt->auto_domain = true;
+
+	rc = iommufd_device_do_attach(idev, hwpt);
+	if (rc)
+		goto out_abort;
+	list_add_tail(&hwpt->hwpt_item, &ioas->hwpt_list);
+
+	mutex_unlock(&ioas->mutex);
+	iommufd_object_finalize(idev->ictx, &hwpt->obj);
+	return 0;
+
+out_abort:
+	iommufd_object_abort_and_destroy(idev->ictx, &hwpt->obj);
+out_unlock:
+	mutex_unlock(&ioas->mutex);
+	return rc;
+}
+
+/**
+ * iommufd_device_attach - Connect a device to an iommu_domain
+ * @idev: device to attach
+ * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE
+ *         Output the IOMMUFD_OBJ_HW_PAGETABLE ID
+ *
+ * This connects the device to an iommu_domain, either automatically or
+ * manually selected. Once this completes the device can do DMA.
+ *
+ * The caller should return the resulting pt_id back to userspace.
+ * This function is undone by calling iommufd_device_detach().
+ */
+int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
+{
+	struct iommufd_object *pt_obj;
+	int rc;
+
+	pt_obj = iommufd_get_object(idev->ictx, *pt_id, IOMMUFD_OBJ_ANY);
+	if (IS_ERR(pt_obj))
+		return PTR_ERR(pt_obj);
+
+	switch (pt_obj->type) {
+	case IOMMUFD_OBJ_HW_PAGETABLE: {
+		struct iommufd_hw_pagetable *hwpt =
+			container_of(pt_obj, struct iommufd_hw_pagetable, obj);
+
+		rc = iommufd_device_do_attach(idev, hwpt);
+		if (rc)
+			goto out_put_pt_obj;
+
+		mutex_lock(&hwpt->ioas->mutex);
+		list_add_tail(&hwpt->hwpt_item, &hwpt->ioas->hwpt_list);
+		mutex_unlock(&hwpt->ioas->mutex);
+		break;
+	}
+	case IOMMUFD_OBJ_IOAS: {
+		struct iommufd_ioas *ioas =
+			container_of(pt_obj, struct iommufd_ioas, obj);
+
+		rc = iommufd_device_auto_get_domain(idev, ioas);
+		if (rc)
+			goto out_put_pt_obj;
+		break;
+	}
+	default:
+		rc = -EINVAL;
+		goto out_put_pt_obj;
+	}
+
+	refcount_inc(&idev->obj.users);
+	*pt_id = idev->hwpt->obj.id;
+	rc = 0;
+
+out_put_pt_obj:
+	iommufd_put_object(pt_obj);
+	return rc;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_device_attach, IOMMUFD);
+
+/**
+ * iommufd_device_detach - Disconnect a device from an iommu_domain
+ * @idev: device to detach
+ *
+ * Undo iommufd_device_attach(). This disconnects the idev from the previously
+ * attached pt_id. The device returns to blocked DMA translation.
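+ *
+ * A minimal calling sketch (illustrative only; error handling elided):
+ *
+ *	idev = iommufd_device_bind(ictx, dev, &dev_id);
+ *	rc = iommufd_device_attach(idev, &pt_id);
+ *	...
+ *	iommufd_device_detach(idev);
+ *	iommufd_device_unbind(idev);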
+ */ +void iommufd_device_detach(struct iommufd_device *idev) +{ + struct iommufd_hw_pagetable *hwpt = idev->hwpt; + + mutex_lock(&hwpt->ioas->mutex); + mutex_lock(&hwpt->devices_lock); + list_del(&idev->devices_item); + if (!iommufd_hw_pagetable_has_group(hwpt, idev->group)) { + if (list_empty(&hwpt->devices)) { + iopt_table_remove_domain(&hwpt->ioas->iopt, + hwpt->domain); + list_del(&hwpt->hwpt_item); + } + iommu_detach_group(hwpt->domain, idev->group); + } + iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev); + mutex_unlock(&hwpt->devices_lock); + mutex_unlock(&hwpt->ioas->mutex); + + if (hwpt->auto_domain) + iommufd_object_destroy_user(idev->ictx, &hwpt->obj); + else + refcount_dec(&hwpt->obj.users); + + idev->hwpt = NULL; + + refcount_dec(&idev->obj.users); +} +EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, IOMMUFD); + +void iommufd_access_destroy_object(struct iommufd_object *obj) +{ + struct iommufd_access *access = + container_of(obj, struct iommufd_access, obj); + + iopt_remove_access(&access->ioas->iopt, access); + iommufd_ctx_put(access->ictx); + refcount_dec(&access->ioas->obj.users); +} + +/** + * iommufd_access_create - Create an iommufd_access + * @ictx: iommufd file descriptor + * @ioas_id: ID for a IOMMUFD_OBJ_IOAS + * @ops: Driver's ops to associate with the access + * @data: Opaque data to pass into ops functions + * + * An iommufd_access allows a driver to read/write to the IOAS without using + * DMA. The underlying CPU memory can be accessed using the + * iommufd_access_pin_pages() or iommufd_access_rw() functions. + * + * The provided ops are required to use iommufd_access_pin_pages(). + */ +struct iommufd_access * +iommufd_access_create(struct iommufd_ctx *ictx, u32 ioas_id, + const struct iommufd_access_ops *ops, void *data) +{ + struct iommufd_access *access; + struct iommufd_object *obj; + int rc; + + /* + * There is no uAPI for the access object, but to keep things symmetric + * use the object infrastructure anyhow. + */ + access = iommufd_object_alloc(ictx, access, IOMMUFD_OBJ_ACCESS); + if (IS_ERR(access)) + return access; + + access->data = data; + access->ops = ops; + + obj = iommufd_get_object(ictx, ioas_id, IOMMUFD_OBJ_IOAS); + if (IS_ERR(obj)) { + rc = PTR_ERR(obj); + goto out_abort; + } + access->ioas = container_of(obj, struct iommufd_ioas, obj); + iommufd_ref_to_users(obj); + + if (ops->needs_pin_pages) + access->iova_alignment = PAGE_SIZE; + else + access->iova_alignment = 1; + rc = iopt_add_access(&access->ioas->iopt, access); + if (rc) + goto out_put_ioas; + + /* The calling driver is a user until iommufd_access_destroy() */ + refcount_inc(&access->obj.users); + access->ictx = ictx; + iommufd_ctx_get(ictx); + iommufd_object_finalize(ictx, &access->obj); + return access; +out_put_ioas: + refcount_dec(&access->ioas->obj.users); +out_abort: + iommufd_object_abort(ictx, &access->obj); + return ERR_PTR(rc); +} +EXPORT_SYMBOL_NS_GPL(iommufd_access_create, IOMMUFD); + +/** + * iommufd_access_destroy - Destroy an iommufd_access + * @access: The access to destroy + * + * The caller must stop using the access before destroying it. 
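+ *
+ * Pairs with iommufd_access_create(). A sketch, where my_ops and my_data
+ * stand in for a hypothetical caller's ops and cookie:
+ *
+ *	access = iommufd_access_create(ictx, ioas_id, &my_ops, my_data);
+ *	...
+ *	iommufd_access_destroy(access);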
+ */
+void iommufd_access_destroy(struct iommufd_access *access)
+{
+	bool was_destroyed;
+
+	was_destroyed = iommufd_object_destroy_user(access->ictx, &access->obj);
+	WARN_ON(!was_destroyed);
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD);
+
+/**
+ * iommufd_access_notify_unmap - Notify users of an iopt to stop using it
+ * @iopt: iopt to work on
+ * @iova: Starting iova in the iopt
+ * @length: Number of bytes
+ *
+ * After this function returns there should be no users attached to the pages
+ * linked to this iopt that intersect with iova,length. Anyone that has attached
+ * a user through iopt_access_pages() needs to detach it through
+ * iommufd_access_unpin_pages() before this function returns.
+ *
+ * iommufd_access_destroy() will wait for any outstanding unmap callback to
+ * complete. Once iommufd_access_destroy() returns, no unmap ops are running or
+ * will run in the future. Due to this a driver must not create locking that
+ * prevents unmap from completing while iommufd_access_destroy() is running.
+ */
+void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
+				 unsigned long length)
+{
+	struct iommufd_ioas *ioas =
+		container_of(iopt, struct iommufd_ioas, iopt);
+	struct iommufd_access *access;
+	unsigned long index;
+
+	xa_lock(&ioas->iopt.access_list);
+	xa_for_each(&ioas->iopt.access_list, index, access) {
+		if (!iommufd_lock_obj(&access->obj))
+			continue;
+		xa_unlock(&ioas->iopt.access_list);
+
+		access->ops->unmap(access->data, iova, length);
+
+		iommufd_put_object(&access->obj);
+		xa_lock(&ioas->iopt.access_list);
+	}
+	xa_unlock(&ioas->iopt.access_list);
+}
+
+/**
+ * iommufd_access_unpin_pages() - Undo iommufd_access_pin_pages
+ * @access: IOAS access to act on
+ * @iova: Starting IOVA
+ * @length: Number of bytes to access
+ *
+ * Return the pinned struct pages. The caller must stop accessing them before
+ * calling this. The iova/length must exactly match the values provided to
+ * iommufd_access_pin_pages().
+ */
+void iommufd_access_unpin_pages(struct iommufd_access *access,
+				unsigned long iova, unsigned long length)
+{
+	struct io_pagetable *iopt = &access->ioas->iopt;
+	struct iopt_area_contig_iter iter;
+	unsigned long last_iova;
+	struct iopt_area *area;
+
+	if (WARN_ON(!length) ||
+	    WARN_ON(check_add_overflow(iova, length - 1, &last_iova)))
+		return;
+
+	down_read(&iopt->iova_rwsem);
+	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
+		iopt_area_remove_access(
+			area, iopt_area_iova_to_index(area, iter.cur_iova),
+			iopt_area_iova_to_index(
+				area,
+				min(last_iova, iopt_area_last_iova(area))));
+	up_read(&iopt->iova_rwsem);
+	WARN_ON(!iopt_area_contig_done(&iter));
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_unpin_pages, IOMMUFD);
+
+static bool iopt_area_contig_is_aligned(struct iopt_area_contig_iter *iter)
+{
+	if (iopt_area_start_byte(iter->area, iter->cur_iova) % PAGE_SIZE)
+		return false;
+
+	if (!iopt_area_contig_done(iter) &&
+	    (iopt_area_start_byte(iter->area, iopt_area_last_iova(iter->area)) %
+	     PAGE_SIZE) != (PAGE_SIZE - 1))
+		return false;
+	return true;
+}
+
+static bool check_area_prot(struct iopt_area *area, unsigned int flags)
+{
+	if (flags & IOMMUFD_ACCESS_RW_WRITE)
+		return area->iommu_prot & IOMMU_WRITE;
+	return area->iommu_prot & IOMMU_READ;
+}
+
+/**
+ * iommufd_access_pin_pages() - Return a list of pages under the iova
+ * @access: IOAS access to act on
+ * @iova: Starting IOVA
+ * @length: Number of bytes to access
+ * @out_pages: Output page list
+ * @flags: IOMMUFD_ACCESS_RW_* flags
+ *
+ * Reads @length bytes starting at iova and returns the struct page * pointers.
+ * These can be kmap'd by the caller for CPU access.
+ *
+ * The caller must perform iommufd_access_unpin_pages() when done to balance
+ * this.
+ *
+ * This API always requires a page aligned iova. This happens naturally if the
+ * ioas alignment is >= PAGE_SIZE and the iova is PAGE_SIZE aligned. However
+ * smaller alignments have corner cases where this API can fail on otherwise
+ * aligned iova.
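+ *
+ * A sketch of a hypothetical caller (not part of this patch):
+ *
+ *	struct page *pages[16];
+ *
+ *	rc = iommufd_access_pin_pages(access, iova, 16 * PAGE_SIZE, pages,
+ *				      IOMMUFD_ACCESS_RW_WRITE);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	iommufd_access_unpin_pages(access, iova, 16 * PAGE_SIZE);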
+ */ +int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova, + unsigned long length, struct page **out_pages, + unsigned int flags) +{ + struct io_pagetable *iopt = &access->ioas->iopt; + struct iopt_area_contig_iter iter; + unsigned long last_iova; + struct iopt_area *area; + int rc; + + /* Driver's ops don't support pin_pages */ + if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && + WARN_ON(access->iova_alignment != PAGE_SIZE || !access->ops->unmap)) + return -EINVAL; + + if (!length) + return -EINVAL; + if (check_add_overflow(iova, length - 1, &last_iova)) + return -EOVERFLOW; + + down_read(&iopt->iova_rwsem); + iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) { + unsigned long last = min(last_iova, iopt_area_last_iova(area)); + unsigned long last_index = iopt_area_iova_to_index(area, last); + unsigned long index = + iopt_area_iova_to_index(area, iter.cur_iova); + + if (area->prevent_access || + !iopt_area_contig_is_aligned(&iter)) { + rc = -EINVAL; + goto err_remove; + } + + if (!check_area_prot(area, flags)) { + rc = -EPERM; + goto err_remove; + } + + rc = iopt_area_add_access(area, index, last_index, out_pages, + flags); + if (rc) + goto err_remove; + out_pages += last_index - index + 1; + } + if (!iopt_area_contig_done(&iter)) { + rc = -ENOENT; + goto err_remove; + } + + up_read(&iopt->iova_rwsem); + return 0; + +err_remove: + if (iova < iter.cur_iova) { + last_iova = iter.cur_iova - 1; + iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) + iopt_area_remove_access( + area, + iopt_area_iova_to_index(area, iter.cur_iova), + iopt_area_iova_to_index( + area, min(last_iova, + iopt_area_last_iova(area)))); + } + up_read(&iopt->iova_rwsem); + return rc; +} +EXPORT_SYMBOL_NS_GPL(iommufd_access_pin_pages, IOMMUFD); + +/** + * iommufd_access_rw - Read or write data under the iova + * @access: IOAS access to act on + * @iova: Starting IOVA + * @data: Kernel buffer to copy to/from + * @length: Number of bytes to access + * @flags: IOMMUFD_ACCESS_RW_* flags + * + * Copy kernel to/from data into the range given by IOVA/length. If flags + * indicates IOMMUFD_ACCESS_RW_KTHREAD then a large copy can be optimized + * by changing it into copy_to/from_user(). + */ +int iommufd_access_rw(struct iommufd_access *access, unsigned long iova, + void *data, size_t length, unsigned int flags) +{ + struct io_pagetable *iopt = &access->ioas->iopt; + struct iopt_area_contig_iter iter; + struct iopt_area *area; + unsigned long last_iova; + int rc; + + if (!length) + return -EINVAL; + if (check_add_overflow(iova, length - 1, &last_iova)) + return -EOVERFLOW; + + down_read(&iopt->iova_rwsem); + iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) { + unsigned long last = min(last_iova, iopt_area_last_iova(area)); + unsigned long bytes = (last - iter.cur_iova) + 1; + + if (area->prevent_access) { + rc = -EINVAL; + goto err_out; + } + + if (!check_area_prot(area, flags)) { + rc = -EPERM; + goto err_out; + } + + rc = iopt_pages_rw_access( + area->pages, iopt_area_start_byte(area, iter.cur_iova), + data, bytes, flags); + if (rc) + goto err_out; + data += bytes; + } + if (!iopt_area_contig_done(&iter)) + rc = -ENOENT; +err_out: + up_read(&iopt->iova_rwsem); + return rc; +} +EXPORT_SYMBOL_NS_GPL(iommufd_access_rw, IOMMUFD); + +#ifdef CONFIG_IOMMUFD_TEST +/* + * Creating a real iommufd_device is too hard, bypass creating a iommufd_device + * and go directly to attaching a domain. 
+ */ +struct iommufd_hw_pagetable * +iommufd_device_selftest_attach(struct iommufd_ctx *ictx, + struct iommufd_ioas *ioas, + struct device *mock_dev) +{ + struct iommufd_hw_pagetable *hwpt; + int rc; + + hwpt = iommufd_hw_pagetable_alloc(ictx, ioas, mock_dev); + if (IS_ERR(hwpt)) + return hwpt; + + rc = iopt_table_add_domain(&hwpt->ioas->iopt, hwpt->domain); + if (rc) + goto out_hwpt; + + refcount_inc(&hwpt->obj.users); + iommufd_object_finalize(ictx, &hwpt->obj); + return hwpt; + +out_hwpt: + iommufd_object_abort_and_destroy(ictx, &hwpt->obj); + return ERR_PTR(rc); +} + +void iommufd_device_selftest_detach(struct iommufd_ctx *ictx, + struct iommufd_hw_pagetable *hwpt) +{ + iopt_table_remove_domain(&hwpt->ioas->iopt, hwpt->domain); + refcount_dec(&hwpt->obj.users); +} +#endif diff --git a/drivers/iommu/iommufd/double_span.h b/drivers/iommu/iommufd/double_span.h new file mode 100644 index 0000000000000000000000000000000000000000..b37aab7488c0faa39fefee35c7aacaea32a0faf0 --- /dev/null +++ b/drivers/iommu/iommufd/double_span.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. + */ +#ifndef __IOMMUFD_DOUBLE_SPAN_H +#define __IOMMUFD_DOUBLE_SPAN_H + +#include + +/* + * This is a variation of the general interval_tree_span_iter that computes the + * spans over the union of two different interval trees. Used ranges are broken + * up and reported based on the tree that provides the interval. The first span + * always takes priority. Like interval_tree_span_iter it is greedy and the same + * value of is_used will not repeat on two iteration cycles. + */ +struct interval_tree_double_span_iter { + struct rb_root_cached *itrees[2]; + struct interval_tree_span_iter spans[2]; + union { + unsigned long start_hole; + unsigned long start_used; + }; + union { + unsigned long last_hole; + unsigned long last_used; + }; + /* 0 = hole, 1 = used span[0], 2 = used span[1], -1 done iteration */ + int is_used; +}; + +void interval_tree_double_span_iter_update( + struct interval_tree_double_span_iter *iter); +void interval_tree_double_span_iter_first( + struct interval_tree_double_span_iter *iter, + struct rb_root_cached *itree1, struct rb_root_cached *itree2, + unsigned long first_index, unsigned long last_index); +void interval_tree_double_span_iter_next( + struct interval_tree_double_span_iter *iter); + +static inline bool +interval_tree_double_span_iter_done(struct interval_tree_double_span_iter *state) +{ + return state->is_used == -1; +} + +#define interval_tree_for_each_double_span(span, itree1, itree2, first_index, \ + last_index) \ + for (interval_tree_double_span_iter_first(span, itree1, itree2, \ + first_index, last_index); \ + !interval_tree_double_span_iter_done(span); \ + interval_tree_double_span_iter_next(span)) + +#endif diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c new file mode 100644 index 0000000000000000000000000000000000000000..43d473989a0667701b93697f1efa9955e1555c1f --- /dev/null +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES + */ +#include + +#include "iommufd_private.h" + +void iommufd_hw_pagetable_destroy(struct iommufd_object *obj) +{ + struct iommufd_hw_pagetable *hwpt = + container_of(obj, struct iommufd_hw_pagetable, obj); + + WARN_ON(!list_empty(&hwpt->devices)); + + iommu_domain_free(hwpt->domain); + refcount_dec(&hwpt->ioas->obj.users); + 
+ mutex_destroy(&hwpt->devices_lock);
+}
+
+/**
+ * iommufd_hw_pagetable_alloc() - Get an iommu_domain for a device
+ * @ictx: iommufd context
+ * @ioas: IOAS to associate the domain with
+ * @dev: Device to get an iommu_domain for
+ *
+ * Allocate a new iommu_domain and return it as a hw_pagetable.
+ */
+struct iommufd_hw_pagetable *
+iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
+ struct device *dev)
+{
+ struct iommufd_hw_pagetable *hwpt;
+ int rc;
+
+ hwpt = iommufd_object_alloc(ictx, hwpt, IOMMUFD_OBJ_HW_PAGETABLE);
+ if (IS_ERR(hwpt))
+ return hwpt;
+
+ hwpt->domain = iommu_domain_alloc(dev->bus);
+ if (!hwpt->domain) {
+ rc = -ENOMEM;
+ goto out_abort;
+ }
+
+ INIT_LIST_HEAD(&hwpt->devices);
+ INIT_LIST_HEAD(&hwpt->hwpt_item);
+ mutex_init(&hwpt->devices_lock);
+ /* Pairs with iommufd_hw_pagetable_destroy() */
+ refcount_inc(&ioas->obj.users);
+ hwpt->ioas = ioas;
+ return hwpt;
+
+out_abort:
+ iommufd_object_abort(ictx, &hwpt->obj);
+ return ERR_PTR(rc);
+}
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
new file mode 100644
index 0000000000000000000000000000000000000000..e0ae72b9e67f86f89ed29ac29c6363f52821c776
--- /dev/null
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -0,0 +1,1216 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
+ *
+ * The io_pagetable is the top of the data structure that maps IOVAs to PFNs.
+ * The PFNs can be placed into an iommu_domain, or returned to the caller as a
+ * page list for access by an in-kernel user.
+ *
+ * The data structure uses the iopt_pages to optimize the storage of the PFNs
+ * between the domains and the xarray.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "io_pagetable.h"
+#include "double_span.h"
+
+struct iopt_pages_list {
+ struct iopt_pages *pages;
+ struct iopt_area *area;
+ struct list_head next;
+ unsigned long start_byte;
+ unsigned long length;
+};
+
+struct iopt_area *iopt_area_contig_init(struct iopt_area_contig_iter *iter,
+ struct io_pagetable *iopt,
+ unsigned long iova,
+ unsigned long last_iova)
+{
+ lockdep_assert_held(&iopt->iova_rwsem);
+
+ iter->cur_iova = iova;
+ iter->last_iova = last_iova;
+ iter->area = iopt_area_iter_first(iopt, iova, iova);
+ if (!iter->area)
+ return NULL;
+ if (!iter->area->pages) {
+ iter->area = NULL;
+ return NULL;
+ }
+ return iter->area;
+}
+
+struct iopt_area *iopt_area_contig_next(struct iopt_area_contig_iter *iter)
+{
+ unsigned long last_iova;
+
+ if (!iter->area)
+ return NULL;
+ last_iova = iopt_area_last_iova(iter->area);
+ if (iter->last_iova <= last_iova)
+ return NULL;
+
+ iter->cur_iova = last_iova + 1;
+ iter->area = iopt_area_iter_next(iter->area, iter->cur_iova,
+ iter->last_iova);
+ if (!iter->area)
+ return NULL;
+ if (iter->cur_iova != iopt_area_iova(iter->area) ||
+ !iter->area->pages) {
+ iter->area = NULL;
+ return NULL;
+ }
+ return iter->area;
+}
+
+static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span,
+ unsigned long length,
+ unsigned long iova_alignment,
+ unsigned long page_offset)
+{
+ if (span->is_used || span->last_hole - span->start_hole < length - 1)
+ return false;
+
+ span->start_hole = ALIGN(span->start_hole, iova_alignment) |
+ page_offset;
+ if (span->start_hole > span->last_hole ||
+ span->last_hole - span->start_hole < length - 1)
+ return false;
+ return true;
+}
+
+static bool __alloc_iova_check_used(struct interval_tree_span_iter *span,
+ unsigned long length,
+ unsigned long iova_alignment,
+ unsigned long page_offset)
+{
+ if (span->is_hole || span->last_used - span->start_used < length - 1)
+ return false;
+
+ span->start_used = ALIGN(span->start_used, iova_alignment) |
+ page_offset;
+ if (span->start_used > span->last_used ||
+ span->last_used - span->start_used < length - 1)
+ return false;
+ return true;
+}
+
+/*
+ * Automatically find a block of IOVA that is not being used and not reserved.
+ * Does not return a 0 IOVA even if it is valid.
+ */
+static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova,
+ unsigned long uptr, unsigned long length)
+{
+ unsigned long page_offset = uptr % PAGE_SIZE;
+ struct interval_tree_double_span_iter used_span;
+ struct interval_tree_span_iter allowed_span;
+ unsigned long iova_alignment;
+
+ lockdep_assert_held(&iopt->iova_rwsem);
+
+ /* Protect roundup_pow_of_two() from overflow */
+ if (length == 0 || length >= ULONG_MAX / 2)
+ return -EOVERFLOW;
+
+ /*
+ * Keep any alignment present in the uptr when building the IOVA; this
+ * increases the chance we can map a THP.
+ */
+ if (!uptr)
+ iova_alignment = roundup_pow_of_two(length);
+ else
+ iova_alignment = min_t(unsigned long,
+ roundup_pow_of_two(length),
+ 1UL << __ffs64(uptr));
+
+ if (iova_alignment < iopt->iova_alignment)
+ return -EINVAL;
+
+ interval_tree_for_each_span(&allowed_span, &iopt->allowed_itree,
+ PAGE_SIZE, ULONG_MAX - PAGE_SIZE) {
+ if (RB_EMPTY_ROOT(&iopt->allowed_itree.rb_root)) {
+ allowed_span.start_used = PAGE_SIZE;
+ allowed_span.last_used = ULONG_MAX - PAGE_SIZE;
+ allowed_span.is_hole = false;
+ }
+
+ if (!__alloc_iova_check_used(&allowed_span, length,
+ iova_alignment, page_offset))
+ continue;
+
+ interval_tree_for_each_double_span(
+ &used_span, &iopt->reserved_itree, &iopt->area_itree,
+ allowed_span.start_used, allowed_span.last_used) {
+ if (!__alloc_iova_check_hole(&used_span, length,
+ iova_alignment,
+ page_offset))
+ continue;
+
+ *iova = used_span.start_hole;
+ return 0;
+ }
+ }
+ return -ENOSPC;
+}
+
+static int iopt_check_iova(struct io_pagetable *iopt, unsigned long iova,
+ unsigned long length)
+{
+ unsigned long last;
+
+ lockdep_assert_held(&iopt->iova_rwsem);
+
+ if ((iova & (iopt->iova_alignment - 1)))
+ return -EINVAL;
+
+ if (check_add_overflow(iova, length - 1, &last))
+ return -EOVERFLOW;
+
+ /* No reserved IOVA intersects the range */
+ if (iopt_reserved_iter_first(iopt, iova, last))
+ return -EINVAL;
+
+ /* Check that there is not already a mapping in the range */
+ if (iopt_area_iter_first(iopt, iova, last))
+ return -EEXIST;
+ return 0;
+}
+
+/*
+ * The area takes a slice of the pages from start_byte to start_byte + length
+ */
+static int iopt_insert_area(struct io_pagetable *iopt, struct iopt_area *area,
+ struct iopt_pages *pages, unsigned long iova,
+ unsigned long start_byte, unsigned long length,
+ int iommu_prot)
+{
+ lockdep_assert_held_write(&iopt->iova_rwsem);
+
+ if ((iommu_prot & IOMMU_WRITE) && !pages->writable)
+ return -EPERM;
+
+ area->iommu_prot = iommu_prot;
+ area->page_offset = start_byte % PAGE_SIZE;
+ if (area->page_offset & (iopt->iova_alignment - 1))
+ return -EINVAL;
+
+ area->node.start = iova;
+ if (check_add_overflow(iova, length - 1, &area->node.last))
+ return -EOVERFLOW;
+
+ area->pages_node.start = start_byte / PAGE_SIZE;
+ if (check_add_overflow(start_byte, length - 1, &area->pages_node.last))
+ return -EOVERFLOW;
+ area->pages_node.last = area->pages_node.last / PAGE_SIZE;
+ if (WARN_ON(area->pages_node.last >= pages->npages))
+ return -EOVERFLOW;
+
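+ /*
+ * Example (hypothetical numbers): with PAGE_SIZE 4096, start_byte
+ * 0x1800 and length 0x3000 the area covers bytes 0x1800-0x47ff of the
+ * iopt_pages, so pages_node spans page indexes 1 to 4 and page_offset
+ * is 0x800.
+ */
+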
/* + * The area is inserted with a NULL pages indicating it is not fully + * initialized yet. + */ + area->iopt = iopt; + interval_tree_insert(&area->node, &iopt->area_itree); + return 0; +} + +static int iopt_alloc_area_pages(struct io_pagetable *iopt, + struct list_head *pages_list, + unsigned long length, unsigned long *dst_iova, + int iommu_prot, unsigned int flags) +{ + struct iopt_pages_list *elm; + unsigned long iova; + int rc = 0; + + list_for_each_entry(elm, pages_list, next) { + elm->area = kzalloc(sizeof(*elm->area), GFP_KERNEL_ACCOUNT); + if (!elm->area) + return -ENOMEM; + } + + down_write(&iopt->iova_rwsem); + if ((length & (iopt->iova_alignment - 1)) || !length) { + rc = -EINVAL; + goto out_unlock; + } + + if (flags & IOPT_ALLOC_IOVA) { + /* Use the first entry to guess the ideal IOVA alignment */ + elm = list_first_entry(pages_list, struct iopt_pages_list, + next); + rc = iopt_alloc_iova( + iopt, dst_iova, + (uintptr_t)elm->pages->uptr + elm->start_byte, length); + if (rc) + goto out_unlock; + if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && + WARN_ON(iopt_check_iova(iopt, *dst_iova, length))) { + rc = -EINVAL; + goto out_unlock; + } + } else { + rc = iopt_check_iova(iopt, *dst_iova, length); + if (rc) + goto out_unlock; + } + + /* + * Areas are created with a NULL pages so that the IOVA space is + * reserved and we can unlock the iova_rwsem. + */ + iova = *dst_iova; + list_for_each_entry(elm, pages_list, next) { + rc = iopt_insert_area(iopt, elm->area, elm->pages, iova, + elm->start_byte, elm->length, iommu_prot); + if (rc) + goto out_unlock; + iova += elm->length; + } + +out_unlock: + up_write(&iopt->iova_rwsem); + return rc; +} + +static void iopt_abort_area(struct iopt_area *area) +{ + if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) + WARN_ON(area->pages); + if (area->iopt) { + down_write(&area->iopt->iova_rwsem); + interval_tree_remove(&area->node, &area->iopt->area_itree); + up_write(&area->iopt->iova_rwsem); + } + kfree(area); +} + +void iopt_free_pages_list(struct list_head *pages_list) +{ + struct iopt_pages_list *elm; + + while ((elm = list_first_entry_or_null(pages_list, + struct iopt_pages_list, next))) { + if (elm->area) + iopt_abort_area(elm->area); + if (elm->pages) + iopt_put_pages(elm->pages); + list_del(&elm->next); + kfree(elm); + } +} + +static int iopt_fill_domains_pages(struct list_head *pages_list) +{ + struct iopt_pages_list *undo_elm; + struct iopt_pages_list *elm; + int rc; + + list_for_each_entry(elm, pages_list, next) { + rc = iopt_area_fill_domains(elm->area, elm->pages); + if (rc) + goto err_undo; + } + return 0; + +err_undo: + list_for_each_entry(undo_elm, pages_list, next) { + if (undo_elm == elm) + break; + iopt_area_unfill_domains(undo_elm->area, undo_elm->pages); + } + return rc; +} + +int iopt_map_pages(struct io_pagetable *iopt, struct list_head *pages_list, + unsigned long length, unsigned long *dst_iova, + int iommu_prot, unsigned int flags) +{ + struct iopt_pages_list *elm; + int rc; + + rc = iopt_alloc_area_pages(iopt, pages_list, length, dst_iova, + iommu_prot, flags); + if (rc) + return rc; + + down_read(&iopt->domains_rwsem); + rc = iopt_fill_domains_pages(pages_list); + if (rc) + goto out_unlock_domains; + + down_write(&iopt->iova_rwsem); + list_for_each_entry(elm, pages_list, next) { + /* + * area->pages must be set inside the domains_rwsem to ensure + * any newly added domains will get filled. Moves the reference + * in from the list. 
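+ * After this the iopt_pages_list entries own nothing; a later
+ * iopt_free_pages_list() on the caller's list only frees the list
+ * containers themselves.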
+ */
+ elm->area->pages = elm->pages;
+ elm->pages = NULL;
+ elm->area = NULL;
+ }
+ up_write(&iopt->iova_rwsem);
+out_unlock_domains:
+ up_read(&iopt->domains_rwsem);
+ return rc;
+}
+
+/**
+ * iopt_map_user_pages() - Map a user VA to an iova in the io page table
+ * @ictx: iommufd_ctx the iopt is part of
+ * @iopt: io_pagetable to act on
+ * @iova: If IOPT_ALLOC_IOVA is set this is unused on input and contains
+ *        the chosen iova on output. Otherwise it is the iova to map to on input
+ * @uptr: User VA to map
+ * @length: Number of bytes to map
+ * @iommu_prot: Combination of IOMMU_READ/WRITE/etc bits for the mapping
+ * @flags: IOPT_ALLOC_IOVA or zero
+ *
+ * iova, uptr, and length must be aligned to iova_alignment. For domain-backed
+ * page tables this will pin the pages and load them into the domain at iova.
+ * For non-domain page tables this will only set up a lazy reference and the
+ * caller must use iommufd_access_pin_pages() to touch them.
+ *
+ * iopt_unmap_iova() must be called to undo this before the io_pagetable can be
+ * destroyed.
+ */
+int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
+ unsigned long *iova, void __user *uptr,
+ unsigned long length, int iommu_prot,
+ unsigned int flags)
+{
+ struct iopt_pages_list elm = {};
+ LIST_HEAD(pages_list);
+ int rc;
+
+ elm.pages = iopt_alloc_pages(uptr, length, iommu_prot & IOMMU_WRITE);
+ if (IS_ERR(elm.pages))
+ return PTR_ERR(elm.pages);
+ if (ictx->account_mode == IOPT_PAGES_ACCOUNT_MM &&
+ elm.pages->account_mode == IOPT_PAGES_ACCOUNT_USER)
+ elm.pages->account_mode = IOPT_PAGES_ACCOUNT_MM;
+ elm.start_byte = uptr - elm.pages->uptr;
+ elm.length = length;
+ list_add(&elm.next, &pages_list);
+
+ rc = iopt_map_pages(iopt, &pages_list, length, iova, iommu_prot, flags);
+ if (rc) {
+ if (elm.area)
+ iopt_abort_area(elm.area);
+ if (elm.pages)
+ iopt_put_pages(elm.pages);
+ return rc;
+ }
+ return 0;
+}
+
+int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova,
+ unsigned long length, struct list_head *pages_list)
+{
+ struct iopt_area_contig_iter iter;
+ unsigned long last_iova;
+ struct iopt_area *area;
+ int rc;
+
+ if (!length)
+ return -EINVAL;
+ if (check_add_overflow(iova, length - 1, &last_iova))
+ return -EOVERFLOW;
+
+ down_read(&iopt->iova_rwsem);
+ iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
+ struct iopt_pages_list *elm;
+ unsigned long last = min(last_iova, iopt_area_last_iova(area));
+
+ elm = kzalloc(sizeof(*elm), GFP_KERNEL_ACCOUNT);
+ if (!elm) {
+ rc = -ENOMEM;
+ goto err_free;
+ }
+ elm->start_byte = iopt_area_start_byte(area, iter.cur_iova);
+ elm->pages = area->pages;
+ elm->length = (last - iter.cur_iova) + 1;
+ kref_get(&elm->pages->kref);
+ list_add_tail(&elm->next, pages_list);
+ }
+ if (!iopt_area_contig_done(&iter)) {
+ rc = -ENOENT;
+ goto err_free;
+ }
+ up_read(&iopt->iova_rwsem);
+ return 0;
+err_free:
+ up_read(&iopt->iova_rwsem);
+ iopt_free_pages_list(pages_list);
+ return rc;
+}
+
+static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
+ unsigned long last, unsigned long *unmapped)
+{
+ struct iopt_area *area;
+ unsigned long unmapped_bytes = 0;
+ int rc = -ENOENT;
+
+ /*
+ * The domains_rwsem must be held in read mode any time any area->pages
+ * is NULL. This prevents domain attach/detach from running
+ * concurrently with cleaning up the area.
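+ * If an area has active accesses the locks are dropped, the accesses
+ * are flushed via iommufd_access_notify_unmap(), and the whole walk is
+ * restarted from the "again" label.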
+ */
+again:
+ down_read(&iopt->domains_rwsem);
+ down_write(&iopt->iova_rwsem);
+ while ((area = iopt_area_iter_first(iopt, start, last))) {
+ unsigned long area_last = iopt_area_last_iova(area);
+ unsigned long area_first = iopt_area_iova(area);
+ struct iopt_pages *pages;
+
+ /* Userspace should not race map/unmap of the same area */
+ if (!area->pages) {
+ rc = -EBUSY;
+ goto out_unlock_iova;
+ }
+
+ if (area_first < start || area_last > last) {
+ rc = -ENOENT;
+ goto out_unlock_iova;
+ }
+
+ /*
+ * num_accesses writers must hold the iova_rwsem too, so we can
+ * safely read it under the write side of the iova_rwsem
+ * without the pages->mutex.
+ */
+ if (area->num_accesses) {
+ start = area_first;
+ area->prevent_access = true;
+ up_write(&iopt->iova_rwsem);
+ up_read(&iopt->domains_rwsem);
+ iommufd_access_notify_unmap(iopt, area_first,
+ iopt_area_length(area));
+ if (WARN_ON(READ_ONCE(area->num_accesses)))
+ return -EDEADLOCK;
+ goto again;
+ }
+
+ pages = area->pages;
+ area->pages = NULL;
+ up_write(&iopt->iova_rwsem);
+
+ iopt_area_unfill_domains(area, pages);
+ iopt_abort_area(area);
+ iopt_put_pages(pages);
+
+ unmapped_bytes += area_last - area_first + 1;
+
+ down_write(&iopt->iova_rwsem);
+ }
+ if (unmapped_bytes)
+ rc = 0;
+
+out_unlock_iova:
+ up_write(&iopt->iova_rwsem);
+ up_read(&iopt->domains_rwsem);
+ if (unmapped)
+ *unmapped = unmapped_bytes;
+ return rc;
+}
+
+/**
+ * iopt_unmap_iova() - Remove a range of iova
+ * @iopt: io_pagetable to act on
+ * @iova: Starting iova to unmap
+ * @length: Number of bytes to unmap
+ * @unmapped: Return number of bytes unmapped
+ *
+ * The requested range must be a superset of existing ranges.
+ * Splitting/truncating IOVA mappings is not allowed.
+ */
+int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
+ unsigned long length, unsigned long *unmapped)
+{
+ unsigned long iova_last;
+
+ if (!length)
+ return -EINVAL;
+
+ if (check_add_overflow(iova, length - 1, &iova_last))
+ return -EOVERFLOW;
+
+ return iopt_unmap_iova_range(iopt, iova, iova_last, unmapped);
+}
+
+int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped)
+{
+ int rc;
+
+ rc = iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped);
+ /* If the IOVAs are empty then unmap all succeeds */
+ if (rc == -ENOENT)
+ return 0;
+ return rc;
+}
+
+/*
+ * The caller must always free all the nodes in the allowed_iova rb_root.
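+ * On success the tree that used to be in iopt->allowed_itree is handed
+ * back in @allowed_iova; on failure the caller's new tree is returned
+ * unchanged. Either way it holds the nodes the caller must free.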
*/ +int iopt_set_allow_iova(struct io_pagetable *iopt, + struct rb_root_cached *allowed_iova) +{ + struct iopt_allowed *allowed; + + down_write(&iopt->iova_rwsem); + swap(*allowed_iova, iopt->allowed_itree); + + for (allowed = iopt_allowed_iter_first(iopt, 0, ULONG_MAX); allowed; + allowed = iopt_allowed_iter_next(allowed, 0, ULONG_MAX)) { + if (iopt_reserved_iter_first(iopt, allowed->node.start, + allowed->node.last)) { + swap(*allowed_iova, iopt->allowed_itree); + up_write(&iopt->iova_rwsem); + return -EADDRINUSE; + } + } + up_write(&iopt->iova_rwsem); + return 0; +} + +int iopt_reserve_iova(struct io_pagetable *iopt, unsigned long start, + unsigned long last, void *owner) +{ + struct iopt_reserved *reserved; + + lockdep_assert_held_write(&iopt->iova_rwsem); + + if (iopt_area_iter_first(iopt, start, last) || + iopt_allowed_iter_first(iopt, start, last)) + return -EADDRINUSE; + + reserved = kzalloc(sizeof(*reserved), GFP_KERNEL_ACCOUNT); + if (!reserved) + return -ENOMEM; + reserved->node.start = start; + reserved->node.last = last; + reserved->owner = owner; + interval_tree_insert(&reserved->node, &iopt->reserved_itree); + return 0; +} + +static void __iopt_remove_reserved_iova(struct io_pagetable *iopt, void *owner) +{ + struct iopt_reserved *reserved, *next; + + lockdep_assert_held_write(&iopt->iova_rwsem); + + for (reserved = iopt_reserved_iter_first(iopt, 0, ULONG_MAX); reserved; + reserved = next) { + next = iopt_reserved_iter_next(reserved, 0, ULONG_MAX); + + if (reserved->owner == owner) { + interval_tree_remove(&reserved->node, + &iopt->reserved_itree); + kfree(reserved); + } + } +} + +void iopt_remove_reserved_iova(struct io_pagetable *iopt, void *owner) +{ + down_write(&iopt->iova_rwsem); + __iopt_remove_reserved_iova(iopt, owner); + up_write(&iopt->iova_rwsem); +} + +void iopt_init_table(struct io_pagetable *iopt) +{ + init_rwsem(&iopt->iova_rwsem); + init_rwsem(&iopt->domains_rwsem); + iopt->area_itree = RB_ROOT_CACHED; + iopt->allowed_itree = RB_ROOT_CACHED; + iopt->reserved_itree = RB_ROOT_CACHED; + xa_init_flags(&iopt->domains, XA_FLAGS_ACCOUNT); + xa_init_flags(&iopt->access_list, XA_FLAGS_ALLOC); + + /* + * iopt's start as SW tables that can use the entire size_t IOVA space + * due to the use of size_t in the APIs. They have no alignment + * restriction. + */ + iopt->iova_alignment = 1; +} + +void iopt_destroy_table(struct io_pagetable *iopt) +{ + struct interval_tree_node *node; + + if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) + iopt_remove_reserved_iova(iopt, NULL); + + while ((node = interval_tree_iter_first(&iopt->allowed_itree, 0, + ULONG_MAX))) { + interval_tree_remove(node, &iopt->allowed_itree); + kfree(container_of(node, struct iopt_allowed, node)); + } + + WARN_ON(!RB_EMPTY_ROOT(&iopt->reserved_itree.rb_root)); + WARN_ON(!xa_empty(&iopt->domains)); + WARN_ON(!xa_empty(&iopt->access_list)); + WARN_ON(!RB_EMPTY_ROOT(&iopt->area_itree.rb_root)); +} + +/** + * iopt_unfill_domain() - Unfill a domain with PFNs + * @iopt: io_pagetable to act on + * @domain: domain to unfill + * + * This is used when removing a domain from the iopt. Every area in the iopt + * will be unmapped from the domain. The domain must already be removed from the + * domains xarray. + */ +static void iopt_unfill_domain(struct io_pagetable *iopt, + struct iommu_domain *domain) +{ + struct iopt_area *area; + + lockdep_assert_held(&iopt->iova_rwsem); + lockdep_assert_held_write(&iopt->domains_rwsem); + + /* + * Some other domain is holding all the pfns still, rapidly unmap this + * domain. 
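+	 * The surviving domain at index 0 simply takes over as the
+	 * storage_domain for the affected areas, so no PFNs are unpinned on
+	 * this path.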
+ */ + if (iopt->next_domain_id != 0) { + /* Pick an arbitrary remaining domain to act as storage */ + struct iommu_domain *storage_domain = + xa_load(&iopt->domains, 0); + + for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; + area = iopt_area_iter_next(area, 0, ULONG_MAX)) { + struct iopt_pages *pages = area->pages; + + if (!pages) + continue; + + mutex_lock(&pages->mutex); + if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) + WARN_ON(!area->storage_domain); + if (area->storage_domain == domain) + area->storage_domain = storage_domain; + mutex_unlock(&pages->mutex); + + iopt_area_unmap_domain(area, domain); + } + return; + } + + for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; + area = iopt_area_iter_next(area, 0, ULONG_MAX)) { + struct iopt_pages *pages = area->pages; + + if (!pages) + continue; + + mutex_lock(&pages->mutex); + interval_tree_remove(&area->pages_node, &pages->domains_itree); + WARN_ON(area->storage_domain != domain); + area->storage_domain = NULL; + iopt_area_unfill_domain(area, pages, domain); + mutex_unlock(&pages->mutex); + } +} + +/** + * iopt_fill_domain() - Fill a domain with PFNs + * @iopt: io_pagetable to act on + * @domain: domain to fill + * + * Fill the domain with PFNs from every area in the iopt. On failure the domain + * is left unchanged. + */ +static int iopt_fill_domain(struct io_pagetable *iopt, + struct iommu_domain *domain) +{ + struct iopt_area *end_area; + struct iopt_area *area; + int rc; + + lockdep_assert_held(&iopt->iova_rwsem); + lockdep_assert_held_write(&iopt->domains_rwsem); + + for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; + area = iopt_area_iter_next(area, 0, ULONG_MAX)) { + struct iopt_pages *pages = area->pages; + + if (!pages) + continue; + + mutex_lock(&pages->mutex); + rc = iopt_area_fill_domain(area, domain); + if (rc) { + mutex_unlock(&pages->mutex); + goto out_unfill; + } + if (!area->storage_domain) { + WARN_ON(iopt->next_domain_id != 0); + area->storage_domain = domain; + interval_tree_insert(&area->pages_node, + &pages->domains_itree); + } + mutex_unlock(&pages->mutex); + } + return 0; + +out_unfill: + end_area = area; + for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; + area = iopt_area_iter_next(area, 0, ULONG_MAX)) { + struct iopt_pages *pages = area->pages; + + if (area == end_area) + break; + if (!pages) + continue; + mutex_lock(&pages->mutex); + if (iopt->next_domain_id == 0) { + interval_tree_remove(&area->pages_node, + &pages->domains_itree); + area->storage_domain = NULL; + } + iopt_area_unfill_domain(area, pages, domain); + mutex_unlock(&pages->mutex); + } + return rc; +} + +/* All existing area's conform to an increased page size */ +static int iopt_check_iova_alignment(struct io_pagetable *iopt, + unsigned long new_iova_alignment) +{ + unsigned long align_mask = new_iova_alignment - 1; + struct iopt_area *area; + + lockdep_assert_held(&iopt->iova_rwsem); + lockdep_assert_held(&iopt->domains_rwsem); + + for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; + area = iopt_area_iter_next(area, 0, ULONG_MAX)) + if ((iopt_area_iova(area) & align_mask) || + (iopt_area_length(area) & align_mask) || + (area->page_offset & align_mask)) + return -EADDRINUSE; + + if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) { + struct iommufd_access *access; + unsigned long index; + + xa_for_each(&iopt->access_list, index, access) + if (WARN_ON(access->iova_alignment > + new_iova_alignment)) + return -EADDRINUSE; + } + return 0; +} + +int iopt_table_add_domain(struct io_pagetable *iopt, + struct iommu_domain 
*domain) +{ + const struct iommu_domain_geometry *geometry = &domain->geometry; + struct iommu_domain *iter_domain; + unsigned int new_iova_alignment; + unsigned long index; + int rc; + + down_write(&iopt->domains_rwsem); + down_write(&iopt->iova_rwsem); + + xa_for_each(&iopt->domains, index, iter_domain) { + if (WARN_ON(iter_domain == domain)) { + rc = -EEXIST; + goto out_unlock; + } + } + + /* + * The io page size drives the iova_alignment. Internally the iopt_pages + * works in PAGE_SIZE units and we adjust when mapping sub-PAGE_SIZE + * objects into the iommu_domain. + * + * A iommu_domain must always be able to accept PAGE_SIZE to be + * compatible as we can't guarantee higher contiguity. + */ + new_iova_alignment = max_t(unsigned long, + 1UL << __ffs(domain->pgsize_bitmap), + iopt->iova_alignment); + if (new_iova_alignment > PAGE_SIZE) { + rc = -EINVAL; + goto out_unlock; + } + if (new_iova_alignment != iopt->iova_alignment) { + rc = iopt_check_iova_alignment(iopt, new_iova_alignment); + if (rc) + goto out_unlock; + } + + /* No area exists that is outside the allowed domain aperture */ + if (geometry->aperture_start != 0) { + rc = iopt_reserve_iova(iopt, 0, geometry->aperture_start - 1, + domain); + if (rc) + goto out_reserved; + } + if (geometry->aperture_end != ULONG_MAX) { + rc = iopt_reserve_iova(iopt, geometry->aperture_end + 1, + ULONG_MAX, domain); + if (rc) + goto out_reserved; + } + + rc = xa_reserve(&iopt->domains, iopt->next_domain_id, GFP_KERNEL); + if (rc) + goto out_reserved; + + rc = iopt_fill_domain(iopt, domain); + if (rc) + goto out_release; + + iopt->iova_alignment = new_iova_alignment; + xa_store(&iopt->domains, iopt->next_domain_id, domain, GFP_KERNEL); + iopt->next_domain_id++; + up_write(&iopt->iova_rwsem); + up_write(&iopt->domains_rwsem); + return 0; +out_release: + xa_release(&iopt->domains, iopt->next_domain_id); +out_reserved: + __iopt_remove_reserved_iova(iopt, domain); +out_unlock: + up_write(&iopt->iova_rwsem); + up_write(&iopt->domains_rwsem); + return rc; +} + +static int iopt_calculate_iova_alignment(struct io_pagetable *iopt) +{ + unsigned long new_iova_alignment; + struct iommufd_access *access; + struct iommu_domain *domain; + unsigned long index; + + lockdep_assert_held_write(&iopt->iova_rwsem); + lockdep_assert_held(&iopt->domains_rwsem); + + /* See batch_iommu_map_small() */ + if (iopt->disable_large_pages) + new_iova_alignment = PAGE_SIZE; + else + new_iova_alignment = 1; + + xa_for_each(&iopt->domains, index, domain) + new_iova_alignment = max_t(unsigned long, + 1UL << __ffs(domain->pgsize_bitmap), + new_iova_alignment); + xa_for_each(&iopt->access_list, index, access) + new_iova_alignment = max_t(unsigned long, + access->iova_alignment, + new_iova_alignment); + + if (new_iova_alignment > iopt->iova_alignment) { + int rc; + + rc = iopt_check_iova_alignment(iopt, new_iova_alignment); + if (rc) + return rc; + } + iopt->iova_alignment = new_iova_alignment; + return 0; +} + +void iopt_table_remove_domain(struct io_pagetable *iopt, + struct iommu_domain *domain) +{ + struct iommu_domain *iter_domain = NULL; + unsigned long index; + + down_write(&iopt->domains_rwsem); + down_write(&iopt->iova_rwsem); + + xa_for_each(&iopt->domains, index, iter_domain) + if (iter_domain == domain) + break; + if (WARN_ON(iter_domain != domain) || index >= iopt->next_domain_id) + goto out_unlock; + + /* + * Compress the xarray to keep it linear by swapping the entry to erase + * with the tail entry and shrinking the tail. 
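+	 * e.g. (hypothetical contents) with domains {0:A, 1:B, 2:C} and
+	 * next_domain_id == 3, removing B erases C from tail index 2 and
+	 * stores it at index 1, leaving {0:A, 1:C} and next_domain_id == 2.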
+ */ + iopt->next_domain_id--; + iter_domain = xa_erase(&iopt->domains, iopt->next_domain_id); + if (index != iopt->next_domain_id) + xa_store(&iopt->domains, index, iter_domain, GFP_KERNEL); + + iopt_unfill_domain(iopt, domain); + __iopt_remove_reserved_iova(iopt, domain); + + WARN_ON(iopt_calculate_iova_alignment(iopt)); +out_unlock: + up_write(&iopt->iova_rwsem); + up_write(&iopt->domains_rwsem); +} + +/** + * iopt_area_split - Split an area into two parts at iova + * @area: The area to split + * @iova: Becomes the last of a new area + * + * This splits an area into two. It is part of the VFIO compatibility to allow + * poking a hole in the mapping. The two areas continue to point at the same + * iopt_pages, just with different starting bytes. + */ +static int iopt_area_split(struct iopt_area *area, unsigned long iova) +{ + unsigned long alignment = area->iopt->iova_alignment; + unsigned long last_iova = iopt_area_last_iova(area); + unsigned long start_iova = iopt_area_iova(area); + unsigned long new_start = iova + 1; + struct io_pagetable *iopt = area->iopt; + struct iopt_pages *pages = area->pages; + struct iopt_area *lhs; + struct iopt_area *rhs; + int rc; + + lockdep_assert_held_write(&iopt->iova_rwsem); + + if (iova == start_iova || iova == last_iova) + return 0; + + if (!pages || area->prevent_access) + return -EBUSY; + + if (new_start & (alignment - 1) || + iopt_area_start_byte(area, new_start) & (alignment - 1)) + return -EINVAL; + + lhs = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT); + if (!lhs) + return -ENOMEM; + + rhs = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT); + if (!rhs) { + rc = -ENOMEM; + goto err_free_lhs; + } + + mutex_lock(&pages->mutex); + /* + * Splitting is not permitted if an access exists, we don't track enough + * information to split existing accesses. + */ + if (area->num_accesses) { + rc = -EINVAL; + goto err_unlock; + } + + /* + * Splitting is not permitted if a domain could have been mapped with + * huge pages. 
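+	 * The domain may contain a huge page IOPTE spanning the cut point,
+	 * and there is no way to atomically split such a mapping, so the
+	 * operation is refused instead.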
+ */ + if (area->storage_domain && !iopt->disable_large_pages) { + rc = -EINVAL; + goto err_unlock; + } + + interval_tree_remove(&area->node, &iopt->area_itree); + rc = iopt_insert_area(iopt, lhs, area->pages, start_iova, + iopt_area_start_byte(area, start_iova), + (new_start - 1) - start_iova + 1, + area->iommu_prot); + if (WARN_ON(rc)) + goto err_insert; + + rc = iopt_insert_area(iopt, rhs, area->pages, new_start, + iopt_area_start_byte(area, new_start), + last_iova - new_start + 1, area->iommu_prot); + if (WARN_ON(rc)) + goto err_remove_lhs; + + lhs->storage_domain = area->storage_domain; + lhs->pages = area->pages; + rhs->storage_domain = area->storage_domain; + rhs->pages = area->pages; + kref_get(&rhs->pages->kref); + kfree(area); + mutex_unlock(&pages->mutex); + + /* + * No change to domains or accesses because the pages hasn't been + * changed + */ + return 0; + +err_remove_lhs: + interval_tree_remove(&lhs->node, &iopt->area_itree); +err_insert: + interval_tree_insert(&area->node, &iopt->area_itree); +err_unlock: + mutex_unlock(&pages->mutex); + kfree(rhs); +err_free_lhs: + kfree(lhs); + return rc; +} + +int iopt_cut_iova(struct io_pagetable *iopt, unsigned long *iovas, + size_t num_iovas) +{ + int rc = 0; + int i; + + down_write(&iopt->iova_rwsem); + for (i = 0; i < num_iovas; i++) { + struct iopt_area *area; + + area = iopt_area_iter_first(iopt, iovas[i], iovas[i]); + if (!area) + continue; + rc = iopt_area_split(area, iovas[i]); + if (rc) + break; + } + up_write(&iopt->iova_rwsem); + return rc; +} + +void iopt_enable_large_pages(struct io_pagetable *iopt) +{ + int rc; + + down_write(&iopt->domains_rwsem); + down_write(&iopt->iova_rwsem); + WRITE_ONCE(iopt->disable_large_pages, false); + rc = iopt_calculate_iova_alignment(iopt); + WARN_ON(rc); + up_write(&iopt->iova_rwsem); + up_write(&iopt->domains_rwsem); +} + +int iopt_disable_large_pages(struct io_pagetable *iopt) +{ + int rc = 0; + + down_write(&iopt->domains_rwsem); + down_write(&iopt->iova_rwsem); + if (iopt->disable_large_pages) + goto out_unlock; + + /* Won't do it if domains already have pages mapped in them */ + if (!xa_empty(&iopt->domains) && + !RB_EMPTY_ROOT(&iopt->area_itree.rb_root)) { + rc = -EINVAL; + goto out_unlock; + } + + WRITE_ONCE(iopt->disable_large_pages, true); + rc = iopt_calculate_iova_alignment(iopt); + if (rc) + WRITE_ONCE(iopt->disable_large_pages, false); +out_unlock: + up_write(&iopt->iova_rwsem); + up_write(&iopt->domains_rwsem); + return rc; +} + +int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access) +{ + int rc; + + down_write(&iopt->domains_rwsem); + down_write(&iopt->iova_rwsem); + rc = xa_alloc(&iopt->access_list, &access->iopt_access_list_id, access, + xa_limit_16b, GFP_KERNEL_ACCOUNT); + if (rc) + goto out_unlock; + + rc = iopt_calculate_iova_alignment(iopt); + if (rc) { + xa_erase(&iopt->access_list, access->iopt_access_list_id); + goto out_unlock; + } + +out_unlock: + up_write(&iopt->iova_rwsem); + up_write(&iopt->domains_rwsem); + return rc; +} + +void iopt_remove_access(struct io_pagetable *iopt, + struct iommufd_access *access) +{ + down_write(&iopt->domains_rwsem); + down_write(&iopt->iova_rwsem); + WARN_ON(xa_erase(&iopt->access_list, access->iopt_access_list_id) != + access); + WARN_ON(iopt_calculate_iova_alignment(iopt)); + up_write(&iopt->iova_rwsem); + up_write(&iopt->domains_rwsem); +} + +/* Narrow the valid_iova_itree to include reserved ranges from a group. 
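+ * If the group has an IOMMU_RESV_SW_MSI region and sw_msi_start is non-NULL,
+ * the start of that region is also reported back through *sw_msi_start.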
*/ +int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt, + struct device *device, + struct iommu_group *group, + phys_addr_t *sw_msi_start) +{ + struct iommu_resv_region *resv; + struct iommu_resv_region *tmp; + LIST_HEAD(group_resv_regions); + unsigned int num_hw_msi = 0; + unsigned int num_sw_msi = 0; + int rc; + + down_write(&iopt->iova_rwsem); + rc = iommu_get_group_resv_regions(group, &group_resv_regions); + if (rc) + goto out_unlock; + + list_for_each_entry(resv, &group_resv_regions, list) { + if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE) + continue; + + if (sw_msi_start && resv->type == IOMMU_RESV_MSI) + num_hw_msi++; + if (sw_msi_start && resv->type == IOMMU_RESV_SW_MSI) { + *sw_msi_start = resv->start; + num_sw_msi++; + } + + rc = iopt_reserve_iova(iopt, resv->start, + resv->length - 1 + resv->start, device); + if (rc) + goto out_reserved; + } + + /* Drivers must offer sane combinations of regions */ + if (WARN_ON(num_sw_msi && num_hw_msi) || WARN_ON(num_sw_msi > 1)) { + rc = -EINVAL; + goto out_reserved; + } + + rc = 0; + goto out_free_resv; + +out_reserved: + __iopt_remove_reserved_iova(iopt, device); +out_free_resv: + list_for_each_entry_safe(resv, tmp, &group_resv_regions, list) + kfree(resv); +out_unlock: + up_write(&iopt->iova_rwsem); + return rc; +} diff --git a/drivers/iommu/iommufd/io_pagetable.h b/drivers/iommu/iommufd/io_pagetable.h new file mode 100644 index 0000000000000000000000000000000000000000..0ec3509b7e339243e1c70a62c2bb0300191e5075 --- /dev/null +++ b/drivers/iommu/iommufd/io_pagetable.h @@ -0,0 +1,241 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. + * + */ +#ifndef __IO_PAGETABLE_H +#define __IO_PAGETABLE_H + +#include +#include +#include +#include + +#include "iommufd_private.h" + +struct iommu_domain; + +/* + * Each io_pagetable is composed of intervals of areas which cover regions of + * the iova that are backed by something. iova not covered by areas is not + * populated in the page table. Each area is fully populated with pages. + * + * iovas are in byte units, but must be iopt->iova_alignment aligned. + * + * pages can be NULL, this means some other thread is still working on setting + * up or tearing down the area. When observed under the write side of the + * domain_rwsem a NULL pages must mean the area is still being setup and no + * domains are filled. + * + * storage_domain points at an arbitrary iommu_domain that is holding the PFNs + * for this area. It is locked by the pages->mutex. This simplifies the locking + * as the pages code can rely on the storage_domain without having to get the + * iopt->domains_rwsem. 
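+ *
+ * Which attached domain acts as the storage_domain is arbitrary; it is
+ * replaced by another remaining domain if the current one is removed (see
+ * iopt_unfill_domain()).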
+ * + * The io_pagetable::iova_rwsem protects node + * The iopt_pages::mutex protects pages_node + * iopt and iommu_prot are immutable + * The pages::mutex protects num_accesses + */ +struct iopt_area { + struct interval_tree_node node; + struct interval_tree_node pages_node; + struct io_pagetable *iopt; + struct iopt_pages *pages; + struct iommu_domain *storage_domain; + /* How many bytes into the first page the area starts */ + unsigned int page_offset; + /* IOMMU_READ, IOMMU_WRITE, etc */ + int iommu_prot; + bool prevent_access : 1; + unsigned int num_accesses; +}; + +struct iopt_allowed { + struct interval_tree_node node; +}; + +struct iopt_reserved { + struct interval_tree_node node; + void *owner; +}; + +int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages); +void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages); + +int iopt_area_fill_domain(struct iopt_area *area, struct iommu_domain *domain); +void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages, + struct iommu_domain *domain); +void iopt_area_unmap_domain(struct iopt_area *area, + struct iommu_domain *domain); + +static inline unsigned long iopt_area_index(struct iopt_area *area) +{ + return area->pages_node.start; +} + +static inline unsigned long iopt_area_last_index(struct iopt_area *area) +{ + return area->pages_node.last; +} + +static inline unsigned long iopt_area_iova(struct iopt_area *area) +{ + return area->node.start; +} + +static inline unsigned long iopt_area_last_iova(struct iopt_area *area) +{ + return area->node.last; +} + +static inline size_t iopt_area_length(struct iopt_area *area) +{ + return (area->node.last - area->node.start) + 1; +} + +/* + * Number of bytes from the start of the iopt_pages that the iova begins. 
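+ * E.g. (hypothetical values) an area whose first page index is 1 and whose
+ * page_offset is 0x800 maps its lowest iova to start_byte 0x1800.
+ *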
+ * iopt_area_start_byte() / PAGE_SIZE encodes the starting page index + * iopt_area_start_byte() % PAGE_SIZE encodes the offset within that page + */ +static inline unsigned long iopt_area_start_byte(struct iopt_area *area, + unsigned long iova) +{ + if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) + WARN_ON(iova < iopt_area_iova(area) || + iova > iopt_area_last_iova(area)); + return (iova - iopt_area_iova(area)) + area->page_offset + + iopt_area_index(area) * PAGE_SIZE; +} + +static inline unsigned long iopt_area_iova_to_index(struct iopt_area *area, + unsigned long iova) +{ + return iopt_area_start_byte(area, iova) / PAGE_SIZE; +} + +#define __make_iopt_iter(name) \ + static inline struct iopt_##name *iopt_##name##_iter_first( \ + struct io_pagetable *iopt, unsigned long start, \ + unsigned long last) \ + { \ + struct interval_tree_node *node; \ + \ + lockdep_assert_held(&iopt->iova_rwsem); \ + node = interval_tree_iter_first(&iopt->name##_itree, start, \ + last); \ + if (!node) \ + return NULL; \ + return container_of(node, struct iopt_##name, node); \ + } \ + static inline struct iopt_##name *iopt_##name##_iter_next( \ + struct iopt_##name *last_node, unsigned long start, \ + unsigned long last) \ + { \ + struct interval_tree_node *node; \ + \ + node = interval_tree_iter_next(&last_node->node, start, last); \ + if (!node) \ + return NULL; \ + return container_of(node, struct iopt_##name, node); \ + } + +__make_iopt_iter(area) +__make_iopt_iter(allowed) +__make_iopt_iter(reserved) + +struct iopt_area_contig_iter { + unsigned long cur_iova; + unsigned long last_iova; + struct iopt_area *area; +}; +struct iopt_area *iopt_area_contig_init(struct iopt_area_contig_iter *iter, + struct io_pagetable *iopt, + unsigned long iova, + unsigned long last_iova); +struct iopt_area *iopt_area_contig_next(struct iopt_area_contig_iter *iter); + +static inline bool iopt_area_contig_done(struct iopt_area_contig_iter *iter) +{ + return iter->area && iter->last_iova <= iopt_area_last_iova(iter->area); +} + +/* + * Iterate over a contiguous list of areas that span the iova,last_iova range. + * The caller must check iopt_area_contig_done() after the loop to see if + * contiguous areas existed. + */ +#define iopt_for_each_contig_area(iter, area, iopt, iova, last_iova) \ + for (area = iopt_area_contig_init(iter, iopt, iova, last_iova); area; \ + area = iopt_area_contig_next(iter)) + +enum { + IOPT_PAGES_ACCOUNT_NONE = 0, + IOPT_PAGES_ACCOUNT_USER = 1, + IOPT_PAGES_ACCOUNT_MM = 2, +}; + +/* + * This holds a pinned page list for multiple areas of IO address space. The + * pages always originate from a linear chunk of userspace VA. Multiple + * io_pagetable's, through their iopt_area's, can share a single iopt_pages + * which avoids multi-pinning and double accounting of page consumption. + * + * indexes in this structure are measured in PAGE_SIZE units, are 0 based from + * the start of the uptr and extend to npages. pages are pinned dynamically + * according to the intervals in the access_itree and domains_itree, npinned + * records the current number of pages pinned. 
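+ * last_npinned remembers the npinned value at the previous accounting
+ * update so that only the delta has to be charged or uncharged.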
+ */ +struct iopt_pages { + struct kref kref; + struct mutex mutex; + size_t npages; + size_t npinned; + size_t last_npinned; + struct task_struct *source_task; + struct mm_struct *source_mm; + struct user_struct *source_user; + void __user *uptr; + bool writable:1; + u8 account_mode; + + struct xarray pinned_pfns; + /* Of iopt_pages_access::node */ + struct rb_root_cached access_itree; + /* Of iopt_area::pages_node */ + struct rb_root_cached domains_itree; +}; + +struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length, + bool writable); +void iopt_release_pages(struct kref *kref); +static inline void iopt_put_pages(struct iopt_pages *pages) +{ + kref_put(&pages->kref, iopt_release_pages); +} + +void iopt_pages_fill_from_xarray(struct iopt_pages *pages, unsigned long start, + unsigned long last, struct page **out_pages); +int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start, + unsigned long last, struct page **out_pages); +void iopt_pages_unfill_xarray(struct iopt_pages *pages, unsigned long start, + unsigned long last); + +int iopt_area_add_access(struct iopt_area *area, unsigned long start, + unsigned long last, struct page **out_pages, + unsigned int flags); +void iopt_area_remove_access(struct iopt_area *area, unsigned long start, + unsigned long last); +int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte, + void *data, unsigned long length, unsigned int flags); + +/* + * Each interval represents an active iopt_access_pages(), it acts as an + * interval lock that keeps the PFNs pinned and stored in the xarray. + */ +struct iopt_pages_access { + struct interval_tree_node node; + unsigned int users; +}; + +#endif diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c new file mode 100644 index 0000000000000000000000000000000000000000..31577e9d434f878425303d1985c28668270afde4 --- /dev/null +++ b/drivers/iommu/iommufd/ioas.c @@ -0,0 +1,398 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES + */ +#include +#include +#include +#include + +#include "io_pagetable.h" + +void iommufd_ioas_destroy(struct iommufd_object *obj) +{ + struct iommufd_ioas *ioas = container_of(obj, struct iommufd_ioas, obj); + int rc; + + rc = iopt_unmap_all(&ioas->iopt, NULL); + WARN_ON(rc && rc != -ENOENT); + iopt_destroy_table(&ioas->iopt); + mutex_destroy(&ioas->mutex); +} + +struct iommufd_ioas *iommufd_ioas_alloc(struct iommufd_ctx *ictx) +{ + struct iommufd_ioas *ioas; + + ioas = iommufd_object_alloc(ictx, ioas, IOMMUFD_OBJ_IOAS); + if (IS_ERR(ioas)) + return ioas; + + iopt_init_table(&ioas->iopt); + INIT_LIST_HEAD(&ioas->hwpt_list); + mutex_init(&ioas->mutex); + return ioas; +} + +int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd) +{ + struct iommu_ioas_alloc *cmd = ucmd->cmd; + struct iommufd_ioas *ioas; + int rc; + + if (cmd->flags) + return -EOPNOTSUPP; + + ioas = iommufd_ioas_alloc(ucmd->ictx); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + + cmd->out_ioas_id = ioas->obj.id; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); + if (rc) + goto out_table; + iommufd_object_finalize(ucmd->ictx, &ioas->obj); + return 0; + +out_table: + iommufd_object_abort_and_destroy(ucmd->ictx, &ioas->obj); + return rc; +} + +int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd) +{ + struct iommu_iova_range __user *ranges; + struct iommu_ioas_iova_ranges *cmd = ucmd->cmd; + struct iommufd_ioas *ioas; + struct interval_tree_span_iter span; + u32 max_iovas; + int rc; + + if 
(cmd->__reserved) + return -EOPNOTSUPP; + + ioas = iommufd_get_ioas(ucmd, cmd->ioas_id); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + + down_read(&ioas->iopt.iova_rwsem); + max_iovas = cmd->num_iovas; + ranges = u64_to_user_ptr(cmd->allowed_iovas); + cmd->num_iovas = 0; + cmd->out_iova_alignment = ioas->iopt.iova_alignment; + interval_tree_for_each_span(&span, &ioas->iopt.reserved_itree, 0, + ULONG_MAX) { + if (!span.is_hole) + continue; + if (cmd->num_iovas < max_iovas) { + struct iommu_iova_range elm = { + .start = span.start_hole, + .last = span.last_hole, + }; + + if (copy_to_user(&ranges[cmd->num_iovas], &elm, + sizeof(elm))) { + rc = -EFAULT; + goto out_put; + } + } + cmd->num_iovas++; + } + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); + if (rc) + goto out_put; + if (cmd->num_iovas > max_iovas) + rc = -EMSGSIZE; +out_put: + up_read(&ioas->iopt.iova_rwsem); + iommufd_put_object(&ioas->obj); + return rc; +} + +static int iommufd_ioas_load_iovas(struct rb_root_cached *itree, + struct iommu_iova_range __user *ranges, + u32 num) +{ + u32 i; + + for (i = 0; i != num; i++) { + struct iommu_iova_range range; + struct iopt_allowed *allowed; + + if (copy_from_user(&range, ranges + i, sizeof(range))) + return -EFAULT; + + if (range.start >= range.last) + return -EINVAL; + + if (interval_tree_iter_first(itree, range.start, range.last)) + return -EINVAL; + + allowed = kzalloc(sizeof(*allowed), GFP_KERNEL_ACCOUNT); + if (!allowed) + return -ENOMEM; + allowed->node.start = range.start; + allowed->node.last = range.last; + + interval_tree_insert(&allowed->node, itree); + } + return 0; +} + +int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd) +{ + struct iommu_ioas_allow_iovas *cmd = ucmd->cmd; + struct rb_root_cached allowed_iova = RB_ROOT_CACHED; + struct interval_tree_node *node; + struct iommufd_ioas *ioas; + struct io_pagetable *iopt; + int rc = 0; + + if (cmd->__reserved) + return -EOPNOTSUPP; + + ioas = iommufd_get_ioas(ucmd, cmd->ioas_id); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + iopt = &ioas->iopt; + + rc = iommufd_ioas_load_iovas(&allowed_iova, + u64_to_user_ptr(cmd->allowed_iovas), + cmd->num_iovas); + if (rc) + goto out_free; + + /* + * We want the allowed tree update to be atomic, so we have to keep the + * original nodes around, and keep track of the new nodes as we allocate + * memory for them. The simplest solution is to have a new/old tree and + * then swap new for old. On success we free the old tree, on failure we + * free the new tree. + */ + rc = iopt_set_allow_iova(iopt, &allowed_iova); +out_free: + while ((node = interval_tree_iter_first(&allowed_iova, 0, ULONG_MAX))) { + interval_tree_remove(node, &allowed_iova); + kfree(container_of(node, struct iopt_allowed, node)); + } + iommufd_put_object(&ioas->obj); + return rc; +} + +static int conv_iommu_prot(u32 map_flags) +{ + /* + * We provide no manual cache coherency ioctls to userspace and most + * architectures make the CPU ops for cache flushing privileged. + * Therefore we require the underlying IOMMU to support CPU coherent + * operation. Support for IOMMU_CACHE is enforced by the + * IOMMU_CAP_CACHE_COHERENCY test during bind. 
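+	 * Hence every mapping is created with IOMMU_CACHE set, plus
+	 * IOMMU_READ/IOMMU_WRITE as requested by the map flags.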
+ */ + int iommu_prot = IOMMU_CACHE; + + if (map_flags & IOMMU_IOAS_MAP_WRITEABLE) + iommu_prot |= IOMMU_WRITE; + if (map_flags & IOMMU_IOAS_MAP_READABLE) + iommu_prot |= IOMMU_READ; + return iommu_prot; +} + +int iommufd_ioas_map(struct iommufd_ucmd *ucmd) +{ + struct iommu_ioas_map *cmd = ucmd->cmd; + unsigned long iova = cmd->iova; + struct iommufd_ioas *ioas; + unsigned int flags = 0; + int rc; + + if ((cmd->flags & + ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE | + IOMMU_IOAS_MAP_READABLE)) || + cmd->__reserved) + return -EOPNOTSUPP; + if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX) + return -EOVERFLOW; + + ioas = iommufd_get_ioas(ucmd, cmd->ioas_id); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + + if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA)) + flags = IOPT_ALLOC_IOVA; + rc = iopt_map_user_pages(ucmd->ictx, &ioas->iopt, &iova, + u64_to_user_ptr(cmd->user_va), cmd->length, + conv_iommu_prot(cmd->flags), flags); + if (rc) + goto out_put; + + cmd->iova = iova; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); +out_put: + iommufd_put_object(&ioas->obj); + return rc; +} + +int iommufd_ioas_copy(struct iommufd_ucmd *ucmd) +{ + struct iommu_ioas_copy *cmd = ucmd->cmd; + struct iommufd_ioas *src_ioas; + struct iommufd_ioas *dst_ioas; + unsigned int flags = 0; + LIST_HEAD(pages_list); + unsigned long iova; + int rc; + + iommufd_test_syz_conv_iova_id(ucmd, cmd->src_ioas_id, &cmd->src_iova, + &cmd->flags); + + if ((cmd->flags & + ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE | + IOMMU_IOAS_MAP_READABLE))) + return -EOPNOTSUPP; + if (cmd->length >= ULONG_MAX || cmd->src_iova >= ULONG_MAX || + cmd->dst_iova >= ULONG_MAX) + return -EOVERFLOW; + + src_ioas = iommufd_get_ioas(ucmd, cmd->src_ioas_id); + if (IS_ERR(src_ioas)) + return PTR_ERR(src_ioas); + rc = iopt_get_pages(&src_ioas->iopt, cmd->src_iova, cmd->length, + &pages_list); + iommufd_put_object(&src_ioas->obj); + if (rc) + return rc; + + dst_ioas = iommufd_get_ioas(ucmd, cmd->dst_ioas_id); + if (IS_ERR(dst_ioas)) { + rc = PTR_ERR(dst_ioas); + goto out_pages; + } + + if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA)) + flags = IOPT_ALLOC_IOVA; + iova = cmd->dst_iova; + rc = iopt_map_pages(&dst_ioas->iopt, &pages_list, cmd->length, &iova, + conv_iommu_prot(cmd->flags), flags); + if (rc) + goto out_put_dst; + + cmd->dst_iova = iova; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); +out_put_dst: + iommufd_put_object(&dst_ioas->obj); +out_pages: + iopt_free_pages_list(&pages_list); + return rc; +} + +int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd) +{ + struct iommu_ioas_unmap *cmd = ucmd->cmd; + struct iommufd_ioas *ioas; + unsigned long unmapped = 0; + int rc; + + ioas = iommufd_get_ioas(ucmd, cmd->ioas_id); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + + if (cmd->iova == 0 && cmd->length == U64_MAX) { + rc = iopt_unmap_all(&ioas->iopt, &unmapped); + if (rc) + goto out_put; + } else { + if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX) { + rc = -EOVERFLOW; + goto out_put; + } + rc = iopt_unmap_iova(&ioas->iopt, cmd->iova, cmd->length, + &unmapped); + if (rc) + goto out_put; + } + + cmd->length = unmapped; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); + +out_put: + iommufd_put_object(&ioas->obj); + return rc; +} + +int iommufd_option_rlimit_mode(struct iommu_option *cmd, + struct iommufd_ctx *ictx) +{ + if (cmd->object_id) + return -EOPNOTSUPP; + + if (cmd->op == IOMMU_OPTION_OP_GET) { + cmd->val64 = ictx->account_mode == IOPT_PAGES_ACCOUNT_MM; + return 0; + } + if (cmd->op == IOMMU_OPTION_OP_SET) { + int rc = 
0; + + if (!capable(CAP_SYS_RESOURCE)) + return -EPERM; + + xa_lock(&ictx->objects); + if (!xa_empty(&ictx->objects)) { + rc = -EBUSY; + } else { + if (cmd->val64 == 0) + ictx->account_mode = IOPT_PAGES_ACCOUNT_USER; + else if (cmd->val64 == 1) + ictx->account_mode = IOPT_PAGES_ACCOUNT_MM; + else + rc = -EINVAL; + } + xa_unlock(&ictx->objects); + + return rc; + } + return -EOPNOTSUPP; +} + +static int iommufd_ioas_option_huge_pages(struct iommu_option *cmd, + struct iommufd_ioas *ioas) +{ + if (cmd->op == IOMMU_OPTION_OP_GET) { + cmd->val64 = !ioas->iopt.disable_large_pages; + return 0; + } + if (cmd->op == IOMMU_OPTION_OP_SET) { + if (cmd->val64 == 0) + return iopt_disable_large_pages(&ioas->iopt); + if (cmd->val64 == 1) { + iopt_enable_large_pages(&ioas->iopt); + return 0; + } + return -EINVAL; + } + return -EOPNOTSUPP; +} + +int iommufd_ioas_option(struct iommufd_ucmd *ucmd) +{ + struct iommu_option *cmd = ucmd->cmd; + struct iommufd_ioas *ioas; + int rc = 0; + + if (cmd->__reserved) + return -EOPNOTSUPP; + + ioas = iommufd_get_ioas(ucmd, cmd->object_id); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + + switch (cmd->option_id) { + case IOMMU_OPTION_HUGE_PAGES: + rc = iommufd_ioas_option_huge_pages(cmd, ioas); + break; + default: + rc = -EOPNOTSUPP; + } + + iommufd_put_object(&ioas->obj); + return rc; +} diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h new file mode 100644 index 0000000000000000000000000000000000000000..222e86591f8ac98a5ac11a5e56fb9245d16a35d4 --- /dev/null +++ b/drivers/iommu/iommufd/iommufd_private.h @@ -0,0 +1,307 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES + */ +#ifndef __IOMMUFD_PRIVATE_H +#define __IOMMUFD_PRIVATE_H + +#include +#include +#include +#include + +struct iommu_domain; +struct iommu_group; +struct iommu_option; + +struct iommufd_ctx { + struct file *file; + struct xarray objects; + + u8 account_mode; + struct iommufd_ioas *vfio_ioas; +}; + +/* + * The IOVA to PFN map. The map automatically copies the PFNs into multiple + * domains and permits sharing of PFNs between io_pagetable instances. This + * supports both a design where IOAS's are 1:1 with a domain (eg because the + * domain is HW customized), or where the IOAS is 1:N with multiple generic + * domains. The io_pagetable holds an interval tree of iopt_areas which point + * to shared iopt_pages which hold the pfns mapped to the page table. 
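+ *
+ * Roughly:
+ *
+ *   io_pagetable -+-> domains (xarray) ----------------> iommu_domain(s)
+ *                 '-> area_itree -> iopt_area(s) -> iopt_pages -> PFNs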
+ * + * The locking order is domains_rwsem -> iova_rwsem -> pages::mutex + */ +struct io_pagetable { + struct rw_semaphore domains_rwsem; + struct xarray domains; + struct xarray access_list; + unsigned int next_domain_id; + + struct rw_semaphore iova_rwsem; + struct rb_root_cached area_itree; + /* IOVA that cannot become reserved, struct iopt_allowed */ + struct rb_root_cached allowed_itree; + /* IOVA that cannot be allocated, struct iopt_reserved */ + struct rb_root_cached reserved_itree; + u8 disable_large_pages; + unsigned long iova_alignment; +}; + +void iopt_init_table(struct io_pagetable *iopt); +void iopt_destroy_table(struct io_pagetable *iopt); +int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova, + unsigned long length, struct list_head *pages_list); +void iopt_free_pages_list(struct list_head *pages_list); +enum { + IOPT_ALLOC_IOVA = 1 << 0, +}; +int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt, + unsigned long *iova, void __user *uptr, + unsigned long length, int iommu_prot, + unsigned int flags); +int iopt_map_pages(struct io_pagetable *iopt, struct list_head *pages_list, + unsigned long length, unsigned long *dst_iova, + int iommu_prot, unsigned int flags); +int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova, + unsigned long length, unsigned long *unmapped); +int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped); + +void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova, + unsigned long length); +int iopt_table_add_domain(struct io_pagetable *iopt, + struct iommu_domain *domain); +void iopt_table_remove_domain(struct io_pagetable *iopt, + struct iommu_domain *domain); +int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt, + struct device *device, + struct iommu_group *group, + phys_addr_t *sw_msi_start); +int iopt_set_allow_iova(struct io_pagetable *iopt, + struct rb_root_cached *allowed_iova); +int iopt_reserve_iova(struct io_pagetable *iopt, unsigned long start, + unsigned long last, void *owner); +void iopt_remove_reserved_iova(struct io_pagetable *iopt, void *owner); +int iopt_cut_iova(struct io_pagetable *iopt, unsigned long *iovas, + size_t num_iovas); +void iopt_enable_large_pages(struct io_pagetable *iopt); +int iopt_disable_large_pages(struct io_pagetable *iopt); + +struct iommufd_ucmd { + struct iommufd_ctx *ictx; + void __user *ubuffer; + u32 user_size; + void *cmd; +}; + +int iommufd_vfio_ioctl(struct iommufd_ctx *ictx, unsigned int cmd, + unsigned long arg); + +/* Copy the response in ucmd->cmd back to userspace. */ +static inline int iommufd_ucmd_respond(struct iommufd_ucmd *ucmd, + size_t cmd_len) +{ + if (copy_to_user(ucmd->ubuffer, ucmd->cmd, + min_t(size_t, ucmd->user_size, cmd_len))) + return -EFAULT; + return 0; +} + +enum iommufd_object_type { + IOMMUFD_OBJ_NONE, + IOMMUFD_OBJ_ANY = IOMMUFD_OBJ_NONE, + IOMMUFD_OBJ_DEVICE, + IOMMUFD_OBJ_HW_PAGETABLE, + IOMMUFD_OBJ_IOAS, + IOMMUFD_OBJ_ACCESS, +#ifdef CONFIG_IOMMUFD_TEST + IOMMUFD_OBJ_SELFTEST, +#endif +}; + +/* Base struct for all objects with a userspace ID handle. 
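+ * Specific object types embed this struct as their first member; the
+ * iommufd_object_alloc() macro build-checks that layout so an id handle can
+ * be resolved back to the containing type with container_of().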
+ */
+struct iommufd_object {
+ struct rw_semaphore destroy_rwsem;
+ refcount_t users;
+ enum iommufd_object_type type;
+ unsigned int id;
+};
+
+static inline bool iommufd_lock_obj(struct iommufd_object *obj)
+{
+ if (!down_read_trylock(&obj->destroy_rwsem))
+ return false;
+ if (!refcount_inc_not_zero(&obj->users)) {
+ up_read(&obj->destroy_rwsem);
+ return false;
+ }
+ return true;
+}
+
+struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
+ enum iommufd_object_type type);
+static inline void iommufd_put_object(struct iommufd_object *obj)
+{
+ refcount_dec(&obj->users);
+ up_read(&obj->destroy_rwsem);
+}
+
+/**
+ * iommufd_ref_to_users() - Switch from destroy_rwsem to users refcount
+ * protection
+ * @obj: Object to release
+ *
+ * Objects have two refcount protections (destroy_rwsem and the refcount_t
+ * users). Holding either of these will prevent the object from being destroyed.
+ *
+ * Depending on the use case, one protection or the other is appropriate. In
+ * most cases references are being protected by the destroy_rwsem. This allows
+ * orderly destruction of the object because iommufd_object_destroy_user() will
+ * wait for it to become unlocked. However, as an rwsem, it cannot be held
+ * across a system call return. So cases that have longer term needs must
+ * switch to the weaker users refcount_t.
+ *
+ * With users protection iommufd_object_destroy_user() will return false,
+ * refusing to destroy the object, causing -EBUSY to userspace.
+ */
+static inline void iommufd_ref_to_users(struct iommufd_object *obj)
+{
+ up_read(&obj->destroy_rwsem);
+ /* iommufd_lock_obj() obtains users as well */
+}
+void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj);
+void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj);
+void iommufd_object_finalize(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj);
+bool iommufd_object_destroy_user(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj);
+struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
+ size_t size,
+ enum iommufd_object_type type);
+
+#define iommufd_object_alloc(ictx, ptr, type) \
+ container_of(_iommufd_object_alloc( \
+ ictx, \
+ sizeof(*(ptr)) + BUILD_BUG_ON_ZERO( \
+ offsetof(typeof(*(ptr)), \
+ obj) != 0), \
+ type), \
+ typeof(*(ptr)), obj)
+
+/*
+ * The IO Address Space (IOAS) pagetable is a virtual page table backed by the
+ * io_pagetable object. It is a user controlled mapping of IOVA -> PFNs. The
+ * mapping is copied into all of the associated domains and made available to
+ * in-kernel users.
+ *
+ * Every iommu_domain that is created is wrapped in an iommufd_hw_pagetable
+ * object. When we go to attach a device to an IOAS we need to get an
+ * iommu_domain and its wrapping iommufd_hw_pagetable for it.
+ *
+ * An iommu_domain & iommufd_hw_pagetable will be automatically selected
+ * for a device based on the hwpt_list. If no suitable iommu_domain
+ * is found a new iommu_domain will be created.
+ */ +struct iommufd_ioas { + struct iommufd_object obj; + struct io_pagetable iopt; + struct mutex mutex; + struct list_head hwpt_list; +}; + +static inline struct iommufd_ioas *iommufd_get_ioas(struct iommufd_ucmd *ucmd, + u32 id) +{ + return container_of(iommufd_get_object(ucmd->ictx, id, + IOMMUFD_OBJ_IOAS), + struct iommufd_ioas, obj); +} + +struct iommufd_ioas *iommufd_ioas_alloc(struct iommufd_ctx *ictx); +int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd); +void iommufd_ioas_destroy(struct iommufd_object *obj); +int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd); +int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd); +int iommufd_ioas_map(struct iommufd_ucmd *ucmd); +int iommufd_ioas_copy(struct iommufd_ucmd *ucmd); +int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd); +int iommufd_ioas_option(struct iommufd_ucmd *ucmd); +int iommufd_option_rlimit_mode(struct iommu_option *cmd, + struct iommufd_ctx *ictx); + +int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd); + +/* + * A HW pagetable is called an iommu_domain inside the kernel. This user object + * allows directly creating and inspecting the domains. Domains that have kernel + * owned page tables will be associated with an iommufd_ioas that provides the + * IOVA to PFN map. + */ +struct iommufd_hw_pagetable { + struct iommufd_object obj; + struct iommufd_ioas *ioas; + struct iommu_domain *domain; + bool auto_domain : 1; + bool enforce_cache_coherency : 1; + bool msi_cookie : 1; + /* Head at iommufd_ioas::hwpt_list */ + struct list_head hwpt_item; + struct mutex devices_lock; + struct list_head devices; +}; + +struct iommufd_hw_pagetable * +iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, + struct device *dev); +void iommufd_hw_pagetable_destroy(struct iommufd_object *obj); + +void iommufd_device_destroy(struct iommufd_object *obj); + +struct iommufd_access { + struct iommufd_object obj; + struct iommufd_ctx *ictx; + struct iommufd_ioas *ioas; + const struct iommufd_access_ops *ops; + void *data; + unsigned long iova_alignment; + u32 iopt_access_list_id; +}; + +int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access); +void iopt_remove_access(struct io_pagetable *iopt, + struct iommufd_access *access); +void iommufd_access_destroy_object(struct iommufd_object *obj); + +#ifdef CONFIG_IOMMUFD_TEST +struct iommufd_hw_pagetable * +iommufd_device_selftest_attach(struct iommufd_ctx *ictx, + struct iommufd_ioas *ioas, + struct device *mock_dev); +void iommufd_device_selftest_detach(struct iommufd_ctx *ictx, + struct iommufd_hw_pagetable *hwpt); +int iommufd_test(struct iommufd_ucmd *ucmd); +void iommufd_selftest_destroy(struct iommufd_object *obj); +extern size_t iommufd_test_memory_limit; +void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd, + unsigned int ioas_id, u64 *iova, u32 *flags); +bool iommufd_should_fail(void); +void __init iommufd_test_init(void); +void iommufd_test_exit(void); +#else +static inline void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd, + unsigned int ioas_id, + u64 *iova, u32 *flags) +{ +} +static inline bool iommufd_should_fail(void) +{ + return false; +} +static inline void __init iommufd_test_init(void) +{ +} +static inline void iommufd_test_exit(void) +{ +} +#endif +#endif diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h new file mode 100644 index 0000000000000000000000000000000000000000..1d96a8f466fd2990667adc826ddb4bdf34534451 --- /dev/null +++ b/drivers/iommu/iommufd/iommufd_test.h @@ 
-0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. + */ +#ifndef _UAPI_IOMMUFD_TEST_H +#define _UAPI_IOMMUFD_TEST_H + +#include +#include + +enum { + IOMMU_TEST_OP_ADD_RESERVED = 1, + IOMMU_TEST_OP_MOCK_DOMAIN, + IOMMU_TEST_OP_MD_CHECK_MAP, + IOMMU_TEST_OP_MD_CHECK_REFS, + IOMMU_TEST_OP_CREATE_ACCESS, + IOMMU_TEST_OP_DESTROY_ACCESS_PAGES, + IOMMU_TEST_OP_ACCESS_PAGES, + IOMMU_TEST_OP_ACCESS_RW, + IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT, +}; + +enum { + MOCK_APERTURE_START = 1UL << 24, + MOCK_APERTURE_LAST = (1UL << 31) - 1, +}; + +enum { + MOCK_FLAGS_ACCESS_WRITE = 1 << 0, + MOCK_FLAGS_ACCESS_SYZ = 1 << 16, +}; + +enum { + MOCK_ACCESS_RW_WRITE = 1 << 0, + MOCK_ACCESS_RW_SLOW_PATH = 1 << 2, +}; + +enum { + MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES = 1 << 0, +}; + +struct iommu_test_cmd { + __u32 size; + __u32 op; + __u32 id; + __u32 __reserved; + union { + struct { + __aligned_u64 start; + __aligned_u64 length; + } add_reserved; + struct { + __u32 out_device_id; + __u32 out_hwpt_id; + } mock_domain; + struct { + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 uptr; + } check_map; + struct { + __aligned_u64 length; + __aligned_u64 uptr; + __u32 refs; + } check_refs; + struct { + __u32 out_access_fd; + __u32 flags; + } create_access; + struct { + __u32 access_pages_id; + } destroy_access_pages; + struct { + __u32 flags; + __u32 out_access_pages_id; + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 uptr; + } access_pages; + struct { + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 uptr; + __u32 flags; + } access_rw; + struct { + __u32 limit; + } memory_limit; + }; + __u32 last; +}; +#define IOMMU_TEST_CMD _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE + 32) + +#endif diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c new file mode 100644 index 0000000000000000000000000000000000000000..083e6fcbe10ad92b5214099da4e7e892734dd8aa --- /dev/null +++ b/drivers/iommu/iommufd/main.c @@ -0,0 +1,460 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2021 Intel Corporation + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES + * + * iommufd provides control over the IOMMU HW objects created by IOMMU kernel + * drivers. IOMMU HW objects revolve around IO page tables that map incoming DMA + * addresses (IOVA) to CPU addresses. + */ +#define pr_fmt(fmt) "iommufd: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "io_pagetable.h" +#include "iommufd_private.h" +#include "iommufd_test.h" + +struct iommufd_object_ops { + void (*destroy)(struct iommufd_object *obj); +}; +static const struct iommufd_object_ops iommufd_object_ops[]; +static struct miscdevice vfio_misc_dev; + +struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx, + size_t size, + enum iommufd_object_type type) +{ + struct iommufd_object *obj; + int rc; + + obj = kzalloc(size, GFP_KERNEL_ACCOUNT); + if (!obj) + return ERR_PTR(-ENOMEM); + obj->type = type; + init_rwsem(&obj->destroy_rwsem); + refcount_set(&obj->users, 1); + + /* + * Reserve an ID in the xarray but do not publish the pointer yet since + * the caller hasn't initialized it yet. Once the pointer is published + * in the xarray and visible to other threads we can't reliably destroy + * it anymore, so the caller must complete all errorable operations + * before calling iommufd_object_finalize(). 
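+	 *
+	 * The reserved XA_ZERO_ENTRY reads back as NULL through xa_load(), so
+	 * a concurrent iommufd_get_object() cannot see the object until
+	 * iommufd_object_finalize() stores the real pointer.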
+	 */
+	rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY,
+		      xa_limit_32b, GFP_KERNEL_ACCOUNT);
+	if (rc)
+		goto out_free;
+	return obj;
+out_free:
+	kfree(obj);
+	return ERR_PTR(rc);
+}
+
+/*
+ * Allow concurrent access to the object.
+ *
+ * Once another thread can see the object pointer it can prevent object
+ * destruction. Except for special kernel-only objects there is no in-kernel
+ * way to reliably destroy a single object. Thus all APIs that are creating
+ * objects must use iommufd_object_abort() to handle their errors and only call
+ * iommufd_object_finalize() once object creation cannot fail.
+ */
+void iommufd_object_finalize(struct iommufd_ctx *ictx,
+			     struct iommufd_object *obj)
+{
+	void *old;
+
+	old = xa_store(&ictx->objects, obj->id, obj, GFP_KERNEL);
+	/* obj->id was returned from xa_alloc() so the xa_store() cannot fail */
+	WARN_ON(old);
+}
+
+/* Undo _iommufd_object_alloc() if iommufd_object_finalize() was not called */
+void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
+{
+	void *old;
+
+	old = xa_erase(&ictx->objects, obj->id);
+	WARN_ON(old);
+	kfree(obj);
+}
+
+/*
+ * Abort an object that has been fully initialized and needs destroy, but has
+ * not been finalized.
+ */
+void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
+				      struct iommufd_object *obj)
+{
+	iommufd_object_ops[obj->type].destroy(obj);
+	iommufd_object_abort(ictx, obj);
+}
+
+struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
+					  enum iommufd_object_type type)
+{
+	struct iommufd_object *obj;
+
+	if (iommufd_should_fail())
+		return ERR_PTR(-ENOENT);
+
+	xa_lock(&ictx->objects);
+	obj = xa_load(&ictx->objects, id);
+	if (!obj || (type != IOMMUFD_OBJ_ANY && obj->type != type) ||
+	    !iommufd_lock_obj(obj))
+		obj = ERR_PTR(-ENOENT);
+	xa_unlock(&ictx->objects);
+	return obj;
+}
+
+/*
+ * The caller holds a users refcount and wants to destroy the object. Returns
+ * true if the object was destroyed. In all cases the caller no longer has a
+ * reference on obj.
+ */
+bool iommufd_object_destroy_user(struct iommufd_ctx *ictx,
+				 struct iommufd_object *obj)
+{
+	/*
+	 * The purpose of the destroy_rwsem is to ensure deterministic
+	 * destruction of objects used by external drivers and destroyed by
+	 * this function. Any temporary increment of the refcount must hold the
+	 * read side of this, such as during ioctl execution.
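+	 *
+	 * The pairing below is subtle: the first refcount_dec() drops the
+	 * caller's users reference, then refcount_dec_if_one() succeeds only
+	 * if the remaining reference is the base allocation reference, so the
+	 * destroy cannot race with any other holder.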
+ */ + down_write(&obj->destroy_rwsem); + xa_lock(&ictx->objects); + refcount_dec(&obj->users); + if (!refcount_dec_if_one(&obj->users)) { + xa_unlock(&ictx->objects); + up_write(&obj->destroy_rwsem); + return false; + } + __xa_erase(&ictx->objects, obj->id); + if (ictx->vfio_ioas && &ictx->vfio_ioas->obj == obj) + ictx->vfio_ioas = NULL; + xa_unlock(&ictx->objects); + up_write(&obj->destroy_rwsem); + + iommufd_object_ops[obj->type].destroy(obj); + kfree(obj); + return true; +} + +static int iommufd_destroy(struct iommufd_ucmd *ucmd) +{ + struct iommu_destroy *cmd = ucmd->cmd; + struct iommufd_object *obj; + + obj = iommufd_get_object(ucmd->ictx, cmd->id, IOMMUFD_OBJ_ANY); + if (IS_ERR(obj)) + return PTR_ERR(obj); + iommufd_ref_to_users(obj); + /* See iommufd_ref_to_users() */ + if (!iommufd_object_destroy_user(ucmd->ictx, obj)) + return -EBUSY; + return 0; +} + +static int iommufd_fops_open(struct inode *inode, struct file *filp) +{ + struct iommufd_ctx *ictx; + + ictx = kzalloc(sizeof(*ictx), GFP_KERNEL_ACCOUNT); + if (!ictx) + return -ENOMEM; + + /* + * For compatibility with VFIO when /dev/vfio/vfio is opened we default + * to the same rlimit accounting as vfio uses. + */ + if (IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER) && + filp->private_data == &vfio_misc_dev) { + ictx->account_mode = IOPT_PAGES_ACCOUNT_MM; + pr_info_once("IOMMUFD is providing /dev/vfio/vfio, not VFIO.\n"); + } + + xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT); + ictx->file = filp; + filp->private_data = ictx; + return 0; +} + +static int iommufd_fops_release(struct inode *inode, struct file *filp) +{ + struct iommufd_ctx *ictx = filp->private_data; + struct iommufd_object *obj; + + /* + * The objects in the xarray form a graph of "users" counts, and we have + * to destroy them in a depth first manner. Leaf objects will reduce the + * users count of interior objects when they are destroyed. + * + * Repeatedly destroying all the "1 users" leaf objects will progress + * until the entire list is destroyed. If this can't progress then there + * is some bug related to object refcounting. 
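+	 *
+	 * For example a device object holds a users reference on its
+	 * hw_pagetable, which in turn holds one on its ioas, so a first pass
+	 * destroys the device, a later pass the hw_pagetable, and a final
+	 * pass the ioas.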
+ */ + while (!xa_empty(&ictx->objects)) { + unsigned int destroyed = 0; + unsigned long index; + + xa_for_each(&ictx->objects, index, obj) { + if (!refcount_dec_if_one(&obj->users)) + continue; + destroyed++; + xa_erase(&ictx->objects, index); + iommufd_object_ops[obj->type].destroy(obj); + kfree(obj); + } + /* Bug related to users refcount */ + if (WARN_ON(!destroyed)) + break; + } + kfree(ictx); + return 0; +} + +static int iommufd_option(struct iommufd_ucmd *ucmd) +{ + struct iommu_option *cmd = ucmd->cmd; + int rc; + + if (cmd->__reserved) + return -EOPNOTSUPP; + + switch (cmd->option_id) { + case IOMMU_OPTION_RLIMIT_MODE: + rc = iommufd_option_rlimit_mode(cmd, ucmd->ictx); + break; + case IOMMU_OPTION_HUGE_PAGES: + rc = iommufd_ioas_option(ucmd); + break; + default: + return -EOPNOTSUPP; + } + if (rc) + return rc; + if (copy_to_user(&((struct iommu_option __user *)ucmd->ubuffer)->val64, + &cmd->val64, sizeof(cmd->val64))) + return -EFAULT; + return 0; +} + +union ucmd_buffer { + struct iommu_destroy destroy; + struct iommu_ioas_alloc alloc; + struct iommu_ioas_allow_iovas allow_iovas; + struct iommu_ioas_iova_ranges iova_ranges; + struct iommu_ioas_map map; + struct iommu_ioas_unmap unmap; +#ifdef CONFIG_IOMMUFD_TEST + struct iommu_test_cmd test; +#endif +}; + +struct iommufd_ioctl_op { + unsigned int size; + unsigned int min_size; + unsigned int ioctl_num; + int (*execute)(struct iommufd_ucmd *ucmd); +}; + +#define IOCTL_OP(_ioctl, _fn, _struct, _last) \ + [_IOC_NR(_ioctl) - IOMMUFD_CMD_BASE] = { \ + .size = sizeof(_struct) + \ + BUILD_BUG_ON_ZERO(sizeof(union ucmd_buffer) < \ + sizeof(_struct)), \ + .min_size = offsetofend(_struct, _last), \ + .ioctl_num = _ioctl, \ + .execute = _fn, \ + } +static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = { + IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id), + IOCTL_OP(IOMMU_IOAS_ALLOC, iommufd_ioas_alloc_ioctl, + struct iommu_ioas_alloc, out_ioas_id), + IOCTL_OP(IOMMU_IOAS_ALLOW_IOVAS, iommufd_ioas_allow_iovas, + struct iommu_ioas_allow_iovas, allowed_iovas), + IOCTL_OP(IOMMU_IOAS_COPY, iommufd_ioas_copy, struct iommu_ioas_copy, + src_iova), + IOCTL_OP(IOMMU_IOAS_IOVA_RANGES, iommufd_ioas_iova_ranges, + struct iommu_ioas_iova_ranges, out_iova_alignment), + IOCTL_OP(IOMMU_IOAS_MAP, iommufd_ioas_map, struct iommu_ioas_map, + iova), + IOCTL_OP(IOMMU_IOAS_UNMAP, iommufd_ioas_unmap, struct iommu_ioas_unmap, + length), + IOCTL_OP(IOMMU_OPTION, iommufd_option, struct iommu_option, + val64), + IOCTL_OP(IOMMU_VFIO_IOAS, iommufd_vfio_ioas, struct iommu_vfio_ioas, + __reserved), +#ifdef CONFIG_IOMMUFD_TEST + IOCTL_OP(IOMMU_TEST_CMD, iommufd_test, struct iommu_test_cmd, last), +#endif +}; + +static long iommufd_fops_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct iommufd_ctx *ictx = filp->private_data; + const struct iommufd_ioctl_op *op; + struct iommufd_ucmd ucmd = {}; + union ucmd_buffer buf; + unsigned int nr; + int ret; + + nr = _IOC_NR(cmd); + if (nr < IOMMUFD_CMD_BASE || + (nr - IOMMUFD_CMD_BASE) >= ARRAY_SIZE(iommufd_ioctl_ops)) + return iommufd_vfio_ioctl(ictx, cmd, arg); + + ucmd.ictx = ictx; + ucmd.ubuffer = (void __user *)arg; + ret = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer); + if (ret) + return ret; + + op = &iommufd_ioctl_ops[nr - IOMMUFD_CMD_BASE]; + if (op->ioctl_num != cmd) + return -ENOIOCTLCMD; + if (ucmd.user_size < op->min_size) + return -EINVAL; + + ucmd.cmd = &buf; + ret = copy_struct_from_user(ucmd.cmd, op->size, ucmd.ubuffer, + ucmd.user_size); + if (ret) + return 
ret; + ret = op->execute(&ucmd); + return ret; +} + +static const struct file_operations iommufd_fops = { + .owner = THIS_MODULE, + .open = iommufd_fops_open, + .release = iommufd_fops_release, + .unlocked_ioctl = iommufd_fops_ioctl, +}; + +/** + * iommufd_ctx_get - Get a context reference + * @ictx: Context to get + * + * The caller must already hold a valid reference to ictx. + */ +void iommufd_ctx_get(struct iommufd_ctx *ictx) +{ + get_file(ictx->file); +} +EXPORT_SYMBOL_NS_GPL(iommufd_ctx_get, IOMMUFD); + +/** + * iommufd_ctx_from_file - Acquires a reference to the iommufd context + * @file: File to obtain the reference from + * + * Returns a pointer to the iommufd_ctx, otherwise ERR_PTR. The struct file + * remains owned by the caller and the caller must still do fput. On success + * the caller is responsible to call iommufd_ctx_put(). + */ +struct iommufd_ctx *iommufd_ctx_from_file(struct file *file) +{ + struct iommufd_ctx *ictx; + + if (file->f_op != &iommufd_fops) + return ERR_PTR(-EBADFD); + ictx = file->private_data; + iommufd_ctx_get(ictx); + return ictx; +} +EXPORT_SYMBOL_NS_GPL(iommufd_ctx_from_file, IOMMUFD); + +/** + * iommufd_ctx_put - Put back a reference + * @ictx: Context to put back + */ +void iommufd_ctx_put(struct iommufd_ctx *ictx) +{ + fput(ictx->file); +} +EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, IOMMUFD); + +static const struct iommufd_object_ops iommufd_object_ops[] = { + [IOMMUFD_OBJ_ACCESS] = { + .destroy = iommufd_access_destroy_object, + }, + [IOMMUFD_OBJ_DEVICE] = { + .destroy = iommufd_device_destroy, + }, + [IOMMUFD_OBJ_IOAS] = { + .destroy = iommufd_ioas_destroy, + }, + [IOMMUFD_OBJ_HW_PAGETABLE] = { + .destroy = iommufd_hw_pagetable_destroy, + }, +#ifdef CONFIG_IOMMUFD_TEST + [IOMMUFD_OBJ_SELFTEST] = { + .destroy = iommufd_selftest_destroy, + }, +#endif +}; + +static struct miscdevice iommu_misc_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "iommu", + .fops = &iommufd_fops, + .nodename = "iommu", + .mode = 0660, +}; + + +static struct miscdevice vfio_misc_dev = { + .minor = VFIO_MINOR, + .name = "vfio", + .fops = &iommufd_fops, + .nodename = "vfio/vfio", + .mode = 0666, +}; + +static int __init iommufd_init(void) +{ + int ret; + + ret = misc_register(&iommu_misc_dev); + if (ret) + return ret; + + if (IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER)) { + ret = misc_register(&vfio_misc_dev); + if (ret) + goto err_misc; + } + iommufd_test_init(); + return 0; +err_misc: + misc_deregister(&iommu_misc_dev); + return ret; +} + +static void __exit iommufd_exit(void) +{ + iommufd_test_exit(); + if (IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER)) + misc_deregister(&vfio_misc_dev); + misc_deregister(&iommu_misc_dev); +} + +module_init(iommufd_init); +module_exit(iommufd_exit); + +#if IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER) +MODULE_ALIAS_MISCDEV(VFIO_MINOR); +MODULE_ALIAS("devname:vfio/vfio"); +#endif +MODULE_DESCRIPTION("I/O Address Space Management for passthrough devices"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c new file mode 100644 index 0000000000000000000000000000000000000000..1e1d3509efae5e0068a34005f06752555b1a7bbc --- /dev/null +++ b/drivers/iommu/iommufd/pages.c @@ -0,0 +1,1977 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. + * + * The iopt_pages is the center of the storage and motion of PFNs. Each + * iopt_pages represents a logical linear array of full PFNs. The array is 0 + * based and has npages in it. 
Accessors use 'index' to refer to the entry in
+ * this logical array, regardless of its storage location.
+ *
+ * PFNs are stored in a tiered scheme:
+ * 1) iopt_pages::pinned_pfns xarray
+ * 2) An iommu_domain
+ * 3) The origin of the PFNs, i.e. the userspace pointer
+ *
+ * PFNs have to be copied between all combinations of tiers, depending on the
+ * configuration.
+ *
+ * When a PFN is taken out of the userspace pointer it is pinned exactly once.
+ * The storage locations of the PFN's index are tracked in the two interval
+ * trees. If no interval includes the index then it is not pinned.
+ *
+ * If access_itree includes the PFN's index then an in-kernel access has
+ * requested the page. The PFN is stored in the xarray so other requestors can
+ * continue to find it.
+ *
+ * If the domains_itree includes the PFN's index then an iommu_domain is
+ * storing the PFN and it can be read back using iommu_iova_to_phys(). To avoid
+ * duplicating storage the xarray is not used if only iommu_domains are using
+ * the PFN's index.
+ *
+ * As a general principle this is designed so that destroy never fails. This
+ * means removing an iommu_domain or releasing an in-kernel access will not
+ * fail due to insufficient memory. In practice this means some cases have to
+ * hold PFNs in the xarray even though they are also being stored in an
+ * iommu_domain.
+ *
+ * While the iopt_pages can use an iommu_domain as storage, it does not have an
+ * IOVA itself. Instead the iopt_area represents a range of IOVA and uses the
+ * iopt_pages as the PFN provider. Multiple iopt_areas can share the iopt_pages
+ * and reference their own slice of the PFN array, with sub page granularity.
+ *
+ * In this file the term 'last' indicates an inclusive and closed interval, eg
+ * [0,0] refers to a single PFN. 'end' means an open range, eg [0,0) refers to
+ * no PFNs.
+ *
+ * Be cautious of overflow. An IOVA can go all the way up to U64_MAX, so
+ * last_iova + 1 can overflow. An iopt_pages index will always be much less
+ * than ULONG_MAX so last_index + 1 cannot overflow.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "io_pagetable.h"
+#include "double_span.h"
+
+#ifndef CONFIG_IOMMUFD_TEST
+#define TEMP_MEMORY_LIMIT 65536
+#else
+#define TEMP_MEMORY_LIMIT iommufd_test_memory_limit
+#endif
+#define BATCH_BACKUP_SIZE 32
+
+/*
+ * More memory makes pin_user_pages() and the batching more efficient, but as
+ * this is only a performance optimization don't try too hard to get it. A 64k
+ * allocation can hold about 26M of 4k pages and 13G of 2M pages in a
+ * pfn_batch. Various destroy paths cannot fail and provide a small amount of
+ * stack memory as a backup contingency. If backup_len is given this cannot
+ * fail.
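+ *
+ * A sketch of the backup usage pattern from the destroy paths below:
+ *
+ *	u64 backup[BATCH_BACKUP_SIZE];
+ *	struct pfn_batch batch;
+ *
+ *	batch_init_backup(&batch, last_index + 1, backup, sizeof(backup));
+ *	... use the batch ...
+ *	batch_destroy(&batch, backup);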
+ */ +static void *temp_kmalloc(size_t *size, void *backup, size_t backup_len) +{ + void *res; + + if (WARN_ON(*size == 0)) + return NULL; + + if (*size < backup_len) + return backup; + + if (!backup && iommufd_should_fail()) + return NULL; + + *size = min_t(size_t, *size, TEMP_MEMORY_LIMIT); + res = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + if (res) + return res; + *size = PAGE_SIZE; + if (backup_len) { + res = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + if (res) + return res; + *size = backup_len; + return backup; + } + return kmalloc(*size, GFP_KERNEL); +} + +void interval_tree_double_span_iter_update( + struct interval_tree_double_span_iter *iter) +{ + unsigned long last_hole = ULONG_MAX; + unsigned int i; + + for (i = 0; i != ARRAY_SIZE(iter->spans); i++) { + if (interval_tree_span_iter_done(&iter->spans[i])) { + iter->is_used = -1; + return; + } + + if (iter->spans[i].is_hole) { + last_hole = min(last_hole, iter->spans[i].last_hole); + continue; + } + + iter->is_used = i + 1; + iter->start_used = iter->spans[i].start_used; + iter->last_used = min(iter->spans[i].last_used, last_hole); + return; + } + + iter->is_used = 0; + iter->start_hole = iter->spans[0].start_hole; + iter->last_hole = + min(iter->spans[0].last_hole, iter->spans[1].last_hole); +} + +void interval_tree_double_span_iter_first( + struct interval_tree_double_span_iter *iter, + struct rb_root_cached *itree1, struct rb_root_cached *itree2, + unsigned long first_index, unsigned long last_index) +{ + unsigned int i; + + iter->itrees[0] = itree1; + iter->itrees[1] = itree2; + for (i = 0; i != ARRAY_SIZE(iter->spans); i++) + interval_tree_span_iter_first(&iter->spans[i], iter->itrees[i], + first_index, last_index); + interval_tree_double_span_iter_update(iter); +} + +void interval_tree_double_span_iter_next( + struct interval_tree_double_span_iter *iter) +{ + unsigned int i; + + if (iter->is_used == -1 || + iter->last_hole == iter->spans[0].last_index) { + iter->is_used = -1; + return; + } + + for (i = 0; i != ARRAY_SIZE(iter->spans); i++) + interval_tree_span_iter_advance( + &iter->spans[i], iter->itrees[i], iter->last_hole + 1); + interval_tree_double_span_iter_update(iter); +} + +static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages) +{ + int rc; + + rc = check_add_overflow(pages->npinned, npages, &pages->npinned); + if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) + WARN_ON(rc || pages->npinned > pages->npages); +} + +static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages) +{ + int rc; + + rc = check_sub_overflow(pages->npinned, npages, &pages->npinned); + if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) + WARN_ON(rc || pages->npinned > pages->npages); +} + +static void iopt_pages_err_unpin(struct iopt_pages *pages, + unsigned long start_index, + unsigned long last_index, + struct page **page_list) +{ + unsigned long npages = last_index - start_index + 1; + + unpin_user_pages(page_list, npages); + iopt_pages_sub_npinned(pages, npages); +} + +/* + * index is the number of PAGE_SIZE units from the start of the area's + * iopt_pages. If the iova is sub page-size then the area has an iova that + * covers a portion of the first and last pages in the range. 
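+ *
+ * Worked example: with a 4k PAGE_SIZE, an area iova of 0x1000000 and a
+ * page_offset of 0x100, relative index 0 maps to iova 0x1000000 (covering the
+ * remaining 0xf00 bytes of the first page) and relative index 1 maps to
+ * 0x1000f00, i.e. iova - page_offset + 1 * PAGE_SIZE.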
+ */
+static unsigned long iopt_area_index_to_iova(struct iopt_area *area,
+					     unsigned long index)
+{
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+		WARN_ON(index < iopt_area_index(area) ||
+			index > iopt_area_last_index(area));
+	index -= iopt_area_index(area);
+	if (index == 0)
+		return iopt_area_iova(area);
+	return iopt_area_iova(area) - area->page_offset + index * PAGE_SIZE;
+}
+
+static unsigned long iopt_area_index_to_iova_last(struct iopt_area *area,
+						  unsigned long index)
+{
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+		WARN_ON(index < iopt_area_index(area) ||
+			index > iopt_area_last_index(area));
+	if (index == iopt_area_last_index(area))
+		return iopt_area_last_iova(area);
+	return iopt_area_iova(area) - area->page_offset +
+	       (index - iopt_area_index(area) + 1) * PAGE_SIZE - 1;
+}
+
+static void iommu_unmap_nofail(struct iommu_domain *domain, unsigned long iova,
+			       size_t size)
+{
+	size_t ret;
+
+	ret = iommu_unmap(domain, iova, size);
+	/*
+	 * It is a logic error in this code or a driver bug if the IOMMU unmaps
+	 * something other than exactly as requested. This implies that the
+	 * iommu driver may not fail unmap for reasons beyond bad arguments.
+	 * Particularly, the iommu driver may not do a memory allocation on the
+	 * unmap path.
+	 */
+	WARN_ON(ret != size);
+}
+
+static void iopt_area_unmap_domain_range(struct iopt_area *area,
+					 struct iommu_domain *domain,
+					 unsigned long start_index,
+					 unsigned long last_index)
+{
+	unsigned long start_iova = iopt_area_index_to_iova(area, start_index);
+
+	iommu_unmap_nofail(domain, start_iova,
+			   iopt_area_index_to_iova_last(area, last_index) -
+				   start_iova + 1);
+}
+
+static struct iopt_area *iopt_pages_find_domain_area(struct iopt_pages *pages,
+						     unsigned long index)
+{
+	struct interval_tree_node *node;
+
+	node = interval_tree_iter_first(&pages->domains_itree, index, index);
+	if (!node)
+		return NULL;
+	return container_of(node, struct iopt_area, pages_node);
+}
+
+/*
+ * A simple data structure to hold a vector of PFNs, optimized for contiguous
+ * PFNs. This is used as a temporary holding memory for shuttling pfns from one
+ * place to another. Generally everything is made more efficient if operations
+ * work on the largest possible grouping of pfns.
eg fewer lock/unlock cycles, + * better cache locality, etc + */ +struct pfn_batch { + unsigned long *pfns; + u32 *npfns; + unsigned int array_size; + unsigned int end; + unsigned int total_pfns; +}; + +static void batch_clear(struct pfn_batch *batch) +{ + batch->total_pfns = 0; + batch->end = 0; + batch->pfns[0] = 0; + batch->npfns[0] = 0; +} + +/* + * Carry means we carry a portion of the final hugepage over to the front of the + * batch + */ +static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns) +{ + if (!keep_pfns) + return batch_clear(batch); + + if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) + WARN_ON(!batch->end || + batch->npfns[batch->end - 1] < keep_pfns); + + batch->total_pfns = keep_pfns; + batch->npfns[0] = keep_pfns; + batch->pfns[0] = batch->pfns[batch->end - 1] + + (batch->npfns[batch->end - 1] - keep_pfns); + batch->end = 0; +} + +static void batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns) +{ + if (!batch->total_pfns) + return; + if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) + WARN_ON(batch->total_pfns != batch->npfns[0]); + skip_pfns = min(batch->total_pfns, skip_pfns); + batch->pfns[0] += skip_pfns; + batch->npfns[0] -= skip_pfns; + batch->total_pfns -= skip_pfns; +} + +static int __batch_init(struct pfn_batch *batch, size_t max_pages, void *backup, + size_t backup_len) +{ + const size_t elmsz = sizeof(*batch->pfns) + sizeof(*batch->npfns); + size_t size = max_pages * elmsz; + + batch->pfns = temp_kmalloc(&size, backup, backup_len); + if (!batch->pfns) + return -ENOMEM; + if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(size < elmsz)) + return -EINVAL; + batch->array_size = size / elmsz; + batch->npfns = (u32 *)(batch->pfns + batch->array_size); + batch_clear(batch); + return 0; +} + +static int batch_init(struct pfn_batch *batch, size_t max_pages) +{ + return __batch_init(batch, max_pages, NULL, 0); +} + +static void batch_init_backup(struct pfn_batch *batch, size_t max_pages, + void *backup, size_t backup_len) +{ + __batch_init(batch, max_pages, backup, backup_len); +} + +static void batch_destroy(struct pfn_batch *batch, void *backup) +{ + if (batch->pfns != backup) + kfree(batch->pfns); +} + +/* true if the pfn was added, false otherwise */ +static bool batch_add_pfn(struct pfn_batch *batch, unsigned long pfn) +{ + const unsigned int MAX_NPFNS = type_max(typeof(*batch->npfns)); + + if (batch->end && + pfn == batch->pfns[batch->end - 1] + batch->npfns[batch->end - 1] && + batch->npfns[batch->end - 1] != MAX_NPFNS) { + batch->npfns[batch->end - 1]++; + batch->total_pfns++; + return true; + } + if (batch->end == batch->array_size) + return false; + batch->total_pfns++; + batch->pfns[batch->end] = pfn; + batch->npfns[batch->end] = 1; + batch->end++; + return true; +} + +/* + * Fill the batch with pfns from the domain. When the batch is full, or it + * reaches last_index, the function will return. The caller should use + * batch->total_pfns to determine the starting point for the next iteration. + */ +static void batch_from_domain(struct pfn_batch *batch, + struct iommu_domain *domain, + struct iopt_area *area, unsigned long start_index, + unsigned long last_index) +{ + unsigned int page_offset = 0; + unsigned long iova; + phys_addr_t phys; + + iova = iopt_area_index_to_iova(area, start_index); + if (start_index == iopt_area_index(area)) + page_offset = area->page_offset; + while (start_index <= last_index) { + /* + * This is pretty slow, it would be nice to get the page size + * back from the driver, or have the driver directly fill the + * batch. 
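+		 *
+		 * The cost is softened by batch coalescing: eg a 2M hugepage
+		 * walked here in 512 PAGE_SIZE steps still collapses into a
+		 * single {pfn, npfns = 512} entry in the batch.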
+ */ + phys = iommu_iova_to_phys(domain, iova) - page_offset; + if (!batch_add_pfn(batch, PHYS_PFN(phys))) + return; + iova += PAGE_SIZE - page_offset; + page_offset = 0; + start_index++; + } +} + +static struct page **raw_pages_from_domain(struct iommu_domain *domain, + struct iopt_area *area, + unsigned long start_index, + unsigned long last_index, + struct page **out_pages) +{ + unsigned int page_offset = 0; + unsigned long iova; + phys_addr_t phys; + + iova = iopt_area_index_to_iova(area, start_index); + if (start_index == iopt_area_index(area)) + page_offset = area->page_offset; + while (start_index <= last_index) { + phys = iommu_iova_to_phys(domain, iova) - page_offset; + *(out_pages++) = pfn_to_page(PHYS_PFN(phys)); + iova += PAGE_SIZE - page_offset; + page_offset = 0; + start_index++; + } + return out_pages; +} + +/* Continues reading a domain until we reach a discontinuity in the pfns. */ +static void batch_from_domain_continue(struct pfn_batch *batch, + struct iommu_domain *domain, + struct iopt_area *area, + unsigned long start_index, + unsigned long last_index) +{ + unsigned int array_size = batch->array_size; + + batch->array_size = batch->end; + batch_from_domain(batch, domain, area, start_index, last_index); + batch->array_size = array_size; +} + +/* + * This is part of the VFIO compatibility support for VFIO_TYPE1_IOMMU. That + * mode permits splitting a mapped area up, and then one of the splits is + * unmapped. Doing this normally would cause us to violate our invariant of + * pairing map/unmap. Thus, to support old VFIO compatibility disable support + * for batching consecutive PFNs. All PFNs mapped into the iommu are done in + * PAGE_SIZE units, not larger or smaller. + */ +static int batch_iommu_map_small(struct iommu_domain *domain, + unsigned long iova, phys_addr_t paddr, + size_t size, int prot) +{ + unsigned long start_iova = iova; + int rc; + + if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) + WARN_ON(paddr % PAGE_SIZE || iova % PAGE_SIZE || + size % PAGE_SIZE); + + while (size) { + rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot); + if (rc) + goto err_unmap; + iova += PAGE_SIZE; + paddr += PAGE_SIZE; + size -= PAGE_SIZE; + } + return 0; + +err_unmap: + if (start_iova != iova) + iommu_unmap_nofail(domain, start_iova, iova - start_iova); + return rc; +} + +static int batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain, + struct iopt_area *area, unsigned long start_index) +{ + bool disable_large_pages = area->iopt->disable_large_pages; + unsigned long last_iova = iopt_area_last_iova(area); + unsigned int page_offset = 0; + unsigned long start_iova; + unsigned long next_iova; + unsigned int cur = 0; + unsigned long iova; + int rc; + + /* The first index might be a partial page */ + if (start_index == iopt_area_index(area)) + page_offset = area->page_offset; + next_iova = iova = start_iova = + iopt_area_index_to_iova(area, start_index); + while (cur < batch->end) { + next_iova = min(last_iova + 1, + next_iova + batch->npfns[cur] * PAGE_SIZE - + page_offset); + if (disable_large_pages) + rc = batch_iommu_map_small( + domain, iova, + PFN_PHYS(batch->pfns[cur]) + page_offset, + next_iova - iova, area->iommu_prot); + else + rc = iommu_map(domain, iova, + PFN_PHYS(batch->pfns[cur]) + page_offset, + next_iova - iova, area->iommu_prot); + if (rc) + goto err_unmap; + iova = next_iova; + page_offset = 0; + cur++; + } + return 0; +err_unmap: + if (start_iova != iova) + iommu_unmap_nofail(domain, start_iova, iova - start_iova); + return rc; +} + +static void 
batch_from_xarray(struct pfn_batch *batch, struct xarray *xa, + unsigned long start_index, + unsigned long last_index) +{ + XA_STATE(xas, xa, start_index); + void *entry; + + rcu_read_lock(); + while (true) { + entry = xas_next(&xas); + if (xas_retry(&xas, entry)) + continue; + WARN_ON(!xa_is_value(entry)); + if (!batch_add_pfn(batch, xa_to_value(entry)) || + start_index == last_index) + break; + start_index++; + } + rcu_read_unlock(); +} + +static void batch_from_xarray_clear(struct pfn_batch *batch, struct xarray *xa, + unsigned long start_index, + unsigned long last_index) +{ + XA_STATE(xas, xa, start_index); + void *entry; + + xas_lock(&xas); + while (true) { + entry = xas_next(&xas); + if (xas_retry(&xas, entry)) + continue; + WARN_ON(!xa_is_value(entry)); + if (!batch_add_pfn(batch, xa_to_value(entry))) + break; + xas_store(&xas, NULL); + if (start_index == last_index) + break; + start_index++; + } + xas_unlock(&xas); +} + +static void clear_xarray(struct xarray *xa, unsigned long start_index, + unsigned long last_index) +{ + XA_STATE(xas, xa, start_index); + void *entry; + + xas_lock(&xas); + xas_for_each(&xas, entry, last_index) + xas_store(&xas, NULL); + xas_unlock(&xas); +} + +static int pages_to_xarray(struct xarray *xa, unsigned long start_index, + unsigned long last_index, struct page **pages) +{ + struct page **end_pages = pages + (last_index - start_index) + 1; + struct page **half_pages = pages + (end_pages - pages) / 2; + XA_STATE(xas, xa, start_index); + + do { + void *old; + + xas_lock(&xas); + while (pages != end_pages) { + /* xarray does not participate in fault injection */ + if (pages == half_pages && iommufd_should_fail()) { + xas_set_err(&xas, -EINVAL); + xas_unlock(&xas); + /* aka xas_destroy() */ + xas_nomem(&xas, GFP_KERNEL); + goto err_clear; + } + + old = xas_store(&xas, xa_mk_value(page_to_pfn(*pages))); + if (xas_error(&xas)) + break; + WARN_ON(old); + pages++; + xas_next(&xas); + } + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + +err_clear: + if (xas_error(&xas)) { + if (xas.xa_index != start_index) + clear_xarray(xa, start_index, xas.xa_index - 1); + return xas_error(&xas); + } + return 0; +} + +static void batch_from_pages(struct pfn_batch *batch, struct page **pages, + size_t npages) +{ + struct page **end = pages + npages; + + for (; pages != end; pages++) + if (!batch_add_pfn(batch, page_to_pfn(*pages))) + break; +} + +static void batch_unpin(struct pfn_batch *batch, struct iopt_pages *pages, + unsigned int first_page_off, size_t npages) +{ + unsigned int cur = 0; + + while (first_page_off) { + if (batch->npfns[cur] > first_page_off) + break; + first_page_off -= batch->npfns[cur]; + cur++; + } + + while (npages) { + size_t to_unpin = min_t(size_t, npages, + batch->npfns[cur] - first_page_off); + + unpin_user_page_range_dirty_lock( + pfn_to_page(batch->pfns[cur] + first_page_off), + to_unpin, pages->writable); + iopt_pages_sub_npinned(pages, to_unpin); + cur++; + first_page_off = 0; + npages -= to_unpin; + } +} + +static void copy_data_page(struct page *page, void *data, unsigned long offset, + size_t length, unsigned int flags) +{ + void *mem; + + mem = kmap_local_page(page); + if (flags & IOMMUFD_ACCESS_RW_WRITE) { + memcpy(mem + offset, data, length); + set_page_dirty_lock(page); + } else { + memcpy(data, mem + offset, length); + } + kunmap_local(mem); +} + +static unsigned long batch_rw(struct pfn_batch *batch, void *data, + unsigned long offset, unsigned long length, + unsigned int flags) +{ + unsigned long copied = 0; + unsigned int 
npage = 0; + unsigned int cur = 0; + + while (cur < batch->end) { + unsigned long bytes = min(length, PAGE_SIZE - offset); + + copy_data_page(pfn_to_page(batch->pfns[cur] + npage), data, + offset, bytes, flags); + offset = 0; + length -= bytes; + data += bytes; + copied += bytes; + npage++; + if (npage == batch->npfns[cur]) { + npage = 0; + cur++; + } + if (!length) + break; + } + return copied; +} + +/* pfn_reader_user is just the pin_user_pages() path */ +struct pfn_reader_user { + struct page **upages; + size_t upages_len; + unsigned long upages_start; + unsigned long upages_end; + unsigned int gup_flags; + /* + * 1 means mmget() and mmap_read_lock(), 0 means only mmget(), -1 is + * neither + */ + int locked; +}; + +static void pfn_reader_user_init(struct pfn_reader_user *user, + struct iopt_pages *pages) +{ + user->upages = NULL; + user->upages_start = 0; + user->upages_end = 0; + user->locked = -1; + + user->gup_flags = FOLL_LONGTERM; + if (pages->writable) + user->gup_flags |= FOLL_WRITE; +} + +static void pfn_reader_user_destroy(struct pfn_reader_user *user, + struct iopt_pages *pages) +{ + if (user->locked != -1) { + if (user->locked) + mmap_read_unlock(pages->source_mm); + if (pages->source_mm != current->mm) + mmput(pages->source_mm); + user->locked = -1; + } + + kfree(user->upages); + user->upages = NULL; +} + +static int pfn_reader_user_pin(struct pfn_reader_user *user, + struct iopt_pages *pages, + unsigned long start_index, + unsigned long last_index) +{ + bool remote_mm = pages->source_mm != current->mm; + unsigned long npages; + uintptr_t uptr; + long rc; + + if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && + WARN_ON(last_index < start_index)) + return -EINVAL; + + if (!user->upages) { + /* All undone in pfn_reader_destroy() */ + user->upages_len = + (last_index - start_index + 1) * sizeof(*user->upages); + user->upages = temp_kmalloc(&user->upages_len, NULL, 0); + if (!user->upages) + return -ENOMEM; + } + + if (user->locked == -1) { + /* + * The majority of usages will run the map task within the mm + * providing the pages, so we can optimize into + * get_user_pages_fast() + */ + if (remote_mm) { + if (!mmget_not_zero(pages->source_mm)) + return -EFAULT; + } + user->locked = 0; + } + + npages = min_t(unsigned long, last_index - start_index + 1, + user->upages_len / sizeof(*user->upages)); + + + if (iommufd_should_fail()) + return -EFAULT; + + uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE); + if (!remote_mm) + rc = pin_user_pages_fast(uptr, npages, user->gup_flags, + user->upages); + else { + if (!user->locked) { + mmap_read_lock(pages->source_mm); + user->locked = 1; + } + rc = pin_user_pages_remote(pages->source_mm, uptr, npages, + user->gup_flags, user->upages, NULL, + &user->locked); + } + if (rc <= 0) { + if (WARN_ON(!rc)) + return -EFAULT; + return rc; + } + iopt_pages_add_npinned(pages, rc); + user->upages_start = start_index; + user->upages_end = start_index + rc; + return 0; +} + +/* This is the "modern" and faster accounting method used by io_uring */ +static int incr_user_locked_vm(struct iopt_pages *pages, unsigned long npages) +{ + unsigned long lock_limit; + unsigned long cur_pages; + unsigned long new_pages; + + lock_limit = task_rlimit(pages->source_task, RLIMIT_MEMLOCK) >> + PAGE_SHIFT; + do { + cur_pages = atomic_long_read(&pages->source_user->locked_vm); + new_pages = cur_pages + npages; + if (new_pages > lock_limit) + return -ENOMEM; + } while (atomic_long_cmpxchg(&pages->source_user->locked_vm, cur_pages, + new_pages) != cur_pages); + return 0; +} + 
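+/*
+ * Note the increment side above is lock-free: each contender re-reads
+ * locked_vm and re-applies the rlimit check until its cmpxchg lands, so the
+ * check is always made against a current value and the limit is never
+ * over-committed.
+ */
+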
+static void decr_user_locked_vm(struct iopt_pages *pages, unsigned long npages)
+{
+	if (WARN_ON(atomic_long_read(&pages->source_user->locked_vm) < npages))
+		return;
+	atomic_long_sub(npages, &pages->source_user->locked_vm);
+}
+
+/* This is the accounting method used for compatibility with VFIO */
+static int update_mm_locked_vm(struct iopt_pages *pages, unsigned long npages,
+			       bool inc, struct pfn_reader_user *user)
+{
+	bool do_put = false;
+	int rc;
+
+	if (user && user->locked) {
+		mmap_read_unlock(pages->source_mm);
+		user->locked = 0;
+		/* If we had the lock then we also have a get */
+	} else if ((!user || !user->upages) &&
+		   pages->source_mm != current->mm) {
+		if (!mmget_not_zero(pages->source_mm))
+			return -EINVAL;
+		do_put = true;
+	}
+
+	mmap_write_lock(pages->source_mm);
+	rc = __account_locked_vm(pages->source_mm, npages, inc,
+				 pages->source_task, false);
+	mmap_write_unlock(pages->source_mm);
+
+	if (do_put)
+		mmput(pages->source_mm);
+	return rc;
+}
+
+static int do_update_pinned(struct iopt_pages *pages, unsigned long npages,
+			    bool inc, struct pfn_reader_user *user)
+{
+	int rc = 0;
+
+	switch (pages->account_mode) {
+	case IOPT_PAGES_ACCOUNT_NONE:
+		break;
+	case IOPT_PAGES_ACCOUNT_USER:
+		if (inc)
+			rc = incr_user_locked_vm(pages, npages);
+		else
+			decr_user_locked_vm(pages, npages);
+		break;
+	case IOPT_PAGES_ACCOUNT_MM:
+		rc = update_mm_locked_vm(pages, npages, inc, user);
+		break;
+	}
+	if (rc)
+		return rc;
+
+	pages->last_npinned = pages->npinned;
+	if (inc)
+		atomic64_add(npages, &pages->source_mm->pinned_vm);
+	else
+		atomic64_sub(npages, &pages->source_mm->pinned_vm);
+	return 0;
+}
+
+static void update_unpinned(struct iopt_pages *pages)
+{
+	if (WARN_ON(pages->npinned > pages->last_npinned))
+		return;
+	if (pages->npinned == pages->last_npinned)
+		return;
+	do_update_pinned(pages, pages->last_npinned - pages->npinned, false,
+			 NULL);
+}
+
+/*
+ * Changes in the number of pages pinned are applied after the pages have been
+ * read and processed. If the user lacks sufficient rlimit headroom then the
+ * error unwind will unpin everything that was just pinned. This is because it
+ * is expensive to calculate how many pages we have already pinned within a
+ * range to generate an accurate prediction in advance of doing the work to
+ * actually pin them.
+ */
+static int pfn_reader_user_update_pinned(struct pfn_reader_user *user,
+					 struct iopt_pages *pages)
+{
+	unsigned long npages;
+	bool inc;
+
+	lockdep_assert_held(&pages->mutex);
+
+	if (pages->npinned == pages->last_npinned)
+		return 0;
+
+	if (pages->npinned < pages->last_npinned) {
+		npages = pages->last_npinned - pages->npinned;
+		inc = false;
+	} else {
+		if (iommufd_should_fail())
+			return -ENOMEM;
+		npages = pages->npinned - pages->last_npinned;
+		inc = true;
+	}
+	return do_update_pinned(pages, npages, inc, user);
+}
+
+/*
+ * PFNs are stored in three places, in order of preference:
+ * - The iopt_pages xarray. This is only populated if there is an
+ *   iopt_pages_access
+ * - The iommu_domain under an area
+ * - The original PFN source, ie pages->source_mm
+ *
+ * This iterator reads the pfns optimizing to load according to the
+ * above order.
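+ *
+ * For example, if an access pinned the first half of a range and a domain
+ * holds the second half, a single read resolves from the xarray, then the
+ * domain, and only faults in pages from source_mm for indexes stored in
+ * neither.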
+ */ +struct pfn_reader { + struct iopt_pages *pages; + struct interval_tree_double_span_iter span; + struct pfn_batch batch; + unsigned long batch_start_index; + unsigned long batch_end_index; + unsigned long last_index; + + struct pfn_reader_user user; +}; + +static int pfn_reader_update_pinned(struct pfn_reader *pfns) +{ + return pfn_reader_user_update_pinned(&pfns->user, pfns->pages); +} + +/* + * The batch can contain a mixture of pages that are still in use and pages that + * need to be unpinned. Unpin only pages that are not held anywhere else. + */ +static void pfn_reader_unpin(struct pfn_reader *pfns) +{ + unsigned long last = pfns->batch_end_index - 1; + unsigned long start = pfns->batch_start_index; + struct interval_tree_double_span_iter span; + struct iopt_pages *pages = pfns->pages; + + lockdep_assert_held(&pages->mutex); + + interval_tree_for_each_double_span(&span, &pages->access_itree, + &pages->domains_itree, start, last) { + if (span.is_used) + continue; + + batch_unpin(&pfns->batch, pages, span.start_hole - start, + span.last_hole - span.start_hole + 1); + } +} + +/* Process a single span to load it from the proper storage */ +static int pfn_reader_fill_span(struct pfn_reader *pfns) +{ + struct interval_tree_double_span_iter *span = &pfns->span; + unsigned long start_index = pfns->batch_end_index; + struct iopt_area *area; + int rc; + + if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && + WARN_ON(span->last_used < start_index)) + return -EINVAL; + + if (span->is_used == 1) { + batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns, + start_index, span->last_used); + return 0; + } + + if (span->is_used == 2) { + /* + * Pull as many pages from the first domain we find in the + * target span. If it is too small then we will be called again + * and we'll find another area. 
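+		 *
+		 * The is_used encoding comes from the double span iterator:
+		 * 1 means the index is held in the access xarray, 2 means a
+		 * domain holds it, and 0 (a hole) means it must be pinned
+		 * from userspace.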
+ */ + area = iopt_pages_find_domain_area(pfns->pages, start_index); + if (WARN_ON(!area)) + return -EINVAL; + + /* The storage_domain cannot change without the pages mutex */ + batch_from_domain( + &pfns->batch, area->storage_domain, area, start_index, + min(iopt_area_last_index(area), span->last_used)); + return 0; + } + + if (start_index >= pfns->user.upages_end) { + rc = pfn_reader_user_pin(&pfns->user, pfns->pages, start_index, + span->last_hole); + if (rc) + return rc; + } + + batch_from_pages(&pfns->batch, + pfns->user.upages + + (start_index - pfns->user.upages_start), + pfns->user.upages_end - start_index); + return 0; +} + +static bool pfn_reader_done(struct pfn_reader *pfns) +{ + return pfns->batch_start_index == pfns->last_index + 1; +} + +static int pfn_reader_next(struct pfn_reader *pfns) +{ + int rc; + + batch_clear(&pfns->batch); + pfns->batch_start_index = pfns->batch_end_index; + + while (pfns->batch_end_index != pfns->last_index + 1) { + unsigned int npfns = pfns->batch.total_pfns; + + if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && + WARN_ON(interval_tree_double_span_iter_done(&pfns->span))) + return -EINVAL; + + rc = pfn_reader_fill_span(pfns); + if (rc) + return rc; + + if (WARN_ON(!pfns->batch.total_pfns)) + return -EINVAL; + + pfns->batch_end_index = + pfns->batch_start_index + pfns->batch.total_pfns; + if (pfns->batch_end_index == pfns->span.last_used + 1) + interval_tree_double_span_iter_next(&pfns->span); + + /* Batch is full */ + if (npfns == pfns->batch.total_pfns) + return 0; + } + return 0; +} + +static int pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages, + unsigned long start_index, unsigned long last_index) +{ + int rc; + + lockdep_assert_held(&pages->mutex); + + pfns->pages = pages; + pfns->batch_start_index = start_index; + pfns->batch_end_index = start_index; + pfns->last_index = last_index; + pfn_reader_user_init(&pfns->user, pages); + rc = batch_init(&pfns->batch, last_index - start_index + 1); + if (rc) + return rc; + interval_tree_double_span_iter_first(&pfns->span, &pages->access_itree, + &pages->domains_itree, start_index, + last_index); + return 0; +} + +/* + * There are many assertions regarding the state of pages->npinned vs + * pages->last_pinned, for instance something like unmapping a domain must only + * decrement the npinned, and pfn_reader_destroy() must be called only after all + * the pins are updated. This is fine for success flows, but error flows + * sometimes need to release the pins held inside the pfn_reader before going on + * to complete unmapping and releasing pins held in domains. 
+ */ +static void pfn_reader_release_pins(struct pfn_reader *pfns) +{ + struct iopt_pages *pages = pfns->pages; + + if (pfns->user.upages_end > pfns->batch_end_index) { + size_t npages = pfns->user.upages_end - pfns->batch_end_index; + + /* Any pages not transferred to the batch are just unpinned */ + unpin_user_pages(pfns->user.upages + (pfns->batch_end_index - + pfns->user.upages_start), + npages); + iopt_pages_sub_npinned(pages, npages); + pfns->user.upages_end = pfns->batch_end_index; + } + if (pfns->batch_start_index != pfns->batch_end_index) { + pfn_reader_unpin(pfns); + pfns->batch_start_index = pfns->batch_end_index; + } +} + +static void pfn_reader_destroy(struct pfn_reader *pfns) +{ + struct iopt_pages *pages = pfns->pages; + + pfn_reader_release_pins(pfns); + pfn_reader_user_destroy(&pfns->user, pfns->pages); + batch_destroy(&pfns->batch, NULL); + WARN_ON(pages->last_npinned != pages->npinned); +} + +static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages, + unsigned long start_index, unsigned long last_index) +{ + int rc; + + if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && + WARN_ON(last_index < start_index)) + return -EINVAL; + + rc = pfn_reader_init(pfns, pages, start_index, last_index); + if (rc) + return rc; + rc = pfn_reader_next(pfns); + if (rc) { + pfn_reader_destroy(pfns); + return rc; + } + return 0; +} + +struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length, + bool writable) +{ + struct iopt_pages *pages; + + /* + * The iommu API uses size_t as the length, and protect the DIV_ROUND_UP + * below from overflow + */ + if (length > SIZE_MAX - PAGE_SIZE || length == 0) + return ERR_PTR(-EINVAL); + + pages = kzalloc(sizeof(*pages), GFP_KERNEL_ACCOUNT); + if (!pages) + return ERR_PTR(-ENOMEM); + + kref_init(&pages->kref); + xa_init_flags(&pages->pinned_pfns, XA_FLAGS_ACCOUNT); + mutex_init(&pages->mutex); + pages->source_mm = current->mm; + mmgrab(pages->source_mm); + pages->uptr = (void __user *)ALIGN_DOWN((uintptr_t)uptr, PAGE_SIZE); + pages->npages = DIV_ROUND_UP(length + (uptr - pages->uptr), PAGE_SIZE); + pages->access_itree = RB_ROOT_CACHED; + pages->domains_itree = RB_ROOT_CACHED; + pages->writable = writable; + if (capable(CAP_IPC_LOCK)) + pages->account_mode = IOPT_PAGES_ACCOUNT_NONE; + else + pages->account_mode = IOPT_PAGES_ACCOUNT_USER; + pages->source_task = current->group_leader; + get_task_struct(current->group_leader); + pages->source_user = get_uid(current_user()); + return pages; +} + +void iopt_release_pages(struct kref *kref) +{ + struct iopt_pages *pages = container_of(kref, struct iopt_pages, kref); + + WARN_ON(!RB_EMPTY_ROOT(&pages->access_itree.rb_root)); + WARN_ON(!RB_EMPTY_ROOT(&pages->domains_itree.rb_root)); + WARN_ON(pages->npinned); + WARN_ON(!xa_empty(&pages->pinned_pfns)); + mmdrop(pages->source_mm); + mutex_destroy(&pages->mutex); + put_task_struct(pages->source_task); + free_uid(pages->source_user); + kfree(pages); +} + +static void +iopt_area_unpin_domain(struct pfn_batch *batch, struct iopt_area *area, + struct iopt_pages *pages, struct iommu_domain *domain, + unsigned long start_index, unsigned long last_index, + unsigned long *unmapped_end_index, + unsigned long real_last_index) +{ + while (start_index <= last_index) { + unsigned long batch_last_index; + + if (*unmapped_end_index <= last_index) { + unsigned long start = + max(start_index, *unmapped_end_index); + + batch_from_domain(batch, domain, area, start, + last_index); + batch_last_index = start + batch->total_pfns - 1; + } else { + 
batch_last_index = last_index;
+		}
+
+		/*
+		 * unmaps must always 'cut' at a place where the pfns are not
+		 * contiguous to pair with the maps that always install
+		 * contiguous pages. Thus, if we have to stop unpinning in the
+		 * middle of the domains we need to keep reading pfns until we
+		 * find a cut point to do the unmap. The pfns we read are
+		 * carried over and either skipped or integrated into the next
+		 * batch.
+		 */
+		if (batch_last_index == last_index &&
+		    last_index != real_last_index)
+			batch_from_domain_continue(batch, domain, area,
+						   last_index + 1,
+						   real_last_index);
+
+		if (*unmapped_end_index <= batch_last_index) {
+			iopt_area_unmap_domain_range(
+				area, domain, *unmapped_end_index,
+				start_index + batch->total_pfns - 1);
+			*unmapped_end_index = start_index + batch->total_pfns;
+		}
+
+		/* unpin must follow unmap */
+		batch_unpin(batch, pages, 0,
+			    batch_last_index - start_index + 1);
+		start_index = batch_last_index + 1;
+
+		batch_clear_carry(batch,
+				  *unmapped_end_index - batch_last_index - 1);
+	}
+}
+
+static void __iopt_area_unfill_domain(struct iopt_area *area,
+				      struct iopt_pages *pages,
+				      struct iommu_domain *domain,
+				      unsigned long last_index)
+{
+	struct interval_tree_double_span_iter span;
+	unsigned long start_index = iopt_area_index(area);
+	unsigned long unmapped_end_index = start_index;
+	u64 backup[BATCH_BACKUP_SIZE];
+	struct pfn_batch batch;
+
+	lockdep_assert_held(&pages->mutex);
+
+	/*
+	 * For security we must not unpin something that is still DMA mapped,
+	 * so this must unmap any IOVA before we go ahead and unpin the pages.
+	 * This creates a complexity where we need to skip over unpinning pages
+	 * held in the xarray, but continue to unmap from the domain.
+	 *
+	 * The domain unmap cannot stop in the middle of a contiguous range of
+	 * PFNs. To solve this problem the unpinning step will read ahead to the
+	 * end of any contiguous span, unmap that whole span, and then only
+	 * unpin the leading part that does not have any accesses. The residual
+	 * PFNs that were unmapped but not unpinned are called a "carry" in the
+	 * batch as they are moved to the front of the PFN list and continue on
+	 * to the next iteration(s).
+	 */
+	batch_init_backup(&batch, last_index + 1, backup, sizeof(backup));
+	interval_tree_for_each_double_span(&span, &pages->domains_itree,
+					   &pages->access_itree, start_index,
+					   last_index) {
+		if (span.is_used) {
+			batch_skip_carry(&batch,
+					 span.last_used - span.start_used + 1);
+			continue;
+		}
+		iopt_area_unpin_domain(&batch, area, pages, domain,
+				       span.start_hole, span.last_hole,
+				       &unmapped_end_index, last_index);
+	}
+	/*
+	 * If the range ends in an access then we do the residual unmap without
+	 * any unpins.
+	 */
+	if (unmapped_end_index != last_index + 1)
+		iopt_area_unmap_domain_range(area, domain, unmapped_end_index,
+					     last_index);
+	WARN_ON(batch.total_pfns);
+	batch_destroy(&batch, backup);
+	update_unpinned(pages);
+}
+
+static void iopt_area_unfill_partial_domain(struct iopt_area *area,
+					    struct iopt_pages *pages,
+					    struct iommu_domain *domain,
+					    unsigned long end_index)
+{
+	if (end_index != iopt_area_index(area))
+		__iopt_area_unfill_domain(area, pages, domain, end_index - 1);
+}
+
+/**
+ * iopt_area_unmap_domain() - Unmap without unpinning PFNs in a domain
+ * @area: The IOVA range to unmap
+ * @domain: The domain to unmap
+ *
+ * The caller must know that unpinning is not required, usually because there
+ * are other domains in the iopt.
+ */ +void iopt_area_unmap_domain(struct iopt_area *area, struct iommu_domain *domain) +{ + iommu_unmap_nofail(domain, iopt_area_iova(area), + iopt_area_length(area)); +} + +/** + * iopt_area_unfill_domain() - Unmap and unpin PFNs in a domain + * @area: IOVA area to use + * @pages: page supplier for the area (area->pages is NULL) + * @domain: Domain to unmap from + * + * The domain should be removed from the domains_itree before calling. The + * domain will always be unmapped, but the PFNs may not be unpinned if there are + * still accesses. + */ +void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages, + struct iommu_domain *domain) +{ + __iopt_area_unfill_domain(area, pages, domain, + iopt_area_last_index(area)); +} + +/** + * iopt_area_fill_domain() - Map PFNs from the area into a domain + * @area: IOVA area to use + * @domain: Domain to load PFNs into + * + * Read the pfns from the area's underlying iopt_pages and map them into the + * given domain. Called when attaching a new domain to an io_pagetable. + */ +int iopt_area_fill_domain(struct iopt_area *area, struct iommu_domain *domain) +{ + unsigned long done_end_index; + struct pfn_reader pfns; + int rc; + + lockdep_assert_held(&area->pages->mutex); + + rc = pfn_reader_first(&pfns, area->pages, iopt_area_index(area), + iopt_area_last_index(area)); + if (rc) + return rc; + + while (!pfn_reader_done(&pfns)) { + done_end_index = pfns.batch_start_index; + rc = batch_to_domain(&pfns.batch, domain, area, + pfns.batch_start_index); + if (rc) + goto out_unmap; + done_end_index = pfns.batch_end_index; + + rc = pfn_reader_next(&pfns); + if (rc) + goto out_unmap; + } + + rc = pfn_reader_update_pinned(&pfns); + if (rc) + goto out_unmap; + goto out_destroy; + +out_unmap: + pfn_reader_release_pins(&pfns); + iopt_area_unfill_partial_domain(area, area->pages, domain, + done_end_index); +out_destroy: + pfn_reader_destroy(&pfns); + return rc; +} + +/** + * iopt_area_fill_domains() - Install PFNs into the area's domains + * @area: The area to act on + * @pages: The pages associated with the area (area->pages is NULL) + * + * Called during area creation. The area is freshly created and not inserted in + * the domains_itree yet. PFNs are read and loaded into every domain held in the + * area's io_pagetable and the area is installed in the domains_itree. + * + * On failure all domains are left unchanged. 
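+ *
+ * The unwind works with two high-water marks: domains filled before the
+ * failing one were mapped up to done_first_end_index, later ones only up to
+ * done_all_end_index, and each is unmapped back to its own mark.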
+ */ +int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages) +{ + unsigned long done_first_end_index; + unsigned long done_all_end_index; + struct iommu_domain *domain; + unsigned long unmap_index; + struct pfn_reader pfns; + unsigned long index; + int rc; + + lockdep_assert_held(&area->iopt->domains_rwsem); + + if (xa_empty(&area->iopt->domains)) + return 0; + + mutex_lock(&pages->mutex); + rc = pfn_reader_first(&pfns, pages, iopt_area_index(area), + iopt_area_last_index(area)); + if (rc) + goto out_unlock; + + while (!pfn_reader_done(&pfns)) { + done_first_end_index = pfns.batch_end_index; + done_all_end_index = pfns.batch_start_index; + xa_for_each(&area->iopt->domains, index, domain) { + rc = batch_to_domain(&pfns.batch, domain, area, + pfns.batch_start_index); + if (rc) + goto out_unmap; + } + done_all_end_index = done_first_end_index; + + rc = pfn_reader_next(&pfns); + if (rc) + goto out_unmap; + } + rc = pfn_reader_update_pinned(&pfns); + if (rc) + goto out_unmap; + + area->storage_domain = xa_load(&area->iopt->domains, 0); + interval_tree_insert(&area->pages_node, &pages->domains_itree); + goto out_destroy; + +out_unmap: + pfn_reader_release_pins(&pfns); + xa_for_each(&area->iopt->domains, unmap_index, domain) { + unsigned long end_index; + + if (unmap_index < index) + end_index = done_first_end_index; + else + end_index = done_all_end_index; + + /* + * The area is not yet part of the domains_itree so we have to + * manage the unpinning specially. The last domain does the + * unpin; every other domain is just unmapped. + */ + if (unmap_index != area->iopt->next_domain_id - 1) { + if (end_index != iopt_area_index(area)) + iopt_area_unmap_domain_range( + area, domain, iopt_area_index(area), + end_index - 1); + } else { + iopt_area_unfill_partial_domain(area, pages, domain, + end_index); + } + } +out_destroy: + pfn_reader_destroy(&pfns); +out_unlock: + mutex_unlock(&pages->mutex); + return rc; +} + +/** + * iopt_area_unfill_domains() - unmap PFNs from the area's domains + * @area: The area to act on + * @pages: The pages associated with the area (area->pages is NULL) + * + * Called during area destruction. This unmaps the IOVAs covered by all the + * area's domains and releases the PFNs.
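+ * + * Pairs with iopt_area_fill_domains(). A destruction sketch (illustrative): + * + *   iopt_area_unfill_domains(area, pages); + * + * Every domain other than the storage domain is only unmapped; the storage + * domain goes through iopt_area_unfill_domain() so the final unpin happens + * exactly once.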
+ */ +void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages) +{ + struct io_pagetable *iopt = area->iopt; + struct iommu_domain *domain; + unsigned long index; + + lockdep_assert_held(&iopt->domains_rwsem); + + mutex_lock(&pages->mutex); + if (!area->storage_domain) + goto out_unlock; + + xa_for_each(&iopt->domains, index, domain) + if (domain != area->storage_domain) + iopt_area_unmap_domain_range( + area, domain, iopt_area_index(area), + iopt_area_last_index(area)); + + interval_tree_remove(&area->pages_node, &pages->domains_itree); + iopt_area_unfill_domain(area, pages, area->storage_domain); + area->storage_domain = NULL; +out_unlock: + mutex_unlock(&pages->mutex); +} + +static void iopt_pages_unpin_xarray(struct pfn_batch *batch, + struct iopt_pages *pages, + unsigned long start_index, + unsigned long end_index) +{ + while (start_index <= end_index) { + batch_from_xarray_clear(batch, &pages->pinned_pfns, start_index, + end_index); + batch_unpin(batch, pages, 0, batch->total_pfns); + start_index += batch->total_pfns; + batch_clear(batch); + } +} + +/** + * iopt_pages_unfill_xarray() - Update the xarray after removing an access + * @pages: The pages to act on + * @start_index: Starting PFN index + * @last_index: Last PFN index + * + * Called when an iopt_pages_access is removed; removes the pages from the + * xarray. The access should already be removed from the access_itree. + */ +void iopt_pages_unfill_xarray(struct iopt_pages *pages, + unsigned long start_index, + unsigned long last_index) +{ + struct interval_tree_double_span_iter span; + u64 backup[BATCH_BACKUP_SIZE]; + struct pfn_batch batch; + bool batch_inited = false; + + lockdep_assert_held(&pages->mutex); + + interval_tree_for_each_double_span(&span, &pages->access_itree, + &pages->domains_itree, start_index, + last_index) { + if (!span.is_used) { + if (!batch_inited) { + batch_init_backup(&batch, + last_index - start_index + 1, + backup, sizeof(backup)); + batch_inited = true; + } + iopt_pages_unpin_xarray(&batch, pages, span.start_hole, + span.last_hole); + } else if (span.is_used == 2) { + /* Covered by a domain */ + clear_xarray(&pages->pinned_pfns, span.start_used, + span.last_used); + } + /* Otherwise covered by an existing access */ + } + if (batch_inited) + batch_destroy(&batch, backup); + update_unpinned(pages); +} + +/** + * iopt_pages_fill_from_xarray() - Fast path for reading PFNs + * @pages: The pages to act on + * @start_index: The first page index in the range + * @last_index: The last page index in the range + * @out_pages: The output array to return the pages + * + * This can be called if the caller is holding a refcount on an + * iopt_pages_access that is known to have already been filled. It quickly reads + * the pages directly from the xarray. + * + * This is part of the SW iommu interface to read pages for in-kernel use.
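+ * + * Fast-path sketch (illustrative; npages, first and out are hypothetical + * caller-owned values): + * + *   struct page *out[npages]; + * + *   iopt_pages_fill_from_xarray(pages, first, first + npages - 1, out);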
+ */ +void iopt_pages_fill_from_xarray(struct iopt_pages *pages, + unsigned long start_index, + unsigned long last_index, + struct page **out_pages) +{ + XA_STATE(xas, &pages->pinned_pfns, start_index); + void *entry; + + rcu_read_lock(); + while (start_index <= last_index) { + entry = xas_next(&xas); + if (xas_retry(&xas, entry)) + continue; + WARN_ON(!xa_is_value(entry)); + *(out_pages++) = pfn_to_page(xa_to_value(entry)); + start_index++; + } + rcu_read_unlock(); +} + +static int iopt_pages_fill_from_domain(struct iopt_pages *pages, + unsigned long start_index, + unsigned long last_index, + struct page **out_pages) +{ + while (start_index != last_index + 1) { + unsigned long domain_last; + struct iopt_area *area; + + area = iopt_pages_find_domain_area(pages, start_index); + if (WARN_ON(!area)) + return -EINVAL; + + domain_last = min(iopt_area_last_index(area), last_index); + out_pages = raw_pages_from_domain(area->storage_domain, area, + start_index, domain_last, + out_pages); + start_index = domain_last + 1; + } + return 0; +} + +static int iopt_pages_fill_from_mm(struct iopt_pages *pages, + struct pfn_reader_user *user, + unsigned long start_index, + unsigned long last_index, + struct page **out_pages) +{ + unsigned long cur_index = start_index; + int rc; + + while (cur_index != last_index + 1) { + user->upages = out_pages + (cur_index - start_index); + rc = pfn_reader_user_pin(user, pages, cur_index, last_index); + if (rc) + goto out_unpin; + cur_index = user->upages_end; + } + return 0; + +out_unpin: + if (start_index != cur_index) + iopt_pages_err_unpin(pages, start_index, cur_index - 1, + out_pages); + return rc; +} + +/** + * iopt_pages_fill_xarray() - Read PFNs + * @pages: The pages to act on + * @start_index: The first page index in the range + * @last_index: The last page index in the range + * @out_pages: The output array to return the pages, may be NULL + * + * This populates the xarray and returns the pages in out_pages. As the slow + * path this is able to copy pages from other storage tiers into the xarray. + * + * On failure the xarray is left unchanged. + * + * This is part of the SW iommu interface to read pages for in-kernel use. 
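+ * + * Slow-path sketch (illustrative; allocation and error checks elided): + * + *   out = kvcalloc(last - first + 1, sizeof(*out), GFP_KERNEL_ACCOUNT); + *   rc = iopt_pages_fill_xarray(pages, first, last, out);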
+ */ +int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start_index, + unsigned long last_index, struct page **out_pages) +{ + struct interval_tree_double_span_iter span; + unsigned long xa_end = start_index; + struct pfn_reader_user user; + int rc; + + lockdep_assert_held(&pages->mutex); + + pfn_reader_user_init(&user, pages); + user.upages_len = (last_index - start_index + 1) * sizeof(*out_pages); + interval_tree_for_each_double_span(&span, &pages->access_itree, + &pages->domains_itree, start_index, + last_index) { + struct page **cur_pages; + + if (span.is_used == 1) { + cur_pages = out_pages + (span.start_used - start_index); + iopt_pages_fill_from_xarray(pages, span.start_used, + span.last_used, cur_pages); + continue; + } + + if (span.is_used == 2) { + cur_pages = out_pages + (span.start_used - start_index); + iopt_pages_fill_from_domain(pages, span.start_used, + span.last_used, cur_pages); + rc = pages_to_xarray(&pages->pinned_pfns, + span.start_used, span.last_used, + cur_pages); + if (rc) + goto out_clean_xa; + xa_end = span.last_used + 1; + continue; + } + + /* hole */ + cur_pages = out_pages + (span.start_hole - start_index); + rc = iopt_pages_fill_from_mm(pages, &user, span.start_hole, + span.last_hole, cur_pages); + if (rc) + goto out_clean_xa; + rc = pages_to_xarray(&pages->pinned_pfns, span.start_hole, + span.last_hole, cur_pages); + if (rc) { + iopt_pages_err_unpin(pages, span.start_hole, + span.last_hole, cur_pages); + goto out_clean_xa; + } + xa_end = span.last_hole + 1; + } + rc = pfn_reader_user_update_pinned(&user, pages); + if (rc) + goto out_clean_xa; + user.upages = NULL; + pfn_reader_user_destroy(&user, pages); + return 0; + +out_clean_xa: + if (start_index != xa_end) + iopt_pages_unfill_xarray(pages, start_index, xa_end - 1); + user.upages = NULL; + pfn_reader_user_destroy(&user, pages); + return rc; +} + +/* + * This uses the pfn_reader instead of taking a shortcut by using the mm. It can + * do every scenario and is fully consistent with what an iommu_domain would + * see. + */ +static int iopt_pages_rw_slow(struct iopt_pages *pages, + unsigned long start_index, + unsigned long last_index, unsigned long offset, + void *data, unsigned long length, + unsigned int flags) +{ + struct pfn_reader pfns; + int rc; + + mutex_lock(&pages->mutex); + + rc = pfn_reader_first(&pfns, pages, start_index, last_index); + if (rc) + goto out_unlock; + + while (!pfn_reader_done(&pfns)) { + unsigned long done; + + done = batch_rw(&pfns.batch, data, offset, length, flags); + data += done; + length -= done; + offset = 0; + pfn_reader_unpin(&pfns); + + rc = pfn_reader_next(&pfns); + if (rc) + goto out_destroy; + } + if (WARN_ON(length != 0)) + rc = -EINVAL; +out_destroy: + pfn_reader_destroy(&pfns); +out_unlock: + mutex_unlock(&pages->mutex); + return rc; +} + +/* + * A medium speed path that still allows DMA inconsistencies, but doesn't do any + * memory allocations or interval tree searches. + */ +static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index, + unsigned long offset, void *data, + unsigned long length, unsigned int flags) +{ + struct page *page = NULL; + int rc; + + if (!mmget_not_zero(pages->source_mm)) + return iopt_pages_rw_slow(pages, index, index, offset, data, + length, flags); + + if (iommufd_should_fail()) { + rc = -EINVAL; + goto out_mmput; + } + + mmap_read_lock(pages->source_mm); + rc = pin_user_pages_remote( + pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE), + 1, (flags & IOMMUFD_ACCESS_RW_WRITE) ? 
FOLL_WRITE : 0, &page, + NULL, NULL); + mmap_read_unlock(pages->source_mm); + if (rc != 1) { + if (WARN_ON(rc >= 0)) + rc = -EINVAL; + goto out_mmput; + } + copy_data_page(page, data, offset, length, flags); + unpin_user_page(page); + rc = 0; + +out_mmput: + mmput(pages->source_mm); + return rc; +} + +/** + * iopt_pages_rw_access - Copy to/from a linear slice of the pages + * @pages: pages to act on + * @start_byte: First byte of pages to copy to/from + * @data: Kernel buffer to get/put the data + * @length: Number of bytes to copy + * @flags: IOMMUFD_ACCESS_RW_* flags + * + * This will find each page in the range, kmap it and then memcpy to/from + * the given kernel buffer. + */ +int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte, + void *data, unsigned long length, unsigned int flags) +{ + unsigned long start_index = start_byte / PAGE_SIZE; + unsigned long last_index = (start_byte + length - 1) / PAGE_SIZE; + bool change_mm = current->mm != pages->source_mm; + int rc = 0; + + if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && + (flags & __IOMMUFD_ACCESS_RW_SLOW_PATH)) + change_mm = true; + + if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable) + return -EPERM; + + if (!(flags & IOMMUFD_ACCESS_RW_KTHREAD) && change_mm) { + if (start_index == last_index) + return iopt_pages_rw_page(pages, start_index, + start_byte % PAGE_SIZE, data, + length, flags); + return iopt_pages_rw_slow(pages, start_index, last_index, + start_byte % PAGE_SIZE, data, length, + flags); + } + + /* + * Try to copy using copy_to_user(). We do this as a fast path and + * ignore any pinning inconsistencies, unlike a real DMA path. + */ + if (change_mm) { + if (!mmget_not_zero(pages->source_mm)) + return iopt_pages_rw_slow(pages, start_index, + last_index, + start_byte % PAGE_SIZE, data, + length, flags); + kthread_use_mm(pages->source_mm); + } + + if (flags & IOMMUFD_ACCESS_RW_WRITE) { + if (copy_to_user(pages->uptr + start_byte, data, length)) + rc = -EFAULT; + } else { + if (copy_from_user(data, pages->uptr + start_byte, length)) + rc = -EFAULT; + } + + if (change_mm) { + kthread_unuse_mm(pages->source_mm); + mmput(pages->source_mm); + } + + return rc; +} + +static struct iopt_pages_access * +iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index, + unsigned long last) +{ + struct interval_tree_node *node; + + lockdep_assert_held(&pages->mutex); + + /* There can be overlapping ranges in this interval tree */ + for (node = interval_tree_iter_first(&pages->access_itree, index, last); + node; node = interval_tree_iter_next(node, index, last)) + if (node->start == index && node->last == last) + return container_of(node, struct iopt_pages_access, + node); + return NULL; +} + +/** + * iopt_area_add_access() - Record an in-kernel access for PFNs + * @area: The source of PFNs + * @start_index: First page index + * @last_index: Inclusive last page index + * @out_pages: Output list of struct page's representing the PFNs + * @flags: IOMMUFD_ACCESS_RW_* flags + * + * Record that an in-kernel access will be accessing the pages, ensure they are + * pinned, and return the PFNs as a simple list of 'struct page *'.
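+ * + * Usage sketch (illustrative; first, last and out_pages are a hypothetical + * caller's values): + * + *   rc = iopt_area_add_access(area, first, last, out_pages, 0); + *   if (!rc) + *           ... use out_pages[0 .. last - first] ...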
+ * + * This should be undone through a matching call to iopt_area_remove_access() + */ +int iopt_area_add_access(struct iopt_area *area, unsigned long start_index, + unsigned long last_index, struct page **out_pages, + unsigned int flags) +{ + struct iopt_pages *pages = area->pages; + struct iopt_pages_access *access; + int rc; + + if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable) + return -EPERM; + + mutex_lock(&pages->mutex); + access = iopt_pages_get_exact_access(pages, start_index, last_index); + if (access) { + area->num_accesses++; + access->users++; + iopt_pages_fill_from_xarray(pages, start_index, last_index, + out_pages); + mutex_unlock(&pages->mutex); + return 0; + } + + access = kzalloc(sizeof(*access), GFP_KERNEL_ACCOUNT); + if (!access) { + rc = -ENOMEM; + goto err_unlock; + } + + rc = iopt_pages_fill_xarray(pages, start_index, last_index, out_pages); + if (rc) + goto err_free; + + access->node.start = start_index; + access->node.last = last_index; + access->users = 1; + area->num_accesses++; + interval_tree_insert(&access->node, &pages->access_itree); + mutex_unlock(&pages->mutex); + return 0; + +err_free: + kfree(access); +err_unlock: + mutex_unlock(&pages->mutex); + return rc; +} + +/** + * iopt_area_remove_access() - Release an in-kernel access for PFNs + * @area: The source of PFNs + * @start_index: First page index + * @last_index: Inclusive last page index + * + * Undo iopt_area_add_access() and unpin the pages if necessary. The caller + * must stop using the PFNs before calling this. + */ +void iopt_area_remove_access(struct iopt_area *area, unsigned long start_index, + unsigned long last_index) +{ + struct iopt_pages *pages = area->pages; + struct iopt_pages_access *access; + + mutex_lock(&pages->mutex); + access = iopt_pages_get_exact_access(pages, start_index, last_index); + if (WARN_ON(!access)) + goto out_unlock; + + WARN_ON(area->num_accesses == 0 || access->users == 0); + area->num_accesses--; + access->users--; + if (access->users) + goto out_unlock; + + interval_tree_remove(&access->node, &pages->access_itree); + iopt_pages_unfill_xarray(pages, start_index, last_index); + kfree(access); +out_unlock: + mutex_unlock(&pages->mutex); +} diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c new file mode 100644 index 0000000000000000000000000000000000000000..cfb5fe9a5e0ee8664518158a7176c909ee8a9c06 --- /dev/null +++ b/drivers/iommu/iommufd/selftest.c @@ -0,0 +1,853 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. + * + * Kernel side components to support tools/testing/selftests/iommu + */ +#include <linux/slab.h> +#include <linux/iommu.h> +#include <linux/xarray.h> +#include <linux/file.h> +#include <linux/anon_inodes.h> +#include <linux/fault-inject.h> +#include <uapi/linux/iommufd.h> + +#include "io_pagetable.h" +#include "iommufd_private.h" +#include "iommufd_test.h" + +static DECLARE_FAULT_ATTR(fail_iommufd); +static struct dentry *dbgfs_root; + +size_t iommufd_test_memory_limit = 65536; + +enum { + MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2, + + /* + * Like a real page table, alignment requires the low bits of the address + * to be zero. xarray also requires the high bit to be zero, so we store + * the pfns shifted. The upper bits are used for metadata. + */ + MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE, + + _MOCK_PFN_START = MOCK_PFN_MASK + 1, + MOCK_PFN_START_IOVA = _MOCK_PFN_START, + MOCK_PFN_LAST_IOVA = _MOCK_PFN_START, +}; + +/* + * Syzkaller has trouble randomizing the correct iova to use since it is linked + * to the map ioctl's output, and it has no idea about that. So, simplify things.
+ * In syzkaller mode the 64 bit IOVA is converted into an nth area and offset + * value. This has a much smaller randomization space and syzkaller can hit it. + */ +static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt, + u64 *iova) +{ + struct syz_layout { + __u32 nth_area; + __u32 offset; + }; + struct syz_layout *syz = (void *)iova; + unsigned int nth = syz->nth_area; + struct iopt_area *area; + + down_read(&iopt->iova_rwsem); + for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; + area = iopt_area_iter_next(area, 0, ULONG_MAX)) { + if (nth == 0) { + up_read(&iopt->iova_rwsem); + return iopt_area_iova(area) + syz->offset; + } + nth--; + } + up_read(&iopt->iova_rwsem); + + return 0; +} + +void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd, + unsigned int ioas_id, u64 *iova, u32 *flags) +{ + struct iommufd_ioas *ioas; + + if (!(*flags & MOCK_FLAGS_ACCESS_SYZ)) + return; + *flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ; + + ioas = iommufd_get_ioas(ucmd, ioas_id); + if (IS_ERR(ioas)) + return; + *iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova); + iommufd_put_object(&ioas->obj); +} + +struct mock_iommu_domain { + struct iommu_domain domain; + struct xarray pfns; +}; + +enum selftest_obj_type { + TYPE_IDEV, +}; + +struct selftest_obj { + struct iommufd_object obj; + enum selftest_obj_type type; + + union { + struct { + struct iommufd_hw_pagetable *hwpt; + struct iommufd_ctx *ictx; + struct device mock_dev; + } idev; + }; +}; + +static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type) +{ + struct mock_iommu_domain *mock; + + if (WARN_ON(iommu_domain_type != IOMMU_DOMAIN_UNMANAGED)) + return NULL; + + mock = kzalloc(sizeof(*mock), GFP_KERNEL); + if (!mock) + return NULL; + mock->domain.geometry.aperture_start = MOCK_APERTURE_START; + mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST; + mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE; + xa_init(&mock->pfns); + return &mock->domain; +} + +static void mock_domain_free(struct iommu_domain *domain) +{ + struct mock_iommu_domain *mock = + container_of(domain, struct mock_iommu_domain, domain); + + WARN_ON(!xa_empty(&mock->pfns)); + kfree(mock); +} + +static int mock_domain_map_pages(struct iommu_domain *domain, + unsigned long iova, phys_addr_t paddr, + size_t pgsize, size_t pgcount, int prot, + gfp_t gfp, size_t *mapped) +{ + struct mock_iommu_domain *mock = + container_of(domain, struct mock_iommu_domain, domain); + unsigned long flags = MOCK_PFN_START_IOVA; + unsigned long start_iova = iova; + + /* + * xarray does not reliably work with fault injection because it does a + * retry allocation, so put our own failure point. 
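+ * + * Worked example of the encoding stored below (assuming a 4K PAGE_SIZE, so + * MOCK_IO_PAGE_SIZE is 2K): mapping paddr 0x6000 at the start of a map + * stores xa_mk_value(0x6000 / 0x800 | MOCK_PFN_START_IOVA), i.e. pfn 0xc + * plus the start-of-map metadata bit.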
+ */ + if (iommufd_should_fail()) + return -ENOENT; + + WARN_ON(iova % MOCK_IO_PAGE_SIZE); + WARN_ON(pgsize % MOCK_IO_PAGE_SIZE); + for (; pgcount; pgcount--) { + size_t cur; + + for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) { + void *old; + + if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize) + flags = MOCK_PFN_LAST_IOVA; + old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE, + xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) | + flags), + gfp); + if (xa_is_err(old)) { + for (; start_iova != iova; + start_iova += MOCK_IO_PAGE_SIZE) + xa_erase(&mock->pfns, + start_iova / + MOCK_IO_PAGE_SIZE); + return xa_err(old); + } + WARN_ON(old); + iova += MOCK_IO_PAGE_SIZE; + paddr += MOCK_IO_PAGE_SIZE; + *mapped += MOCK_IO_PAGE_SIZE; + flags = 0; + } + } + return 0; +} + +static size_t mock_domain_unmap_pages(struct iommu_domain *domain, + unsigned long iova, size_t pgsize, + size_t pgcount, + struct iommu_iotlb_gather *iotlb_gather) +{ + struct mock_iommu_domain *mock = + container_of(domain, struct mock_iommu_domain, domain); + bool first = true; + size_t ret = 0; + void *ent; + + WARN_ON(iova % MOCK_IO_PAGE_SIZE); + WARN_ON(pgsize % MOCK_IO_PAGE_SIZE); + + for (; pgcount; pgcount--) { + size_t cur; + + for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) { + ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE); + WARN_ON(!ent); + /* + * iommufd generates unmaps that must be a strict + * superset of the maps performed. So every starting + * IOVA should have been an IOVA passed to map, and + * the first IOVA must be present and have been the + * first IOVA passed to map_pages. + */ + if (first) { + WARN_ON(!(xa_to_value(ent) & + MOCK_PFN_START_IOVA)); + first = false; + } + if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize) + WARN_ON(!(xa_to_value(ent) & + MOCK_PFN_LAST_IOVA)); + + iova += MOCK_IO_PAGE_SIZE; + ret += MOCK_IO_PAGE_SIZE; + } + } + return ret; +} + +static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova) +{ + struct mock_iommu_domain *mock = + container_of(domain, struct mock_iommu_domain, domain); + void *ent; + + WARN_ON(iova % MOCK_IO_PAGE_SIZE); + ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE); + WARN_ON(!ent); + return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE; +} + +static const struct iommu_ops mock_ops = { + .owner = THIS_MODULE, + .pgsize_bitmap = MOCK_IO_PAGE_SIZE, + .domain_alloc = mock_domain_alloc, + .default_domain_ops = + &(struct iommu_domain_ops){ + .free = mock_domain_free, + .map_pages = mock_domain_map_pages, + .unmap_pages = mock_domain_unmap_pages, + .iova_to_phys = mock_domain_iova_to_phys, + }, +}; + +static inline struct iommufd_hw_pagetable * +get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, + struct mock_iommu_domain **mock) +{ + struct iommufd_hw_pagetable *hwpt; + struct iommufd_object *obj; + + obj = iommufd_get_object(ucmd->ictx, mockpt_id, + IOMMUFD_OBJ_HW_PAGETABLE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + hwpt = container_of(obj, struct iommufd_hw_pagetable, obj); + if (hwpt->domain->ops != mock_ops.default_domain_ops) { + iommufd_put_object(&hwpt->obj); + return ERR_PTR(-EINVAL); + } + *mock = container_of(hwpt->domain, struct mock_iommu_domain, domain); + return hwpt; +} + +/* Create a hw_pagetable with the mock domain so we can test the domain ops */ +static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd, + struct iommu_test_cmd *cmd) +{ + static struct bus_type mock_bus = { .iommu_ops = &mock_ops }; + struct iommufd_hw_pagetable *hwpt; + struct selftest_obj
*sobj; + struct iommufd_ioas *ioas; + int rc; + + ioas = iommufd_get_ioas(ucmd, cmd->id); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + + sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST); + if (IS_ERR(sobj)) { + rc = PTR_ERR(sobj); + goto out_ioas; + } + sobj->idev.ictx = ucmd->ictx; + sobj->type = TYPE_IDEV; + sobj->idev.mock_dev.bus = &mock_bus; + + hwpt = iommufd_device_selftest_attach(ucmd->ictx, ioas, + &sobj->idev.mock_dev); + if (IS_ERR(hwpt)) { + rc = PTR_ERR(hwpt); + goto out_sobj; + } + sobj->idev.hwpt = hwpt; + + /* Userspace must destroy both of these IDs to destroy the object */ + cmd->mock_domain.out_hwpt_id = hwpt->obj.id; + cmd->mock_domain.out_device_id = sobj->obj.id; + iommufd_object_finalize(ucmd->ictx, &sobj->obj); + iommufd_put_object(&ioas->obj); + return iommufd_ucmd_respond(ucmd, sizeof(*cmd)); + +out_sobj: + iommufd_object_abort(ucmd->ictx, &sobj->obj); +out_ioas: + iommufd_put_object(&ioas->obj); + return rc; +} + +/* Add an additional reserved IOVA to the IOAS */ +static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd, + unsigned int mockpt_id, + unsigned long start, size_t length) +{ + struct iommufd_ioas *ioas; + int rc; + + ioas = iommufd_get_ioas(ucmd, mockpt_id); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + down_write(&ioas->iopt.iova_rwsem); + rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL); + up_write(&ioas->iopt.iova_rwsem); + iommufd_put_object(&ioas->obj); + return rc; +} + +/* Check that every pfn under each iova matches the pfn under a user VA */ +static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd, + unsigned int mockpt_id, unsigned long iova, + size_t length, void __user *uptr) +{ + struct iommufd_hw_pagetable *hwpt; + struct mock_iommu_domain *mock; + int rc; + + if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE || + (uintptr_t)uptr % MOCK_IO_PAGE_SIZE) + return -EINVAL; + + hwpt = get_md_pagetable(ucmd, mockpt_id, &mock); + if (IS_ERR(hwpt)) + return PTR_ERR(hwpt); + + for (; length; length -= MOCK_IO_PAGE_SIZE) { + struct page *pages[1]; + unsigned long pfn; + long npages; + void *ent; + + npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0, + pages); + if (npages < 0) { + rc = npages; + goto out_put; + } + if (WARN_ON(npages != 1)) { + rc = -EFAULT; + goto out_put; + } + pfn = page_to_pfn(pages[0]); + put_page(pages[0]); + + ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE); + if (!ent || + (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE != + pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) { + rc = -EINVAL; + goto out_put; + } + iova += MOCK_IO_PAGE_SIZE; + uptr += MOCK_IO_PAGE_SIZE; + } + rc = 0; + +out_put: + iommufd_put_object(&hwpt->obj); + return rc; +} + +/* Check that the page ref count matches, to look for missing pin/unpins */ +static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd, + void __user *uptr, size_t length, + unsigned int refs) +{ + if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE) + return -EINVAL; + + for (; length; length -= PAGE_SIZE) { + struct page *pages[1]; + long npages; + + npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages); + if (npages < 0) + return npages; + if (WARN_ON(npages != 1)) + return -EFAULT; + if (!PageCompound(pages[0])) { + unsigned int count; + + count = page_ref_count(pages[0]); + if (count / GUP_PIN_COUNTING_BIAS != refs) { + put_page(pages[0]); + return -EIO; + } + } + put_page(pages[0]); + uptr += PAGE_SIZE; + } + return 0; +} + +struct selftest_access { + struct iommufd_access 
*access; + struct file *file; + struct mutex lock; + struct list_head items; + unsigned int next_id; + bool destroying; +}; + +struct selftest_access_item { + struct list_head items_elm; + unsigned long iova; + size_t length; + unsigned int id; +}; + +static const struct file_operations iommfd_test_staccess_fops; + +static struct selftest_access *iommufd_access_get(int fd) +{ + struct file *file; + + file = fget(fd); + if (!file) + return ERR_PTR(-EBADFD); + + if (file->f_op != &iommfd_test_staccess_fops) { + fput(file); + return ERR_PTR(-EBADFD); + } + return file->private_data; +} + +static void iommufd_test_access_unmap(void *data, unsigned long iova, + unsigned long length) +{ + unsigned long iova_last = iova + length - 1; + struct selftest_access *staccess = data; + struct selftest_access_item *item; + struct selftest_access_item *tmp; + + mutex_lock(&staccess->lock); + list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) { + if (iova > item->iova + item->length - 1 || + iova_last < item->iova) + continue; + list_del(&item->items_elm); + iommufd_access_unpin_pages(staccess->access, item->iova, + item->length); + kfree(item); + } + mutex_unlock(&staccess->lock); +} + +static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd, + unsigned int access_id, + unsigned int item_id) +{ + struct selftest_access_item *item; + struct selftest_access *staccess; + + staccess = iommufd_access_get(access_id); + if (IS_ERR(staccess)) + return PTR_ERR(staccess); + + mutex_lock(&staccess->lock); + list_for_each_entry(item, &staccess->items, items_elm) { + if (item->id == item_id) { + list_del(&item->items_elm); + iommufd_access_unpin_pages(staccess->access, item->iova, + item->length); + mutex_unlock(&staccess->lock); + kfree(item); + fput(staccess->file); + return 0; + } + } + mutex_unlock(&staccess->lock); + fput(staccess->file); + return -ENOENT; +} + +static int iommufd_test_staccess_release(struct inode *inode, + struct file *filep) +{ + struct selftest_access *staccess = filep->private_data; + + if (staccess->access) { + iommufd_test_access_unmap(staccess, 0, ULONG_MAX); + iommufd_access_destroy(staccess->access); + } + mutex_destroy(&staccess->lock); + kfree(staccess); + return 0; +} + +static const struct iommufd_access_ops selftest_access_ops_pin = { + .needs_pin_pages = 1, + .unmap = iommufd_test_access_unmap, +}; + +static const struct iommufd_access_ops selftest_access_ops = { + .unmap = iommufd_test_access_unmap, +}; + +static const struct file_operations iommfd_test_staccess_fops = { + .release = iommufd_test_staccess_release, +}; + +static struct selftest_access *iommufd_test_alloc_access(void) +{ + struct selftest_access *staccess; + struct file *filep; + + staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT); + if (!staccess) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&staccess->items); + mutex_init(&staccess->lock); + + filep = anon_inode_getfile("[iommufd_test_staccess]", + &iommfd_test_staccess_fops, staccess, + O_RDWR); + if (IS_ERR(filep)) { + kfree(staccess); + return ERR_CAST(filep); + } + staccess->file = filep; + return staccess; +} + +static int iommufd_test_create_access(struct iommufd_ucmd *ucmd, + unsigned int ioas_id, unsigned int flags) +{ + struct iommu_test_cmd *cmd = ucmd->cmd; + struct selftest_access *staccess; + struct iommufd_access *access; + int fdno; + int rc; + + if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) + return -EOPNOTSUPP; + + staccess = iommufd_test_alloc_access(); + if (IS_ERR(staccess)) + return 
PTR_ERR(staccess); + + fdno = get_unused_fd_flags(O_CLOEXEC); + if (fdno < 0) { + rc = -ENOMEM; + goto out_free_staccess; + } + + access = iommufd_access_create( + ucmd->ictx, ioas_id, + (flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ? + &selftest_access_ops_pin : + &selftest_access_ops, + staccess); + if (IS_ERR(access)) { + rc = PTR_ERR(access); + goto out_put_fdno; + } + cmd->create_access.out_access_fd = fdno; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); + if (rc) + goto out_destroy; + + staccess->access = access; + fd_install(fdno, staccess->file); + return 0; + +out_destroy: + iommufd_access_destroy(access); +out_put_fdno: + put_unused_fd(fdno); +out_free_staccess: + fput(staccess->file); + return rc; +} + +/* Check that the pages in a page array match the pages in the user VA */ +static int iommufd_test_check_pages(void __user *uptr, struct page **pages, + size_t npages) +{ + for (; npages; npages--) { + struct page *tmp_pages[1]; + long rc; + + rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages); + if (rc < 0) + return rc; + if (WARN_ON(rc != 1)) + return -EFAULT; + put_page(tmp_pages[0]); + if (tmp_pages[0] != *pages) + return -EBADE; + pages++; + uptr += PAGE_SIZE; + } + return 0; +} + +static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd, + unsigned int access_id, unsigned long iova, + size_t length, void __user *uptr, + u32 flags) +{ + struct iommu_test_cmd *cmd = ucmd->cmd; + struct selftest_access_item *item; + struct selftest_access *staccess; + struct page **pages; + size_t npages; + int rc; + + /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */ + if (length > 16*1024*1024) + return -ENOMEM; + + if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ)) + return -EOPNOTSUPP; + + staccess = iommufd_access_get(access_id); + if (IS_ERR(staccess)) + return PTR_ERR(staccess); + + if (staccess->access->ops != &selftest_access_ops_pin) { + rc = -EOPNOTSUPP; + goto out_put; + } + + if (flags & MOCK_FLAGS_ACCESS_SYZ) + iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt, + &cmd->access_pages.iova); + + npages = (ALIGN(iova + length, PAGE_SIZE) - + ALIGN_DOWN(iova, PAGE_SIZE)) / + PAGE_SIZE; + pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT); + if (!pages) { + rc = -ENOMEM; + goto out_put; + } + + /* + * Drivers will need to think very carefully about this locking. The + * core code can do multiple unmaps instantaneously after + * iommufd_access_pin_pages() and *all* the unmaps must not return until + * the range is unpinned. This simple implementation puts a global lock + * around the pin, which may not suit drivers that want this to be a + * performance path. Drivers that get this wrong will trigger WARN_ON + * races and cause EDEADLOCK failures to userspace.
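+ * + * Ordering sketch a real driver must preserve (illustrative): + * + *   pin path:    lock; iommufd_access_pin_pages(); record the range; unlock; + *   unmap path:  lock; unpin every overlapping recorded range; unlock; + * + * The unmap callback may only return once every overlapping pin is gone.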
+ */ + mutex_lock(&staccess->lock); + rc = iommufd_access_pin_pages(staccess->access, iova, length, pages, + flags & MOCK_FLAGS_ACCESS_WRITE); + if (rc) + goto out_unlock; + + /* For syzkaller allow uptr to be NULL to skip this check */ + if (uptr) { + rc = iommufd_test_check_pages( + uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages, + npages); + if (rc) + goto out_unaccess; + } + + item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT); + if (!item) { + rc = -ENOMEM; + goto out_unaccess; + } + + item->iova = iova; + item->length = length; + item->id = staccess->next_id++; + list_add_tail(&item->items_elm, &staccess->items); + + cmd->access_pages.out_access_pages_id = item->id; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); + if (rc) + goto out_free_item; + goto out_unlock; + +out_free_item: + list_del(&item->items_elm); + kfree(item); +out_unaccess: + iommufd_access_unpin_pages(staccess->access, iova, length); +out_unlock: + mutex_unlock(&staccess->lock); + kvfree(pages); +out_put: + fput(staccess->file); + return rc; +} + +static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd, + unsigned int access_id, unsigned long iova, + size_t length, void __user *ubuf, + unsigned int flags) +{ + struct iommu_test_cmd *cmd = ucmd->cmd; + struct selftest_access *staccess; + void *tmp; + int rc; + + /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */ + if (length > 16*1024*1024) + return -ENOMEM; + + if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH | + MOCK_FLAGS_ACCESS_SYZ)) + return -EOPNOTSUPP; + + staccess = iommufd_access_get(access_id); + if (IS_ERR(staccess)) + return PTR_ERR(staccess); + + tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT); + if (!tmp) { + rc = -ENOMEM; + goto out_put; + } + + if (flags & MOCK_ACCESS_RW_WRITE) { + if (copy_from_user(tmp, ubuf, length)) { + rc = -EFAULT; + goto out_free; + } + } + + if (flags & MOCK_FLAGS_ACCESS_SYZ) + iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt, + &cmd->access_rw.iova); + + rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags); + if (rc) + goto out_free; + if (!(flags & MOCK_ACCESS_RW_WRITE)) { + if (copy_to_user(ubuf, tmp, length)) { + rc = -EFAULT; + goto out_free; + } + } + +out_free: + kvfree(tmp); +out_put: + fput(staccess->file); + return rc; +} +static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE); +static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH == + __IOMMUFD_ACCESS_RW_SLOW_PATH); + +void iommufd_selftest_destroy(struct iommufd_object *obj) +{ + struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj); + + switch (sobj->type) { + case TYPE_IDEV: + iommufd_device_selftest_detach(sobj->idev.ictx, + sobj->idev.hwpt); + break; + } +} + +int iommufd_test(struct iommufd_ucmd *ucmd) +{ + struct iommu_test_cmd *cmd = ucmd->cmd; + + switch (cmd->op) { + case IOMMU_TEST_OP_ADD_RESERVED: + return iommufd_test_add_reserved(ucmd, cmd->id, + cmd->add_reserved.start, + cmd->add_reserved.length); + case IOMMU_TEST_OP_MOCK_DOMAIN: + return iommufd_test_mock_domain(ucmd, cmd); + case IOMMU_TEST_OP_MD_CHECK_MAP: + return iommufd_test_md_check_pa( + ucmd, cmd->id, cmd->check_map.iova, + cmd->check_map.length, + u64_to_user_ptr(cmd->check_map.uptr)); + case IOMMU_TEST_OP_MD_CHECK_REFS: + return iommufd_test_md_check_refs( + ucmd, u64_to_user_ptr(cmd->check_refs.uptr), + cmd->check_refs.length, cmd->check_refs.refs); + case IOMMU_TEST_OP_CREATE_ACCESS: + return iommufd_test_create_access(ucmd, cmd->id, + cmd->create_access.flags); + case 
IOMMU_TEST_OP_ACCESS_PAGES: + return iommufd_test_access_pages( + ucmd, cmd->id, cmd->access_pages.iova, + cmd->access_pages.length, + u64_to_user_ptr(cmd->access_pages.uptr), + cmd->access_pages.flags); + case IOMMU_TEST_OP_ACCESS_RW: + return iommufd_test_access_rw( + ucmd, cmd->id, cmd->access_rw.iova, + cmd->access_rw.length, + u64_to_user_ptr(cmd->access_rw.uptr), + cmd->access_rw.flags); + case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES: + return iommufd_test_access_item_destroy( + ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id); + case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT: + /* Protect _batch_init(), cannot be less than elmsz */ + if (cmd->memory_limit.limit < + sizeof(unsigned long) + sizeof(u32)) + return -EINVAL; + iommufd_test_memory_limit = cmd->memory_limit.limit; + return 0; + default: + return -EOPNOTSUPP; + } +} + +bool iommufd_should_fail(void) +{ + return should_fail(&fail_iommufd, 1); +} + +void __init iommufd_test_init(void) +{ + dbgfs_root = + fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd); +} + +void iommufd_test_exit(void) +{ + debugfs_remove_recursive(dbgfs_root); +} diff --git a/drivers/iommu/iommufd/vfio_compat.c b/drivers/iommu/iommufd/vfio_compat.c new file mode 100644 index 0000000000000000000000000000000000000000..3ceca0e8311c39fab2a4a1f06664d7dc4742cc81 --- /dev/null +++ b/drivers/iommu/iommufd/vfio_compat.c @@ -0,0 +1,472 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES + */ +#include <linux/file.h> +#include <linux/interval_tree.h> +#include <linux/iommu.h> +#include <linux/iommufd.h> +#include <linux/slab.h> +#include <linux/vfio.h> +#include <uapi/linux/vfio.h> +#include <uapi/linux/iommufd.h> + +#include "iommufd_private.h" + +static struct iommufd_ioas *get_compat_ioas(struct iommufd_ctx *ictx) +{ + struct iommufd_ioas *ioas = ERR_PTR(-ENODEV); + + xa_lock(&ictx->objects); + if (!ictx->vfio_ioas || !iommufd_lock_obj(&ictx->vfio_ioas->obj)) + goto out_unlock; + ioas = ictx->vfio_ioas; +out_unlock: + xa_unlock(&ictx->objects); + return ioas; +} + +/** + * iommufd_vfio_compat_ioas_id - Return the IOAS ID that vfio should use + * @ictx: Context to operate on + * @out_ioas_id: The ioas_id the caller should use + * + * The compatibility IOAS is the IOAS that the vfio compatibility ioctls operate + * on since they do not have an IOAS ID input in their ABI. Only attaching a + * group should cause a default creation of the internal ioas; this returns the + * existing ioas if it has already been assigned somehow. + */ +int iommufd_vfio_compat_ioas_id(struct iommufd_ctx *ictx, u32 *out_ioas_id) +{ + struct iommufd_ioas *ioas = NULL; + struct iommufd_ioas *out_ioas; + + ioas = iommufd_ioas_alloc(ictx); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + + xa_lock(&ictx->objects); + if (ictx->vfio_ioas && iommufd_lock_obj(&ictx->vfio_ioas->obj)) + out_ioas = ictx->vfio_ioas; + else { + out_ioas = ioas; + ictx->vfio_ioas = ioas; + } + xa_unlock(&ictx->objects); + + *out_ioas_id = out_ioas->obj.id; + if (out_ioas != ioas) { + iommufd_put_object(&out_ioas->obj); + iommufd_object_abort(ictx, &ioas->obj); + return 0; + } + /* + * An automatically created compat IOAS is treated as a userspace + * created object. Userspace can learn the ID via IOMMU_VFIO_IOAS_GET, + * and if not manually destroyed it will be destroyed automatically + * at iommufd release.
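+ * + * Userspace-side sketch (illustrative; error handling elided): + * + *   struct iommu_vfio_ioas cmd = { .size = sizeof(cmd), + *                                  .op = IOMMU_VFIO_IOAS_GET }; + * + *   ioctl(iommufd, IOMMU_VFIO_IOAS, &cmd);   after which cmd.ioas_id + *   names the compat IOAS.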
+ */ + iommufd_object_finalize(ictx, &ioas->obj); + return 0; +} +EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_ioas_id, IOMMUFD_VFIO); + +int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd) +{ + struct iommu_vfio_ioas *cmd = ucmd->cmd; + struct iommufd_ioas *ioas; + + if (cmd->__reserved) + return -EOPNOTSUPP; + switch (cmd->op) { + case IOMMU_VFIO_IOAS_GET: + ioas = get_compat_ioas(ucmd->ictx); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + cmd->ioas_id = ioas->obj.id; + iommufd_put_object(&ioas->obj); + return iommufd_ucmd_respond(ucmd, sizeof(*cmd)); + + case IOMMU_VFIO_IOAS_SET: + ioas = iommufd_get_ioas(ucmd, cmd->ioas_id); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + xa_lock(&ucmd->ictx->objects); + ucmd->ictx->vfio_ioas = ioas; + xa_unlock(&ucmd->ictx->objects); + iommufd_put_object(&ioas->obj); + return 0; + + case IOMMU_VFIO_IOAS_CLEAR: + xa_lock(&ucmd->ictx->objects); + ucmd->ictx->vfio_ioas = NULL; + xa_unlock(&ucmd->ictx->objects); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int iommufd_vfio_map_dma(struct iommufd_ctx *ictx, unsigned int cmd, + void __user *arg) +{ + u32 supported_flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE; + size_t minsz = offsetofend(struct vfio_iommu_type1_dma_map, size); + struct vfio_iommu_type1_dma_map map; + int iommu_prot = IOMMU_CACHE; + struct iommufd_ioas *ioas; + unsigned long iova; + int rc; + + if (copy_from_user(&map, arg, minsz)) + return -EFAULT; + + if (map.argsz < minsz || map.flags & ~supported_flags) + return -EINVAL; + + if (map.flags & VFIO_DMA_MAP_FLAG_READ) + iommu_prot |= IOMMU_READ; + if (map.flags & VFIO_DMA_MAP_FLAG_WRITE) + iommu_prot |= IOMMU_WRITE; + + ioas = get_compat_ioas(ictx); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + + /* + * Maps created through the legacy interface always use VFIO compatible + * rlimit accounting. If the user wishes to use the faster user based + * rlimit accounting then they must use the new interface. + */ + iova = map.iova; + rc = iopt_map_user_pages(ictx, &ioas->iopt, &iova, u64_to_user_ptr(map.vaddr), + map.size, iommu_prot, 0); + iommufd_put_object(&ioas->obj); + return rc; +} + +static int iommufd_vfio_unmap_dma(struct iommufd_ctx *ictx, unsigned int cmd, + void __user *arg) +{ + size_t minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size); + /* + * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP is obsoleted by the new + * dirty tracking direction: + * https://lore.kernel.org/kvm/20220731125503.142683-1-yishaih@nvidia.com/ + * https://lore.kernel.org/kvm/20220428210933.3583-1-joao.m.martins@oracle.com/ + */ + u32 supported_flags = VFIO_DMA_UNMAP_FLAG_ALL; + struct vfio_iommu_type1_dma_unmap unmap; + unsigned long unmapped = 0; + struct iommufd_ioas *ioas; + int rc; + + if (copy_from_user(&unmap, arg, minsz)) + return -EFAULT; + + if (unmap.argsz < minsz || unmap.flags & ~supported_flags) + return -EINVAL; + + ioas = get_compat_ioas(ictx); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + + if (unmap.flags & VFIO_DMA_UNMAP_FLAG_ALL) { + if (unmap.iova != 0 || unmap.size != 0) { + rc = -EINVAL; + goto err_put; + } + rc = iopt_unmap_all(&ioas->iopt, &unmapped); + } else { + if (READ_ONCE(ioas->iopt.disable_large_pages)) { + /* + * Create cuts at the start and last of the requested + * range. If the start IOVA is 0 then it doesn't need to + * be cut. + */ + unsigned long iovas[] = { unmap.iova + unmap.size - 1, + unmap.iova - 1 }; + + rc = iopt_cut_iova(&ioas->iopt, iovas, + unmap.iova ? 
2 : 1); + if (rc) + goto err_put; + } + rc = iopt_unmap_iova(&ioas->iopt, unmap.iova, unmap.size, + &unmapped); + } + unmap.size = unmapped; + if (copy_to_user(arg, &unmap, minsz)) + rc = -EFAULT; + +err_put: + iommufd_put_object(&ioas->obj); + return rc; +} + +static int iommufd_vfio_cc_iommu(struct iommufd_ctx *ictx) +{ + struct iommufd_hw_pagetable *hwpt; + struct iommufd_ioas *ioas; + int rc = 1; + + ioas = get_compat_ioas(ictx); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + + mutex_lock(&ioas->mutex); + list_for_each_entry(hwpt, &ioas->hwpt_list, hwpt_item) { + if (!hwpt->enforce_cache_coherency) { + rc = 0; + break; + } + } + mutex_unlock(&ioas->mutex); + + iommufd_put_object(&ioas->obj); + return rc; +} + +static int iommufd_vfio_check_extension(struct iommufd_ctx *ictx, + unsigned long type) +{ + switch (type) { + case VFIO_TYPE1_IOMMU: + case VFIO_TYPE1v2_IOMMU: + case VFIO_UNMAP_ALL: + return 1; + + case VFIO_DMA_CC_IOMMU: + return iommufd_vfio_cc_iommu(ictx); + + /* + * This is obsolete, and to be removed from VFIO. It was an incomplete + * idea that got merged. + * https://lore.kernel.org/kvm/0-v1-0093c9b0e345+19-vfio_no_nesting_jgg@nvidia.com/ + */ + case VFIO_TYPE1_NESTING_IOMMU: + return 0; + + /* + * VFIO_DMA_MAP_FLAG_VADDR + * https://lore.kernel.org/kvm/1611939252-7240-1-git-send-email-steven.sistare@oracle.com/ + * https://lore.kernel.org/all/Yz777bJZjTyLrHEQ@nvidia.com/ + * + * It is hard to see how this could be implemented safely. + */ + case VFIO_UPDATE_VADDR: + default: + return 0; + } +} + +static int iommufd_vfio_set_iommu(struct iommufd_ctx *ictx, unsigned long type) +{ + struct iommufd_ioas *ioas = NULL; + int rc = 0; + + if (type != VFIO_TYPE1_IOMMU && type != VFIO_TYPE1v2_IOMMU) + return -EINVAL; + + /* VFIO fails the set_iommu if there is no group */ + ioas = get_compat_ioas(ictx); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + + /* + * The difference between TYPE1 and TYPE1v2 is the ability to unmap in + * the middle of mapped ranges. This is complicated by huge page support + * which creates single large IOPTEs that cannot be split by the iommu + * driver. TYPE1 is very old at this point and likely nothing uses it; + * however, it is simple enough to emulate by disabling the + * problematic large IOPTEs. Then we can safely unmap within any range.
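+ * + * Emulation sketch (illustrative): a legacy userspace doing + * + *   ioctl(container_fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU); + * + * lands here and triggers iopt_disable_large_pages(), so a later partial + * VFIO_IOMMU_UNMAP_DMA never has to split a huge IOPTE.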
+ */ + if (type == VFIO_TYPE1_IOMMU) + rc = iopt_disable_large_pages(&ioas->iopt); + iommufd_put_object(&ioas->obj); + return rc; +} + +static unsigned long iommufd_get_pagesizes(struct iommufd_ioas *ioas) +{ + struct io_pagetable *iopt = &ioas->iopt; + unsigned long pgsize_bitmap = ULONG_MAX; + struct iommu_domain *domain; + unsigned long index; + + down_read(&iopt->domains_rwsem); + xa_for_each(&iopt->domains, index, domain) + pgsize_bitmap &= domain->pgsize_bitmap; + + /* See vfio_update_pgsize_bitmap() */ + if (pgsize_bitmap & ~PAGE_MASK) { + pgsize_bitmap &= PAGE_MASK; + pgsize_bitmap |= PAGE_SIZE; + } + pgsize_bitmap = max(pgsize_bitmap, ioas->iopt.iova_alignment); + up_read(&iopt->domains_rwsem); + return pgsize_bitmap; +} + +static int iommufd_fill_cap_iova(struct iommufd_ioas *ioas, + struct vfio_info_cap_header __user *cur, + size_t avail) +{ + struct vfio_iommu_type1_info_cap_iova_range __user *ucap_iovas = + container_of(cur, + struct vfio_iommu_type1_info_cap_iova_range __user, + header); + struct vfio_iommu_type1_info_cap_iova_range cap_iovas = { + .header = { + .id = VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE, + .version = 1, + }, + }; + struct interval_tree_span_iter span; + + interval_tree_for_each_span(&span, &ioas->iopt.reserved_itree, 0, + ULONG_MAX) { + struct vfio_iova_range range; + + if (!span.is_hole) + continue; + range.start = span.start_hole; + range.end = span.last_hole; + if (avail >= struct_size(&cap_iovas, iova_ranges, + cap_iovas.nr_iovas + 1) && + copy_to_user(&ucap_iovas->iova_ranges[cap_iovas.nr_iovas], + &range, sizeof(range))) + return -EFAULT; + cap_iovas.nr_iovas++; + } + if (avail >= struct_size(&cap_iovas, iova_ranges, cap_iovas.nr_iovas) && + copy_to_user(ucap_iovas, &cap_iovas, sizeof(cap_iovas))) + return -EFAULT; + return struct_size(&cap_iovas, iova_ranges, cap_iovas.nr_iovas); +} + +static int iommufd_fill_cap_dma_avail(struct iommufd_ioas *ioas, + struct vfio_info_cap_header __user *cur, + size_t avail) +{ + struct vfio_iommu_type1_info_dma_avail cap_dma = { + .header = { + .id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL, + .version = 1, + }, + /* + * iommufd's limit is based on the cgroup's memory limit. + * Normally vfio would return U16_MAX here, and provide a module + * parameter to adjust it. Since S390 qemu userspace actually + * pays attention and needs a value bigger than U16_MAX, return + * U32_MAX.
+ */ + .avail = U32_MAX, + }; + + if (avail >= sizeof(cap_dma) && + copy_to_user(cur, &cap_dma, sizeof(cap_dma))) + return -EFAULT; + return sizeof(cap_dma); +} + +static int iommufd_vfio_iommu_get_info(struct iommufd_ctx *ictx, + void __user *arg) +{ + typedef int (*fill_cap_fn)(struct iommufd_ioas *ioas, + struct vfio_info_cap_header __user *cur, + size_t avail); + static const fill_cap_fn fill_fns[] = { + iommufd_fill_cap_dma_avail, + iommufd_fill_cap_iova, + }; + size_t minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes); + struct vfio_info_cap_header __user *last_cap = NULL; + struct vfio_iommu_type1_info info; + struct iommufd_ioas *ioas; + size_t total_cap_size; + int rc; + int i; + + if (copy_from_user(&info, arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + minsz = min_t(size_t, info.argsz, sizeof(info)); + + ioas = get_compat_ioas(ictx); + if (IS_ERR(ioas)) + return PTR_ERR(ioas); + + info.flags = VFIO_IOMMU_INFO_PGSIZES; + info.iova_pgsizes = iommufd_get_pagesizes(ioas); + info.cap_offset = 0; + + down_read(&ioas->iopt.iova_rwsem); + total_cap_size = sizeof(info); + for (i = 0; i != ARRAY_SIZE(fill_fns); i++) { + int cap_size; + + if (info.argsz > total_cap_size) + cap_size = fill_fns[i](ioas, arg + total_cap_size, + info.argsz - total_cap_size); + else + cap_size = fill_fns[i](ioas, NULL, 0); + if (cap_size < 0) { + rc = cap_size; + goto out_put; + } + if (last_cap && info.argsz >= total_cap_size && + put_user(total_cap_size, &last_cap->next)) { + rc = -EFAULT; + goto out_put; + } + last_cap = arg + total_cap_size; + total_cap_size += cap_size; + } + + /* + * If the user did not provide enough space then only some caps are + * returned and the argsz will be updated to the correct amount to get + * all caps. + */ + if (info.argsz >= total_cap_size) + info.cap_offset = sizeof(info); + info.argsz = total_cap_size; + info.flags |= VFIO_IOMMU_INFO_CAPS; + if (copy_to_user(arg, &info, minsz)) { + rc = -EFAULT; + goto out_put; + } + rc = 0; + +out_put: + up_read(&ioas->iopt.iova_rwsem); + iommufd_put_object(&ioas->obj); + return rc; +} + +int iommufd_vfio_ioctl(struct iommufd_ctx *ictx, unsigned int cmd, + unsigned long arg) +{ + void __user *uarg = (void __user *)arg; + + switch (cmd) { + case VFIO_GET_API_VERSION: + return VFIO_API_VERSION; + case VFIO_SET_IOMMU: + return iommufd_vfio_set_iommu(ictx, arg); + case VFIO_CHECK_EXTENSION: + return iommufd_vfio_check_extension(ictx, arg); + case VFIO_IOMMU_GET_INFO: + return iommufd_vfio_iommu_get_info(ictx, uarg); + case VFIO_IOMMU_MAP_DMA: + return iommufd_vfio_map_dma(ictx, cmd, uarg); + case VFIO_IOMMU_UNMAP_DMA: + return iommufd_vfio_unmap_dma(ictx, cmd, uarg); + case VFIO_IOMMU_DIRTY_PAGES: + default: + return -ENOIOCTLCMD; + } + return -ENOIOCTLCMD; +} diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 3b30c0752274fd43e13ac19908499a6fa1a65e0f..a003bd5fc65c135a7a358e0553af0db8adab7652 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -628,8 +628,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, * Something is wrong, we can't attach two devices using * different IOMMUs to the same domain. 
*/ - dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n", - dev_name(mmu->dev), dev_name(domain->mmu->dev)); ret = -EINVAL; } else dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id); @@ -661,22 +659,22 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain, } static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp) + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped) { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - if (!domain) - return -ENODEV; - - return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp); + return domain->iop->map_pages(domain->iop, iova, paddr, pgsize, pgcount, + prot, gfp, mapped); } static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, - size_t size, struct iommu_iotlb_gather *gather) + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *gather) { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - return domain->iop->unmap(domain->iop, iova, size, gather); + return domain->iop->unmap_pages(domain->iop, iova, pgsize, pgcount, gather); } static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain) @@ -879,8 +877,8 @@ static const struct iommu_ops ipmmu_ops = { .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = ipmmu_attach_device, .detach_dev = ipmmu_detach_device, - .map = ipmmu_map, - .unmap = ipmmu_unmap, + .map_pages = ipmmu_map, + .unmap_pages = ipmmu_unmap, .flush_iotlb_all = ipmmu_flush_iotlb_all, .iotlb_sync = ipmmu_iotlb_sync, .iova_to_phys = ipmmu_iova_to_phys, diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 16179a9a728300934f23f3c919bb9c200b16a728..c60624910872c6856009c222dcf848a74199d8b8 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -471,14 +471,16 @@ fail: } static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t pa, size_t len, int prot, gfp_t gfp) + phys_addr_t pa, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped) { struct msm_priv *priv = to_msm_priv(domain); unsigned long flags; int ret; spin_lock_irqsave(&priv->pgtlock, flags); - ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC); + ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot, + GFP_ATOMIC, mapped); spin_unlock_irqrestore(&priv->pgtlock, flags); return ret; @@ -493,16 +495,18 @@ static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova, } static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t len, struct iommu_iotlb_gather *gather) + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *gather) { struct msm_priv *priv = to_msm_priv(domain); unsigned long flags; + size_t ret; spin_lock_irqsave(&priv->pgtlock, flags); - len = priv->iop->unmap(priv->iop, iova, len, gather); + ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather); spin_unlock_irqrestore(&priv->pgtlock, flags); - return len; + return ret; } static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, @@ -679,8 +683,8 @@ static struct iommu_ops msm_iommu_ops = { .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = msm_iommu_attach_dev, .detach_dev = msm_iommu_detach_dev, - .map = msm_iommu_map, - .unmap = msm_iommu_unmap, + .map_pages = msm_iommu_map, + .unmap_pages = msm_iommu_unmap, /* * Nothing is needed here, the barrier to guarantee * completion of the tlb 
sync operation is implicitly diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 2ab2ecfe01f80260ef6c835127ee385249926df3..2badd6acfb23d68f0fc0656b75f18c85cb910ca4 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -108,8 +108,12 @@ #define F_MMU_INT_ID_SUB_COMM_ID(a) (((a) >> 7) & 0x3) #define F_MMU_INT_ID_COMM_ID_EXT(a) (((a) >> 10) & 0x7) #define F_MMU_INT_ID_SUB_COMM_ID_EXT(a) (((a) >> 7) & 0x7) +/* Macro for 5 bits length port ID field (default) */ #define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7) #define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f) +/* Macro for 6 bits length port ID field */ +#define F_MMU_INT_ID_LARB_ID_WID_6(a) (((a) >> 8) & 0x7) +#define F_MMU_INT_ID_PORT_ID_WID_6(a) (((a) >> 2) & 0x3f) #define MTK_PROTECT_PA_ALIGN 256 #define MTK_IOMMU_BANK_SZ 0x1000 @@ -139,6 +143,7 @@ #define IFA_IOMMU_PCIE_SUPPORT BIT(16) #define PGTABLE_PA_35_EN BIT(17) #define TF_PORT_TO_ADDR_MT8173 BIT(18) +#define INT_ID_PORT_WIDTH_6 BIT(19) #define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \ ((((pdata)->flags) & (mask)) == (_x)) @@ -165,6 +170,7 @@ enum mtk_iommu_plat { M4U_MT8186, M4U_MT8192, M4U_MT8195, + M4U_MT8365, }; struct mtk_iommu_iova_region { @@ -223,10 +229,7 @@ struct mtk_iommu_data { struct device *smicomm_dev; struct mtk_iommu_bank_data *bank; - - struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */ struct regmap *pericfg; - struct mutex mutex; /* Protect m4u_group/m4u_dom above */ /* @@ -441,20 +444,25 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) fault_pa |= (u64)pa34_32 << 32; if (MTK_IOMMU_IS_TYPE(plat_data, MTK_IOMMU_TYPE_MM)) { - fault_port = F_MMU_INT_ID_PORT_ID(regval); if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_2BITS)) { fault_larb = F_MMU_INT_ID_COMM_ID(regval); sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval); + fault_port = F_MMU_INT_ID_PORT_ID(regval); } else if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_3BITS)) { fault_larb = F_MMU_INT_ID_COMM_ID_EXT(regval); sub_comm = F_MMU_INT_ID_SUB_COMM_ID_EXT(regval); + fault_port = F_MMU_INT_ID_PORT_ID(regval); + } else if (MTK_IOMMU_HAS_FLAG(plat_data, INT_ID_PORT_WIDTH_6)) { + fault_port = F_MMU_INT_ID_PORT_ID_WID_6(regval); + fault_larb = F_MMU_INT_ID_LARB_ID_WID_6(regval); } else { + fault_port = F_MMU_INT_ID_PORT_ID(regval); fault_larb = F_MMU_INT_ID_LARB_ID(regval); } fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm]; } - if (report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova, + if (!dom || report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova, write ? 
IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) { dev_err_ratelimited( bank->parent_dev, @@ -609,7 +617,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom, dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data); if (!dom->iop) { dev_err(data->dev, "Failed to alloc io pgtable\n"); - return -EINVAL; + return -ENOMEM; } /* Update our support page sizes bitmap */ @@ -668,7 +676,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain, ret = mtk_iommu_domain_finalise(dom, frstdata, region_id); if (ret) { mutex_unlock(&dom->mutex); - return -ENODEV; + return ret; } dom->bank = &data->bank[bankid]; } @@ -711,7 +719,8 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain, } static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp) + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); @@ -720,17 +729,17 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, paddr |= BIT_ULL(32); /* Synchronize with the tlb_lock */ - return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp); + return dom->iop->map_pages(dom->iop, iova, paddr, pgsize, pgcount, prot, gfp, mapped); } static size_t mtk_iommu_unmap(struct iommu_domain *domain, - unsigned long iova, size_t size, + unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *gather) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); - iommu_iotlb_gather_add_range(gather, iova, size); - return dom->iop->unmap(dom->iop, iova, size, gather); + iommu_iotlb_gather_add_range(gather, iova, pgsize * pgcount); + return dom->iop->unmap_pages(dom->iop, iova, pgsize, pgcount, gather); } static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain) @@ -938,8 +947,8 @@ static const struct iommu_ops mtk_iommu_ops = { .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = mtk_iommu_attach_device, .detach_dev = mtk_iommu_detach_device, - .map = mtk_iommu_map, - .unmap = mtk_iommu_unmap, + .map_pages = mtk_iommu_map, + .unmap_pages = mtk_iommu_unmap, .flush_iotlb_all = mtk_iommu_flush_iotlb_all, .iotlb_sync = mtk_iommu_iotlb_sync, .iotlb_sync_map = mtk_iommu_sync_map, @@ -1043,21 +1052,26 @@ static const struct component_master_ops mtk_iommu_com_ops = { static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **match, struct mtk_iommu_data *data) { - struct device_node *larbnode, *smicomm_node, *smi_subcomm_node; - struct platform_device *plarbdev; + struct device_node *larbnode, *frst_avail_smicomm_node = NULL; + struct platform_device *plarbdev, *pcommdev; struct device_link *link; int i, larb_nr, ret; larb_nr = of_count_phandle_with_args(dev->of_node, "mediatek,larbs", NULL); if (larb_nr < 0) return larb_nr; + if (larb_nr == 0 || larb_nr > MTK_LARB_NR_MAX) + return -EINVAL; for (i = 0; i < larb_nr; i++) { + struct device_node *smicomm_node, *smi_subcomm_node; u32 id; larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i); - if (!larbnode) - return -EINVAL; + if (!larbnode) { + ret = -EINVAL; + goto err_larbdev_put; + } if (!of_device_is_available(larbnode)) { of_node_put(larbnode); @@ -1067,48 +1081,91 @@ static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **m ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id); if (ret)/* The id is consecutive if there is no this property */ id = i; + if (id >= MTK_LARB_NR_MAX) { + of_node_put(larbnode); + ret 
= -EINVAL; + goto err_larbdev_put; + } plarbdev = of_find_device_by_node(larbnode); + of_node_put(larbnode); if (!plarbdev) { - of_node_put(larbnode); - return -ENODEV; + ret = -ENODEV; + goto err_larbdev_put; } - if (!plarbdev->dev.driver) { - of_node_put(larbnode); - return -EPROBE_DEFER; + if (data->larb_imu[id].dev) { + platform_device_put(plarbdev); + ret = -EEXIST; + goto err_larbdev_put; } data->larb_imu[id].dev = &plarbdev->dev; - component_match_add_release(dev, match, component_release_of, - component_compare_of, larbnode); + if (!plarbdev->dev.driver) { + ret = -EPROBE_DEFER; + goto err_larbdev_put; + } + + /* Get smi-(sub)-common dev from the last larb. */ + smi_subcomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0); + if (!smi_subcomm_node) { + ret = -EINVAL; + goto err_larbdev_put; + } + + /* + * There may be two levels of smi-common. The node is smi-sub-common if + * it has a new mediatek,smi property; otherwise it is smi-common. + */ + smicomm_node = of_parse_phandle(smi_subcomm_node, "mediatek,smi", 0); + if (smicomm_node) + of_node_put(smi_subcomm_node); + else + smicomm_node = smi_subcomm_node; + + /* + * All the larbs that connect to one IOMMU must connect with the same + * smi-common. + */ + if (!frst_avail_smicomm_node) { + frst_avail_smicomm_node = smicomm_node; + } else if (frst_avail_smicomm_node != smicomm_node) { + dev_err(dev, "mediatek,smi property is not right @larb%d.", id); + of_node_put(smicomm_node); + ret = -EINVAL; + goto err_larbdev_put; + } else { + of_node_put(smicomm_node); + } + + component_match_add(dev, match, component_compare_dev, &plarbdev->dev); + platform_device_put(plarbdev); } - /* Get smi-(sub)-common dev from the last larb. */ - smi_subcomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0); - if (!smi_subcomm_node) + if (!frst_avail_smicomm_node) return -EINVAL; - /* - * It may have two level smi-common. the node is smi-sub-common if it - * has a new mediatek,smi property. otherwise it is smi-commmon. - */ - smicomm_node = of_parse_phandle(smi_subcomm_node, "mediatek,smi", 0); - if (smicomm_node) - of_node_put(smi_subcomm_node); - else - smicomm_node = smi_subcomm_node; - - plarbdev = of_find_device_by_node(smicomm_node); - of_node_put(smicomm_node); - data->smicomm_dev = &plarbdev->dev; + pcommdev = of_find_device_by_node(frst_avail_smicomm_node); + of_node_put(frst_avail_smicomm_node); + if (!pcommdev) + return -ENODEV; + data->smicomm_dev = &pcommdev->dev; link = device_link_add(data->smicomm_dev, dev, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME); + platform_device_put(pcommdev); if (!link) { dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev)); return -EINVAL; } return 0; + +err_larbdev_put: + for (i = MTK_LARB_NR_MAX - 1; i >= 0; i--) { + if (!data->larb_imu[i].dev) + continue; + put_device(data->larb_imu[i].dev); + } + return ret; } static int mtk_iommu_probe(struct platform_device *pdev) @@ -1173,6 +1230,8 @@ static int mtk_iommu_probe(struct platform_device *pdev) banks_num = data->plat_data->banks_num; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; if (resource_size(res) < banks_num * MTK_IOMMU_BANK_SZ) { dev_err(dev, "banknr %d.
res %pR is not enough.\n", banks_num, res); return -EINVAL; @@ -1516,6 +1575,17 @@ static const struct mtk_iommu_plat_data mt8195_data_vpp = { {4, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 6}}, }; +static const struct mtk_iommu_plat_data mt8365_data = { + .m4u_plat = M4U_MT8365, + .flags = RESET_AXI | INT_ID_PORT_WIDTH_6, + .inv_sel_reg = REG_MMU_INV_SEL_GEN1, + .banks_num = 1, + .banks_enable = {true}, + .iova_region = single_domain, + .iova_region_nr = ARRAY_SIZE(single_domain), + .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */ +}; + static const struct of_device_id mtk_iommu_of_ids[] = { { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data}, { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data}, @@ -1528,6 +1598,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = { { .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra}, { .compatible = "mediatek,mt8195-iommu-vdo", .data = &mt8195_data_vdo}, { .compatible = "mediatek,mt8195-iommu-vpp", .data = &mt8195_data_vpp}, + { .compatible = "mediatek,mt8365-m4u", .data = &mt8365_data}, {} }; diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 6e0e65831eb70d9f3ffbd285d464ef356562c2b7..69682ee068d2bd88205c00fb12f1bfeda8dfa640 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -327,44 +327,42 @@ static void mtk_iommu_v1_detach_device(struct iommu_domain *domain, struct devic } static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp) + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped) { struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain); - unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT; unsigned long flags; unsigned int i; u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT); u32 pabase = (u32)paddr; - int map_size = 0; spin_lock_irqsave(&dom->pgtlock, flags); - for (i = 0; i < page_num; i++) { - if (pgt_base_iova[i]) { - memset(pgt_base_iova, 0, i * sizeof(u32)); + for (i = 0; i < pgcount; i++) { + if (pgt_base_iova[i]) break; - } pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC; pabase += MT2701_IOMMU_PAGE_SIZE; - map_size += MT2701_IOMMU_PAGE_SIZE; } spin_unlock_irqrestore(&dom->pgtlock, flags); - mtk_iommu_v1_tlb_flush_range(dom->data, iova, size); + *mapped = i * MT2701_IOMMU_PAGE_SIZE; + mtk_iommu_v1_tlb_flush_range(dom->data, iova, *mapped); - return map_size == size ? 0 : -EEXIST; + return i == pgcount ? 
0 : -EEXIST; } static size_t mtk_iommu_v1_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size, struct iommu_iotlb_gather *gather) + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *gather) { struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain); unsigned long flags; u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT); - unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT; + size_t size = pgcount * MT2701_IOMMU_PAGE_SIZE; spin_lock_irqsave(&dom->pgtlock, flags); - memset(pgt_base_iova, 0, page_num * sizeof(u32)); + memset(pgt_base_iova, 0, pgcount * sizeof(u32)); spin_unlock_irqrestore(&dom->pgtlock, flags); mtk_iommu_v1_tlb_flush_range(dom->data, iova, size); @@ -586,13 +584,13 @@ static const struct iommu_ops mtk_iommu_v1_ops = { .release_device = mtk_iommu_v1_release_device, .def_domain_type = mtk_iommu_v1_def_domain_type, .device_group = generic_device_group, - .pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT, + .pgsize_bitmap = MT2701_IOMMU_PAGE_SIZE, .owner = THIS_MODULE, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = mtk_iommu_v1_attach_device, .detach_dev = mtk_iommu_v1_detach_device, - .map = mtk_iommu_v1_map, - .unmap = mtk_iommu_v1_unmap, + .map_pages = mtk_iommu_v1_map, + .unmap_pages = mtk_iommu_v1_unmap, .iova_to_phys = mtk_iommu_v1_iova_to_phys, .free = mtk_iommu_v1_domain_free, } diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 07ee2600113c20969b8f5b24ae4dfc7fc81be3a3..2fd7702c6709690a770325ac52fa890ecae7410d 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1414,7 +1414,7 @@ static int omap_iommu_attach_init(struct device *dev, odomain->num_iommus = omap_iommu_count(dev); if (!odomain->num_iommus) - return -EINVAL; + return -ENODEV; odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu), GFP_ATOMIC); @@ -1464,7 +1464,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) if (!arch_data || !arch_data->iommu_dev) { dev_err(dev, "device doesn't have an associated iommu\n"); - return -EINVAL; + return -ENODEV; } spin_lock(&omap_domain->lock); @@ -1472,7 +1472,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) /* only a single client device can be attached to a domain */ if (omap_domain->dev) { dev_err(dev, "iommu domain is already attached\n"); - ret = -EBUSY; + ret = -EINVAL; goto out; } diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index a3fc59b814ab5e7434ad055aa543951e7f11f153..a68eadd64f38db5fd2401164456d0a55c9750f02 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -280,19 +280,17 @@ static u32 rk_mk_pte(phys_addr_t page, int prot) * 11:9 - Page address bit 34:32 * 8:4 - Page address bit 39:35 * 3 - Security - * 2 - Readable - * 1 - Writable + * 2 - Writable + * 1 - Readable * 0 - 1 if Page @ Page address is valid */ -#define RK_PTE_PAGE_READABLE_V2 BIT(2) -#define RK_PTE_PAGE_WRITABLE_V2 BIT(1) static u32 rk_mk_pte_v2(phys_addr_t page, int prot) { u32 flags = 0; - flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0; - flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0; + flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0; + flags |= (prot & IOMMU_WRITE) ? 
RK_PTE_PAGE_WRITABLE : 0; return rk_mk_dte_v2(page) | flags; } diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 3c071782f6f16d7c3c0f2ef34b6d8088927694a2..ed33c6cce0836288df998fefe5745d743c8f41c1 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -10,28 +10,18 @@ #include #include #include +#include +#include #include -/* - * Physically contiguous memory regions can be mapped with 4 KiB alignment, - * we allow all page sizes that are an order of 4KiB (no special large page - * support so far). - */ -#define S390_IOMMU_PGSIZES (~0xFFFUL) - static const struct iommu_ops s390_iommu_ops; struct s390_domain { struct iommu_domain domain; struct list_head devices; unsigned long *dma_table; - spinlock_t dma_table_lock; spinlock_t list_lock; -}; - -struct s390_domain_device { - struct list_head list; - struct zpci_dev *zdev; + struct rcu_head rcu; }; static struct s390_domain *to_s390_domain(struct iommu_domain *dom) @@ -67,119 +57,125 @@ static struct iommu_domain *s390_domain_alloc(unsigned domain_type) kfree(s390_domain); return NULL; } + s390_domain->domain.geometry.force_aperture = true; + s390_domain->domain.geometry.aperture_start = 0; + s390_domain->domain.geometry.aperture_end = ZPCI_TABLE_SIZE_RT - 1; - spin_lock_init(&s390_domain->dma_table_lock); spin_lock_init(&s390_domain->list_lock); - INIT_LIST_HEAD(&s390_domain->devices); + INIT_LIST_HEAD_RCU(&s390_domain->devices); return &s390_domain->domain; } -static void s390_domain_free(struct iommu_domain *domain) +static void s390_iommu_rcu_free_domain(struct rcu_head *head) { - struct s390_domain *s390_domain = to_s390_domain(domain); + struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu); dma_cleanup_tables(s390_domain->dma_table); kfree(s390_domain); } +static void s390_domain_free(struct iommu_domain *domain) +{ + struct s390_domain *s390_domain = to_s390_domain(domain); + + rcu_read_lock(); + WARN_ON(!list_empty(&s390_domain->devices)); + rcu_read_unlock(); + + call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain); +} + +static void __s390_iommu_detach_device(struct zpci_dev *zdev) +{ + struct s390_domain *s390_domain = zdev->s390_domain; + unsigned long flags; + + if (!s390_domain) + return; + + spin_lock_irqsave(&s390_domain->list_lock, flags); + list_del_rcu(&zdev->iommu_list); + spin_unlock_irqrestore(&s390_domain->list_lock, flags); + + zpci_unregister_ioat(zdev, 0); + zdev->s390_domain = NULL; + zdev->dma_table = NULL; +} + static int s390_iommu_attach_device(struct iommu_domain *domain, struct device *dev) { struct s390_domain *s390_domain = to_s390_domain(domain); struct zpci_dev *zdev = to_zpci_dev(dev); - struct s390_domain_device *domain_device; unsigned long flags; - int cc, rc; + u8 status; + int cc; if (!zdev) return -ENODEV; - domain_device = kzalloc(sizeof(*domain_device), GFP_KERNEL); - if (!domain_device) - return -ENOMEM; - - if (zdev->dma_table && !zdev->s390_domain) { - cc = zpci_dma_exit_device(zdev); - if (cc) { - rc = -EIO; - goto out_free; - } - } + if (WARN_ON(domain->geometry.aperture_start > zdev->end_dma || + domain->geometry.aperture_end < zdev->start_dma)) + return -EINVAL; if (zdev->s390_domain) - zpci_unregister_ioat(zdev, 0); + __s390_iommu_detach_device(zdev); + else if (zdev->dma_table) + zpci_dma_exit_device(zdev); - zdev->dma_table = s390_domain->dma_table; cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, - virt_to_phys(zdev->dma_table)); - if (cc) { - rc = -EIO; - goto out_restore; - } + 
virt_to_phys(s390_domain->dma_table), &status); + /* + * If the device is undergoing error recovery the reset code + * will re-establish the new domain. + */ + if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL) + return -EIO; + zdev->dma_table = s390_domain->dma_table; - spin_lock_irqsave(&s390_domain->list_lock, flags); - /* First device defines the DMA range limits */ - if (list_empty(&s390_domain->devices)) { - domain->geometry.aperture_start = zdev->start_dma; - domain->geometry.aperture_end = zdev->end_dma; - domain->geometry.force_aperture = true; - /* Allow only devices with identical DMA range limits */ - } else if (domain->geometry.aperture_start != zdev->start_dma || - domain->geometry.aperture_end != zdev->end_dma) { - rc = -EINVAL; - spin_unlock_irqrestore(&s390_domain->list_lock, flags); - goto out_restore; - } - domain_device->zdev = zdev; zdev->s390_domain = s390_domain; - list_add(&domain_device->list, &s390_domain->devices); + + spin_lock_irqsave(&s390_domain->list_lock, flags); + list_add_rcu(&zdev->iommu_list, &s390_domain->devices); spin_unlock_irqrestore(&s390_domain->list_lock, flags); return 0; - -out_restore: - if (!zdev->s390_domain) { - zpci_dma_init_device(zdev); - } else { - zdev->dma_table = zdev->s390_domain->dma_table; - zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, - virt_to_phys(zdev->dma_table)); - } -out_free: - kfree(domain_device); - - return rc; } static void s390_iommu_detach_device(struct iommu_domain *domain, struct device *dev) { - struct s390_domain *s390_domain = to_s390_domain(domain); struct zpci_dev *zdev = to_zpci_dev(dev); - struct s390_domain_device *domain_device, *tmp; - unsigned long flags; - int found = 0; - if (!zdev) - return; + WARN_ON(zdev->s390_domain != to_s390_domain(domain)); - spin_lock_irqsave(&s390_domain->list_lock, flags); - list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices, - list) { - if (domain_device->zdev == zdev) { - list_del(&domain_device->list); - kfree(domain_device); - found = 1; - break; - } + __s390_iommu_detach_device(zdev); + zpci_dma_init_device(zdev); +} + +static void s390_iommu_get_resv_regions(struct device *dev, + struct list_head *list) +{ + struct zpci_dev *zdev = to_zpci_dev(dev); + struct iommu_resv_region *region; + + if (zdev->start_dma) { + region = iommu_alloc_resv_region(0, zdev->start_dma, 0, + IOMMU_RESV_RESERVED, GFP_KERNEL); + if (!region) + return; + list_add_tail(&region->list, list); } - spin_unlock_irqrestore(&s390_domain->list_lock, flags); - if (found && (zdev->s390_domain == s390_domain)) { - zdev->s390_domain = NULL; - zpci_unregister_ioat(zdev, 0); - zpci_dma_init_device(zdev); + if (zdev->end_dma < ZPCI_TABLE_SIZE_RT - 1) { + region = iommu_alloc_resv_region(zdev->end_dma + 1, + ZPCI_TABLE_SIZE_RT - zdev->end_dma - 1, + 0, IOMMU_RESV_RESERVED, GFP_KERNEL); + if (!region) + return; + list_add_tail(&region->list, list); } } @@ -192,55 +188,88 @@ static struct iommu_device *s390_iommu_probe_device(struct device *dev) zdev = to_zpci_dev(dev); + if (zdev->start_dma > zdev->end_dma || + zdev->start_dma > ZPCI_TABLE_SIZE_RT - 1) + return ERR_PTR(-EINVAL); + + if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1) + zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1; + return &zdev->iommu_dev; } static void s390_iommu_release_device(struct device *dev) { struct zpci_dev *zdev = to_zpci_dev(dev); - struct iommu_domain *domain; /* - * This is a workaround for a scenario where the IOMMU API common code - * "forgets" to call the detach_dev callback:
After binding a device - * to vfio-pci and completing the VFIO_SET_IOMMU ioctl (which triggers - * the attach_dev), removing the device via - * "echo 1 > /sys/bus/pci/devices/.../remove" won't trigger detach_dev, - * only release_device will be called via the BUS_NOTIFY_REMOVED_DEVICE - * notifier. - * - * So let's call detach_dev from here if it hasn't been called before. + * release_device is expected to detach any domain currently attached + * to the device, but keep it attached to other devices in the group. */ - if (zdev && zdev->s390_domain) { - domain = iommu_get_domain_for_dev(dev); - if (domain) - s390_iommu_detach_device(domain, dev); + if (zdev) + __s390_iommu_detach_device(zdev); +} + +static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain) +{ + struct s390_domain *s390_domain = to_s390_domain(domain); + struct zpci_dev *zdev; + + rcu_read_lock(); + list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) { + zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma, + zdev->end_dma - zdev->start_dma + 1); } + rcu_read_unlock(); } -static int s390_iommu_update_trans(struct s390_domain *s390_domain, - phys_addr_t pa, dma_addr_t dma_addr, - size_t size, int flags) +static void s390_iommu_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) { - struct s390_domain_device *domain_device; - phys_addr_t page_addr = pa & PAGE_MASK; - dma_addr_t start_dma_addr = dma_addr; - unsigned long irq_flags, nr_pages, i; - unsigned long *entry; - int rc = 0; + struct s390_domain *s390_domain = to_s390_domain(domain); + size_t size = gather->end - gather->start + 1; + struct zpci_dev *zdev; - if (dma_addr < s390_domain->domain.geometry.aperture_start || - dma_addr + size > s390_domain->domain.geometry.aperture_end) - return -EINVAL; + /* If gather was never added to there is nothing to flush */ + if (!gather->end) + return; - nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; - if (!nr_pages) - return 0; + rcu_read_lock(); + list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) { + zpci_refresh_trans((u64)zdev->fh << 32, gather->start, + size); + } + rcu_read_unlock(); +} + +static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ + struct s390_domain *s390_domain = to_s390_domain(domain); + struct zpci_dev *zdev; + + rcu_read_lock(); + list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) { + if (!zdev->tlb_refresh) + continue; + zpci_refresh_trans((u64)zdev->fh << 32, + iova, size); + } + rcu_read_unlock(); +} + +static int s390_iommu_validate_trans(struct s390_domain *s390_domain, + phys_addr_t pa, dma_addr_t dma_addr, + unsigned long nr_pages, int flags) +{ + phys_addr_t page_addr = pa & PAGE_MASK; + unsigned long *entry; + unsigned long i; + int rc; - spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags); for (i = 0; i < nr_pages; i++) { entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr); - if (!entry) { + if (unlikely(!entry)) { rc = -ENOMEM; goto undo_cpu_trans; } @@ -249,47 +278,70 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain, dma_addr += PAGE_SIZE; } - spin_lock(&s390_domain->list_lock); - list_for_each_entry(domain_device, &s390_domain->devices, list) { - rc = zpci_refresh_trans((u64) domain_device->zdev->fh << 32, - start_dma_addr, nr_pages * PAGE_SIZE); - if (rc) + return 0; + +undo_cpu_trans: + while (i-- > 0) { + dma_addr -= PAGE_SIZE; + entry = dma_walk_cpu_trans(s390_domain->dma_table, + dma_addr); + if (!entry) break; + 
dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID); } - spin_unlock(&s390_domain->list_lock); -undo_cpu_trans: - if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) { - flags = ZPCI_PTE_INVALID; - while (i-- > 0) { - page_addr -= PAGE_SIZE; - dma_addr -= PAGE_SIZE; - entry = dma_walk_cpu_trans(s390_domain->dma_table, - dma_addr); - if (!entry) - break; - dma_update_cpu_trans(entry, page_addr, flags); + return rc; +} + +static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain, + dma_addr_t dma_addr, unsigned long nr_pages) +{ + unsigned long *entry; + unsigned long i; + int rc = 0; + + for (i = 0; i < nr_pages; i++) { + entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr); + if (unlikely(!entry)) { + rc = -EINVAL; + break; } + dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID); + dma_addr += PAGE_SIZE; } - spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags); return rc; } -static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp) +static int s390_iommu_map_pages(struct iommu_domain *domain, + unsigned long iova, phys_addr_t paddr, + size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped) { struct s390_domain *s390_domain = to_s390_domain(domain); + size_t size = pgcount << __ffs(pgsize); int flags = ZPCI_PTE_VALID, rc = 0; + if (pgsize != SZ_4K) + return -EINVAL; + + if (iova < s390_domain->domain.geometry.aperture_start || + (iova + size - 1) > s390_domain->domain.geometry.aperture_end) + return -EINVAL; + + if (!IS_ALIGNED(iova | paddr, pgsize)) + return -EINVAL; + if (!(prot & IOMMU_READ)) return -EINVAL; if (!(prot & IOMMU_WRITE)) flags |= ZPCI_TABLE_PROTECTED; - rc = s390_iommu_update_trans(s390_domain, paddr, iova, - size, flags); + rc = s390_iommu_validate_trans(s390_domain, paddr, iova, + pgcount, flags); + if (!rc) + *mapped = size; return rc; } @@ -298,7 +350,8 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { struct s390_domain *s390_domain = to_s390_domain(domain); - unsigned long *sto, *pto, *rto, flags; + unsigned long *rto, *sto, *pto; + unsigned long ste, pte, rte; unsigned int rtx, sx, px; phys_addr_t phys = 0; @@ -311,38 +364,40 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain, px = calc_px(iova); rto = s390_domain->dma_table; - spin_lock_irqsave(&s390_domain->dma_table_lock, flags); - if (rto && reg_entry_isvalid(rto[rtx])) { - sto = get_rt_sto(rto[rtx]); - if (sto && reg_entry_isvalid(sto[sx])) { - pto = get_st_pto(sto[sx]); - if (pto && pt_entry_isvalid(pto[px])) - phys = pto[px] & ZPCI_PTE_ADDR_MASK; + rte = READ_ONCE(rto[rtx]); + if (reg_entry_isvalid(rte)) { + sto = get_rt_sto(rte); + ste = READ_ONCE(sto[sx]); + if (reg_entry_isvalid(ste)) { + pto = get_st_pto(ste); + pte = READ_ONCE(pto[px]); + if (pt_entry_isvalid(pte)) + phys = pte & ZPCI_PTE_ADDR_MASK; } } - spin_unlock_irqrestore(&s390_domain->dma_table_lock, flags); return phys; } -static size_t s390_iommu_unmap(struct iommu_domain *domain, - unsigned long iova, size_t size, - struct iommu_iotlb_gather *gather) +static size_t s390_iommu_unmap_pages(struct iommu_domain *domain, + unsigned long iova, + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *gather) { struct s390_domain *s390_domain = to_s390_domain(domain); - int flags = ZPCI_PTE_INVALID; - phys_addr_t paddr; + size_t size = pgcount << __ffs(pgsize); int rc; - paddr = s390_iommu_iova_to_phys(domain, iova); - if (!paddr) + if (WARN_ON(iova < 
s390_domain->domain.geometry.aperture_start || + (iova + size - 1) > s390_domain->domain.geometry.aperture_end)) return 0; - rc = s390_iommu_update_trans(s390_domain, paddr, iova, - size, flags); + rc = s390_iommu_invalidate_trans(s390_domain, iova, pgcount); if (rc) return 0; + iommu_iotlb_gather_add_range(gather, iova, size); + return size; } @@ -380,12 +435,16 @@ static const struct iommu_ops s390_iommu_ops = { .probe_device = s390_iommu_probe_device, .release_device = s390_iommu_release_device, .device_group = generic_device_group, - .pgsize_bitmap = S390_IOMMU_PGSIZES, + .pgsize_bitmap = SZ_4K, + .get_resv_regions = s390_iommu_get_resv_regions, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = s390_iommu_attach_device, .detach_dev = s390_iommu_detach_device, - .map = s390_iommu_map, - .unmap = s390_iommu_unmap, + .map_pages = s390_iommu_map_pages, + .unmap_pages = s390_iommu_unmap_pages, + .flush_iotlb_all = s390_iommu_flush_iotlb_all, + .iotlb_sync = s390_iommu_iotlb_sync, + .iotlb_sync_map = s390_iommu_iotlb_sync_map, .iova_to_phys = s390_iommu_iova_to_phys, .free = s390_domain_free, } diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c index fadd2c907222b9230b49b2ec54f6dbcfdf5914e5..219bfa11f7f48fbb6444c60a00761f28204f25b8 100644 --- a/drivers/iommu/sprd-iommu.c +++ b/drivers/iommu/sprd-iommu.c @@ -237,10 +237,8 @@ static int sprd_iommu_attach_device(struct iommu_domain *domain, struct sprd_iommu_domain *dom = to_sprd_domain(domain); size_t pgt_size = sprd_iommu_pgt_size(domain); - if (dom->sdev) { - pr_err("There's already a device attached to this domain.\n"); + if (dom->sdev) return -EINVAL; - } dom->pgt_va = dma_alloc_coherent(sdev->dev, pgt_size, &dom->pgt_pa, GFP_KERNEL); if (!dom->pgt_va) @@ -273,10 +271,11 @@ static void sprd_iommu_detach_device(struct iommu_domain *domain, } static int sprd_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp) + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped) { struct sprd_iommu_domain *dom = to_sprd_domain(domain); - unsigned int page_num = size >> SPRD_IOMMU_PAGE_SHIFT; + size_t size = pgcount * SPRD_IOMMU_PAGE_SIZE; unsigned long flags; unsigned int i; u32 *pgt_base_iova; @@ -298,35 +297,37 @@ static int sprd_iommu_map(struct iommu_domain *domain, unsigned long iova, pgt_base_iova = dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT); spin_lock_irqsave(&dom->pgtlock, flags); - for (i = 0; i < page_num; i++) { + for (i = 0; i < pgcount; i++) { pgt_base_iova[i] = pabase >> SPRD_IOMMU_PAGE_SHIFT; pabase += SPRD_IOMMU_PAGE_SIZE; } spin_unlock_irqrestore(&dom->pgtlock, flags); + *mapped = size; return 0; } static size_t sprd_iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size, struct iommu_iotlb_gather *iotlb_gather) + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *iotlb_gather) { struct sprd_iommu_domain *dom = to_sprd_domain(domain); unsigned long flags; u32 *pgt_base_iova; - unsigned int page_num = size >> SPRD_IOMMU_PAGE_SHIFT; + size_t size = pgcount * SPRD_IOMMU_PAGE_SIZE; unsigned long start = domain->geometry.aperture_start; unsigned long end = domain->geometry.aperture_end; if (iova < start || (iova + size) > (end + 1)) - return -EINVAL; + return 0; pgt_base_iova = dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT); spin_lock_irqsave(&dom->pgtlock, flags); - memset(pgt_base_iova, 0, page_num * sizeof(u32)); + memset(pgt_base_iova, 0, pgcount * 
sizeof(u32)); spin_unlock_irqrestore(&dom->pgtlock, flags); - return 0; + return size; } static void sprd_iommu_sync_map(struct iommu_domain *domain, @@ -409,13 +410,13 @@ static const struct iommu_ops sprd_iommu_ops = { .probe_device = sprd_iommu_probe_device, .device_group = sprd_iommu_device_group, .of_xlate = sprd_iommu_of_xlate, - .pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT, + .pgsize_bitmap = SPRD_IOMMU_PAGE_SIZE, .owner = THIS_MODULE, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = sprd_iommu_attach_device, .detach_dev = sprd_iommu_detach_device, - .map = sprd_iommu_map, - .unmap = sprd_iommu_unmap, + .map_pages = sprd_iommu_map, + .unmap_pages = sprd_iommu_unmap, .iotlb_sync_map = sprd_iommu_sync_map, .iotlb_sync = sprd_iommu_sync, .iova_to_phys = sprd_iommu_iova_to_phys, diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c index cd9b74ee24def1b0dd5f7db12f95af359a364053..5b585eace3d46f8381e4feda764e0804064ece5f 100644 --- a/drivers/iommu/sun50i-iommu.c +++ b/drivers/iommu/sun50i-iommu.c @@ -27,6 +27,7 @@ #include #define IOMMU_RESET_REG 0x010 +#define IOMMU_RESET_RELEASE_ALL 0xffffffff #define IOMMU_ENABLE_REG 0x020 #define IOMMU_ENABLE_ENABLE BIT(0) @@ -92,6 +93,8 @@ #define NUM_PT_ENTRIES 256 #define PT_SIZE (NUM_PT_ENTRIES * PT_ENTRY_SIZE) +#define SPAGE_SIZE 4096 + struct sun50i_iommu { struct iommu_device iommu; @@ -270,7 +273,7 @@ static u32 sun50i_mk_pte(phys_addr_t page, int prot) enum sun50i_iommu_aci aci; u32 flags = 0; - if (prot & (IOMMU_READ | IOMMU_WRITE)) + if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE)) aci = SUN50I_IOMMU_ACI_RD_WR; else if (prot & IOMMU_READ) aci = SUN50I_IOMMU_ACI_RD; @@ -294,6 +297,62 @@ static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain, dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE); } +static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu, + unsigned long iova) +{ + u32 reg; + int ret; + + iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova); + iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(31, 12)); + iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG, + IOMMU_TLB_IVLD_ENABLE_ENABLE); + + ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG, + reg, !reg, 1, 2000); + if (ret) + dev_warn(iommu->dev, "TLB invalidation timed out!\n"); +} + +static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu, + unsigned long iova) +{ + u32 reg; + int ret; + + iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova); + iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG, + IOMMU_PC_IVLD_ENABLE_ENABLE); + + ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG, + reg, !reg, 1, 2000); + if (ret) + dev_warn(iommu->dev, "PTW cache invalidation timed out!\n"); +} + +static void sun50i_iommu_zap_range(struct sun50i_iommu *iommu, + unsigned long iova, size_t size) +{ + assert_spin_locked(&iommu->iommu_lock); + + iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0); + + sun50i_iommu_zap_iova(iommu, iova); + sun50i_iommu_zap_iova(iommu, iova + SPAGE_SIZE); + if (size > SPAGE_SIZE) { + sun50i_iommu_zap_iova(iommu, iova + size); + sun50i_iommu_zap_iova(iommu, iova + size + SPAGE_SIZE); + } + sun50i_iommu_zap_ptw_cache(iommu, iova); + sun50i_iommu_zap_ptw_cache(iommu, iova + SZ_1M); + if (size > SZ_1M) { + sun50i_iommu_zap_ptw_cache(iommu, iova + size); + sun50i_iommu_zap_ptw_cache(iommu, iova + size + SZ_1M); + } + + iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE); +} + static int sun50i_iommu_flush_all_tlb(struct 
sun50i_iommu *iommu) { u32 reg; @@ -343,6 +402,18 @@ static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain) spin_unlock_irqrestore(&iommu->iommu_lock, flags); } +static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ + struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); + struct sun50i_iommu *iommu = sun50i_domain->iommu; + unsigned long flags; + + spin_lock_irqsave(&iommu->iommu_lock, flags); + sun50i_iommu_zap_range(iommu, iova, size); + spin_unlock_irqrestore(&iommu->iommu_lock, flags); +} + static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain, struct iommu_iotlb_gather *gather) { @@ -511,7 +582,7 @@ static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain, sun50i_iommu_free_page_table(iommu, drop_pt); } - sun50i_table_flush(sun50i_domain, page_table, PT_SIZE); + sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES); sun50i_table_flush(sun50i_domain, dte_addr, 1); return page_table; @@ -601,7 +672,6 @@ static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type) struct sun50i_iommu_domain *sun50i_domain; if (type != IOMMU_DOMAIN_DMA && - type != IOMMU_DOMAIN_IDENTITY && type != IOMMU_DOMAIN_UNMANAGED) return NULL; @@ -766,6 +836,7 @@ static const struct iommu_ops sun50i_iommu_ops = { .attach_dev = sun50i_iommu_attach_device, .detach_dev = sun50i_iommu_detach_device, .flush_iotlb_all = sun50i_iommu_flush_iotlb_all, + .iotlb_sync_map = sun50i_iommu_iotlb_sync_map, .iotlb_sync = sun50i_iommu_iotlb_sync, .iova_to_phys = sun50i_iommu_iova_to_phys, .map = sun50i_iommu_map, @@ -785,6 +856,8 @@ static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu, report_iommu_fault(iommu->domain, iommu->dev, iova, prot); else dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n"); + + sun50i_iommu_zap_range(iommu, iova, SPAGE_SIZE); } static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu, @@ -868,8 +941,8 @@ static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu) static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id) { + u32 status, l1_status, l2_status, resets; struct sun50i_iommu *iommu = dev_id; - u32 status; spin_lock(&iommu->iommu_lock); @@ -879,6 +952,9 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id) return IRQ_NONE; } + l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG); + l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG); + if (status & IOMMU_INT_INVALID_L2PG) sun50i_iommu_handle_pt_irq(iommu, IOMMU_INT_ERR_ADDR_L2_REG, @@ -892,8 +968,9 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id) iommu_write(iommu, IOMMU_INT_CLR_REG, status); - iommu_write(iommu, IOMMU_RESET_REG, ~status); - iommu_write(iommu, IOMMU_RESET_REG, status); + resets = (status | l1_status | l2_status) & IOMMU_INT_MASTER_MASK; + iommu_write(iommu, IOMMU_RESET_REG, ~resets); + iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL); spin_unlock(&iommu->iommu_lock); diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index e5ca3cf1a94964e1312b04703c73520a5d0b732d..ed53279d1106c437a3eb0dde68ff9beef7e3f3fd 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -112,7 +112,7 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain, spin_lock(&gart->dom_lock); if (gart->active_domain && gart->active_domain != domain) { - ret = -EBUSY; + ret = -EINVAL; } else if (dev_iommu_priv_get(dev) != domain) { dev_iommu_priv_set(dev, domain); gart->active_domain = domain; 
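The iommu_domain_ops conversions above (ipmmu-vmsa, msm, mtk, mtk_v1, sprd, s390) all follow one pattern: the single-range .map/.unmap callbacks become .map_pages/.unmap_pages, which take a (pgsize, pgcount) pair and report progress through *mapped, and drivers whose hardware knows only one page size (mtk_v1, sprd, s390) now advertise exactly that size in pgsize_bitmap instead of a mask like ~0UL << PAGE_SHIFT, since larger requests arrive as a page count rather than a larger pgsize. What follows is a minimal sketch of the new contract, assuming a hypothetical driver with a fixed 4 KiB page; the demo_* names are invented for illustration and appear nowhere in the patch.

#include <linux/iommu.h>
#include <linux/sizes.h>

static int demo_map_pte(unsigned long iova, phys_addr_t paddr, int prot)
{
	return 0;	/* program one 4 KiB PTE; stubbed for illustration */
}

static int demo_map_pages(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t pgsize, size_t pgcount,
			  int prot, gfp_t gfp, size_t *mapped)
{
	size_t i;
	int ret = 0;

	/* pgsize is always a single bit taken from pgsize_bitmap */
	if (pgsize != SZ_4K)
		return -EINVAL;

	for (i = 0; i < pgcount; i++) {
		ret = demo_map_pte(iova, paddr, prot);
		if (ret)
			break;
		iova += pgsize;
		paddr += pgsize;
	}

	/* Report partial progress even on failure so the core can unwind. */
	*mapped = i * pgsize;
	return ret;
}

On the unmap side the callback returns the number of bytes actually unmapped, with 0 signalling failure; that is why sprd_iommu_unmap above now returns 0 for an out-of-range iova instead of -EINVAL, which was never a meaningful size_t return value.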
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 8b1b5c270e502c5c15b48e88ffc6fb346f0818e4..5b8fe9bfa9a5b9ce2a78d5f0bfa4bd511b27d07f 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -670,7 +670,7 @@ static int viommu_domain_finalise(struct viommu_endpoint *vdev, dev_err(vdev->dev, "granule 0x%lx larger than system page size 0x%lx\n", viommu_page_size, PAGE_SIZE); - return -EINVAL; + return -ENODEV; } ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, @@ -697,7 +697,7 @@ static int viommu_domain_finalise(struct viommu_endpoint *vdev, if (ret) { ida_free(&viommu->domain_ids, vdomain->id); vdomain->viommu = NULL; - return -EOPNOTSUPP; + return ret; } } @@ -734,8 +734,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev) */ ret = viommu_domain_finalise(vdev, domain); } else if (vdomain->viommu != vdev->viommu) { - dev_err(dev, "cannot attach to foreign vIOMMU\n"); - ret = -EXDEV; + ret = -EINVAL; } mutex_unlock(&vdomain->mutex); diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index d07568a2c539fb9410336a7d2e606831d6becbff..caa952c40ff9284ab29b41c8662a4d3c88b17f38 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -538,31 +538,14 @@ config TI_PRUSS_INTC different processors within the SoC. config RISCV_INTC - bool "RISC-V Local Interrupt Controller" + bool depends on RISCV - default y - help - This enables support for the per-HART local interrupt controller - found in standard RISC-V systems. The per-HART local interrupt - controller handles timer interrupts, software interrupts, and - hardware interrupts. Without a per-HART local interrupt controller, - a RISC-V system will be unable to handle any interrupts. - - If you don't know what to do here, say Y. config SIFIVE_PLIC - bool "SiFive Platform-Level Interrupt Controller" + bool depends on RISCV select IRQ_DOMAIN_HIERARCHY select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP - help - This enables support for the PLIC chip found in SiFive (and - potentially other) RISC-V systems. The PLIC controls devices - interrupts and connects them to each core's local interrupt - controller. Aside from timer and software interrupts, all other - interrupt sources are subordinate to the PLIC. - - If you don't know what to do here, say Y. 
config EXYNOS_IRQ_COMBINER bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index 4f7eaa17fb274883605a878226a046c8d7f27ee9..e840609c50eb74b917f0c1f5e93cc8ee24c9d84c 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -3217,6 +3217,7 @@ static int hfcm_l1callback(struct dchannel *dch, u_int cmd) { struct hfc_multi *hc = dch->hw; + struct sk_buff_head free_queue; u_long flags; switch (cmd) { @@ -3245,6 +3246,7 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd) l1_event(dch->l1, HW_POWERUP_IND); break; case HW_DEACT_REQ: + __skb_queue_head_init(&free_queue); /* start deactivation */ spin_lock_irqsave(&hc->lock, flags); if (hc->ctype == HFC_TYPE_E1) { @@ -3264,20 +3266,21 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd) plxsd_checksync(hc, 0); } } - skb_queue_purge(&dch->squeue); + skb_queue_splice_init(&dch->squeue, &free_queue); if (dch->tx_skb) { - dev_kfree_skb(dch->tx_skb); + __skb_queue_tail(&free_queue, dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { - dev_kfree_skb(dch->rx_skb); + __skb_queue_tail(&free_queue, dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) del_timer(&dch->timer); spin_unlock_irqrestore(&hc->lock, flags); + __skb_queue_purge(&free_queue); break; case HW_POWERUP_REQ: spin_lock_irqsave(&hc->lock, flags); @@ -3384,6 +3387,9 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) case PH_DEACTIVATE_REQ: test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); if (dch->dev.D.protocol != ISDN_P_TE_S0) { + struct sk_buff_head free_queue; + + __skb_queue_head_init(&free_queue); spin_lock_irqsave(&hc->lock, flags); if (debug & DEBUG_HFCMULTI_MSG) printk(KERN_DEBUG @@ -3405,14 +3411,14 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) /* deactivate */ dch->state = 1; } - skb_queue_purge(&dch->squeue); + skb_queue_splice_init(&dch->squeue, &free_queue); if (dch->tx_skb) { - dev_kfree_skb(dch->tx_skb); + __skb_queue_tail(&free_queue, dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { - dev_kfree_skb(dch->rx_skb); + __skb_queue_tail(&free_queue, dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); @@ -3424,6 +3430,7 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) #endif ret = 0; spin_unlock_irqrestore(&hc->lock, flags); + __skb_queue_purge(&free_queue); } else ret = l1_event(dch->l1, hh->prim); break; diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index e964a8dd8512aea84f15e75361ae96b3d32c7d70..c0331b26801087ad461b6c0f09dfc59e324badea 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -1617,16 +1617,19 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); spin_lock_irqsave(&hc->lock, flags); if (hc->hw.protocol == ISDN_P_NT_S0) { + struct sk_buff_head free_queue; + + __skb_queue_head_init(&free_queue); /* prepare deactivation */ Write_hfc(hc, HFCPCI_STATES, 0x40); - skb_queue_purge(&dch->squeue); + skb_queue_splice_init(&dch->squeue, &free_queue); if (dch->tx_skb) { - dev_kfree_skb(dch->tx_skb); + __skb_queue_tail(&free_queue, dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { - dev_kfree_skb(dch->rx_skb); + __skb_queue_tail(&free_queue, dch->rx_skb); dch->rx_skb = NULL; } 
test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); @@ -1639,10 +1642,12 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) hc->hw.mst_m &= ~HFCPCI_MASTER; Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); ret = 0; + spin_unlock_irqrestore(&hc->lock, flags); + __skb_queue_purge(&free_queue); } else { ret = l1_event(dch->l1, hh->prim); + spin_unlock_irqrestore(&hc->lock, flags); } - spin_unlock_irqrestore(&hc->lock, flags); break; } if (!ret) diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index 651f2f8f685b763b445ac23c555db7c3c679ee62..1efd17979f240e9000cd219dbc9d72a99ff64c7a 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -326,20 +326,24 @@ hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); if (hw->protocol == ISDN_P_NT_S0) { + struct sk_buff_head free_queue; + + __skb_queue_head_init(&free_queue); hfcsusb_ph_command(hw, HFC_L1_DEACTIVATE_NT); spin_lock_irqsave(&hw->lock, flags); - skb_queue_purge(&dch->squeue); + skb_queue_splice_init(&dch->squeue, &free_queue); if (dch->tx_skb) { - dev_kfree_skb(dch->tx_skb); + __skb_queue_tail(&free_queue, dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { - dev_kfree_skb(dch->rx_skb); + __skb_queue_tail(&free_queue, dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); spin_unlock_irqrestore(&hw->lock, flags); + __skb_queue_purge(&free_queue); #ifdef FIXME if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags)) dchannel_sched_event(&hc->dch, D_CLEARBUSY); @@ -1330,7 +1334,7 @@ tx_iso_complete(struct urb *urb) printk("\n"); } - dev_kfree_skb(tx_skb); + dev_consume_skb_irq(tx_skb); tx_skb = NULL; if (fifo->dch && get_next_dframe(fifo->dch)) tx_skb = fifo->dch->tx_skb; diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c index 90ee56d07a6e92f434619e15a90cd2c613093abf..9120be5903258ba8d328287dbf9328f384ad15d3 100644 --- a/drivers/isdn/mISDN/core.c +++ b/drivers/isdn/mISDN/core.c @@ -139,9 +139,9 @@ static struct attribute *mISDN_attrs[] = { }; ATTRIBUTE_GROUPS(mISDN); -static int mISDN_uevent(struct device *dev, struct kobj_uevent_env *env) +static int mISDN_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct mISDNdevice *mdev = dev_to_mISDN(dev); + const struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return 0; diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c index 6f270c0272fb1b49a0705404dc795a97549dfda5..35c61311e7fd8b65122c993e393c44f133757a12 100644 --- a/drivers/leds/blink/leds-lgm-sso.c +++ b/drivers/leds/blink/leds-lgm-sso.c @@ -635,9 +635,8 @@ __sso_led_dt_parse(struct sso_led_priv *priv, struct fwnode_handle *fw_ssoled) led->priv = priv; desc = &led->desc; - led->gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, - fwnode_child, - GPIOD_ASIS, NULL); + led->gpiod = devm_fwnode_gpiod_get(dev, fwnode_child, NULL, + GPIOD_ASIS, NULL); if (IS_ERR(led->gpiod)) { ret = dev_err_probe(dev, PTR_ERR(led->gpiod), "led: get gpio fail!\n"); goto __dt_err; diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c index 3fb6a2fdaefa67bd2818cd29bfdb8f1cb797b7ec..e19cc8a7b7cac2c997700e4d371ebf8e9436d241 100644 --- a/drivers/leds/leds-blinkm.c +++ b/drivers/leds/leds-blinkm.c @@ -139,11 +139,11 @@ static ssize_t show_color_common(struct device *dev, char *buf, int color) return ret; switch (color) { case RED: - return scnprintf(buf, PAGE_SIZE, "%02X\n", data->red); + return 
sysfs_emit(buf, "%02X\n", data->red); case GREEN: - return scnprintf(buf, PAGE_SIZE, "%02X\n", data->green); + return sysfs_emit(buf, "%02X\n", data->green); case BLUE: - return scnprintf(buf, PAGE_SIZE, "%02X\n", data->blue); + return sysfs_emit(buf, "%02X\n", data->blue); default: return -EINVAL; } @@ -253,7 +253,7 @@ static DEVICE_ATTR_RW(blue); static ssize_t test_show(struct device *dev, struct device_attribute *attr, char *buf) { - return scnprintf(buf, PAGE_SIZE, + return sysfs_emit(buf, "#Write into test to start test sequence!#\n"); } diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index 092eb59a7d325d29a1ed36b80002a1485d2710f7..ce4e79939731d89d69effdb610ac7cbdfd1d28b0 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -151,9 +151,8 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev) * will be updated after LED class device is registered, * Only then the final LED name is known. */ - led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child, - GPIOD_ASIS, - NULL); + led.gpiod = devm_fwnode_gpiod_get(dev, child, NULL, GPIOD_ASIS, + NULL); if (IS_ERR(led.gpiod)) { fwnode_handle_put(child); return ERR_CAST(led.gpiod); diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c index 52b59b62f437cd07c60aac30d2986d55efc9da79..b2f4c4ec7c5671906e3cb4f77d4e7c72931c07c5 100644 --- a/drivers/leds/leds-is31fl319x.c +++ b/drivers/leds/leds-is31fl319x.c @@ -38,6 +38,7 @@ #define IS31FL3190_CURRENT_uA_MIN 5000 #define IS31FL3190_CURRENT_uA_DEFAULT 42000 #define IS31FL3190_CURRENT_uA_MAX 42000 +#define IS31FL3190_CURRENT_SHIFT 2 #define IS31FL3190_CURRENT_MASK GENMASK(4, 2) #define IS31FL3190_CURRENT_5_mA 0x02 #define IS31FL3190_CURRENT_10_mA 0x01 @@ -553,7 +554,7 @@ static int is31fl319x_probe(struct i2c_client *client) is31fl3196_db_to_gain(is31->audio_gain_db)); else regmap_update_bits(is31->regmap, IS31FL3190_CURRENT, IS31FL3190_CURRENT_MASK, - is31fl3190_microamp_to_cs(dev, aggregated_led_microamp)); + is31fl3190_microamp_to_cs(dev, aggregated_led_microamp) << IS31FL3190_CURRENT_SHIFT); for (i = 0; i < is31->cdef->num_leds; i++) { struct is31fl319x_led *led = &is31->leds[i]; diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c index 43d5970d96aa229f08d7d563bb68580873b10a94..bcd414eb47246e514afd16b2298c57ae3872f1ab 100644 --- a/drivers/leds/leds-lm3533.c +++ b/drivers/leds/leds-lm3533.c @@ -314,7 +314,7 @@ static ssize_t show_id(struct device *dev, struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lm3533_led *led = to_lm3533_led(led_cdev); - return scnprintf(buf, PAGE_SIZE, "%d\n", led->id); + return sysfs_emit(buf, "%d\n", led->id); } /* @@ -344,7 +344,7 @@ static ssize_t show_risefalltime(struct device *dev, if (ret) return ret; - return scnprintf(buf, PAGE_SIZE, "%x\n", val); + return sysfs_emit(buf, "%x\n", val); } static ssize_t show_risetime(struct device *dev, @@ -415,7 +415,7 @@ static ssize_t show_als_channel(struct device *dev, channel = (val & LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK) + 1; - return scnprintf(buf, PAGE_SIZE, "%u\n", channel); + return sysfs_emit(buf, "%u\n", channel); } static ssize_t store_als_channel(struct device *dev, @@ -465,7 +465,7 @@ static ssize_t show_als_en(struct device *dev, enable = val & LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK; - return scnprintf(buf, PAGE_SIZE, "%d\n", enable); + return sysfs_emit(buf, "%d\n", enable); } static ssize_t store_als_en(struct device *dev, @@ -518,7 +518,7 @@ static ssize_t show_linear(struct device *dev, 
else linear = 0; - return scnprintf(buf, PAGE_SIZE, "%x\n", linear); + return sysfs_emit(buf, "%x\n", linear); } static ssize_t store_linear(struct device *dev, @@ -564,7 +564,7 @@ static ssize_t show_pwm(struct device *dev, if (ret) return ret; - return scnprintf(buf, PAGE_SIZE, "%u\n", val); + return sysfs_emit(buf, "%u\n", val); } static ssize_t store_pwm(struct device *dev, diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index 7ff20c2605041bb8ec79c69288235f89abb115e6..19478d9c19a70d5932d3c3af7a150589ba112242 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c @@ -469,7 +469,7 @@ static ssize_t lp5521_selftest(struct device *dev, ret = lp5521_run_selftest(chip, buf); mutex_unlock(&chip->lock); - return scnprintf(buf, PAGE_SIZE, "%s\n", ret ? "FAIL" : "OK"); + return sysfs_emit(buf, "%s\n", ret ? "FAIL" : "OK"); } /* device attributes */ diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index 369d40b0b65b85d64faeb82c43a6772b1d374662..e08e3de1428df3ff37d7a2c31bcfa5504ba6455c 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c @@ -581,8 +581,8 @@ static ssize_t lp5523_selftest(struct device *dev, struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); struct lp55xx_chip *chip = led->chip; struct lp55xx_platform_data *pdata = chip->pdata; - int i, ret, pos = 0; - u8 status, adc, vdd; + int ret, pos = 0; + u8 status, adc, vdd, i; mutex_lock(&chip->lock); @@ -612,20 +612,21 @@ static ssize_t lp5523_selftest(struct device *dev, vdd--; /* There may be some fluctuation in measurement */ - for (i = 0; i < LP5523_MAX_LEDS; i++) { - /* Skip non-existing channels */ + for (i = 0; i < pdata->num_channels; i++) { + /* Skip disabled channels */ if (pdata->led_config[i].led_current == 0) continue; /* Set default current */ - lp55xx_write(chip, LP5523_REG_LED_CURRENT_BASE + i, + lp55xx_write(chip, LP5523_REG_LED_CURRENT_BASE + led->chan_nr, pdata->led_config[i].led_current); - lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + i, 0xff); + lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + led->chan_nr, + 0xff); /* let current stabilize 2 - 4ms before measurements start */ usleep_range(2000, 4000); lp55xx_write(chip, LP5523_REG_LED_TEST_CTRL, - LP5523_EN_LEDTEST | i); + LP5523_EN_LEDTEST | led->chan_nr); /* ADC conversion time is 2.7 ms typically */ usleep_range(3000, 6000); ret = lp55xx_read(chip, LP5523_REG_STATUS, &status); @@ -633,20 +634,22 @@ static ssize_t lp5523_selftest(struct device *dev, goto fail; if (!(status & LP5523_LEDTEST_DONE)) - usleep_range(3000, 6000);/* Was not ready. Wait. */ + usleep_range(3000, 6000); /* Was not ready. Wait. 
*/ ret = lp55xx_read(chip, LP5523_REG_LED_TEST_ADC, &adc); if (ret < 0) goto fail; if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM) - pos += sprintf(buf + pos, "LED %d FAIL\n", i); + pos += sprintf(buf + pos, "LED %d FAIL\n", + led->chan_nr); - lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + i, 0x00); + lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + led->chan_nr, + 0x00); /* Restore current */ - lp55xx_write(chip, LP5523_REG_LED_CURRENT_BASE + i, - led->led_current); + lp55xx_write(chip, LP5523_REG_LED_CURRENT_BASE + led->chan_nr, + led->led_current); led++; } if (pos == 0) diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c index 9fdfc1b9a1a0cb3e4106a2d89abe3d02b2a8815e..c1940964067af8c09450a87bdc658b542cbdd0f0 100644 --- a/drivers/leds/leds-lp55xx-common.c +++ b/drivers/leds/leds-lp55xx-common.c @@ -88,7 +88,7 @@ static ssize_t led_current_show(struct device *dev, { struct lp55xx_led *led = dev_to_lp55xx_led(dev); - return scnprintf(buf, PAGE_SIZE, "%d\n", led->led_current); + return sysfs_emit(buf, "%d\n", led->led_current); } static ssize_t led_current_store(struct device *dev, @@ -121,7 +121,7 @@ static ssize_t max_current_show(struct device *dev, { struct lp55xx_led *led = dev_to_lp55xx_led(dev); - return scnprintf(buf, PAGE_SIZE, "%d\n", led->max_current); + return sysfs_emit(buf, "%d\n", led->max_current); } static DEVICE_ATTR_RW(led_current); @@ -166,7 +166,7 @@ static int lp55xx_init_led(struct lp55xx_led *led, struct mc_subled *mc_led_info; struct led_classdev *led_cdev; char name[32]; - int i, j = 0; + int i; int ret; if (chan >= max_channel) { @@ -201,7 +201,6 @@ static int lp55xx_init_led(struct lp55xx_led *led, pdata->led_config[chan].color_id[i]; mc_led_info[i].channel = pdata->led_config[chan].output_num[i]; - j++; } led->mc_cdev.subled_info = mc_led_info; diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c index c0bddb33888d7f1e2c4f725373c86b737a1e3826..c8d7f55c9dec29a052e9b5f3e6a83a003a9a7deb 100644 --- a/drivers/leds/leds-max8997.c +++ b/drivers/leds/leds-max8997.c @@ -238,11 +238,6 @@ static int max8997_led_probe(struct platform_device *pdev) char name[20]; int ret = 0; - if (pdata == NULL) { - dev_err(&pdev->dev, "no platform data\n"); - return -ENODEV; - } - led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL); if (led == NULL) return -ENOMEM; @@ -258,7 +253,7 @@ static int max8997_led_probe(struct platform_device *pdev) led->iodev = iodev; /* initialize mode and brightness according to platform_data */ - if (pdata->led_pdata) { + if (pdata && pdata->led_pdata) { u8 mode = 0, brightness = 0; mode = pdata->led_pdata->mode[led->id]; diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index 81aaf21212d7d6545aeade47ae651ce2a91214d8..33ec4543fb4f82b21216f6105f2953366330ed50 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c @@ -145,12 +145,6 @@ static inline int pca95xx_num_input_regs(int bits) return (bits + 7) / 8; } -/* 4 bits per LED selector register */ -static inline int pca95xx_num_led_regs(int bits) -{ - return (bits + 3) / 4; -} - /* * Return an LED selector register value based on an existing one, with * the appropriate 2-bit state value set for the given LED number (0-3). 
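A second pattern repeats through the LED hunks above: every sysfs show() callback that used scnprintf(buf, PAGE_SIZE, ...) now uses sysfs_emit(buf, ...). A minimal sketch of the conversion follows, using a hypothetical demo attribute that is not part of the patch.

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	int value = 42;	/* stand-in for real driver state */

	/*
	 * sysfs buffers are always a full, page-aligned PAGE_SIZE page,
	 * so sysfs_emit() drops the explicit length argument and WARNs
	 * when handed a pointer that is not page-aligned; such misuse
	 * would pass silently through scnprintf(buf, PAGE_SIZE, ...).
	 */
	return sysfs_emit(buf, "%d\n", value);
}
static DEVICE_ATTR_RO(demo);

For output appended at an offset, as in the lp5523 selftest loop that keeps its sprintf(buf + pos, ...) calls, the companion helper sysfs_emit_at(buf, pos, ...) expresses the same bounds-checked idiom.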
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c index 02f51cc618376a49a63cf2771902aed8cda5c26f..c1a56259226fb7b4401e4443ff76e93e113827df 100644 --- a/drivers/leds/rgb/leds-qcom-lpg.c +++ b/drivers/leds/rgb/leds-qcom-lpg.c @@ -602,8 +602,8 @@ static void lpg_brightness_set(struct lpg_led *led, struct led_classdev *cdev, lpg_lut_sync(lpg, lut_mask); } -static void lpg_brightness_single_set(struct led_classdev *cdev, - enum led_brightness value) +static int lpg_brightness_single_set(struct led_classdev *cdev, + enum led_brightness value) { struct lpg_led *led = container_of(cdev, struct lpg_led, cdev); struct mc_subled info; @@ -614,10 +614,12 @@ static void lpg_brightness_single_set(struct led_classdev *cdev, lpg_brightness_set(led, cdev, &info); mutex_unlock(&led->lpg->lock); + + return 0; } -static void lpg_brightness_mc_set(struct led_classdev *cdev, - enum led_brightness value) +static int lpg_brightness_mc_set(struct led_classdev *cdev, + enum led_brightness value) { struct led_classdev_mc *mc = lcdev_to_mccdev(cdev); struct lpg_led *led = container_of(mc, struct lpg_led, mcdev); @@ -628,6 +630,8 @@ static void lpg_brightness_mc_set(struct led_classdev *cdev, lpg_brightness_set(led, cdev, mc->subled_info); mutex_unlock(&led->lpg->lock); + + return 0; } static int lpg_blink_set(struct lpg_led *led, @@ -1118,7 +1122,7 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np) led->mcdev.num_colors = num_channels; cdev = &led->mcdev.led_cdev; - cdev->brightness_set = lpg_brightness_mc_set; + cdev->brightness_set_blocking = lpg_brightness_mc_set; cdev->blink_set = lpg_blink_mc_set; /* Register pattern accessors only if we have a LUT block */ @@ -1132,7 +1136,7 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np) return ret; cdev = &led->cdev; - cdev->brightness_set = lpg_brightness_single_set; + cdev->brightness_set_blocking = lpg_brightness_single_set; cdev->blink_set = lpg_blink_single_set; /* Register pattern accessors only if we have a LUT block */ @@ -1151,7 +1155,7 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np) else cdev->brightness = LED_OFF; - cdev->brightness_set(cdev, cdev->brightness); + cdev->brightness_set_blocking(cdev, cdev->brightness); init_data.fwnode = of_fwnode_handle(np); diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c index 43a265dc4696e34c716c5568ec8f6540c7abcc22..885ca63f383fb4d27af588043582c69c6ea3a822 100644 --- a/drivers/leds/trigger/ledtrig-pattern.c +++ b/drivers/leds/trigger/ledtrig-pattern.c @@ -155,7 +155,7 @@ static ssize_t repeat_show(struct device *dev, struct device_attribute *attr, mutex_unlock(&data->lock); - return scnprintf(buf, PAGE_SIZE, "%d\n", repeat); + return sysfs_emit(buf, "%d\n", repeat); } static ssize_t repeat_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index 1bbb9ca08d40f8f1d0bf50e0d1e743a05de63352..23bd0c77ac1af3b279df00122e606364304057c5 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c @@ -478,7 +478,7 @@ adb_register(int default_id, int handler_id, struct adb_ids *ids, if ((adb_handler[i].original_address == default_id) && (!handler_id || (handler_id == adb_handler[i].handler_id) || try_handler_change(i, handler_id))) { - if (adb_handler[i].handler != 0) { + if (adb_handler[i].handler) { pr_err("Two handlers for ADB device %d\n", default_id); continue; @@ -673,7 +673,7 @@ static int adb_open(struct inode *inode, struct file 
*file) goto out; } state = kmalloc(sizeof(struct adbdev_state), GFP_KERNEL); - if (state == 0) { + if (!state) { ret = -ENOMEM; goto out; } diff --git a/drivers/macintosh/ams/ams-i2c.c b/drivers/macintosh/ams/ams-i2c.c index 3ded340699fb5c4d6a12d857938c3212ff58a74a..a4a1035eb412332e55b2caed28dbaca08f03d947 100644 --- a/drivers/macintosh/ams/ams-i2c.c +++ b/drivers/macintosh/ams/ams-i2c.c @@ -56,8 +56,7 @@ enum ams_i2c_cmd { AMS_CMD_START, }; -static int ams_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id); +static int ams_i2c_probe(struct i2c_client *client); static void ams_i2c_remove(struct i2c_client *client); static const struct i2c_device_id ams_id[] = { @@ -70,7 +69,7 @@ static struct i2c_driver ams_i2c_driver = { .driver = { .name = "ams", }, - .probe = ams_i2c_probe, + .probe_new = ams_i2c_probe, .remove = ams_i2c_remove, .id_table = ams_id, }; @@ -155,8 +154,7 @@ static void ams_i2c_get_xyz(s8 *x, s8 *y, s8 *z) *z = ams_i2c_read(AMS_DATAZ); } -static int ams_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ams_i2c_probe(struct i2c_client *client) { int vmaj, vmin; int result; diff --git a/drivers/macintosh/ams/ams.h b/drivers/macintosh/ams/ams.h index 935bdd9cd9a63c3b6915ac6d53cfa1234d35f369..2c159c8844c198cfcba9ee032b4c717e5deeb37e 100644 --- a/drivers/macintosh/ams/ams.h +++ b/drivers/macintosh/ams/ams.h @@ -1,4 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _AMS_H +#define _AMS_H + #include #include #include @@ -69,3 +72,5 @@ extern int ams_i2c_init(struct device_node *np); extern int ams_input_init(void); extern void ams_input_exit(void); + +#endif /* _AMS_H */ diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c index 9b63bd2551c632a4893f2be9332b5e11a03d4e85..55a9f8c3a150e1433edb7384ea9474c4137a4aad 100644 --- a/drivers/macintosh/macio-adb.c +++ b/drivers/macintosh/macio-adb.c @@ -100,7 +100,7 @@ int macio_init(void) unsigned int irq; adbs = of_find_compatible_node(NULL, "adb", "chrp,adb0"); - if (adbs == 0) + if (!adbs) return -ENXIO; if (of_address_to_resource(adbs, 0, &r)) { @@ -108,6 +108,10 @@ int macio_init(void) return -ENXIO; } adb = ioremap(r.start, sizeof(struct adb_regs)); + if (!adb) { + of_node_put(adbs); + return -ENOMEM; + } out_8(&adb->ctrl.r, 0); out_8(&adb->intr.r, 0); @@ -183,7 +187,7 @@ static int macio_send_request(struct adb_request *req, int sync) req->reply_len = 0; spin_lock_irqsave(&macio_lock, flags); - if (current_req != 0) { + if (current_req) { last_req->next = req; last_req = req; } else { @@ -213,7 +217,8 @@ static irqreturn_t macio_adb_interrupt(int irq, void *arg) spin_lock(&macio_lock); if (in_8(&adb->intr.r) & TAG) { handled = 1; - if ((req = current_req) != 0) { + req = current_req; + if (req) { /* put the current request in */ for (i = 0; i < req->nbytes; ++i) out_8(&adb->data[i].r, req->data[i]); diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 1ec1e5984563fd2bef6ab9e62b7248d2a8a36a70..3bc1f374e65770af2e57ab9fa7f68c264531fa33 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c @@ -424,7 +424,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, if (of_device_register(&dev->ofdev) != 0) { printk(KERN_DEBUG"macio: device registration error for %s!\n", dev_name(&dev->ofdev.dev)); - kfree(dev); + put_device(&dev->ofdev.dev); return NULL; } diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c index 
b004ea2a110255d6f356b8ef2a8f2f0900dcf631..8f5db9093c9af94bfd79b1a50b1616496e5c9336 100644 --- a/drivers/macintosh/therm_adt746x.c +++ b/drivers/macintosh/therm_adt746x.c @@ -464,9 +464,9 @@ static void thermostat_remove_files(struct thermostat *th) } -static int probe_thermostat(struct i2c_client *client, - const struct i2c_device_id *id) +static int probe_thermostat(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device_node *np = client->dev.of_node; struct thermostat* th; const __be32 *prop; @@ -598,7 +598,7 @@ static struct i2c_driver thermostat_driver = { .driver = { .name = "therm_adt746x", }, - .probe = probe_thermostat, + .probe_new = probe_thermostat, .remove = remove_thermostat, .id_table = therm_adt746x_id, }; diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index b8228ca404544741fb79c7db1b97146144f79d49..22b15efcc0258af874dd267d68556e4fcca812a7 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c @@ -411,8 +411,9 @@ static const struct i2c_device_id therm_windtunnel_id[] = { MODULE_DEVICE_TABLE(i2c, therm_windtunnel_id); static int -do_probe(struct i2c_client *cl, const struct i2c_device_id *id) +do_probe(struct i2c_client *cl) { + const struct i2c_device_id *id = i2c_client_get_device_id(cl); struct i2c_adapter *adapter = cl->adapter; int ret = 0; @@ -441,7 +442,7 @@ static struct i2c_driver g4fan_driver = { .driver = { .name = "therm_windtunnel", }, - .probe = do_probe, + .probe_new = do_probe, .remove = do_remove, .id_table = therm_windtunnel_id, }; diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c index 2194016122d2f4a739b699ad679a560a072c4529..c2d87e7fa85beefefa395f2d5f350afc67cc886d 100644 --- a/drivers/macintosh/via-pmu-backlight.c +++ b/drivers/macintosh/via-pmu-backlight.c @@ -71,12 +71,7 @@ static int pmu_backlight_get_level_brightness(int level) static int __pmu_backlight_update_status(struct backlight_device *bd) { struct adb_request req; - int level = bd->props.brightness; - - - if (bd->props.power != FB_BLANK_UNBLANK || - bd->props.fb_blank != FB_BLANK_UNBLANK) - level = 0; + int level = backlight_get_brightness(bd); if (level > 0) { int pmulevel = pmu_backlight_get_level_brightness(level); diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 49657962d89206d554b4d2eed4303b4353e09d75..e0cb8daf4f082743dde0e6ee4108086f0432059e 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -203,9 +203,11 @@ static int init_pmu(void); static void pmu_start(void); static irqreturn_t via_pmu_interrupt(int irq, void *arg); static irqreturn_t gpio1_interrupt(int irq, void *arg); +#ifdef CONFIG_PROC_FS static int pmu_info_proc_show(struct seq_file *m, void *v); static int pmu_irqstats_proc_show(struct seq_file *m, void *v); static int pmu_battery_proc_show(struct seq_file *m, void *v); +#endif static void pmu_pass_intr(unsigned char *data, int len); static const struct proc_ops pmu_options_proc_ops; @@ -852,6 +854,7 @@ query_battery_state(void) 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1); } +#ifdef CONFIG_PROC_FS static int pmu_info_proc_show(struct seq_file *m, void *v) { seq_printf(m, "PMU driver version : %d\n", PMU_DRIVER_VERSION); @@ -972,6 +975,7 @@ static const struct proc_ops pmu_options_proc_ops = { .proc_release = single_release, .proc_write = pmu_options_proc_write, }; +#endif #ifdef CONFIG_ADB /* Send an ADB command */ diff --git 
a/drivers/macintosh/windfarm_ad7417_sensor.c b/drivers/macintosh/windfarm_ad7417_sensor.c index c5c54a4ce91f244a7af828738640062caf63206a..33b4723d235e45da2076d8b3f847e71f2d335c5f 100644 --- a/drivers/macintosh/windfarm_ad7417_sensor.c +++ b/drivers/macintosh/windfarm_ad7417_sensor.c @@ -229,8 +229,7 @@ static void wf_ad7417_init_chip(struct wf_ad7417_priv *pv) pv->config = config; } -static int wf_ad7417_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int wf_ad7417_probe(struct i2c_client *client) { struct wf_ad7417_priv *pv; const struct mpu_data *mpu; @@ -321,7 +320,7 @@ static struct i2c_driver wf_ad7417_driver = { .name = "wf_ad7417", .of_match_table = wf_ad7417_of_id, }, - .probe = wf_ad7417_probe, + .probe_new = wf_ad7417_probe, .remove = wf_ad7417_remove, .id_table = wf_ad7417_id, }; diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c index c5b1ca5bcd7325eda3f77367dfd03294e00f0f75..e027d889d7e80c070d091782a5ad1d5eeba822f2 100644 --- a/drivers/macintosh/windfarm_fcu_controls.c +++ b/drivers/macintosh/windfarm_fcu_controls.c @@ -514,8 +514,7 @@ static int wf_fcu_init_chip(struct wf_fcu_priv *pv) return 0; } -static int wf_fcu_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int wf_fcu_probe(struct i2c_client *client) { struct wf_fcu_priv *pv; @@ -590,7 +589,7 @@ static struct i2c_driver wf_fcu_driver = { .name = "wf_fcu", .of_match_table = wf_fcu_of_id, }, - .probe = wf_fcu_probe, + .probe_new = wf_fcu_probe, .remove = wf_fcu_remove, .id_table = wf_fcu_id, }; diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c index 204661c8e918f8b2a62ae52f2cc3c9cb6ad7a851..24f0a444d3122ca5c1f3f2feaa84db2ec410dae9 100644 --- a/drivers/macintosh/windfarm_lm75_sensor.c +++ b/drivers/macintosh/windfarm_lm75_sensor.c @@ -87,9 +87,9 @@ static const struct wf_sensor_ops wf_lm75_ops = { .owner = THIS_MODULE, }; -static int wf_lm75_probe(struct i2c_client *client, - const struct i2c_device_id *id) -{ +static int wf_lm75_probe(struct i2c_client *client) +{ + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct wf_lm75_sensor *lm; int rc, ds1775; const char *name, *loc; @@ -177,7 +177,7 @@ static struct i2c_driver wf_lm75_driver = { .name = "wf_lm75", .of_match_table = wf_lm75_of_id, }, - .probe = wf_lm75_probe, + .probe_new = wf_lm75_probe, .remove = wf_lm75_remove, .id_table = wf_lm75_id, }; diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c index 40d25463346ed2bc0b495721bd3aac7ae4cdab7b..f37a32c2070ca010d376a0bf0c1dab5ca3b4bb3e 100644 --- a/drivers/macintosh/windfarm_lm87_sensor.c +++ b/drivers/macintosh/windfarm_lm87_sensor.c @@ -95,8 +95,7 @@ static const struct wf_sensor_ops wf_lm87_ops = { .owner = THIS_MODULE, }; -static int wf_lm87_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int wf_lm87_probe(struct i2c_client *client) { struct wf_lm87_sensor *lm; const char *name = NULL, *loc; @@ -173,7 +172,7 @@ static struct i2c_driver wf_lm87_driver = { .name = "wf_lm87", .of_match_table = wf_lm87_of_id, }, - .probe = wf_lm87_probe, + .probe_new = wf_lm87_probe, .remove = wf_lm87_remove, .id_table = wf_lm87_id, }; diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c index c0d404ebc7922e92fd0782a6b6b610f6ff3acc84..6c5ab657b6b39c7cbc061ff90295956ce7cb2a15 100644 --- a/drivers/macintosh/windfarm_max6690_sensor.c +++ 
b/drivers/macintosh/windfarm_max6690_sensor.c @@ -60,8 +60,7 @@ static const struct wf_sensor_ops wf_max6690_ops = { .owner = THIS_MODULE, }; -static int wf_max6690_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int wf_max6690_probe(struct i2c_client *client) { const char *name, *loc; struct wf_6690_sensor *max; @@ -129,7 +128,7 @@ static struct i2c_driver wf_max6690_driver = { .name = "wf_max6690", .of_match_table = wf_max6690_of_id, }, - .probe = wf_max6690_probe, + .probe_new = wf_max6690_probe, .remove = wf_max6690_remove, .id_table = wf_max6690_id, }; diff --git a/drivers/macintosh/windfarm_pid.h b/drivers/macintosh/windfarm_pid.h index 83f747dbeafcc61a47dddaa70949167551ca2d12..335613a200fb092ff96b985357b0a613ab06170e 100644 --- a/drivers/macintosh/windfarm_pid.h +++ b/drivers/macintosh/windfarm_pid.h @@ -1,4 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _WINDFARM_PID_H +#define _WINDFARM_PID_H + /* * Windfarm PowerMac thermal control. Generic PID helpers * @@ -82,3 +85,5 @@ struct wf_cpu_pid_state { extern void wf_cpu_pid_init(struct wf_cpu_pid_state *st, struct wf_cpu_pid_param *param); extern s32 wf_cpu_pid_run(struct wf_cpu_pid_state *st, s32 power, s32 temp); + +#endif /* _WINDFARM_PID_H */ diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c index 36312f163aac8efb002148372f730a5e3d496b3c..82500417ebeec8546fc85d091d277e1ef448306a 100644 --- a/drivers/macintosh/windfarm_pm121.c +++ b/drivers/macintosh/windfarm_pm121.c @@ -651,7 +651,7 @@ static void pm121_create_cpu_fans(void) /* First, locate the PID params in SMU SBD */ hdr = smu_get_sdb_partition(SMU_SDB_CPUPIDDATA_ID, NULL); - if (hdr == 0) { + if (!hdr) { printk(KERN_WARNING "pm121: CPU PID fan config not found.\n"); goto fail; } @@ -970,7 +970,7 @@ static int pm121_init_pm(void) const struct smu_sdbp_header *hdr; hdr = smu_get_sdb_partition(SMU_SDB_SENSORTREE_ID, NULL); - if (hdr != 0) { + if (hdr) { struct smu_sdbp_sensortree *st = (struct smu_sdbp_sensortree *)&hdr[1]; pm121_mach_model = st->model_id; diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c index e0f4743f21cc618f3e34ae186d2256088a002065..257fb2c695c53c1a246104395b000f1065fdf6ad 100644 --- a/drivers/macintosh/windfarm_pm81.c +++ b/drivers/macintosh/windfarm_pm81.c @@ -401,7 +401,7 @@ static void wf_smu_create_cpu_fans(void) /* First, locate the PID params in SMU SBD */ hdr = smu_get_sdb_partition(SMU_SDB_CPUPIDDATA_ID, NULL); - if (hdr == 0) { + if (!hdr) { printk(KERN_WARNING "windfarm: CPU PID fan config not found " "max fan speed\n"); goto fail; @@ -705,7 +705,7 @@ static int wf_init_pm(void) const struct smu_sdbp_header *hdr; hdr = smu_get_sdb_partition(SMU_SDB_SENSORTREE_ID, NULL); - if (hdr != 0) { + if (hdr) { struct smu_sdbp_sensortree *st = (struct smu_sdbp_sensortree *)&hdr[1]; wf_smu_mach_model = st->model_id; diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c index c8535855360d1fbd43756952da03888f489705c7..120a9cfba0c54e994564e0fe6064f691263c7651 100644 --- a/drivers/macintosh/windfarm_pm91.c +++ b/drivers/macintosh/windfarm_pm91.c @@ -150,7 +150,7 @@ static void wf_smu_create_cpu_fans(void) /* First, locate the PID params in SMU SBD */ hdr = smu_get_sdb_partition(SMU_SDB_CPUPIDDATA_ID, NULL); - if (hdr == 0) { + if (!hdr) { printk(KERN_WARNING "windfarm: CPU PID fan config not found " "max fan speed\n"); goto fail; diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c 
index e9957ad49a2af21c4f953daadce9021585a2429a..bdd92b27da2ae78207a114bd2fb009176597d93d 100644 --- a/drivers/macintosh/windfarm_smu_controls.c +++ b/drivers/macintosh/windfarm_smu_controls.c @@ -266,12 +266,11 @@ static int __init smu_controls_init(void) return -ENODEV; /* Look for RPM fans */ - for (fans = NULL; (fans = of_get_next_child(smu, fans)) != NULL;) + for_each_child_of_node(smu, fans) if (of_node_name_eq(fans, "rpm-fans") || of_device_is_compatible(fans, "smu-rpm-fans")) break; - for (fan = NULL; - fans && (fan = of_get_next_child(fans, fan)) != NULL;) { + for_each_child_of_node(fans, fan) { struct smu_fan_control *fct; fct = smu_fan_create(fan, 0); @@ -286,11 +285,10 @@ static int __init smu_controls_init(void) /* Look for PWM fans */ - for (fans = NULL; (fans = of_get_next_child(smu, fans)) != NULL;) + for_each_child_of_node(smu, fans) if (of_node_name_eq(fans, "pwm-fans")) break; - for (fan = NULL; - fans && (fan = of_get_next_child(fans, fan)) != NULL;) { + for_each_child_of_node(fans, fan) { struct smu_fan_control *fct; fct = smu_fan_create(fan, 1); diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c index be5d4593db93e00982279250a94cf936aede28cc..ebc4256a9e4a0b61f080d9cf2f8306a87e5b4465 100644 --- a/drivers/macintosh/windfarm_smu_sat.c +++ b/drivers/macintosh/windfarm_smu_sat.c @@ -189,8 +189,7 @@ static const struct wf_sensor_ops wf_sat_ops = { .owner = THIS_MODULE, }; -static int wf_sat_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int wf_sat_probe(struct i2c_client *client) { struct device_node *dev = client->dev.of_node; struct wf_sat *sat; @@ -349,7 +348,7 @@ static struct i2c_driver wf_sat_driver = { .name = "wf_smu_sat", .of_match_table = wf_sat_of_id, }, - .probe = wf_sat_probe, + .probe_new = wf_sat_probe, .remove = wf_sat_remove, .id_table = wf_sat_id, }; diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c index 338fc889b357a9f99de4d1177642a577686709a4..b8ad4f16b4acdc516e3733903dcdd6f42c00b146 100644 --- a/drivers/mcb/mcb-core.c +++ b/drivers/mcb/mcb-core.c @@ -71,8 +71,10 @@ static int mcb_probe(struct device *dev) get_device(dev); ret = mdrv->probe(mdev, found_id); - if (ret) + if (ret) { module_put(carrier_mod); + put_device(dev); + } return ret; } diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c index 0266bfddfbe27e610cd2ee064e35d3adeea1c9a5..aa6938da0db85f9d260b79aca250c1241f2cc548 100644 --- a/drivers/mcb/mcb-parse.c +++ b/drivers/mcb/mcb-parse.c @@ -108,7 +108,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus, return 0; err: - mcb_free_dev(mdev); + put_device(&mdev->dev); return ret; } diff --git a/drivers/media/common/videobuf2/frame_vector.c b/drivers/media/common/videobuf2/frame_vector.c index 89eebc3341b84708b5775fed0e434c9ad1dea9be..0f430ddc1f6704e96dd42357eaf77f03be19793f 100644 --- a/drivers/media/common/videobuf2/frame_vector.c +++ b/drivers/media/common/videobuf2/frame_vector.c @@ -37,7 +37,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, bool write, struct frame_vector *vec) { int ret; - unsigned int gup_flags = FOLL_FORCE | FOLL_LONGTERM; + unsigned int gup_flags = FOLL_LONGTERM; if (nr_frames == 0) return 0; diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index 2a857cf70c944eb386d49274c3cbd84fbb8734cf..0ed087caf7f3b28e8a51505353334597b2987835 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -1028,9 +1028,9 @@ void dvb_module_release(struct i2c_client *client) 
EXPORT_SYMBOL_GPL(dvb_module_release); #endif -static int dvb_uevent(struct device *dev, struct kobj_uevent_env *env) +static int dvb_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct dvb_device *dvbdev = dev_get_drvdata(dev); + const struct dvb_device *dvbdev = dev_get_drvdata(dev); add_uevent_var(env, "DVB_ADAPTER_NUM=%d", dvbdev->adapter->num); add_uevent_var(env, "DVB_DEVICE_TYPE=%s", dnames[dvbdev->type]); @@ -1038,9 +1038,9 @@ static int dvb_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } -static char *dvb_devnode(struct device *dev, umode_t *mode) +static char *dvb_devnode(const struct device *dev, umode_t *mode) { - struct dvb_device *dvbdev = dev_get_drvdata(dev); + const struct dvb_device *dvbdev = dev_get_drvdata(dev); return kasprintf(GFP_KERNEL, "dvb/adapter%d/%s%d", dvbdev->adapter->num, dnames[dvbdev->type], dvbdev->id); diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c index fe833f39698afb42f125c7bf7a1e56a08815c310..ee8087f29b2c4af379bf4ab60f0103c6f177c8ad 100644 --- a/drivers/media/pci/ddbridge/ddbridge-core.c +++ b/drivers/media/pci/ddbridge/ddbridge-core.c @@ -2716,9 +2716,9 @@ static const struct file_operations ddb_fops = { .release = ddb_release, }; -static char *ddb_devnode(struct device *device, umode_t *mode) +static char *ddb_devnode(const struct device *device, umode_t *mode) { - struct ddb *dev = dev_get_drvdata(device); + const struct ddb *dev = dev_get_drvdata(device); return kasprintf(GFP_KERNEL, "ddbridge/card%d", dev->nr); } diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c index 210be8290f24dddd95c330780c3d7a80895b2994..99b9f55ca82922c2a2093e5484d46f79c44444a9 100644 --- a/drivers/media/pci/ivtv/ivtv-udma.c +++ b/drivers/media/pci/ivtv/ivtv-udma.c @@ -115,7 +115,7 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr, /* Pin user pages for DMA Xfer */ err = pin_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, - dma->map, FOLL_FORCE); + dma->map, 0); if (user_dma.page_count != err) { IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n", diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c index 4ba10c34a16a4283d4a9b1c9f82893d3e6dffaf7..582146f8d70d5f9c96be91f88ea01dfa1b6f6742 100644 --- a/drivers/media/pci/ivtv/ivtv-yuv.c +++ b/drivers/media/pci/ivtv/ivtv-yuv.c @@ -63,12 +63,11 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma, /* Pin user pages for DMA Xfer */ y_pages = pin_user_pages_unlocked(y_dma.uaddr, - y_dma.page_count, &dma->map[0], FOLL_FORCE); + y_dma.page_count, &dma->map[0], 0); uv_pages = 0; /* silence gcc. 
value is set and consumed only if: */ if (y_pages == y_dma.page_count) { uv_pages = pin_user_pages_unlocked(uv_dma.uaddr, - uv_dma.page_count, &dma->map[y_pages], - FOLL_FORCE); + uv_dma.page_count, &dma->map[y_pages], 0); } if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) { diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index eba0cd30e314d75ee826e7db3b751a4993160e5b..527d9324742bb394aa7a855d492900bde375a4a9 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -1017,7 +1017,7 @@ static void ir_close(struct input_dev *idev) } /* class for /sys/class/rc */ -static char *rc_devnode(struct device *dev, umode_t *mode) +static char *rc_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "rc/%s", dev_name(dev)); } diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index f75e5eedeee05ee31aa8f12ef34343ce9756c03e..234e9f647c963e05cd005df772122c33858fab6c 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -151,17 +151,16 @@ static void videobuf_dma_init(struct videobuf_dmabuf *dma) static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, int direction, unsigned long data, unsigned long size) { + unsigned int gup_flags = FOLL_LONGTERM; unsigned long first, last; - int err, rw = 0; - unsigned int flags = FOLL_FORCE; + int err; dma->direction = direction; switch (dma->direction) { case DMA_FROM_DEVICE: - rw = READ; + gup_flags |= FOLL_WRITE; break; case DMA_TO_DEVICE: - rw = WRITE; break; default: BUG(); @@ -177,14 +176,11 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, if (NULL == dma->pages) return -ENOMEM; - if (rw == READ) - flags |= FOLL_WRITE; - dprintk(1, "init user [0x%lx+0x%lx => %lu pages]\n", data, size, dma->nr_pages); - err = pin_user_pages(data & PAGE_MASK, dma->nr_pages, - flags | FOLL_LONGTERM, dma->pages, NULL); + err = pin_user_pages(data & PAGE_MASK, dma->nr_pages, gup_flags, + dma->pages, NULL); if (err != dma->nr_pages) { dma->nr_pages = (err >= 0) ? 
err : 0; diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index 52c7020c9d191207e2df1154c5a9cb4e955d5078..1decd09a08d8166c448fc403660addb8e6d437f9 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -2879,7 +2879,6 @@ static struct mpt_pci_driver mptctl_driver = { static int __init mptctl_init(void) { int err; - int where = 1; show_mptmod_ver(my_NAME, my_VERSION); @@ -2898,7 +2897,6 @@ static int __init mptctl_init(void) /* * Install our handler */ - ++where; mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER, "mptctl_reply"); if (!mptctl_id || mptctl_id >= MPT_MAX_PROTOCOL_DRIVERS) { diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 34901bcd1ce81622c00fc52da8ce3a9c70338aa9..88fe4a860ae5149bd2825f7e79889050fd9c459e 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1952,12 +1952,12 @@ mptsas_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt) * @sc: scsi command that the midlayer is about to time out * **/ -static enum blk_eh_timer_return mptsas_eh_timed_out(struct scsi_cmnd *sc) +static enum scsi_timeout_action mptsas_eh_timed_out(struct scsi_cmnd *sc) { MPT_SCSI_HOST *hd; MPT_ADAPTER *ioc; VirtDevice *vdevice; - enum blk_eh_timer_return rc = BLK_EH_DONE; + enum scsi_timeout_action rc = SCSI_EH_NOT_HANDLED; hd = shost_priv(sc->device->host); if (hd == NULL) { @@ -1980,7 +1980,7 @@ static enum blk_eh_timer_return mptsas_eh_timed_out(struct scsi_cmnd *sc) dtmprintk(ioc, printk(MYIOC_s_WARN_FMT ": %s: ioc is in reset," "SML need to reset the timer (sc=%p)\n", ioc->name, __func__, sc)); - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; } vdevice = sc->device->hostdata; if (vdevice && vdevice->vtarget && (vdevice->vtarget->inDMD @@ -1988,7 +1988,7 @@ static enum blk_eh_timer_return mptsas_eh_timed_out(struct scsi_cmnd *sc) dtmprintk(ioc, printk(MYIOC_s_WARN_FMT ": %s: target removed " "or in device removal delay (sc=%p)\n", ioc->name, __func__, sc)); - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; goto done; } diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c index aaf24af287dd367bdd653dc908018ece17b16231..eab82619ec314aef0f80bf3af0e789b07200ff01 100644 --- a/drivers/mfd/vexpress-sysreg.c +++ b/drivers/mfd/vexpress-sysreg.c @@ -61,35 +61,27 @@ static struct mfd_cell vexpress_sysreg_cells[] = { .name = "basic-mmio-gpio", .of_compatible = "arm,vexpress-sysreg,sys_led", .num_resources = 1, - .resources = (struct resource []) { - DEFINE_RES_MEM_NAMED(SYS_LED, 0x4, "dat"), - }, + .resources = &DEFINE_RES_MEM_NAMED(SYS_LED, 0x4, "dat"), .platform_data = &vexpress_sysreg_sys_led_pdata, .pdata_size = sizeof(vexpress_sysreg_sys_led_pdata), }, { .name = "basic-mmio-gpio", .of_compatible = "arm,vexpress-sysreg,sys_mci", .num_resources = 1, - .resources = (struct resource []) { - DEFINE_RES_MEM_NAMED(SYS_MCI, 0x4, "dat"), - }, + .resources = &DEFINE_RES_MEM_NAMED(SYS_MCI, 0x4, "dat"), .platform_data = &vexpress_sysreg_sys_mci_pdata, .pdata_size = sizeof(vexpress_sysreg_sys_mci_pdata), }, { .name = "basic-mmio-gpio", .of_compatible = "arm,vexpress-sysreg,sys_flash", .num_resources = 1, - .resources = (struct resource []) { - DEFINE_RES_MEM_NAMED(SYS_FLASH, 0x4, "dat"), - }, + .resources = &DEFINE_RES_MEM_NAMED(SYS_FLASH, 0x4, "dat"), .platform_data = &vexpress_sysreg_sys_flash_pdata, .pdata_size = sizeof(vexpress_sysreg_sys_flash_pdata), }, { .name = "vexpress-syscfg", .num_resources = 1, - .resources = (struct resource []) { - 
DEFINE_RES_MEM(SYS_MISC, 0x4c), - }, + .resources = &DEFINE_RES_MEM(SYS_MISC, 0x4c), } }; diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 358ad56f65245e2094960dad0e6ee6f52f0110b1..9947b7892bd5af61c016f3f27c30a78827996414 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -176,6 +176,28 @@ config SGI_XP this feature will allow for direct communication between SSIs based on a network adapter and DMA messaging. +config SMPRO_ERRMON + tristate "Ampere Computing SMPro error monitor driver" + depends on MFD_SMPRO || COMPILE_TEST + help + Say Y here to get support for the SMpro error monitor function + provided by Ampere Computing's Altra and Altra Max SoCs. Upon + loading, the driver creates sysfs files which can be used to gather + multiple HW error data reported via read and write system calls. + + To compile this driver as a module, say M here. The driver will be + called smpro-errmon. + +config SMPRO_MISC + tristate "Ampere Computing SMPro miscellaneous driver" + depends on MFD_SMPRO || COMPILE_TEST + help + Say Y here to get support for the SMpro miscellaneous function + provided by Ampere Computing's Altra and Altra Max SoCs. + + To compile this driver as a module, say M here. The driver will be + called smpro-misc. + config CS5535_MFGPT tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support" depends on MFD_CS5535 diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index ac9b3e757ba1dfbbd44d5410ab31fd335fba90e9..87b54a4a442244e273697f1d96970c2c04d95a76 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -23,6 +23,8 @@ obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o obj-$(CONFIG_KGDB_TESTS) += kgdbts.o obj-$(CONFIG_SGI_XP) += sgi-xp/ obj-$(CONFIG_SGI_GRU) += sgi-gru/ +obj-$(CONFIG_SMPRO_ERRMON) += smpro-errmon.o +obj-$(CONFIG_SMPRO_MISC) += smpro-misc.o obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o obj-$(CONFIG_GEHC_ACHC) += gehc-achc.o obj-$(CONFIG_HP_ILO) += hpilo.o diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c index a32431f4b370abcb8292e36deffc8655a5c5cf8e..0526c55d5cd5cb9f817c658a29c8e9171e9da772 100644 --- a/drivers/misc/apds9802als.c +++ b/drivers/misc/apds9802als.c @@ -212,8 +212,7 @@ static int als_set_default_config(struct i2c_client *client) return ret_val; } -static int apds9802als_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int apds9802als_probe(struct i2c_client *client) { int res; struct als_data *data; @@ -297,7 +296,7 @@ static struct i2c_driver apds9802als_driver = { .name = DRIVER_NAME, .pm = APDS9802ALS_PM_OPS, }, - .probe = apds9802als_probe, + .probe_new = apds9802als_probe, .remove = apds9802als_remove, .id_table = apds9802als_id, }; diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c index e2100cc42ce863dc68cb50383d027b204b452679..0024503ea6dbbfd6ce5e64a8ddaa3f8b994c6746 100644 --- a/drivers/misc/apds990x.c +++ b/drivers/misc/apds990x.c @@ -1051,8 +1051,7 @@ static const struct attribute_group apds990x_attribute_group[] = { {.attrs = sysfs_attrs_ctrl }, }; -static int apds990x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int apds990x_probe(struct i2c_client *client) { struct apds990x_chip *chip; int err; @@ -1272,7 +1271,7 @@ static struct i2c_driver apds990x_driver = { .name = "apds990x", .pm = &apds990x_pm_ops, }, - .probe = apds990x_probe, + .probe_new = apds990x_probe, .remove = apds990x_remove, .id_table = apds990x_id, };
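The .probe to .probe_new conversions running through the macintosh and misc hunks (ams-i2c, therm_adt746x, therm_windtunnel, the windfarm sensors, apds9802als, apds990x, eeprom, idt_89hpesx, max6875) all share one shape: the i2c_device_id parameter is dropped from the probe callback, and a driver that still needs the matched entry recovers it with i2c_client_get_device_id(). A hedged sketch with hypothetical "foo" names:

	#include <linux/i2c.h>
	#include <linux/module.h>

	static int foo_probe(struct i2c_client *client)
	{
		/* Only needed if the driver still uses the matched id. */
		const struct i2c_device_id *id = i2c_client_get_device_id(client);

		/* ... allocate state, use id->driver_data as before ... */
		return 0;
	}

	static const struct i2c_device_id foo_id[] = {
		{ "foo", 0 },
		{ }
	};
	MODULE_DEVICE_TABLE(i2c, foo_id);

	static struct i2c_driver foo_driver = {
		.driver		= { .name = "foo" },
		.probe_new	= foo_probe,
		.id_table	= foo_id,
	};
	module_i2c_driver(foo_driver);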
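Similarly, the media hunks earlier (frame_vector, ivtv-udma, ivtv-yuv, videobuf-dma-sg) converge on one pin_user_pages() convention: FOLL_FORCE is no longer passed for these user-buffer DMA pins, FOLL_LONGTERM marks the pin as long-lived, and FOLL_WRITE is requested only when the device will write into the pages (DMA_FROM_DEVICE). A condensed sketch of the flag selection, modelled on the videobuf hunk; the helper name is hypothetical and the caller is assumed to hold mmap_lock, as in videobuf's *_locked path:

	#include <linux/dma-direction.h>
	#include <linux/mm.h>

	static long foo_pin_for_dma(unsigned long start, unsigned long nr_pages,
				    enum dma_data_direction dir, struct page **pages)
	{
		unsigned int gup_flags = FOLL_LONGTERM;

		if (dir == DMA_FROM_DEVICE)	/* device writes to memory */
			gup_flags |= FOLL_WRITE;

		return pin_user_pages(start & PAGE_MASK, nr_pages, gup_flags,
				      pages, NULL);
	}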
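One more recurring fix, seen in the macio_asic and mcb hunks above and again in the cxl hunks below: once device_initialize()/device_add() has run, an embedded struct device is refcounted, so error paths must unwind with device_del() plus put_device() (which ends in the release callback) rather than kfree() or a device_unregister() at the wrong stage. A sketch of the canonical unwind, with hypothetical "foo" names:

	#include <linux/device.h>

	struct foo {
		struct device dev;
	};

	static int foo_sysfs_add(struct foo *f);	/* hypothetical follow-up step */

	static int foo_register(struct foo *f)
	{
		int rc;

		device_initialize(&f->dev);
		/* ... set f->dev.parent, f->dev.release, dev_set_name() ... */

		rc = device_add(&f->dev);
		if (rc) {
			put_device(&f->dev);	/* frees f via f->dev.release */
			return rc;
		}

		rc = foo_sysfs_add(f);
		if (rc) {
			device_del(&f->dev);
			put_device(&f->dev);
			return rc;
		}

		return 0;
	}

diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c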
index d0dfa674414c34e131095fa021055ed73eab5996..bedbe0efb330e7cfb16eba071c602ee6777cedbd 100644 --- a/drivers/misc/bh1770glc.c +++ b/drivers/misc/bh1770glc.c @@ -1162,8 +1162,7 @@ static const struct attribute_group bh1770_attribute_group = { .attrs = sysfs_attrs }; -static int bh1770_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bh1770_probe(struct i2c_client *client) { struct bh1770_chip *chip; int err; @@ -1379,7 +1378,7 @@ static struct i2c_driver bh1770_driver = { .name = "bh1770glc", .pm = &bh1770_pm_ops, }, - .probe = bh1770_probe, + .probe_new = bh1770_probe, .remove = bh1770_remove, .id_table = bh1770_id, }; diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index 3dbdce96fae0dfb4339aceef17c823ad1536509f..5878329b011a063f0c1ff3d21b6eb592e6154373 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -546,7 +546,7 @@ static const struct file_operations afu_master_fops = { }; -static char *cxl_devnode(struct device *dev, umode_t *mode) +static char *cxl_devnode(const struct device *dev, umode_t *mode) { if (cpu_has_feature(CPU_FTR_HVMODE) && CXL_DEVT_IS_CARD(dev->devt)) { diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c index 375f692ae9d686c1c39d86844d7b71a883d81bfb..fb95a2d5cef48c395ca0f4ddc389d0fcf6544127 100644 --- a/drivers/misc/cxl/guest.c +++ b/drivers/misc/cxl/guest.c @@ -965,10 +965,10 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n * if it returns an error! */ if ((rc = cxl_register_afu(afu))) - goto err_put1; + goto err_put_dev; if ((rc = cxl_sysfs_afu_add(afu))) - goto err_put1; + goto err_del_dev; /* * pHyp doesn't expose the programming models supported by the @@ -984,7 +984,7 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n afu->modes_supported = CXL_MODE_DIRECTED; if ((rc = cxl_afu_select_best_mode(afu))) - goto err_put2; + goto err_remove_sysfs; adapter->afu[afu->slice] = afu; @@ -1004,10 +1004,12 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n return 0; -err_put2: +err_remove_sysfs: cxl_sysfs_afu_remove(afu); -err_put1: - device_unregister(&afu->dev); +err_del_dev: + device_del(&afu->dev); +err_put_dev: + put_device(&afu->dev); free = false; guest_release_serr_irq(afu); err2: @@ -1141,18 +1143,20 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic * even if it returns an error! */ if ((rc = cxl_register_adapter(adapter))) - goto err_put1; + goto err_put_dev; if ((rc = cxl_sysfs_adapter_add(adapter))) - goto err_put1; + goto err_del_dev; /* release the context lock as the adapter is configured */ cxl_adapter_context_unlock(adapter); return adapter; -err_put1: - device_unregister(&adapter->dev); +err_del_dev: + device_del(&adapter->dev); +err_put_dev: + put_device(&adapter->dev); free = false; cxl_guest_remove_chardev(adapter); err1: diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 3de0aea62ade4bbc33bf50250da5783e30f15319..0ff944860dda988e3af5f8d2ed64ebe55e87a97f 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -387,6 +387,7 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid, rc = get_phb_index(np, phb_index); if (rc) { pr_err("cxl: invalid phb index\n"); + of_node_put(np); return rc; } @@ -1164,10 +1165,10 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) * if it returns an error! 
*/ if ((rc = cxl_register_afu(afu))) - goto err_put1; + goto err_put_dev; if ((rc = cxl_sysfs_afu_add(afu))) - goto err_put1; + goto err_del_dev; adapter->afu[afu->slice] = afu; @@ -1176,10 +1177,12 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) return 0; -err_put1: +err_del_dev: + device_del(&afu->dev); +err_put_dev: pci_deconfigure_afu(afu); cxl_debugfs_afu_remove(afu); - device_unregister(&afu->dev); + put_device(&afu->dev); return rc; err_free_native: @@ -1667,23 +1670,25 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev) * even if it returns an error! */ if ((rc = cxl_register_adapter(adapter))) - goto err_put1; + goto err_put_dev; if ((rc = cxl_sysfs_adapter_add(adapter))) - goto err_put1; + goto err_del_dev; /* Release the context lock as adapter is configured */ cxl_adapter_context_unlock(adapter); return adapter; -err_put1: +err_del_dev: + device_del(&adapter->dev); +err_put_dev: /* This should mirror cxl_remove_adapter, except without the * sysfs parts */ cxl_debugfs_adapter_remove(adapter); cxl_deconfigure_adapter(adapter); - device_unregister(&adapter->dev); + put_device(&adapter->dev); return ERR_PTR(rc); err_release: diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index 1264253cc07bc749845af3bf8d925ce43e31c772..6332db8044bd6d3703be62eaeb362d336f9220d5 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c @@ -67,12 +67,6 @@ static void cxl_pci_disable_device(struct pci_dev *dev) } } -static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus, - unsigned long type) -{ - return 1; -} - static void cxl_pci_reset_secondary_bus(struct pci_dev *dev) { /* Should we do an AFU reset here ? */ @@ -200,7 +194,6 @@ static struct pci_controller_ops cxl_pci_controller_ops = .enable_device_hook = cxl_pci_enable_device_hook, .disable_device = cxl_pci_disable_device, .release_device = cxl_pci_disable_device, - .window_alignment = cxl_pci_window_alignment, .reset_secondary_bus = cxl_pci_reset_secondary_bus, .setup_msi_irqs = cxl_setup_msi_irqs, .teardown_msi_irqs = cxl_teardown_msi_irqs, diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c index 0698ddc5f4d54597883c92c1765f1f4c9eb204d6..d517eed32971c42495ea23b29ccf4e5c52892190 100644 --- a/drivers/misc/ds1682.c +++ b/drivers/misc/ds1682.c @@ -200,8 +200,7 @@ static const struct bin_attribute ds1682_eeprom_attr = { /* * Called when a ds1682 device is matched with this driver */ -static int ds1682_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ds1682_probe(struct i2c_client *client) { int rc; @@ -251,7 +250,7 @@ static struct i2c_driver ds1682_driver = { .name = "ds1682", .of_match_table = ds1682_of_match, }, - .probe = ds1682_probe, + .probe_new = ds1682_probe, .remove = ds1682_remove, .id_table = ds1682_id, }; diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index 8a841a75d893175146ed3c996d3003cb1fc17be4..32611100d5cd332380dafe34f0ee0657f712ce2a 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c @@ -141,8 +141,7 @@ static int eeprom_detect(struct i2c_client *client, struct i2c_board_info *info) return 0; } -static int eeprom_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int eeprom_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; struct eeprom_data *data; @@ -197,7 +196,7 @@ static struct i2c_driver eeprom_driver = { .driver = { .name = "eeprom", }, - .probe = eeprom_probe, + .probe_new = eeprom_probe, .remove = 
eeprom_remove, .id_table = eeprom_id, diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c index bb3ed352b95f99c8babd6571e3dc8dc384e7ce18..4e07ee9cb500e471432bed73297f59ce117363a2 100644 --- a/drivers/misc/eeprom/idt_89hpesx.c +++ b/drivers/misc/eeprom/idt_89hpesx.c @@ -1366,7 +1366,7 @@ static void idt_remove_dbgfs_files(struct idt_89hpesx_dev *pdev) /* * idt_probe() - IDT 89HPESx driver probe() callback method */ -static int idt_probe(struct i2c_client *client, const struct i2c_device_id *id) +static int idt_probe(struct i2c_client *client) { struct idt_89hpesx_dev *pdev; int ret; @@ -1556,7 +1556,7 @@ static struct i2c_driver idt_driver = { .name = IDT_NAME, .of_match_table = idt_of_match, }, - .probe = idt_probe, + .probe_new = idt_probe, .remove = idt_remove, .id_table = idt_ids, }; diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c index 6bd4f4339af4afc8a064a13f3d13d312b3dfa9b9..79cf8afcef2e81150771e371c7fc489bd89d4477 100644 --- a/drivers/misc/eeprom/max6875.c +++ b/drivers/misc/eeprom/max6875.c @@ -130,8 +130,7 @@ static const struct bin_attribute user_eeprom_attr = { .read = max6875_read, }; -static int max6875_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max6875_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; struct max6875_data *data; @@ -193,7 +192,7 @@ static struct i2c_driver max6875_driver = { .driver = { .name = "max6875", }, - .probe = max6875_probe, + .probe_new = max6875_probe, .remove = max6875_remove, .id_table = max6875_id, }; diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 0f467a71b069834b283bf4e503362fb40a1d66d4..c9902a1dcf5d311043182ee87f4dbc5ec6085cae 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -20,6 +20,7 @@ #include #include #include +#include #define ADSP_DOMAIN_ID (0) #define MDSP_DOMAIN_ID (1) @@ -37,8 +38,20 @@ #define FASTRPC_DSP_UTILITIES_HANDLE 2 #define FASTRPC_CTXID_MASK (0xFF0) #define INIT_FILELEN_MAX (2 * 1024 * 1024) +#define INIT_FILE_NAMELEN_MAX (128) #define FASTRPC_DEVICE_NAME "fastrpc" + +/* Add memory to static PD pool, protection thru XPU */ +#define ADSP_MMAP_HEAP_ADDR 4 +/* MAP static DMA buffer on DSP User PD */ +#define ADSP_MMAP_DMA_BUFFER 6 +/* Add memory to static PD pool, protection thru hypervisor */ +#define ADSP_MMAP_REMOTE_HEAP_ADDR 8 +/* Add memory to userPD pool, for user heap */ #define ADSP_MMAP_ADD_PAGES 0x1000 +/* Add memory to userPD pool, for LLC heap */ +#define ADSP_MMAP_ADD_PAGES_LLC 0x3000 + #define DSP_UNSUPPORTED_API (0x80000414) /* MAX NUMBER of DSP ATTRIBUTES SUPPORTED */ #define FASTRPC_MAX_DSP_ATTRIBUTES (256) @@ -72,6 +85,7 @@ FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0) #define FASTRPC_CREATE_PROCESS_NARGS 6 +#define FASTRPC_CREATE_STATIC_PROCESS_NARGS 3 /* Remote Method id table */ #define FASTRPC_RMID_INIT_ATTACH 0 #define FASTRPC_RMID_INIT_RELEASE 1 @@ -84,7 +98,7 @@ #define FASTRPC_RMID_INIT_MEM_UNMAP 11 /* Protection Domain(PD) ids */ -#define AUDIO_PD (0) /* also GUEST_OS PD?
*/ +#define ROOT_PD (0) #define USER_PD (1) #define SENSORS_PD (2) @@ -261,8 +275,11 @@ struct fastrpc_channel_ctx { u32 dsp_attributes[FASTRPC_MAX_DSP_ATTRIBUTES]; struct fastrpc_device *secure_fdevice; struct fastrpc_device *fdevice; + struct fastrpc_buf *remote_heap; + struct list_head invoke_interrupted_mmaps; bool secure; bool unsigned_support; + u64 dma_mask; }; struct fastrpc_device { @@ -369,7 +386,7 @@ static void fastrpc_buf_free(struct fastrpc_buf *buf) kfree(buf); } -static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev, +static int __fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev, u64 size, struct fastrpc_buf **obuf) { struct fastrpc_buf *buf; @@ -397,14 +414,37 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev, return -ENOMEM; } + *obuf = buf; + + return 0; +} + +static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev, + u64 size, struct fastrpc_buf **obuf) +{ + int ret; + struct fastrpc_buf *buf; + + ret = __fastrpc_buf_alloc(fl, dev, size, obuf); + if (ret) + return ret; + + buf = *obuf; + if (fl->sctx && fl->sctx->sid) buf->phys += ((u64)fl->sctx->sid << 32); - *obuf = buf; - return 0; } +static int fastrpc_remote_heap_alloc(struct fastrpc_user *fl, struct device *dev, + u64 size, struct fastrpc_buf **obuf) +{ + struct device *rdev = &fl->cctx->rpdev->dev; + + return __fastrpc_buf_alloc(fl, rdev, size, obuf); +} + static void fastrpc_channel_ctx_free(struct kref *ref) { struct fastrpc_channel_ctx *cctx; @@ -714,6 +754,8 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, return -ENOMEM; INIT_LIST_HEAD(&map->node); + kref_init(&map->refcount); + map->fl = fl; map->fd = fd; map->buf = dma_buf_get(fd); @@ -740,7 +782,6 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, map->size = len; map->va = sg_virt(map->table->sgl); map->len = len; - kref_init(&map->refcount); if (attr & FASTRPC_ATTR_SECUREMAP) { /* @@ -770,7 +811,7 @@ map_err: attach_err: dma_buf_put(map->buf); get_err: - kfree(map); + fastrpc_map_put(map); return err; } @@ -1073,6 +1114,8 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, struct fastrpc_invoke_args *args) { struct fastrpc_invoke_ctx *ctx = NULL; + struct fastrpc_buf *buf, *b; + int err = 0; if (!fl->sctx) @@ -1135,6 +1178,14 @@ bail: spin_unlock(&fl->lock); fastrpc_context_put(ctx); } + + if (err == -ERESTARTSYS) { + list_for_each_entry_safe(buf, b, &fl->mmaps, node) { + list_del(&buf->node); + list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps); + } + } + if (err) dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err); @@ -1159,6 +1210,120 @@ static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_reques return false; } +static int fastrpc_init_create_static_process(struct fastrpc_user *fl, + char __user *argp) +{ + struct fastrpc_init_create_static init; + struct fastrpc_invoke_args *args; + struct fastrpc_phy_page pages[1]; + char *name; + int err; + struct { + int pgid; + u32 namelen; + u32 pageslen; + } inbuf; + u32 sc; + + args = kcalloc(FASTRPC_CREATE_STATIC_PROCESS_NARGS, sizeof(*args), GFP_KERNEL); + if (!args) + return -ENOMEM; + + if (copy_from_user(&init, argp, sizeof(init))) { + err = -EFAULT; + goto err; + } + + if (init.namelen > INIT_FILE_NAMELEN_MAX) { + err = -EINVAL; + goto err; + } + + name = kzalloc(init.namelen, GFP_KERNEL); + if (!name) { + err = -ENOMEM; + goto err; + } + + if (copy_from_user(name, (void __user *)(uintptr_t)init.name, init.namelen)) { + err = -EFAULT; + 
goto err_name; + } + + if (!fl->cctx->remote_heap) { + err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen, + &fl->cctx->remote_heap); + if (err) + goto err_name; + + /* Map if we have any heap VMIDs associated with this ADSP Static Process. */ + if (fl->cctx->vmcount) { + unsigned int perms = BIT(QCOM_SCM_VMID_HLOS); + + err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, + (u64)fl->cctx->remote_heap->size, &perms, + fl->cctx->vmperms, fl->cctx->vmcount); + if (err) { + dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d", + fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); + goto err_map; + } + } + } + + inbuf.pgid = fl->tgid; + inbuf.namelen = init.namelen; + inbuf.pageslen = 0; + fl->pd = USER_PD; + + args[0].ptr = (u64)(uintptr_t)&inbuf; + args[0].length = sizeof(inbuf); + args[0].fd = -1; + + args[1].ptr = (u64)(uintptr_t)name; + args[1].length = inbuf.namelen; + args[1].fd = -1; + + pages[0].addr = fl->cctx->remote_heap->phys; + pages[0].size = fl->cctx->remote_heap->size; + + args[2].ptr = (u64)(uintptr_t) pages; + args[2].length = sizeof(*pages); + args[2].fd = -1; + + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_STATIC, 3, 0); + + err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, + sc, args); + if (err) + goto err_invoke; + + kfree(args); + + return 0; +err_invoke: + if (fl->cctx->vmcount) { + struct qcom_scm_vmperm perm; + + perm.vmid = QCOM_SCM_VMID_HLOS; + perm.perm = QCOM_SCM_PERM_RWX; + err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, + (u64)fl->cctx->remote_heap->size, + &(fl->cctx->vmperms[0].vmid), &perm, 1); + if (err) + dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", + fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); + } +err_map: + fastrpc_buf_free(fl->cctx->remote_heap); +err_name: + kfree(name); +err: + kfree(args); + + return err; +} + static int fastrpc_init_create_process(struct fastrpc_user *fl, char __user *argp) { @@ -1605,30 +1770,14 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp) return 0; } -static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, - struct fastrpc_req_munmap *req) +static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf) { struct fastrpc_invoke_args args[1] = { [0] = { 0 } }; - struct fastrpc_buf *buf = NULL, *iter, *b; struct fastrpc_munmap_req_msg req_msg; struct device *dev = fl->sctx->dev; int err; u32 sc; - spin_lock(&fl->lock); - list_for_each_entry_safe(iter, b, &fl->mmaps, node) { - if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) { - buf = iter; - break; - } - } - spin_unlock(&fl->lock); - - if (!buf) { - dev_err(dev, "mmap not in list\n"); - return -EINVAL; - } - req_msg.pgid = fl->tgid; req_msg.size = buf->size; req_msg.vaddr = buf->raddr; @@ -1654,12 +1803,29 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp) { + struct fastrpc_buf *buf = NULL, *iter, *b; struct fastrpc_req_munmap req; + struct device *dev = fl->sctx->dev; if (copy_from_user(&req, argp, sizeof(req))) return -EFAULT; - return fastrpc_req_munmap_impl(fl, &req); + spin_lock(&fl->lock); + list_for_each_entry_safe(iter, b, &fl->mmaps, node) { + if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) { + buf = iter; + break; + } + } + spin_unlock(&fl->lock); + + if (!buf) { + dev_err(dev, "mmap\t\tpt 0x%09llx [len 0x%08llx] not in list\n", + req.vaddrout, req.size); + return 
-EINVAL; + } + + return fastrpc_req_munmap_impl(fl, buf); } static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) @@ -1668,7 +1834,6 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) struct fastrpc_buf *buf = NULL; struct fastrpc_mmap_req_msg req_msg; struct fastrpc_mmap_rsp_msg rsp_msg; - struct fastrpc_req_munmap req_unmap; struct fastrpc_phy_page pages; struct fastrpc_req_mmap req; struct device *dev = fl->sctx->dev; @@ -1678,8 +1843,9 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) if (copy_from_user(&req, argp, sizeof(req))) return -EFAULT; - if (req.flags != ADSP_MMAP_ADD_PAGES) { + if (req.flags != ADSP_MMAP_ADD_PAGES && req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR) { dev_err(dev, "flag not supported 0x%x\n", req.flags); + return -EINVAL; } @@ -1725,16 +1891,29 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) /* let the client know the address to use */ req.vaddrout = rsp_msg.vaddr; + /* Add memory to static PD pool, protection thru hypervisor */ + if (req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { + struct qcom_scm_vmperm perm; + int err = 0; + + perm.vmid = QCOM_SCM_VMID_HLOS; + perm.perm = QCOM_SCM_PERM_RWX; + err = qcom_scm_assign_mem(buf->phys, buf->size, + &(fl->cctx->vmperms[0].vmid), &perm, 1); + if (err) { + dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", + buf->phys, buf->size, err); + goto err_assign; + } + } + spin_lock(&fl->lock); list_add_tail(&buf->node, &fl->mmaps); spin_unlock(&fl->lock); if (copy_to_user((void __user *)argp, &req, sizeof(req))) { - /* unmap the memory and release the buffer */ - req_unmap.vaddrout = buf->raddr; - req_unmap.size = buf->size; - fastrpc_req_munmap_impl(fl, &req_unmap); - return -EFAULT; + err = -EFAULT; + goto err_assign; } dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n", @@ -1742,6 +1921,8 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) return 0; +err_assign: + fastrpc_req_munmap_impl(fl, buf); err_invoke: fastrpc_buf_free(buf); @@ -1889,11 +2070,14 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, err = fastrpc_invoke(fl, argp); break; case FASTRPC_IOCTL_INIT_ATTACH: - err = fastrpc_init_attach(fl, AUDIO_PD); + err = fastrpc_init_attach(fl, ROOT_PD); break; case FASTRPC_IOCTL_INIT_ATTACH_SNS: err = fastrpc_init_attach(fl, SENSORS_PD); break; + case FASTRPC_IOCTL_INIT_CREATE_STATIC: + err = fastrpc_init_create_static_process(fl, argp); + break; case FASTRPC_IOCTL_INIT_CREATE: err = fastrpc_init_create_process(fl, argp); break; @@ -2068,6 +2252,9 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) return -EINVAL; } + if (of_reserved_mem_device_init_by_idx(rdev, rdev->of_node, 0)) + dev_info(rdev, "no reserved DMA memory for FASTRPC\n"); + vmcount = of_property_read_variable_u32_array(rdev->of_node, "qcom,vmids", &vmids[0], 0, FASTRPC_MAX_VMIDS); if (vmcount < 0) @@ -2120,8 +2307,10 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) kref_init(&data->refcount); dev_set_drvdata(&rpdev->dev, data); + rdev->dma_mask = &data->dma_mask; dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32)); INIT_LIST_HEAD(&data->users); + INIT_LIST_HEAD(&data->invoke_interrupted_mmaps); spin_lock_init(&data->lock); idr_init(&data->ctx_idr); data->domain_id = domain_id; @@ -2146,6 +2335,7 @@ static void fastrpc_notify_users(struct fastrpc_user *user) static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev) { struct fastrpc_channel_ctx *cctx = 
dev_get_drvdata(&rpdev->dev); + struct fastrpc_buf *buf, *b; struct fastrpc_user *user; unsigned long flags; @@ -2160,6 +2350,12 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev) if (cctx->secure_fdevice) misc_deregister(&cctx->secure_fdevice->miscdev); + list_for_each_entry_safe(buf, b, &cctx->invoke_interrupted_mmaps, node) + list_del(&buf->node); + + if (cctx->remote_heap) + fastrpc_buf_free(cctx->remote_heap); + of_platform_depopulate(&rpdev->dev); cctx->rpdev = NULL; diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c index 693981891870c4c5920aa66fd4e377d1fa9aa8e6..5b63d179b24ea753e98b095159bd557e8943b598 100644 --- a/drivers/misc/genwqe/card_base.c +++ b/drivers/misc/genwqe/card_base.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * IBM Accelerator Family 'GenWQE' * * (C) Copyright IBM Corp. 2013 @@ -1349,7 +1349,7 @@ static struct pci_driver genwqe_driver = { * Default mode should be rw for everybody. Do not change default * device name. */ -static char *genwqe_devnode(struct device *dev, umode_t *mode) +static char *genwqe_devnode(const struct device *dev, umode_t *mode) { if (mode) *mode = 0666; diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index fa05770865c65930ef913029920cb62b227f5420..ea0e5101c10ed6ea9f7e71e0c63ec09c941b55ae 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -742,13 +742,11 @@ static void cs_do_release(struct kref *ref) */ if (hl_cs_cmpl->encaps_signals) kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount, - hl_encaps_handle_do_release); + hl_encaps_release_handle_and_put_ctx); } - if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) - && cs->encaps_signals) - kref_put(&cs->encaps_sig_hdl->refcount, - hl_encaps_handle_do_release); + if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) && cs->encaps_signals) + kref_put(&cs->encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx); out: /* Must be called before hl_ctx_put because inside we use ctx to get @@ -798,7 +796,7 @@ out: static void cs_timedout(struct work_struct *work) { struct hl_device *hdev; - u64 event_mask; + u64 event_mask = 0x0; int rc; struct hl_cs *cs = container_of(work, struct hl_cs, work_tdr.work); @@ -830,11 +828,7 @@ static void cs_timedout(struct work_struct *work) if (rc) { hdev->captured_err_info.cs_timeout.timestamp = ktime_get(); hdev->captured_err_info.cs_timeout.seq = cs->sequence; - - event_mask = device_reset ? (HL_NOTIFIER_EVENT_CS_TIMEOUT | - HL_NOTIFIER_EVENT_DEVICE_RESET) : HL_NOTIFIER_EVENT_CS_TIMEOUT; - - hl_notifier_event_send_all(hdev, event_mask); + event_mask |= HL_NOTIFIER_EVENT_CS_TIMEOUT; } switch (cs->type) { @@ -869,8 +863,12 @@ static void cs_timedout(struct work_struct *work) cs_put(cs); - if (device_reset) - hl_device_reset(hdev, HL_DRV_RESET_TDR); + if (device_reset) { + event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET; + hl_device_cond_reset(hdev, HL_DRV_RESET_TDR, event_mask); + } else if (event_mask) { + hl_notifier_event_send_all(hdev, event_mask); + } } static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, @@ -1011,6 +1009,34 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs) hl_complete_job(hdev, job); } +/* + * release_reserved_encaps_signals() - release reserved encapsulated signals. 
+ * @hdev: pointer to habanalabs device structure + * + * Release reserved encapsulated signals which weren't un-reserved, or for which a CS with + * encapsulated signals wasn't submitted and thus weren't released as part of CS roll-back. + * For these signals we also need to put the refcount of the H/W SOB which was taken at the + * reservation. + */ +static void release_reserved_encaps_signals(struct hl_device *hdev) +{ + struct hl_ctx *ctx = hl_get_compute_ctx(hdev); + struct hl_cs_encaps_sig_handle *handle; + struct hl_encaps_signals_mgr *mgr; + u32 id; + + if (!ctx) + return; + + mgr = &ctx->sig_mgr; + + idr_for_each_entry(&mgr->handles, handle, id) + if (handle->cs_seq == ULLONG_MAX) + kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob_ctx); + + hl_ctx_put(ctx); +} + void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush) { int i; @@ -1039,6 +1065,8 @@ } force_complete_multi_cs(hdev); + + release_reserved_encaps_signals(hdev); } static void @@ -2001,6 +2029,8 @@ static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv, */ handle->pre_sob_val = prop->next_sob_val - handle->count; + + handle->cs_seq = ULLONG_MAX; + *signals_count = prop->next_sob_val; hdev->asic_funcs->hw_queues_unlock(hdev); @@ -2350,10 +2380,8 @@ put_cs: /* We finished with the CS in this function, so put the ref */ cs_put(cs); free_cs_chunk_array: - if (!wait_cs_submitted && cs_encaps_signals && handle_found && - is_wait_cs) - kref_put(&encaps_sig_hdl->refcount, - hl_encaps_handle_do_release); + if (!wait_cs_submitted && cs_encaps_signals && handle_found && is_wait_cs) + kref_put(&encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx); kfree(cs_chunk_array); out: return rc; diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c index 2f4620b7990c7d63e7ef84ad435906f0d7a2a9e4..9c8b1b37b510ce822c8408bf821c8adab7f5c917 100644 --- a/drivers/misc/habanalabs/common/context.c +++ b/drivers/misc/habanalabs/common/context.c @@ -9,38 +9,46 @@ #include -void hl_encaps_handle_do_release(struct kref *ref) +static void encaps_handle_do_release(struct hl_cs_encaps_sig_handle *handle, bool put_hw_sob, + bool put_ctx) { - struct hl_cs_encaps_sig_handle *handle = - container_of(ref, struct hl_cs_encaps_sig_handle, refcount); struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr; + if (put_hw_sob) + hw_sob_put(handle->hw_sob); + spin_lock(&mgr->lock); idr_remove(&mgr->handles, handle->id); spin_unlock(&mgr->lock); - hl_ctx_put(handle->ctx); + if (put_ctx) + hl_ctx_put(handle->ctx); + kfree(handle); } -static void hl_encaps_handle_do_release_sob(struct kref *ref) +void hl_encaps_release_handle_and_put_ctx(struct kref *ref) { struct hl_cs_encaps_sig_handle *handle = - container_of(ref, struct hl_cs_encaps_sig_handle, refcount); - struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr; + container_of(ref, struct hl_cs_encaps_sig_handle, refcount); - /* if we're here, then there was a signals reservation but cs with - * encaps signals wasn't submitted, so need to put refcount - * to hw_sob taken at the reservation.
- */ - hw_sob_put(handle->hw_sob); + encaps_handle_do_release(handle, false, true); +} - spin_lock(&mgr->lock); - idr_remove(&mgr->handles, handle->id); - spin_unlock(&mgr->lock); +static void hl_encaps_release_handle_and_put_sob(struct kref *ref) +{ + struct hl_cs_encaps_sig_handle *handle = + container_of(ref, struct hl_cs_encaps_sig_handle, refcount); - hl_ctx_put(handle->ctx); - kfree(handle); + encaps_handle_do_release(handle, true, false); +} + +void hl_encaps_release_handle_and_put_sob_ctx(struct kref *ref) +{ + struct hl_cs_encaps_sig_handle *handle = + container_of(ref, struct hl_cs_encaps_sig_handle, refcount); + + encaps_handle_do_release(handle, true, true); } static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr) @@ -49,8 +57,7 @@ static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr) idr_init(&mgr->handles); } -static void hl_encaps_sig_mgr_fini(struct hl_device *hdev, - struct hl_encaps_signals_mgr *mgr) +static void hl_encaps_sig_mgr_fini(struct hl_device *hdev, struct hl_encaps_signals_mgr *mgr) { struct hl_cs_encaps_sig_handle *handle; struct idr *idp; @@ -58,11 +65,14 @@ static void hl_encaps_sig_mgr_fini(struct hl_device *hdev, idp = &mgr->handles; + /* The IDR is expected to be empty at this stage, because any left signal should have been + * released as part of CS roll-back. + */ if (!idr_is_empty(idp)) { - dev_warn(hdev->dev, "device released while some encaps signals handles are still allocated\n"); + dev_warn(hdev->dev, + "device released while some encaps signals handles are still allocated\n"); idr_for_each_entry(idp, handle, id) - kref_put(&handle->refcount, - hl_encaps_handle_do_release_sob); + kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob); } idr_destroy(&mgr->handles); diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index 48d3ec8b5c8257bbb8ddd7085c5354f4a5e65cd0..945c0e6758caa5247bd74f60d864c4288b636e64 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -1769,6 +1769,11 @@ void hl_debugfs_add_device(struct hl_device *hdev) dev_entry, &hl_timeout_locked_fops); + debugfs_create_u32("device_release_watchdog_timeout", + 0644, + dev_entry->root, + &hdev->device_release_watchdog_timeout_sec); + for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) { debugfs_create_file(hl_debugfs_list[i].name, 0444, diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 233d8b46c831fcc3e1ef8c6b28d13a5b41868cb6..87ab329e65d49dd5fbb8ae03bfed1c695546a702 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -12,10 +12,13 @@ #include #include +#include #include -#define HL_RESET_DELAY_USEC 10000 /* 10ms */ +#define HL_RESET_DELAY_USEC 10000 /* 10ms */ + +#define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC 5 enum dma_alloc_type { DMA_ALLOC_COHERENT, @@ -31,6 +34,7 @@ enum dma_alloc_type { * @hdev: pointer to habanalabs device structure. * @addr: the address the caller wants to access. * @region: the PCI region. + * @new_bar_region_base: the new BAR region base address. * * @return: the old BAR base address on success, U64_MAX for failure. * The caller should set it back to the old address after use. @@ -40,7 +44,8 @@ enum dma_alloc_type { * This function can be called also if the bar doesn't need to be set, * in that case it just won't change the base. 
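/*
 * The hl_access_sram_dram_region() rework below computes the access address
 * against the window base just returned by hl_set_dram_bar() instead of the
 * possibly stale region->region_base. A standalone sketch of that
 * arithmetic (all values hypothetical):
 */
#include <stdint.h>
#include <stdio.h>

struct region {
        uint64_t bar_va;        /* stand-in for hdev->pcie_bar[bar_id] */
        uint64_t offset_in_bar;
        uint64_t bar_size;      /* power of two */
};

/* Mirror of: acc_addr = bar + offset_in_bar + (addr - bar_region_base) */
static uint64_t translate(const struct region *r, uint64_t addr,
                          uint64_t window_base)
{
        return r->bar_va + r->offset_in_bar + (addr - window_base);
}

int main(void)
{
        struct region r = {
                .bar_va = 0x7f0000000000ull,
                .offset_in_bar = 0,
                .bar_size = 1ull << 28,
        };
        uint64_t addr = 0x30001000ull;
        /* After the BAR is moved, its window base is aligned down to addr. */
        uint64_t window = addr & ~(r.bar_size - 1);

        printf("cpu va: 0x%llx\n", (unsigned long long)translate(&r, addr, window));
        return 0;
}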
*/ -static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region) +static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region, + u64 *new_bar_region_base) { struct asic_fixed_properties *prop = &hdev->asic_prop; u64 bar_base_addr, old_base; @@ -54,27 +59,28 @@ static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_regi old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr); /* in case of success we need to update the new BAR base */ - if (old_base != U64_MAX) - region->region_base = bar_base_addr; + if ((old_base != U64_MAX) && new_bar_region_base) + *new_bar_region_base = bar_base_addr; return old_base; } -static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val, - enum debugfs_access_type acc_type, enum pci_region region_type) +int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val, + enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar) { struct pci_mem_region *region = &hdev->pci_mem_region[region_type]; + u64 old_base = 0, rc, bar_region_base = region->region_base; void __iomem *acc_addr; - u64 old_base = 0, rc; - if (region_type == PCI_REGION_DRAM) { - old_base = hl_set_dram_bar(hdev, addr, region); + if (set_dram_bar) { + old_base = hl_set_dram_bar(hdev, addr, region, &bar_region_base); if (old_base == U64_MAX) return -EIO; } - acc_addr = hdev->pcie_bar[region->bar_id] + addr - region->region_base + - region->offset_in_bar; + acc_addr = hdev->pcie_bar[region->bar_id] + region->offset_in_bar + + (addr - bar_region_base); + switch (acc_type) { case DEBUGFS_READ8: *val = readb(acc_addr); @@ -96,8 +102,8 @@ static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val break; } - if (region_type == PCI_REGION_DRAM) { - rc = hl_set_dram_bar(hdev, old_base, region); + if (set_dram_bar) { + rc = hl_set_dram_bar(hdev, old_base, region, NULL); if (rc == U64_MAX) return -EIO; } @@ -134,6 +140,9 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c dma_addr_t dma_handle, enum dma_alloc_type alloc_type, const char *caller) { + /* this is needed to avoid warning on using freed pointer */ + u64 store_cpu_addr = (u64) (uintptr_t) cpu_addr; + switch (alloc_type) { case DMA_ALLOC_COHERENT: hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle); @@ -146,7 +155,7 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c break; } - trace_habanalabs_dma_free(hdev->dev, (u64) (uintptr_t) cpu_addr, dma_handle, size, caller); + trace_habanalabs_dma_free(hdev->dev, store_cpu_addr, dma_handle, size, caller); } void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle, @@ -279,7 +288,7 @@ int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type, case PCI_REGION_SRAM: case PCI_REGION_DRAM: return hl_access_sram_dram_region(hdev, addr, val, acc_type, - region_type); + region_type, (region_type == PCI_REGION_DRAM)); default: return -EFAULT; } @@ -355,10 +364,49 @@ bool hl_device_operational(struct hl_device *hdev, } } +bool hl_ctrl_device_operational(struct hl_device *hdev, + enum hl_device_status *status) +{ + enum hl_device_status current_status; + + current_status = hl_device_status(hdev); + if (status) + *status = current_status; + + switch (current_status) { + case HL_DEVICE_STATUS_MALFUNCTION: + return false; + case HL_DEVICE_STATUS_IN_RESET: + case 
HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE: + case HL_DEVICE_STATUS_NEEDS_RESET: + case HL_DEVICE_STATUS_OPERATIONAL: + case HL_DEVICE_STATUS_IN_DEVICE_CREATION: + default: + return true; + } +} + +static void print_idle_status_mask(struct hl_device *hdev, const char *message, + u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE]) +{ + u32 pad_width[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {}; + + BUILD_BUG_ON(HL_BUSY_ENGINES_MASK_EXT_SIZE != 4); + + pad_width[3] = idle_mask[3] ? 16 : 0; + pad_width[2] = idle_mask[2] || pad_width[3] ? 16 : 0; + pad_width[1] = idle_mask[1] || pad_width[2] ? 16 : 0; + pad_width[0] = idle_mask[0] || pad_width[1] ? 16 : 0; + + dev_err(hdev->dev, "%s (mask %0*llx_%0*llx_%0*llx_%0*llx)\n", + message, pad_width[3], idle_mask[3], pad_width[2], idle_mask[2], + pad_width[1], idle_mask[1], pad_width[0], idle_mask[0]); +} + static void hpriv_release(struct kref *ref) { u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0}; - bool device_is_idle = true; + bool reset_device, device_is_idle = true; struct hl_fpriv *hpriv; struct hl_device *hdev; @@ -375,15 +423,19 @@ static void hpriv_release(struct kref *ref) mutex_destroy(&hpriv->ctx_lock); mutex_destroy(&hpriv->restore_phase_mutex); - if ((!hdev->pldm) && (hdev->pdev) && - (!hdev->asic_funcs->is_device_idle(hdev, - idle_mask, - HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL))) { - dev_err(hdev->dev, - "device not idle after user context is closed (0x%llx_%llx)\n", - idle_mask[1], idle_mask[0]); + /* Device should be reset if reset-upon-device-release is enabled, or if there is a pending + * reset that waits for device release. + */ + reset_device = hdev->reset_upon_device_release || hdev->reset_info.watchdog_active; - device_is_idle = false; + /* Unless device is reset in any case, check idle status and reset if device is not idle */ + if (!reset_device && hdev->pdev && !hdev->pldm) + device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask, + HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL); + if (!device_is_idle) { + print_idle_status_mask(hdev, "device is not idle after user context is closed", + idle_mask); + reset_device = true; } /* We need to remove the user from the list to make sure the reset process won't @@ -399,9 +451,10 @@ static void hpriv_release(struct kref *ref) list_del(&hpriv->dev_node); mutex_unlock(&hdev->fpriv_list_lock); - if (!device_is_idle || hdev->reset_upon_device_release) { + if (reset_device) { hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE); } else { + /* Scrubbing is handled within hl_device_reset(), so here need to do it directly */ int rc = hdev->asic_funcs->scrub_device_mem(hdev); if (rc) @@ -468,9 +521,10 @@ static int hl_device_release(struct inode *inode, struct file *filp) hdev->compute_ctx_in_release = 1; - if (!hl_hpriv_put(hpriv)) - dev_notice(hdev->dev, - "User process closed FD but device still in use\n"); + if (!hl_hpriv_put(hpriv)) { + dev_notice(hdev->dev, "User process closed FD but device still in use\n"); + hl_device_reset(hdev, HL_DRV_RESET_HARD); + } hdev->last_open_session_duration_jif = jiffies - hdev->last_successful_open_jif; @@ -658,17 +712,42 @@ static void device_hard_reset_pending(struct work_struct *work) flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR; rc = hl_device_reset(hdev, flags); + if ((rc == -EBUSY) && !hdev->device_fini_pending) { - dev_info(hdev->dev, - "Could not reset device. 
will try again in %u seconds", - HL_PENDING_RESET_PER_SEC); + struct hl_ctx *ctx = hl_get_compute_ctx(hdev); - queue_delayed_work(device_reset_work->wq, - &device_reset_work->reset_work, - msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000)); + if (ctx) { + /* The read refcount value should be subtracted by one, because the read is + * protected with hl_get_compute_ctx(). + */ + dev_info(hdev->dev, + "Could not reset device (compute_ctx refcount %u). will try again in %u seconds", + kref_read(&ctx->refcount) - 1, HL_PENDING_RESET_PER_SEC); + hl_ctx_put(ctx); + } else { + dev_info(hdev->dev, "Could not reset device. will try again in %u seconds", + HL_PENDING_RESET_PER_SEC); + } + + queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work, + msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000)); } } +static void device_release_watchdog_func(struct work_struct *work) +{ + struct hl_device_reset_work *device_release_watchdog_work = + container_of(work, struct hl_device_reset_work, reset_work.work); + struct hl_device *hdev = device_release_watchdog_work->hdev; + u32 flags; + + dev_dbg(hdev->dev, "Device wasn't released in time. Initiate device reset.\n"); + + flags = device_release_watchdog_work->flags | HL_DRV_RESET_FROM_WD_THR; + + hl_device_reset(hdev, flags); +} + /* * device_early_init - do some early initialization for the habanalabs device * @@ -699,9 +778,10 @@ static int device_early_init(struct hl_device *hdev) gaudi2_set_asic_funcs(hdev); strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name)); break; - case ASIC_GAUDI2_SEC: + case ASIC_GAUDI2B: gaudi2_set_asic_funcs(hdev); - strscpy(hdev->asic_name, "GAUDI2 SEC", sizeof(hdev->asic_name)); + strscpy(hdev->asic_name, "GAUDI2B", sizeof(hdev->asic_name)); break; default: dev_err(hdev->dev, "Unrecognized ASIC type %d\n", @@ -737,7 +817,7 @@ } } - hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0); + hdev->eq_wq = create_singlethread_workqueue("hl-events"); if (hdev->eq_wq == NULL) { dev_err(hdev->dev, "Failed to allocate EQ workqueue\n"); rc = -ENOMEM; @@ -760,8 +840,8 @@ goto free_cs_cmplt_wq; } - hdev->pf_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0); - if (!hdev->pf_wq) { + hdev->prefetch_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0); + if (!hdev->prefetch_wq) { dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n"); rc = -ENOMEM; goto free_ts_free_wq; } @@ -771,7 +851,7 @@ GFP_KERNEL); if (!hdev->hl_chip_info) { rc = -ENOMEM; - goto free_pf_wq; + goto free_prefetch_wq; } rc = hl_mmu_if_set_funcs(hdev); @@ -780,19 +860,21 @@ hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr); - hdev->device_reset_work.wq = - create_singlethread_workqueue("hl_device_reset"); - if (!hdev->device_reset_work.wq) { + hdev->reset_wq = create_singlethread_workqueue("hl_device_reset"); + if (!hdev->reset_wq) { rc = -ENOMEM; dev_err(hdev->dev, "Failed to create device reset WQ\n"); goto free_cb_mgr; } - INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, - device_hard_reset_pending); + INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending); hdev->device_reset_work.hdev = hdev; hdev->device_fini_pending = 0; + INIT_DELAYED_WORK(&hdev->device_release_watchdog_work.reset_work, + device_release_watchdog_func); + hdev->device_release_watchdog_work.hdev = hdev; + 
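/*
 * print_idle_status_mask() above renders the 256-bit idle mask as four
 * 64-bit chunks, zero-padding a chunk to 16 hex digits when it, or any
 * chunk above it, is nonzero, so unused high quadwords collapse to a bare
 * 0. A compilable userspace rendition of the same width logic:
 */
#include <stdint.h>
#include <stdio.h>

static void print_mask(const uint64_t m[4])
{
        int pad[4];

        pad[3] = m[3] ? 16 : 0;
        pad[2] = (m[2] || pad[3]) ? 16 : 0;
        pad[1] = (m[1] || pad[2]) ? 16 : 0;
        pad[0] = (m[0] || pad[1]) ? 16 : 0;

        printf("mask %0*llx_%0*llx_%0*llx_%0*llx\n",
               pad[3], (unsigned long long)m[3], pad[2], (unsigned long long)m[2],
               pad[1], (unsigned long long)m[1], pad[0], (unsigned long long)m[0]);
}

int main(void)
{
        uint64_t m[4] = { 0xdeadbeef, 0x1, 0, 0 };

        print_mask(m);  /* "mask 0_0_0000000000000001_00000000deadbeef" */
        return 0;
}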
mutex_init(&hdev->send_cpu_message_lock); mutex_init(&hdev->debug_lock); INIT_LIST_HEAD(&hdev->cs_mirror_list); @@ -810,8 +892,8 @@ free_cb_mgr: hl_mem_mgr_fini(&hdev->kernel_mem_mgr); free_chip_info: kfree(hdev->hl_chip_info); -free_pf_wq: - destroy_workqueue(hdev->pf_wq); +free_prefetch_wq: + destroy_workqueue(hdev->prefetch_wq); free_ts_free_wq: destroy_workqueue(hdev->ts_free_obj_wq); free_cs_cmplt_wq: @@ -854,11 +936,11 @@ static void device_early_fini(struct hl_device *hdev) kfree(hdev->hl_chip_info); - destroy_workqueue(hdev->pf_wq); + destroy_workqueue(hdev->prefetch_wq); destroy_workqueue(hdev->ts_free_obj_wq); destroy_workqueue(hdev->cs_cmplt_wq); destroy_workqueue(hdev->eq_wq); - destroy_workqueue(hdev->device_reset_work.wq); + destroy_workqueue(hdev->reset_wq); for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) destroy_workqueue(hdev->cq_wq[i]); @@ -962,11 +1044,16 @@ static void device_late_fini(struct hl_device *hdev) int hl_device_utilization(struct hl_device *hdev, u32 *utilization) { - u64 max_power, curr_power, dc_power, dividend; + u64 max_power, curr_power, dc_power, dividend, divisor; int rc; max_power = hdev->max_power; dc_power = hdev->asic_prop.dc_power_default; + divisor = max_power - dc_power; + if (!divisor) { + dev_warn(hdev->dev, "device utilization is not supported\n"); + return -EOPNOTSUPP; + } rc = hl_fw_cpucp_power_get(hdev, &curr_power); if (rc) @@ -975,7 +1062,7 @@ int hl_device_utilization(struct hl_device *hdev, u32 *utilization) curr_power = clamp(curr_power, dc_power, max_power); dividend = (curr_power - dc_power) * 100; - *utilization = (u32) div_u64(dividend, (max_power - dc_power)); + *utilization = (u32) div_u64(dividend, divisor); return 0; } @@ -1053,7 +1140,7 @@ static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_r hl_cs_rollback_all(hdev, skip_wq_flush); /* flush the MMU prefetch workqueue */ - flush_workqueue(hdev->pf_wq); + flush_workqueue(hdev->prefetch_wq); /* Release all pending user interrupts, each pending user interrupt * holds a reference to user context @@ -1264,6 +1351,10 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags) { u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT; + /* No consecutive mechanism when user context exists */ + if (hdev->is_compute_ctx_active) + return; + /* * 'reset cause' is being updated here, because getting here * means that it's the 1st time and the last time we're here @@ -1337,8 +1428,8 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags) int hl_device_reset(struct hl_device *hdev, u32 flags) { bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false, - reset_upon_device_release = false, schedule_hard_reset = false, - skip_wq_flush, delay_reset; + reset_upon_device_release = false, schedule_hard_reset = false, delay_reset, + from_dev_release, from_watchdog_thread; u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0}; struct hl_ctx *ctx; int i, rc; @@ -1351,8 +1442,9 @@ int hl_device_reset(struct hl_device *hdev, u32 flags) hard_reset = !!(flags & HL_DRV_RESET_HARD); from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR); fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW); - skip_wq_flush = !!(flags & HL_DRV_RESET_DEV_RELEASE); + from_dev_release = !!(flags & HL_DRV_RESET_DEV_RELEASE); delay_reset = !!(flags & HL_DRV_RESET_DELAY); + from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR); if (!hard_reset && !hdev->asic_prop.supports_compute_reset) { hard_instead_soft = true; @@ -1409,6 +1501,23 
@@ do_reset: spin_unlock(&hdev->reset_info.lock); + /* Cancel the device release watchdog work if required. + * In case of reset-upon-device-release while the release watchdog work is + * scheduled, do hard-reset instead of compute-reset. + */ + if ((hard_reset || from_dev_release) && hdev->reset_info.watchdog_active) { + hdev->reset_info.watchdog_active = 0; + if (!from_watchdog_thread) + cancel_delayed_work_sync( + &hdev->device_release_watchdog_work.reset_work); + + if (from_dev_release) { + flags |= HL_DRV_RESET_HARD; + flags &= ~HL_DRV_RESET_DEV_RELEASE; + hard_reset = true; + } + } + if (delay_reset) usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1); @@ -1439,13 +1548,12 @@ again: * Because the reset function can't run from heartbeat work, * we need to call the reset function from a dedicated work. */ - queue_delayed_work(hdev->device_reset_work.wq, - &hdev->device_reset_work.reset_work, 0); + queue_delayed_work(hdev->reset_wq, &hdev->device_reset_work.reset_work, 0); return 0; } - cleanup_resources(hdev, hard_reset, fw_reset, skip_wq_flush); + cleanup_resources(hdev, hard_reset, fw_reset, from_dev_release); kill_processes: if (hard_reset) { @@ -1581,9 +1689,8 @@ kill_processes: /* If device is not idle fail the reset process */ if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask, - HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) { - dev_err(hdev->dev, "device is not idle (mask 0x%llx_%llx) after reset\n", - idle_mask[1], idle_mask[0]); + HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) { + print_idle_status_mask(hdev, "device is not idle after reset", idle_mask); rc = -EIO; goto out_err; } @@ -1658,18 +1765,19 @@ kill_processes: * the device will be operational although it shouldn't be */ hdev->asic_funcs->enable_events_from_fw(hdev); - } else if (!reset_upon_device_release) { - hdev->reset_info.compute_reset_cnt++; - } - - if (schedule_hard_reset) { - dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n"); - flags = hdev->reset_info.hard_reset_schedule_flags; - hdev->reset_info.hard_reset_schedule_flags = 0; - hdev->disabled = true; - hard_reset = true; - handle_reset_trigger(hdev, flags); - goto again; + } else { + if (!reset_upon_device_release) + hdev->reset_info.compute_reset_cnt++; + + if (schedule_hard_reset) { + dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n"); + flags = hdev->reset_info.hard_reset_schedule_flags; + hdev->reset_info.hard_reset_schedule_flags = 0; + hdev->disabled = true; + hard_reset = true; + handle_reset_trigger(hdev, flags); + goto again; + } } return 0; @@ -1706,6 +1814,73 @@ out_err: return rc; } +/* + * hl_device_cond_reset() - conditionally reset the device. + * @hdev: pointer to habanalabs device structure. + * @reset_flags: reset flags. + * @event_mask: events to notify user about. + * + * Conditionally reset the device, or alternatively schedule a watchdog work to reset the device + * unless another reset precedes it. 
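/*
 * The hl_device_utilization() hunk earlier maps current power linearly
 * from [dc_power, max_power] onto 0..100 and now rejects the degenerate
 * max_power == dc_power case. A standalone sketch of that computation:
 */
#include <stdint.h>
#include <stdio.h>

static int utilization(uint64_t curr, uint64_t dc, uint64_t max, uint32_t *out)
{
        uint64_t divisor = max - dc;

        if (!divisor)
                return -1;      /* mirrors the new -EOPNOTSUPP guard */

        if (curr < dc)          /* clamp(curr, dc, max) */
                curr = dc;
        if (curr > max)
                curr = max;

        *out = (uint32_t)(((curr - dc) * 100) / divisor);
        return 0;
}

int main(void)
{
        uint32_t u;

        if (!utilization(150, 100, 200, &u))
                printf("utilization: %u%%\n", u);       /* 50% */
        return 0;
}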
+ */ +int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask) +{ + struct hl_ctx *ctx = NULL; + + /* Device release watchdog is only for hard reset */ + if (!(flags & HL_DRV_RESET_HARD) && hdev->asic_prop.allow_inference_soft_reset) + goto device_reset; + + /* F/W reset cannot be postponed */ + if (flags & HL_DRV_RESET_BYPASS_REQ_TO_FW) + goto device_reset; + + /* Device release watchdog is relevant only if user exists and gets a reset notification */ + if (!(event_mask & HL_NOTIFIER_EVENT_DEVICE_RESET)) { + dev_err(hdev->dev, "Resetting device without a reset indication to user\n"); + goto device_reset; + } + + ctx = hl_get_compute_ctx(hdev); + if (!ctx || !ctx->hpriv->notifier_event.eventfd) + goto device_reset; + + /* Schedule the device release watchdog work unless reset is already in progress or if the + * work is already scheduled. + */ + spin_lock(&hdev->reset_info.lock); + if (hdev->reset_info.in_reset) { + spin_unlock(&hdev->reset_info.lock); + goto device_reset; + } + + if (hdev->reset_info.watchdog_active) + goto out; + + hdev->device_release_watchdog_work.flags = flags; + dev_dbg(hdev->dev, "Device is going to be reset in %u sec unless being released\n", + hdev->device_release_watchdog_timeout_sec); + schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work, + msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000)); + hdev->reset_info.watchdog_active = 1; +out: + spin_unlock(&hdev->reset_info.lock); + + hl_notifier_event_send_all(hdev, event_mask); + + hl_ctx_put(ctx); + + return 0; + +device_reset: + if (event_mask) + hl_notifier_event_send_all(hdev, event_mask); + if (ctx) + hl_ctx_put(ctx); + + return hl_device_reset(hdev, flags); +} + static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask) { mutex_lock(¬ifier_event->lock); @@ -1728,6 +1903,11 @@ void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask) { struct hl_fpriv *hpriv; + if (!event_mask) { + dev_warn(hdev->dev, "Skip sending zero event"); + return; + } + mutex_lock(&hdev->fpriv_list_lock); list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) @@ -1898,6 +2078,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) hdev->asic_funcs->state_dump_init(hdev); + hdev->device_release_watchdog_timeout_sec = HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC; + hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL; hl_debugfs_add_device(hdev); @@ -2118,6 +2300,8 @@ void hl_device_fini(struct hl_device *hdev) } } + cancel_delayed_work_sync(&hdev->device_release_watchdog_work.reset_work); + /* Disable PCI access from device F/W so it won't send us additional * interrupts. We disable MSI/MSI-X at the halt_engines function and we * can't have the F/W sending us interrupts after that. 
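/*
 * A condensed, hypothetical decision ladder for hl_device_cond_reset()
 * above (ignoring the inference-soft-reset special case): the hard reset
 * is deferred behind the release watchdog only when it may be postponed
 * and a user context exists that will actually see the notification.
 */
#include <stdbool.h>
#include <stdio.h>

enum action { RESET_NOW, ARM_WATCHDOG, WATCHDOG_ALREADY_ARMED };

static enum action cond_reset(bool hard, bool bypass_fw, bool notify_user,
                              bool ctx_has_eventfd, bool in_reset,
                              bool watchdog_active)
{
        if (!hard || bypass_fw || !notify_user || !ctx_has_eventfd)
                return RESET_NOW;               /* cannot be postponed */
        if (in_reset)
                return RESET_NOW;               /* a reset already started */
        if (watchdog_active)
                return WATCHDOG_ALREADY_ARMED;  /* timer already ticking */
        return ARM_WATCHDOG;                    /* give user time to release */
}

int main(void)
{
        printf("%d\n", cond_reset(true, false, true, true, false, false));
        return 0;
}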
We need to @@ -2144,14 +2328,16 @@ void hl_device_fini(struct hl_device *hdev) */ dev_info(hdev->dev, "Waiting for all processes to exit (timeout of %u seconds)", - HL_PENDING_RESET_LONG_SEC); + HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI); - rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC, false); + hdev->process_kill_trial_cnt = 0; + rc = device_kill_open_processes(hdev, HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI, false); if (rc) { dev_crit(hdev->dev, "Failed to kill all open processes\n"); device_disable_open_processes(hdev, false); } + hdev->process_kill_trial_cnt = 0; rc = device_kill_open_processes(hdev, 0, true); if (rc) { dev_crit(hdev->dev, "Failed to kill all control device open processes\n"); @@ -2177,6 +2363,8 @@ void hl_device_fini(struct hl_device *hdev) hl_mmu_fini(hdev); + vfree(hdev->captured_err_info.pgf_info.user_mappings); + hl_eq_fini(hdev, &hdev->event_queue); kfree(hdev->shadow_cs_queue); @@ -2231,3 +2419,117 @@ inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val) { writel(val, hdev->rmmio + reg); } + +void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines, + u8 flags) +{ + if (num_of_engines > HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR) { + dev_err(hdev->dev, + "Number of possible razwi initiators (%u) exceeded limit (%u)\n", + num_of_engines, HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR); + return; + } + + /* In case it's the first razwi since the device was opened, capture its parameters */ + if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info_recorded, 0, 1)) + return; + + hdev->captured_err_info.razwi.timestamp = ktime_to_ns(ktime_get()); + hdev->captured_err_info.razwi.addr = addr; + hdev->captured_err_info.razwi.num_of_possible_engines = num_of_engines; + memcpy(&hdev->captured_err_info.razwi.engine_id[0], &engine_id[0], + num_of_engines * sizeof(u16)); + hdev->captured_err_info.razwi.flags = flags; +} + +void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines, + u8 flags, u64 *event_mask) +{ + hl_capture_razwi(hdev, addr, engine_id, num_of_engines, flags); + + if (event_mask) + *event_mask |= HL_NOTIFIER_EVENT_RAZWI; +} + +static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu) +{ + struct page_fault_info *pgf_info = &hdev->captured_err_info.pgf_info; + struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; + struct hl_vm_hash_node *hnode; + struct hl_userptr *userptr; + enum vm_type *vm_type; + struct hl_ctx *ctx; + u32 map_idx = 0; + int i; + + /* Reset previous session count*/ + pgf_info->num_of_user_mappings = 0; + + ctx = hl_get_compute_ctx(hdev); + if (!ctx) { + dev_err(hdev->dev, "Can't get user context for user mappings\n"); + return; + } + + mutex_lock(&ctx->mem_hash_lock); + hash_for_each(ctx->mem_hash, i, hnode, node) { + vm_type = hnode->ptr; + if (((*vm_type == VM_TYPE_USERPTR) && is_pmmu) || + ((*vm_type == VM_TYPE_PHYS_PACK) && !is_pmmu)) + pgf_info->num_of_user_mappings++; + + } + + if (!pgf_info->num_of_user_mappings) + goto finish; + + /* In case we already allocated in previous session, need to release it before + * allocating new buffer. 
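/*
 * hl_capture_razwi() above keeps only the first (root cause) error of a
 * session: an atomic 0 -> 1 compare-exchange elects the single writer of
 * the capture buffer. A C11 stand-in for the kernel's atomic_cmpxchg():
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int recorded;

static void capture(const char *what)
{
        int expected = 0;

        /* Only the caller that flips 0 -> 1 stores its data. */
        if (!atomic_compare_exchange_strong(&recorded, &expected, 1))
                return;

        printf("captured: %s\n", what);
}

int main(void)
{
        capture("first razwi");         /* stored */
        capture("second razwi");        /* dropped, root cause preserved */
        return 0;
}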
+ */ + vfree(pgf_info->user_mappings); + pgf_info->user_mappings = + vzalloc(pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping)); + if (!pgf_info->user_mappings) { + pgf_info->num_of_user_mappings = 0; + goto finish; + } + + hash_for_each(ctx->mem_hash, i, hnode, node) { + vm_type = hnode->ptr; + if ((*vm_type == VM_TYPE_USERPTR) && (is_pmmu)) { + userptr = hnode->ptr; + pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr; + pgf_info->user_mappings[map_idx].size = userptr->size; + map_idx++; + } else if ((*vm_type == VM_TYPE_PHYS_PACK) && (!is_pmmu)) { + phys_pg_pack = hnode->ptr; + pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr; + pgf_info->user_mappings[map_idx].size = phys_pg_pack->total_size; + map_idx++; + } + } +finish: + mutex_unlock(&ctx->mem_hash_lock); + hl_ctx_put(ctx); +} + +void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu) +{ + /* Capture only the first page fault */ + if (atomic_cmpxchg(&hdev->captured_err_info.pgf_info_recorded, 0, 1)) + return; + + hdev->captured_err_info.pgf_info.pgf.timestamp = ktime_to_ns(ktime_get()); + hdev->captured_err_info.pgf_info.pgf.addr = addr; + hdev->captured_err_info.pgf_info.pgf.engine_id = eng_id; + hl_capture_user_mappings(hdev, is_pmmu); +} + +void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu, + u64 *event_mask) +{ + hl_capture_page_fault(hdev, addr, eng_id, is_pmmu); + + if (event_mask) + *event_mask |= HL_NOTIFIER_EVENT_PAGE_FAULT; +} diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index 2de6a9bd564de351e27c3db867537eeb4473275f..228b92278e480ba904faebd9b3093cf1c5709415 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -12,6 +12,7 @@ #include #include #include +#include #define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */ @@ -323,6 +324,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, if (!prop->supports_advanced_cpucp_rc) { dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n", rc, opcode); + rc = -EIO; goto scrub_descriptor; } @@ -615,16 +617,12 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val, if (sts_val & CPU_BOOT_DEV_STS0_ENABLED) dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val); - /* All warnings should go here in order not to reach the unknown error validation */ if (err_val & CPU_BOOT_ERR0_EEPROM_FAIL) { - dev_warn(hdev->dev, - "Device boot warning - EEPROM failure detected, default settings applied\n"); - /* This is a warning so we don't want it to disable the - * device - */ - err_val &= ~CPU_BOOT_ERR0_EEPROM_FAIL; + dev_err(hdev->dev, "Device boot error - EEPROM failure detected\n"); + err_exists = true; } + /* All warnings should go here in order not to reach the unknown error validation */ if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) { dev_warn(hdev->dev, "Device boot warning - Skipped DRAM initialization\n"); @@ -1782,6 +1780,8 @@ int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev, /* first send clear command to clean former commands */ rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader); + if (rc) + return rc; /* send the actual command */ hl_fw_dynamic_send_cmd(hdev, fw_loader, cmd, size); @@ -1988,10 +1988,11 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev, struct fw_load_mgr *fw_loader) { struct lkd_fw_comms_desc *fw_desc; + void __iomem *src, *temp_fw_desc; struct pci_mem_region *region; struct 
fw_response *response; + u16 fw_data_size; enum pci_region region_id; - void __iomem *src; int rc; fw_desc = &fw_loader->dynamic_loader.comm_desc; @@ -2018,9 +2019,29 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev, fw_loader->dynamic_loader.fw_desc_valid = false; src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar + response->ram_offset; + + /* + * We do the copy of the fw descriptor in 2 phases: + * 1. copy the header + data info according to our lkd_fw_comms_desc definition. + * then we're able to read the actual data size provided by fw. + * this is needed for cases where data in descriptor was changed(add/remove) + * in embedded specs header file before updating lkd copy of the header file + * 2. copy descriptor to temporary buffer with aligned size and send it to validation + */ memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc)); + fw_data_size = le16_to_cpu(fw_desc->header.size); + + temp_fw_desc = vzalloc(sizeof(struct comms_desc_header) + fw_data_size); + if (!temp_fw_desc) + return -ENOMEM; - return hl_fw_dynamic_validate_descriptor(hdev, fw_loader, fw_desc); + memcpy_fromio(temp_fw_desc, src, sizeof(struct comms_desc_header) + fw_data_size); + + rc = hl_fw_dynamic_validate_descriptor(hdev, fw_loader, + (struct lkd_fw_comms_desc *) temp_fw_desc); + vfree(temp_fw_desc); + + return rc; } /** @@ -2507,7 +2528,7 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev, struct fw_load_mgr *fw_loader) { struct cpu_dyn_regs *dyn_regs; - int rc; + int rc, fw_error_rc; dev_info(hdev->dev, "Loading %sfirmware to device, may take some time...\n", @@ -2607,14 +2628,17 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev, hl_fw_dynamic_update_linux_interrupt_if(hdev); - return 0; - protocol_err: - if (fw_loader->dynamic_loader.fw_desc_valid) - fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0), + if (fw_loader->dynamic_loader.fw_desc_valid) { + fw_error_rc = fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0), le32_to_cpu(dyn_regs->cpu_boot_err1), le32_to_cpu(dyn_regs->cpu_boot_dev_sts0), le32_to_cpu(dyn_regs->cpu_boot_dev_sts1)); + + if (fw_error_rc) + return fw_error_rc; + } + return rc; } @@ -2983,7 +3007,7 @@ static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void int rc; req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr); - if (!data) { + if (!req_cpu_addr) { dev_err(hdev->dev, "Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id); return -ENOMEM; diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 58c95b13be69a4706ab9f75144ca6be7d55fa55d..e2527d976ee052b4eb523900688ac398485a85b4 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -50,9 +50,14 @@ struct hl_fpriv; #define HL_MMAP_OFFSET_VALUE_MASK (0x1FFFFFFFFFFFull >> PAGE_SHIFT) #define HL_MMAP_OFFSET_VALUE_GET(off) (off & HL_MMAP_OFFSET_VALUE_MASK) -#define HL_PENDING_RESET_PER_SEC 10 -#define HL_PENDING_RESET_MAX_TRIALS 60 /* 10 minutes */ -#define HL_PENDING_RESET_LONG_SEC 60 +#define HL_PENDING_RESET_PER_SEC 10 +#define HL_PENDING_RESET_MAX_TRIALS 60 /* 10 minutes */ +#define HL_PENDING_RESET_LONG_SEC 60 +/* + * In device fini, wait 10 minutes for user processes to be terminated after we kill them. + * This is needed to prevent situation of clearing resources while user processes are still alive. 
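/*
 * A standalone sketch of the two-phase descriptor read in
 * hl_fw_dynamic_read_and_validate_descriptor() above: phase 1 reads a
 * fixed-size prefix to learn the size the firmware actually wrote, phase 2
 * re-copies header + payload into a scratch buffer for validation. Plain
 * memcpy stands in for memcpy_fromio(); the wire layout is hypothetical
 * and assumes a little-endian host.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct desc_header {
        uint16_t size;          /* payload bytes that follow the header */
        uint16_t flags;
};

static void *read_descriptor(const void *bar, size_t *out_len)
{
        struct desc_header hdr;
        void *buf;

        memcpy(&hdr, bar, sizeof(hdr));         /* phase 1: prefix only */
        *out_len = sizeof(hdr) + hdr.size;

        buf = calloc(1, *out_len);              /* scratch, like vzalloc() */
        if (!buf)
                return NULL;
        memcpy(buf, bar, *out_len);             /* phase 2: full snapshot */
        return buf;
}

int main(void)
{
        uint8_t wire[8] = { 4, 0, 0, 0, 'd', 'a', 't', 'a' };
        size_t len = 0;
        void *d = read_descriptor(wire, &len);
        int ok = d && len == sizeof(wire);

        free(d);
        return !ok;
}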
+ */ +#define HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI 600 #define HL_HARD_RESET_MAX_TIMEOUT 120 #define HL_PLDM_HARD_RESET_MAX_TIMEOUT (HL_HARD_RESET_MAX_TIMEOUT * 3) @@ -191,6 +196,9 @@ enum hl_mmu_enablement { * * - HL_DRV_RESET_DELAY * Set if a delay should be added before the reset + * + * - HL_DRV_RESET_FROM_WD_THR + * Set if the caller is the device release watchdog thread */ #define HL_DRV_RESET_HARD (1 << 0) @@ -201,6 +209,7 @@ enum hl_mmu_enablement { #define HL_DRV_RESET_BYPASS_REQ_TO_FW (1 << 5) #define HL_DRV_RESET_FW_FATAL_ERR (1 << 6) #define HL_DRV_RESET_DELAY (1 << 7) +#define HL_DRV_RESET_FROM_WD_THR (1 << 8) /* * Security @@ -1188,7 +1197,7 @@ struct hl_dec { * @ASIC_GAUDI: Gaudi device (HL-2000). * @ASIC_GAUDI_SEC: Gaudi secured device (HL-2000). * @ASIC_GAUDI2: Gaudi2 device. - * @ASIC_GAUDI2_SEC: Gaudi2 secured device. + * @ASIC_GAUDI2B: Gaudi2B device. */ enum hl_asic_type { ASIC_INVALID, @@ -1196,7 +1205,7 @@ enum hl_asic_type { ASIC_GAUDI, ASIC_GAUDI_SEC, ASIC_GAUDI2, - ASIC_GAUDI2_SEC, + ASIC_GAUDI2B, }; struct hl_cs_parser; @@ -2489,13 +2498,9 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val); #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) -#define RMWREG32(reg, val, mask) \ - do { \ - u32 tmp_ = RREG32(reg); \ - tmp_ &= ~(mask); \ - tmp_ |= ((val) << __ffs(mask)); \ - WREG32(reg, tmp_); \ - } while (0) +#define RMWREG32_SHIFTED(reg, val, mask) WREG32_P(reg, val, ~(mask)) + +#define RMWREG32(reg, val, mask) RMWREG32_SHIFTED(reg, (val) << __ffs(mask), mask) #define RREG32_MASK(reg, mask) ((RREG32(reg) & mask) >> __ffs(mask)) @@ -2528,7 +2533,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val); break; \ (val) = __elbi_read; \ } else {\ - (val) = RREG32((u32)(addr)); \ + (val) = RREG32(lower_32_bits(addr)); \ } \ if (cond) \ break; \ @@ -2539,7 +2544,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val); break; \ (val) = __elbi_read; \ } else {\ - (val) = RREG32((u32)(addr)); \ + (val) = RREG32(lower_32_bits(addr)); \ } \ break; \ } \ @@ -2594,7 +2599,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val); if (__rc) \ break; \ } else { \ - __read_val = RREG32((u32)(addr_arr)[__arr_idx]); \ + __read_val = RREG32(lower_32_bits(addr_arr[__arr_idx])); \ } \ if (__read_val == (expected_val)) \ __elem_bitmask &= ~BIT_ULL(__arr_idx); \ @@ -2682,17 +2687,15 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val); struct hwmon_chip_info; /** - * struct hl_device_reset_work - reset workqueue task wrapper. - * @wq: work queue for device reset procedure. + * struct hl_device_reset_work - reset work wrapper. * @reset_work: reset work to be done. * @hdev: habanalabs device structure. * @flags: reset flags. */ struct hl_device_reset_work { - struct workqueue_struct *wq; - struct delayed_work reset_work; - struct hl_device *hdev; - u32 flags; + struct delayed_work reset_work; + struct hl_device *hdev; + u32 flags; }; /** @@ -2811,7 +2814,7 @@ struct hl_mmu_funcs { /** * struct hl_prefetch_work - prefetch work structure handler - * @pf_work: actual work struct. + * @prefetch_work: actual work struct. * @ctx: compute context. * @va: virtual address to pre-fetch. * @size: pre-fetch size. @@ -2819,7 +2822,7 @@ struct hl_mmu_funcs { * @asid: ASID for maintenance operation. 
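/*
 * The reworked RMWREG32() above shifts 'val' to the mask's lowest set bit
 * and lets WREG32_P() merge it; the net effect on a register value is the
 * classic read-modify-write below (__builtin_ctz playing the role of
 * __ffs).
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t rmwreg32(uint32_t old, uint32_t val, uint32_t mask)
{
        return (old & ~mask) | ((val << __builtin_ctz(mask)) & mask);
}

int main(void)
{
        /* Write 0x5 into the field at bits 11:8. */
        printf("0x%08x\n", rmwreg32(0xffffffffu, 0x5, 0xf00u)); /* 0xfffff5ff */
        return 0;
}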
*/ struct hl_prefetch_work { - struct work_struct pf_work; + struct work_struct prefetch_work; struct hl_ctx *ctx; u64 va; u64 size; @@ -2925,30 +2928,6 @@ struct cs_timeout_info { u64 seq; }; -/** - * struct razwi_info - info about last razwi error occurred. - * @timestamp: razwi timestamp. - * @write_enable: if set writing to razwi parameters in the structure is enabled. - * otherwise - disabled, so the first (root cause) razwi will not be overwritten. - * @addr: address that caused razwi. - * @engine_id_1: engine id of the razwi initiator, if it was initiated by engine that does - * not have engine id it will be set to U16_MAX. - * @engine_id_2: second engine id of razwi initiator. Might happen that razwi have 2 possible - * engines which one them caused the razwi. In that case, it will contain the - * second possible engine id, otherwise it will be set to U16_MAX. - * @non_engine_initiator: in case the initiator of the razwi does not have engine id. - * @type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX. - */ -struct razwi_info { - ktime_t timestamp; - atomic_t write_enable; - u64 addr; - u16 engine_id_1; - u16 engine_id_2; - u8 non_engine_initiator; - u8 type; -}; - #define MAX_QMAN_STREAMS_INFO 4 #define OPCODE_INFO_MAX_ADDR_SIZE 8 /** @@ -2981,16 +2960,38 @@ struct undefined_opcode_info { bool write_enable; }; +/** + * struct page_fault_info - info about page fault + * @pgf_info: page fault information. + * @user_mappings: buffer containing user mappings. + * @num_of_user_mappings: number of user mappings. + */ +struct page_fault_info { + struct hl_page_fault_info pgf; + struct hl_user_mapping *user_mappings; + u64 num_of_user_mappings; +}; + /** * struct hl_error_info - holds information collected during an error. * @cs_timeout: CS timeout error information. * @razwi: razwi information. + * @razwi_info_recorded: if set writing to razwi information is enabled. + * otherwise - disabled, so the first (root cause) razwi will not be + * overwritten. * @undef_opcode: undefined opcode information + * @pgf_info: page fault information. + * @pgf_info_recorded: if set writing to page fault information is enabled. + * otherwise - disabled, so the first (root cause) page fault will not be + * overwritten. */ struct hl_error_info { struct cs_timeout_info cs_timeout; - struct razwi_info razwi; + struct hl_info_razwi_event razwi; + atomic_t razwi_info_recorded; struct undefined_opcode_info undef_opcode; + struct page_fault_info pgf_info; + atomic_t pgf_info_recorded; }; /** @@ -3013,6 +3014,7 @@ struct hl_error_info { * same cause. * @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to * complete instead. + * @watchdog_active: true if a device release watchdog work is scheduled. */ struct hl_reset_info { spinlock_t lock; @@ -3023,12 +3025,11 @@ struct hl_reset_info { u8 in_compute_reset; u8 needs_reset; u8 hard_reset_pending; - u8 curr_reset_cause; u8 prev_reset_trigger; u8 reset_trigger_repeated; - u8 skip_reset_on_timeout; + u8 watchdog_active; }; /** @@ -3044,6 +3045,8 @@ struct hl_reset_info { * @dev_ctrl: related kernel device structure for the control device * @work_heartbeat: delayed work for CPU-CP is-alive check. * @device_reset_work: delayed work which performs hard reset + * @device_release_watchdog_work: watchdog work that performs hard reset if user doesn't release + * device upon certain error cases. * @asic_name: ASIC specific name. * @asic_type: ASIC specific type. * @completion_queue: array of hl_cq. 
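/*
 * The user_mappings snapshot documented above is built earlier by
 * hl_capture_user_mappings() with a count-then-allocate-then-fill walk
 * (both passes run under the same lock, so the count stays valid). The
 * same pattern over a hypothetical flat array instead of the mem_hash:
 */
#include <stdlib.h>

struct mapping { unsigned long long dev_va, size; int relevant; };

static struct mapping *snapshot(const struct mapping *src, size_t n, size_t *out_n)
{
        struct mapping *buf;
        size_t i, j = 0, cnt = 0;

        for (i = 0; i < n; i++)                 /* pass 1: count matches */
                cnt += !!src[i].relevant;
        *out_n = cnt;
        if (!cnt)
                return NULL;

        buf = calloc(cnt, sizeof(*buf));        /* like the vzalloc() above */
        if (!buf) {
                *out_n = 0;
                return NULL;
        }

        for (i = 0; i < n; i++)                 /* pass 2: copy matches */
                if (src[i].relevant)
                        buf[j++] = src[i];
        return buf;
}

int main(void)
{
        struct mapping m[2] = { { 0x1000, 0x2000, 1 }, { 0x5000, 0x1000, 0 } };
        size_t n;
        struct mapping *s = snapshot(m, 2, &n);

        free(s);
        return !(n == 1);
}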
@@ -3062,7 +3065,8 @@ struct hl_reset_info { * @cs_cmplt_wq: work queue of CS completions for executing work in process * context. * @ts_free_obj_wq: work queue for timestamp registration objects release. - * @pf_wq: work queue for MMU pre-fetch operations. + * @prefetch_wq: work queue for MMU pre-fetch operations. + * @reset_wq: work queue for device reset procedure. * @kernel_ctx: Kernel driver context structure. * @kernel_queues: array of hl_hw_queue. * @cs_mirror_list: CS mirror list for TDR. @@ -3152,6 +3156,7 @@ struct hl_reset_info { * indicates which decoder engines are binned-out * @edma_binning: contains mask of edma engines that is received from the f/w which * indicates which edma engines are binned-out + * @device_release_watchdog_timeout_sec: device release watchdog timeout value in seconds. * @id: device minor. * @id_control: minor of the control device. * @cdev_idx: char device index. Used for setting its name. @@ -3221,6 +3226,7 @@ struct hl_device { struct device *dev_ctrl; struct delayed_work work_heartbeat; struct hl_device_reset_work device_reset_work; + struct hl_device_reset_work device_release_watchdog_work; char asic_name[HL_STR_MAX]; char status[HL_DEV_STS_MAX][HL_STR_MAX]; enum hl_asic_type asic_type; @@ -3233,7 +3239,8 @@ struct hl_device { struct workqueue_struct *eq_wq; struct workqueue_struct *cs_cmplt_wq; struct workqueue_struct *ts_free_obj_wq; - struct workqueue_struct *pf_wq; + struct workqueue_struct *prefetch_wq; + struct workqueue_struct *reset_wq; struct hl_ctx *kernel_ctx; struct hl_hw_queue *kernel_queues; struct list_head cs_mirror_list; @@ -3314,6 +3321,7 @@ struct hl_device { u32 high_pll; u32 decoder_binning; u32 edma_binning; + u32 device_release_watchdog_timeout_sec; u16 id; u16 id_control; u16 cdev_idx; @@ -3488,6 +3496,8 @@ void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_ int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir); void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir); +int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val, + enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar); int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val, enum debugfs_access_type acc_type); int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type, @@ -3496,6 +3506,8 @@ int hl_device_open(struct inode *inode, struct file *filp); int hl_device_open_ctrl(struct inode *inode, struct file *filp); bool hl_device_operational(struct hl_device *hdev, enum hl_device_status *status); +bool hl_ctrl_device_operational(struct hl_device *hdev, + enum hl_device_status *status); enum hl_device_status hl_device_status(struct hl_device *hdev); int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable); int hl_hw_queues_create(struct hl_device *hdev); @@ -3549,6 +3561,7 @@ void hl_device_fini(struct hl_device *hdev); int hl_device_suspend(struct hl_device *hdev); int hl_device_resume(struct hl_device *hdev); int hl_device_reset(struct hl_device *hdev, u32 flags); +int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask); void hl_hpriv_get(struct hl_fpriv *hpriv); int hl_hpriv_put(struct hl_fpriv *hpriv); int hl_device_utilization(struct hl_device *hdev, u32 *utilization); @@ -3762,7 +3775,8 @@ void hl_sysfs_add_dev_vrm_attr(struct hl_device *hdev, struct attribute_group *d void hw_sob_get(struct hl_hw_sob *hw_sob); void 
hw_sob_put(struct hl_hw_sob *hw_sob); -void hl_encaps_handle_do_release(struct kref *ref); +void hl_encaps_release_handle_and_put_ctx(struct kref *ref); +void hl_encaps_release_handle_and_put_sob_ctx(struct kref *ref); void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev, struct hl_cs *cs, struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl); @@ -3798,6 +3812,13 @@ hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg, struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp, void *args); __printf(2, 3) void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...); +void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines, + u8 flags); +void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines, + u8 flags, u64 *event_mask); +void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu); +void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu, + u64 *event_mask); #ifdef CONFIG_DEBUG_FS diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c index 112632afe7d53806be61dd058fa0aba86647e7ba..7815c60df54e23bcf141bc6cf9d40b2675bf758c 100644 --- a/drivers/misc/habanalabs/common/habanalabs_drv.c +++ b/drivers/misc/habanalabs/common/habanalabs_drv.c @@ -9,6 +9,7 @@ #define pr_fmt(fmt) "habanalabs: " fmt #include "habanalabs.h" +#include "../include/hw_ip/pci/pci_general.h" #include #include @@ -74,16 +75,17 @@ MODULE_DEVICE_TABLE(pci, ids); /* * get_asic_type - translate device id to asic type * - * @device: id of the PCI device + * @hdev: pointer to habanalabs device structure. * - * Translate device id to asic type. + * Translate device id and revision id to asic type. * In case of unidentified device, return -1 */ -static enum hl_asic_type get_asic_type(u16 device) +static enum hl_asic_type get_asic_type(struct hl_device *hdev) { - enum hl_asic_type asic_type; + struct pci_dev *pdev = hdev->pdev; + enum hl_asic_type asic_type = ASIC_INVALID; - switch (device) { + switch (pdev->device) { case PCI_IDS_GOYA: asic_type = ASIC_GOYA; break; @@ -94,10 +96,18 @@ static enum hl_asic_type get_asic_type(u16 device) asic_type = ASIC_GAUDI_SEC; break; case PCI_IDS_GAUDI2: - asic_type = ASIC_GAUDI2; + switch (pdev->revision) { + case REV_ID_A: + asic_type = ASIC_GAUDI2; + break; + case REV_ID_B: + asic_type = ASIC_GAUDI2B; + break; + default: + break; + } break; default: - asic_type = ASIC_INVALID; break; } @@ -212,7 +222,8 @@ int hl_device_open(struct inode *inode, struct file *filp) hl_debugfs_add_file(hpriv); atomic_set(&hdev->captured_err_info.cs_timeout.write_enable, 1); - atomic_set(&hdev->captured_err_info.razwi.write_enable, 1); + atomic_set(&hdev->captured_err_info.razwi_info_recorded, 0); + atomic_set(&hdev->captured_err_info.pgf_info_recorded, 0); hdev->captured_err_info.undef_opcode.write_enable = true; hdev->open_counter++; @@ -270,9 +281,9 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp) mutex_lock(&hdev->fpriv_ctrl_list_lock); - if (!hl_device_operational(hdev, NULL)) { + if (!hl_ctrl_device_operational(hdev, NULL)) { dev_dbg_ratelimited(hdev->dev_ctrl, - "Can't open %s because it is disabled or in reset\n", + "Can't open %s because it is disabled\n", dev_name(hdev->dev_ctrl)); rc = -EPERM; goto out_err; @@ -415,7 +426,7 @@ static int create_hdev(struct hl_device **dev, struct pci_dev *pdev) /* First, we must find out which ASIC are we handling. 
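/*
 * get_asic_type() above now keys on PCI device id plus revision id. A
 * hypothetical standalone rendition; the numeric device id and revision
 * values here are placeholders, not the driver's definitions:
 */
#include <stdio.h>

enum asic { ASIC_INVALID, ASIC_GAUDI2, ASIC_GAUDI2B };

static enum asic asic_from_ids(unsigned short device, unsigned char revision)
{
        if (device != 0x1020)           /* placeholder for PCI_IDS_GAUDI2 */
                return ASIC_INVALID;

        switch (revision) {
        case 1:                         /* assumed value of REV_ID_A */
                return ASIC_GAUDI2;
        case 2:                         /* assumed value of REV_ID_B */
                return ASIC_GAUDI2B;
        default:
                return ASIC_INVALID;
        }
}

int main(void)
{
        printf("%d\n", asic_from_ids(0x1020, 2));       /* 2 == ASIC_GAUDI2B */
        return 0;
}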
This is needed * to configure the behavior of the driver (kernel parameters) */ - hdev->asic_type = get_asic_type(pdev->device); + hdev->asic_type = get_asic_type(hdev); if (hdev->asic_type == ASIC_INVALID) { dev_err(&pdev->dev, "Unsupported ASIC\n"); rc = -ENODEV; @@ -594,15 +605,16 @@ hl_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state) switch (state) { case pci_channel_io_normal: + dev_warn(hdev->dev, "PCI normal state error detected\n"); return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: - dev_warn(hdev->dev, "frozen state error detected\n"); + dev_warn(hdev->dev, "PCI frozen state error detected\n"); result = PCI_ERS_RESULT_NEED_RESET; break; case pci_channel_io_perm_failure: - dev_warn(hdev->dev, "failure state error detected\n"); + dev_warn(hdev->dev, "PCI failure state error detected\n"); result = PCI_ERS_RESULT_DISCONNECT; break; @@ -638,6 +650,10 @@ static void hl_pci_err_resume(struct pci_dev *pdev) */ static pci_ers_result_t hl_pci_err_slot_reset(struct pci_dev *pdev) { + struct hl_device *hdev = pci_get_drvdata(pdev); + + dev_warn(hdev->dev, "PCI slot reset detected\n"); + return PCI_ERS_RESULT_RECOVERED; } diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index 43afe40966e50a3c7700c451b8fe122997e1d192..b6abfa7761a70e296dfc243d3827f22fa7bcfbed 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -10,10 +10,11 @@ #include #include "habanalabs.h" -#include #include -#include +#include +#include #include +#include #include static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = { @@ -105,6 +106,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args) hw_ip.edma_enabled_mask = prop->edma_enabled_mask; hw_ip.server_type = prop->server_type; hw_ip.security_enabled = prop->fw_security_enabled; + hw_ip.revision_id = hdev->pdev->revision; return copy_to_user(out, &hw_ip, min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0; @@ -121,6 +123,10 @@ static int hw_events_info(struct hl_device *hdev, bool aggregate, return -EINVAL; arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size); + if (!arr) { + dev_err(hdev->dev, "Events info not supported\n"); + return -EOPNOTSUPP; + } return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0; } @@ -603,20 +609,14 @@ static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args) { struct hl_device *hdev = hpriv->hdev; u32 max_size = args->return_size; - struct hl_info_razwi_event info = {0}; + struct hl_info_razwi_event *info = &hdev->captured_err_info.razwi; void __user *out = (void __user *) (uintptr_t) args->return_pointer; if ((!max_size) || (!out)) return -EINVAL; - info.timestamp = ktime_to_ns(hdev->captured_err_info.razwi.timestamp); - info.addr = hdev->captured_err_info.razwi.addr; - info.engine_id_1 = hdev->captured_err_info.razwi.engine_id_1; - info.engine_id_2 = hdev->captured_err_info.razwi.engine_id_2; - info.no_engine_id = hdev->captured_err_info.razwi.non_engine_initiator; - info.error_type = hdev->captured_err_info.razwi.type; - - return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0; + return copy_to_user(out, info, min_t(size_t, max_size, sizeof(struct hl_info_razwi_event))) + ? 
-EFAULT : 0; } static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args) @@ -784,6 +784,42 @@ static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args) return rc; } +static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + struct hl_device *hdev = hpriv->hdev; + u32 max_size = args->return_size; + struct hl_page_fault_info *info = &hdev->captured_err_info.pgf_info.pgf; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + return copy_to_user(out, info, min_t(size_t, max_size, sizeof(struct hl_page_fault_info))) + ? -EFAULT : 0; +} + +static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + u32 user_buf_size = args->return_size; + struct hl_device *hdev = hpriv->hdev; + struct page_fault_info *pgf_info; + u64 actual_size; + + pgf_info = &hdev->captured_err_info.pgf_info; + args->array_size = pgf_info->num_of_user_mappings; + + if (!out) + return -EINVAL; + + actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping); + if (user_buf_size < actual_size) + return -ENOMEM; + + return copy_to_user(out, pgf_info->user_mappings, min_t(size_t, user_buf_size, actual_size)) + ? -EFAULT : 0; +} + static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, struct device *dev) { @@ -843,6 +879,15 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, case HL_INFO_GET_EVENTS: return events_info(hpriv, args); + case HL_INFO_PAGE_FAULT_EVENT: + return page_fault_info(hpriv, args); + + case HL_INFO_USER_MAPPINGS: + return user_mappings_info(hpriv, args); + + case HL_INFO_UNREGISTER_EVENTFD: + return eventfd_unregister(hpriv, args); + default: break; } @@ -899,9 +944,6 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, case HL_INFO_REGISTER_EVENTFD: return eventfd_register(hpriv, args); - case HL_INFO_UNREGISTER_EVENTFD: - return eventfd_unregister(hpriv, args); - case HL_INFO_ENGINE_STATUS: return engine_status_info(hpriv, args); diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index ef28f3b37b93216bf9a7b50f296f21fcdff62261..5e9ae7600d75ee2865755f8f9c057f0c76547975 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -1689,7 +1689,7 @@ static int hl_dmabuf_attach(struct dma_buf *dmabuf, hl_dmabuf = dmabuf->priv; hdev = hl_dmabuf->ctx->hdev; - rc = pci_p2pdma_distance_many(hdev->pdev, &attachment->dev, 1, true); + rc = pci_p2pdma_distance(hdev->pdev, attachment->dev, true); if (rc < 0) attachment->peer2peer = false; @@ -2109,7 +2109,7 @@ static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args) /* Allocate the internal kernel buffer */ size = num_elements * sizeof(struct hl_user_pending_interrupt); - p = vmalloc(size); + p = vzalloc(size); if (!p) goto free_user_buff; @@ -2312,8 +2312,7 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size, if (!userptr->pages) return -ENOMEM; - rc = pin_user_pages_fast(start, npages, - FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM, + rc = pin_user_pages_fast(start, npages, FOLL_WRITE | FOLL_LONGTERM, userptr->pages); if (rc != npages) { @@ -2508,24 +2507,20 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range **va_ranges, /* * PAGE_SIZE alignment - * it is the callers responsibility to align the addresses if the + * it is the caller's 
responsibility to align the addresses if the * page size is not a power of 2 */ if (is_power_of_2(page_size)) { - if (start & (PAGE_SIZE - 1)) { - start &= PAGE_MASK; - start += PAGE_SIZE; - } + start = round_up(start, page_size); /* * The end of the range is inclusive, hence we need to align it * to the end of the last full page in the range. For example if * end = 0x3ff5 with page size 0x1000, we need to align it to - * 0x2fff. The remainig 0xff5 bytes do not form a full page. + * 0x2fff. The remaining 0xff5 bytes do not form a full page. */ - if ((end + 1) & (PAGE_SIZE - 1)) - end = ((end + 1) & PAGE_MASK) - 1; + end = round_down(end + 1, page_size) - 1; } if (start >= end) { diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c index cf8946266615c16973952fa90e26dcfa9e7f30b1..2c1005f74cf440eb59175862439452a6e0292ad4 100644 --- a/drivers/misc/habanalabs/common/mmu/mmu.c +++ b/drivers/misc/habanalabs/common/mmu/mmu.c @@ -635,7 +635,7 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev) hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]); break; case ASIC_GAUDI2: - case ASIC_GAUDI2_SEC: + case ASIC_GAUDI2B: /* MMUs in Gaudi2 are always host resident */ hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]); break; @@ -699,7 +699,7 @@ int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard, static void hl_mmu_prefetch_work_function(struct work_struct *work) { - struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, pf_work); + struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, prefetch_work); struct hl_ctx *ctx = pfw->ctx; struct hl_device *hdev = ctx->hdev; @@ -723,25 +723,25 @@ put_ctx: int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size) { - struct hl_prefetch_work *handle_pf_work; + struct hl_prefetch_work *handle_prefetch_work; - handle_pf_work = kmalloc(sizeof(*handle_pf_work), GFP_KERNEL); - if (!handle_pf_work) + handle_prefetch_work = kmalloc(sizeof(*handle_prefetch_work), GFP_KERNEL); + if (!handle_prefetch_work) return -ENOMEM; - INIT_WORK(&handle_pf_work->pf_work, hl_mmu_prefetch_work_function); - handle_pf_work->ctx = ctx; - handle_pf_work->va = va; - handle_pf_work->size = size; - handle_pf_work->flags = flags; - handle_pf_work->asid = asid; + INIT_WORK(&handle_prefetch_work->prefetch_work, hl_mmu_prefetch_work_function); + handle_prefetch_work->ctx = ctx; + handle_prefetch_work->va = va; + handle_prefetch_work->size = size; + handle_prefetch_work->flags = flags; + handle_prefetch_work->asid = asid; /* * as actual prefetch is done in a WQ we must get the context (and put it * at the end of the work function) */ hl_ctx_get(ctx); - queue_work(ctx->hdev->pf_wq, &handle_pf_work->pf_work); + queue_work(ctx->hdev->prefetch_wq, &handle_prefetch_work->prefetch_work); return 0; } diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c index 36e9814139d172c8faa35c099445d8c454d70c4b..735d8bed0066040fbfba3c3afad0c7d0eef66926 100644 --- a/drivers/misc/habanalabs/common/sysfs.c +++ b/drivers/misc/habanalabs/common/sysfs.c @@ -248,8 +248,8 @@ static ssize_t device_type_show(struct device *dev, case ASIC_GAUDI2: str = "GAUDI2"; break; - case ASIC_GAUDI2_SEC: - str = "GAUDI2 SEC"; + case ASIC_GAUDI2B: + str = "GAUDI2B"; break; default: dev_err(hdev->dev, "Unrecognized ASIC type %d\n", diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 
92560414e84333ded0f7564eb8e1c2f137ed5f42..9f5e208701bad1186088113fb4129c7da4ccd821 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -6505,8 +6505,8 @@ event_not_supported: } static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev, u32 x_y, - bool is_write, s32 *engine_id_1, - s32 *engine_id_2) + bool is_write, u16 *engine_id_1, + u16 *engine_id_2) { u32 dma_id[2], dma_offset, err_cause[2], mask, i; @@ -6603,7 +6603,7 @@ unknown_initiator: } static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, bool is_write, - u32 *engine_id_1, u32 *engine_id_2) + u16 *engine_id_1, u16 *engine_id_2) { u32 val, x_y, axi_id; @@ -6719,8 +6719,8 @@ static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, bool i return "unknown initiator"; } -static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u32 *engine_id_1, - u32 *engine_id_2) +static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u16 *engine_id_1, + u16 *engine_id_2, bool *is_read, bool *is_write) { if (RREG32(mmMMU_UP_RAZWI_WRITE_VLD)) { @@ -6728,6 +6728,7 @@ static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u32 *engine_i "RAZWI event caused by illegal write of %s\n", gaudi_get_razwi_initiator_name(hdev, true, engine_id_1, engine_id_2)); WREG32(mmMMU_UP_RAZWI_WRITE_VLD, 0); + *is_write = true; } if (RREG32(mmMMU_UP_RAZWI_READ_VLD)) { @@ -6735,10 +6736,11 @@ static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u32 *engine_i "RAZWI event caused by illegal read of %s\n", gaudi_get_razwi_initiator_name(hdev, false, engine_id_1, engine_id_2)); WREG32(mmMMU_UP_RAZWI_READ_VLD, 0); + *is_read = true; } } -static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr, u8 *type) +static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr, u64 *event_mask) { struct gaudi_device *gaudi = hdev->asic_specific; u32 val; @@ -6753,7 +6755,7 @@ static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr *addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA); dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n", *addr); - *type = HL_RAZWI_PAGE_FAULT; + hl_handle_page_fault(hdev, *addr, 0, true, event_mask); WREG32(mmMMU_UP_PAGE_ERROR_CAPTURE, 0); } @@ -6765,7 +6767,6 @@ static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr *addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA); dev_err_ratelimited(hdev->dev, "MMU access error on va 0x%llx\n", *addr); - *type = HL_RAZWI_MMU_ACCESS_ERROR; WREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE, 0); } @@ -7300,48 +7301,44 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *e } static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type, - bool razwi) + bool razwi, u64 *event_mask) { - u32 engine_id_1, engine_id_2; + bool is_read = false, is_write = false; + u16 engine_id[2], num_of_razwi_eng = 0; char desc[64] = ""; u64 razwi_addr = 0; - u8 razwi_type; - int rc; + u8 razwi_flags = 0; /* * Init engine id by default as not valid and only if razwi initiated from engine with * engine id it will get valid value. 
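Editor's note: the va_range_init() hunk above replaces the open-coded alignment with round_up()/round_down(), and in doing so also aligns against the range's own page_size rather than the CPU's PAGE_SIZE/PAGE_MASK. The inclusive end needs the end+1 / -1 dance because [start, end] only covers whole pages if end lands on the last byte of a page. A runnable check of the arithmetic, with macros equivalent (for power-of-two alignments) to the kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* Equivalent to the kernel's round_up()/round_down() for power-of-2 y */
#define round_up(x, y)   ((((x) - 1) | ((y) - 1)) + 1)
#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))

int main(void)
{
	uint64_t page = 0x1000, start = 0x1234, end = 0x3ff5;

	start = round_up(start, page);		/* 0x2000 */
	end = round_down(end + 1, page) - 1;	/* 0x2fff: last byte of the
						 * last full page, matching
						 * the comment in the patch */
	printf("start=0x%llx end=0x%llx\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}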
- * Init razwi type to default, will be changed only if razwi caused by page fault of - * MMU access error */ - engine_id_1 = U16_MAX; - engine_id_2 = U16_MAX; - razwi_type = U8_MAX; + engine_id[0] = HL_RAZWI_NA_ENG_ID; + engine_id[1] = HL_RAZWI_NA_ENG_ID; gaudi_get_event_desc(event_type, desc, sizeof(desc)); dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n", event_type, desc); if (razwi) { - gaudi_print_and_get_razwi_info(hdev, &engine_id_1, &engine_id_2); - gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, &razwi_type); - - /* In case it's the first razwi, save its parameters*/ - rc = atomic_cmpxchg(&hdev->captured_err_info.razwi.write_enable, 1, 0); - if (rc) { - hdev->captured_err_info.razwi.timestamp = ktime_get(); - hdev->captured_err_info.razwi.addr = razwi_addr; - hdev->captured_err_info.razwi.engine_id_1 = engine_id_1; - hdev->captured_err_info.razwi.engine_id_2 = engine_id_2; - /* - * If first engine id holds non valid value the razwi initiator - * does not have engine id - */ - hdev->captured_err_info.razwi.non_engine_initiator = - (engine_id_1 == U16_MAX); - hdev->captured_err_info.razwi.type = razwi_type; - + gaudi_print_and_get_razwi_info(hdev, &engine_id[0], &engine_id[1], &is_read, + &is_write); + gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, event_mask); + + if (is_read) + razwi_flags |= HL_RAZWI_READ; + if (is_write) + razwi_flags |= HL_RAZWI_WRITE; + + if (engine_id[0] != HL_RAZWI_NA_ENG_ID) { + if (engine_id[1] != HL_RAZWI_NA_ENG_ID) + num_of_razwi_eng = 2; + else + num_of_razwi_eng = 1; } + + hl_handle_razwi(hdev, razwi_addr, engine_id, num_of_razwi_eng, razwi_flags, + event_mask); } } @@ -7350,8 +7347,8 @@ static void gaudi_print_out_of_sync_info(struct hl_device *hdev, { struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ]; - dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n", - sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci)); + dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n", + le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci)); } static void gaudi_print_fw_alive_info(struct hl_device *hdev, @@ -7359,9 +7356,10 @@ static void gaudi_print_fw_alive_info(struct hl_device *hdev, { dev_err(hdev->dev, "FW alive report: severity=%s, process_id=%u, thread_id=%u, uptime=%llu seconds\n", - (fw_alive->severity == FW_ALIVE_SEVERITY_MINOR) ? - "Minor" : "Critical", fw_alive->process_id, - fw_alive->thread_id, fw_alive->uptime_seconds); + (fw_alive->severity == FW_ALIVE_SEVERITY_MINOR) ? "Minor" : "Critical", + le32_to_cpu(fw_alive->process_id), + le32_to_cpu(fw_alive->thread_id), + le64_to_cpu(fw_alive->uptime_seconds)); } static void gaudi_print_nic_axi_irq_info(struct hl_device *hdev, u16 event_type, @@ -7679,7 +7677,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR: case GAUDI_EVENT_MMU_DERR: case GAUDI_EVENT_NIC0_CS_DBG_DERR ... GAUDI_EVENT_NIC4_CS_DBG_DERR: - gaudi_print_irq_info(hdev, event_type, true); + gaudi_print_irq_info(hdev, event_type, true, &event_mask); gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR; @@ -7689,7 +7687,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr case GAUDI_EVENT_AXI_ECC: case GAUDI_EVENT_L2_RAM_ECC: case GAUDI_EVENT_PLL0 ... 
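Editor's note: the rework above drops the single razwi_type byte in favour of a flags byte built from the read/write indications, plus an engine-id array with an explicit count. A compact sketch of that folding; the flag values and the not-available marker are illustrative, the real HL_RAZWI_* definitions live in the driver's uapi headers:

#include <stdint.h>
#include <stdio.h>

#define HL_RAZWI_READ		0x1	/* illustrative values */
#define HL_RAZWI_WRITE		0x2
#define HL_RAZWI_NA_ENG_ID	0xffff	/* assumed "no engine" marker */

int main(void)
{
	int is_read = 1, is_write = 0;
	uint16_t engine_id[2] = { 37, HL_RAZWI_NA_ENG_ID };
	uint16_t num_of_razwi_eng = 0;
	uint8_t razwi_flags = 0;

	if (is_read)
		razwi_flags |= HL_RAZWI_READ;
	if (is_write)
		razwi_flags |= HL_RAZWI_WRITE;

	/* engine_id[1] can only be valid if engine_id[0] is */
	if (engine_id[0] != HL_RAZWI_NA_ENG_ID)
		num_of_razwi_eng = (engine_id[1] != HL_RAZWI_NA_ENG_ID) ? 2 : 1;

	printf("flags=%#x engines=%u\n", razwi_flags, num_of_razwi_eng);
	return 0;
}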
GAUDI_EVENT_PLL17: - gaudi_print_irq_info(hdev, event_type, false); + gaudi_print_irq_info(hdev, event_type, false, &event_mask); fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; goto reset_device; @@ -7698,7 +7696,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr case GAUDI_EVENT_HBM1_SPI_0: case GAUDI_EVENT_HBM2_SPI_0: case GAUDI_EVENT_HBM3_SPI_0: - gaudi_print_irq_info(hdev, event_type, false); + gaudi_print_irq_info(hdev, event_type, false, &event_mask); gaudi_hbm_read_interrupts(hdev, gaudi_hbm_event_to_dev(event_type), &eq_entry->hbm_ecc_data); @@ -7710,7 +7708,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr case GAUDI_EVENT_HBM1_SPI_1: case GAUDI_EVENT_HBM2_SPI_1: case GAUDI_EVENT_HBM3_SPI_1: - gaudi_print_irq_info(hdev, event_type, false); + gaudi_print_irq_info(hdev, event_type, false, &event_mask); gaudi_hbm_read_interrupts(hdev, gaudi_hbm_event_to_dev(event_type), &eq_entry->hbm_ecc_data); @@ -7732,7 +7730,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr * if the event is a TPC Assertion or a "real" TPC DEC. */ event_mask |= HL_NOTIFIER_EVENT_TPC_ASSERT; - gaudi_print_irq_info(hdev, event_type, true); + gaudi_print_irq_info(hdev, event_type, true, &event_mask); reset_required = gaudi_tpc_read_interrupts(hdev, tpc_dec_event_to_tpc_id(event_type), "AXI_SLV_DEC_Error"); @@ -7757,7 +7755,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr case GAUDI_EVENT_TPC5_KRN_ERR: case GAUDI_EVENT_TPC6_KRN_ERR: case GAUDI_EVENT_TPC7_KRN_ERR: - gaudi_print_irq_info(hdev, event_type, true); + gaudi_print_irq_info(hdev, event_type, true, &event_mask); reset_required = gaudi_tpc_read_interrupts(hdev, tpc_krn_event_to_tpc_id(event_type), "KRN_ERR"); @@ -7796,7 +7794,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR: fallthrough; case GAUDI_EVENT_MMU_SERR: - gaudi_print_irq_info(hdev, event_type, true); + gaudi_print_irq_info(hdev, event_type, true, &event_mask); gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); hl_fw_unmask_irq(hdev, event_type); event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; @@ -7806,14 +7804,14 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr case GAUDI_EVENT_CPU_AXI_SPLITTER: case GAUDI_EVENT_PSOC_AXI_DEC: case GAUDI_EVENT_PSOC_PRSTN_FALL: - gaudi_print_irq_info(hdev, event_type, true); + gaudi_print_irq_info(hdev, event_type, true, &event_mask); hl_fw_unmask_irq(hdev, event_type); event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; case GAUDI_EVENT_MMU_PAGE_FAULT: case GAUDI_EVENT_MMU_WR_PERM: - gaudi_print_irq_info(hdev, event_type, true); + gaudi_print_irq_info(hdev, event_type, true, &event_mask); hl_fw_unmask_irq(hdev, event_type); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -7842,14 +7840,14 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr case GAUDI_EVENT_NIC4_QM1: case GAUDI_EVENT_DMA0_CORE ... GAUDI_EVENT_DMA7_CORE: case GAUDI_EVENT_TPC0_QM ... 
GAUDI_EVENT_TPC7_QM: - gaudi_print_irq_info(hdev, event_type, true); + gaudi_print_irq_info(hdev, event_type, true, &event_mask); gaudi_handle_qman_err(hdev, event_type, &event_mask); hl_fw_unmask_irq(hdev, event_type); event_mask |= (HL_NOTIFIER_EVENT_USER_ENGINE_ERR | HL_NOTIFIER_EVENT_DEVICE_RESET); break; case GAUDI_EVENT_RAZWI_OR_ADC_SW: - gaudi_print_irq_info(hdev, event_type, true); + gaudi_print_irq_info(hdev, event_type, true, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; goto reset_device; @@ -7862,7 +7860,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr case GAUDI_EVENT_TPC6_BMON_SPMU: case GAUDI_EVENT_TPC7_BMON_SPMU: case GAUDI_EVENT_DMA_BM_CH0 ... GAUDI_EVENT_DMA_BM_CH7: - gaudi_print_irq_info(hdev, event_type, false); + gaudi_print_irq_info(hdev, event_type, false, &event_mask); hl_fw_unmask_irq(hdev, event_type); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -7874,7 +7872,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr break; case GAUDI_EVENT_DMA_IF_SEI_0 ... GAUDI_EVENT_DMA_IF_SEI_3: - gaudi_print_irq_info(hdev, event_type, false); + gaudi_print_irq_info(hdev, event_type, false, &event_mask); gaudi_print_sm_sei_info(hdev, event_type, &eq_entry->sm_sei_data); rc = hl_state_dump(hdev); @@ -7903,18 +7901,18 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr break; case GAUDI_EVENT_DEV_RESET_REQ: - gaudi_print_irq_info(hdev, event_type, false); + gaudi_print_irq_info(hdev, event_type, false, &event_mask); event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; goto reset_device; case GAUDI_EVENT_PKT_QUEUE_OUT_SYNC: - gaudi_print_irq_info(hdev, event_type, false); + gaudi_print_irq_info(hdev, event_type, false, &event_mask); gaudi_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err); event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; goto reset_device; case GAUDI_EVENT_FW_ALIVE_S: - gaudi_print_irq_info(hdev, event_type, false); + gaudi_print_irq_info(hdev, event_type, false, &event_mask); gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive); event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; goto reset_device; @@ -7946,14 +7944,14 @@ reset_device: reset_required = false; } - /* despite reset doesn't execute. 
a notification on - * occurred event needs to be sent here - */ - hl_notifier_event_send_all(hdev, event_mask); - if (reset_required) - hl_device_reset(hdev, flags); - else + if (reset_required) { + hl_device_cond_reset(hdev, flags, event_mask); + } else { hl_fw_unmask_irq(hdev, event_type); + /* Notification on occurred event needs to be sent although reset is not executed */ + if (event_mask) + hl_notifier_event_send_all(hdev, event_mask); + } } static void *gaudi_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size) diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2.c b/drivers/misc/habanalabs/gaudi2/gaudi2.c index 65e6cae6100a469062175d7785f6df6713b69af9..e793fb2bdcbe6c9d55765420c3e9dfca3e6ed225 100644 --- a/drivers/misc/habanalabs/gaudi2/gaudi2.c +++ b/drivers/misc/habanalabs/gaudi2/gaudi2.c @@ -128,6 +128,8 @@ #define GAUDI2_VDEC_MSIX_ENTRIES (GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM - \ GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM + 1) +#define ENGINE_ID_DCORE_OFFSET (GAUDI2_DCORE1_ENGINE_ID_EDMA_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0) + enum hl_pmmu_fatal_cause { LATENCY_RD_OUT_FIFO_OVERRUN, LATENCY_WR_OUT_FIFO_OVERRUN, @@ -3966,11 +3968,7 @@ static void gaudi2_init_firmware_loader(struct hl_device *hdev) fw_loader->skip_bmc = false; fw_loader->sram_bar_id = SRAM_CFG_BAR_ID; fw_loader->dram_bar_id = DRAM_BAR_ID; - - if (hdev->asic_type == ASIC_GAUDI2 || hdev->asic_type == ASIC_GAUDI2_SEC) - fw_loader->cpu_timeout = GAUDI2_CPU_TIMEOUT_USEC; - else /* ASIC_GAUDI2_FPGA */ - fw_loader->cpu_timeout = GAUDI2_FPGA_CPU_TIMEOUT; + fw_loader->cpu_timeout = GAUDI2_CPU_TIMEOUT_USEC; /* here we update initial values for few specific dynamic regs (as * before reading the first descriptor from FW those value has to be @@ -4471,23 +4469,9 @@ static void gaudi2_init_sm(struct hl_device *hdev) reg_val = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_CQ_EN_MASK, 1); WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + (4 * i), reg_val); - /* Init CQ0 DB */ - /* Configure the monitor to trigger MSI-X interrupt */ - /* TODO: - * Remove the if statement when virtual MSI-X doorbell is supported in simulator (SW-93022) - * and in F/W (SW-93024). 
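Editor's note: the reset tail above changes from "always notify, then maybe reset" to a conditional split: hl_device_cond_reset() takes ownership of the notification on the reset path, while the non-reset path unmasks the interrupt and notifies only if a mask was actually accumulated. A control-flow sketch with user-space stand-ins for the three driver calls; only the ordering is the point:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for hl_device_cond_reset(), hl_fw_unmask_irq() and
 * hl_notifier_event_send_all() */
static void cond_reset(uint64_t m)  { printf("reset, notify %#llx\n", (unsigned long long)m); }
static void unmask_irq(uint16_t ev) { printf("unmask %u\n", ev); }
static void notify_all(uint64_t m)  { printf("notify %#llx\n", (unsigned long long)m); }

static void handle_event(uint16_t event_type, int reset_required,
			 uint64_t event_mask)
{
	if (reset_required) {
		/* the reset path sends the notification itself */
		cond_reset(event_mask);
	} else {
		unmask_irq(event_type);
		if (event_mask)
			notify_all(event_mask);
	}
}

int main(void)
{
	handle_event(7, 0, 0x2);	/* unmask + notify */
	handle_event(9, 1, 0x6);	/* conditional reset owns notify */
	return 0;
}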
- */ - if (!hdev->pdev || hdev->asic_prop.fw_security_enabled) { - u64 msix_db_reg = CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF; - - WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0, lower_32_bits(msix_db_reg)); - WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0, upper_32_bits(msix_db_reg)); - } else { - WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0, - lower_32_bits(gaudi2->virt_msix_db_dma_addr)); - WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0, - upper_32_bits(gaudi2->virt_msix_db_dma_addr)); - } + /* Init CQ0 DB - configure the monitor to trigger MSI-X interrupt */ + WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0, lower_32_bits(gaudi2->virt_msix_db_dma_addr)); + WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0, upper_32_bits(gaudi2->virt_msix_db_dma_addr)); WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0, GAUDI2_IRQ_NUM_COMPLETION); for (i = 0 ; i < GAUDI2_RESERVED_CQ_NUMBER ; i++) { @@ -4535,7 +4519,7 @@ static void gaudi2_init_mme_acc(struct hl_device *hdev, u32 reg_base) static void gaudi2_init_dcore_mme(struct hl_device *hdev, int dcore_id, bool config_qman_only) { - u32 queue_id_base, reg_base, clk_en_addr = 0; + u32 queue_id_base, reg_base; switch (dcore_id) { case 0: @@ -4543,23 +4527,18 @@ static void gaudi2_init_dcore_mme(struct hl_device *hdev, int dcore_id, break; case 1: queue_id_base = GAUDI2_QUEUE_ID_DCORE1_MME_0_0; - clk_en_addr = mmDCORE1_MME_CTRL_LO_QM_SLV_CLK_EN; break; case 2: queue_id_base = GAUDI2_QUEUE_ID_DCORE2_MME_0_0; break; case 3: queue_id_base = GAUDI2_QUEUE_ID_DCORE3_MME_0_0; - clk_en_addr = mmDCORE3_MME_CTRL_LO_QM_SLV_CLK_EN; break; default: dev_err(hdev->dev, "Invalid dcore id %u\n", dcore_id); return; } - if (clk_en_addr && !(hdev->fw_components & FW_TYPE_BOOT_CPU)) - WREG32(clk_en_addr, 0x1); - if (!config_qman_only) { reg_base = gaudi2_mme_acc_blocks_bases[dcore_id]; gaudi2_init_mme_acc(hdev, reg_base); @@ -4660,20 +4639,6 @@ static void gaudi2_init_vdec_brdg_ctrl(struct hl_device *hdev, u64 base_addr, u3 { u32 sob_id; - /* TODO: - * Remove when virtual MSI-X doorbell is supported in simulator (SW-93022) and in F/W - * (SW-93024). 
- */ - if (!hdev->pdev || hdev->asic_prop.fw_security_enabled) { - u32 interrupt_id = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM + 2 * decoder_id; - - WREG32(base_addr + BRDG_CTRL_NRM_MSIX_LBW_AWADDR, mmPCIE_DBI_MSIX_DOORBELL_OFF); - WREG32(base_addr + BRDG_CTRL_NRM_MSIX_LBW_WDATA, interrupt_id); - WREG32(base_addr + BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR, mmPCIE_DBI_MSIX_DOORBELL_OFF); - WREG32(base_addr + BRDG_CTRL_ABNRM_MSIX_LBW_WDATA, interrupt_id + 1); - return; - } - /* VCMD normal interrupt */ sob_id = GAUDI2_RESERVED_SOB_DEC_NRM_FIRST + decoder_id; WREG32(base_addr + BRDG_CTRL_NRM_MSIX_LBW_AWADDR, @@ -4730,30 +4695,6 @@ static void gaudi2_init_dec(struct hl_device *hdev) } } -static void gaudi2_init_msix_gw_table(struct hl_device *hdev) -{ - u32 first_reg_offset, last_reg_offset, msix_gw_table_base; - u8 first_bit, last_bit; - int i; - - msix_gw_table_base = mmPCIE_WRAP_MSIX_GW_TABLE_0; - first_reg_offset = (GAUDI2_IRQ_NUM_USER_FIRST >> 5) << 2; - first_bit = GAUDI2_IRQ_NUM_USER_FIRST % 32; - last_reg_offset = (GAUDI2_IRQ_NUM_USER_LAST >> 5) << 2; - last_bit = GAUDI2_IRQ_NUM_USER_LAST % 32; - - if (first_reg_offset == last_reg_offset) { - WREG32(msix_gw_table_base + first_reg_offset, GENMASK(last_bit, first_bit)); - return; - } - - WREG32(msix_gw_table_base + first_reg_offset, GENMASK(31, first_bit)); - WREG32(msix_gw_table_base + last_reg_offset, GENMASK(last_bit, 0)); - - for (i = first_reg_offset + 4; i < last_reg_offset ; i += 4) - WREG32(msix_gw_table_base + i, 0xFFFFFFFF); -} - static int gaudi2_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 stlb_base, u32 asid, u64 phys_addr) { @@ -5111,7 +5052,7 @@ static int gaudi2_pci_mmu_init(struct hl_device *hdev) mmu_base = mmPMMU_HBW_MMU_BASE; stlb_base = mmPMMU_HBW_STLB_BASE; - RMWREG32(stlb_base + STLB_HOP_CONFIGURATION_OFFSET, + RMWREG32_SHIFTED(stlb_base + STLB_HOP_CONFIGURATION_OFFSET, (0 << PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT) | (5 << PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_SHIFT) | (4 << PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_SHIFT) | @@ -5127,7 +5068,7 @@ static int gaudi2_pci_mmu_init(struct hl_device *hdev) if (PAGE_SIZE == SZ_64K) { /* Set page sizes to 64K on hop5 and 16M on hop4 + enable 8 bit hops */ - RMWREG32(mmu_base + MMU_STATIC_MULTI_PAGE_SIZE_OFFSET, + RMWREG32_SHIFTED(mmu_base + MMU_STATIC_MULTI_PAGE_SIZE_OFFSET, FIELD_PREP(DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP5_PAGE_SIZE_MASK, 4) | FIELD_PREP(DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_MASK, 3) | FIELD_PREP( @@ -5175,7 +5116,7 @@ static int gaudi2_dcore_hmmu_init(struct hl_device *hdev, int dcore_id, RMWREG32(mmu_base + MMU_STATIC_MULTI_PAGE_SIZE_OFFSET, 5 /* 64MB */, MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_MASK); - RMWREG32(stlb_base + STLB_HOP_CONFIGURATION_OFFSET, + RMWREG32_SHIFTED(stlb_base + STLB_HOP_CONFIGURATION_OFFSET, FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_HOP_MASK, 0) | FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_MASK, 3) | FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_MASK, 3) | @@ -5267,8 +5208,6 @@ static int gaudi2_hw_init(struct hl_device *hdev) return rc; } - gaudi2_init_msix_gw_table(hdev); - gaudi2_init_scrambler_hbm(hdev); gaudi2_init_kdma(hdev); @@ -6863,6 +6802,7 @@ static inline bool is_info_event(u32 event) { switch (event) { case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE: + case GAUDI2_EVENT_CPU_FIX_POWER_ENV_S ... 
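Editor's note: two MMU-init hunks above switch RMWREG32() to RMWREG32_SHIFTED() for values that are already an OR of positioned fields (built with explicit shifts or FIELD_PREP()). The distinction matters because a helper that shifts its argument into the field would double-shift a pre-positioned value. An illustrative pair of read-modify-write helpers showing the two contracts; the real macros live in the driver's habanalabs.h, and __builtin_ctz() is a GCC/Clang builtin used here for brevity:

#include <stdint.h>
#include <stdio.h>

static uint32_t reg;	/* stands in for a device register */

#define FIELD_SHIFT(mask) (__builtin_ctz(mask))	/* mask must be nonzero */

/* Takes a raw field value and shifts it into position from the mask */
static void rmw_field(uint32_t mask, uint32_t field_val)
{
	reg = (reg & ~mask) | ((field_val << FIELD_SHIFT(mask)) & mask);
}

/* Takes an already-positioned value: mask it in, never re-shift it */
static void rmw_shifted(uint32_t mask, uint32_t shifted_val)
{
	reg = (reg & ~mask) | (shifted_val & mask);
}

int main(void)
{
	reg = 0xffffffff;
	rmw_field(0x00000f00, 0x5);	/* 5 into bits 11:8 */
	printf("reg=%#x\n", reg);	/* 0xfffff5ff */
	reg = 0xffffffff;
	rmw_shifted(0x00000f00, 0x500);	/* value already at bits 11:8 */
	printf("reg=%#x\n", reg);	/* 0xfffff5ff */
	return 0;
}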
GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E: return true; default: return false; @@ -7097,9 +7037,12 @@ static void gaudi2_handle_qman_err_generic(struct hl_device *hdev, const char *q static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev, u64 rtr_mstr_if_base_addr, bool is_write, char *name, - bool read_razwi_regs, struct hl_eq_razwi_info *razwi_info) + bool read_razwi_regs, struct hl_eq_razwi_info *razwi_info, + enum gaudi2_engine_id id, u64 *event_mask) { u32 razwi_hi, razwi_lo, razwi_xy; + u16 eng_id = id; + u8 rd_wr_flag; if (is_write) { if (read_razwi_regs) { @@ -7111,6 +7054,7 @@ static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev, razwi_lo = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_lo_reg); razwi_xy = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_id_reg); } + rd_wr_flag = HL_RAZWI_WRITE; } else { if (read_razwi_regs) { razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HI); @@ -7121,8 +7065,12 @@ static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev, razwi_lo = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_lo_reg); razwi_xy = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_id_reg); } + rd_wr_flag = HL_RAZWI_READ; } + hl_handle_razwi(hdev, (u64)razwi_hi << 32 | razwi_lo, &eng_id, 1, + rd_wr_flag | HL_RAZWI_HBW, event_mask); + dev_err_ratelimited(hdev->dev, "%s-RAZWI SHARED RR HBW %s error, address %#llx, Initiator coordinates 0x%x\n", name, is_write ? "WR" : "RD", (u64)razwi_hi << 32 | razwi_lo, razwi_xy); @@ -7130,9 +7078,12 @@ static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev, static void gaudi2_razwi_rr_lbw_shared_printf_info(struct hl_device *hdev, u64 rtr_mstr_if_base_addr, bool is_write, char *name, - bool read_razwi_regs, struct hl_eq_razwi_info *razwi_info) + bool read_razwi_regs, struct hl_eq_razwi_info *razwi_info, + enum gaudi2_engine_id id, u64 *event_mask) { u32 razwi_addr, razwi_xy; + u16 eng_id = id; + u8 rd_wr_flag; if (is_write) { if (read_razwi_regs) { @@ -7143,9 +7094,7 @@ static void gaudi2_razwi_rr_lbw_shared_printf_info(struct hl_device *hdev, razwi_xy = le32_to_cpu(razwi_info->lbw.rr_aw_razwi_id_reg); } - dev_err_ratelimited(hdev->dev, - "%s-RAZWI SHARED RR LBW WR error, mstr_if 0x%llx, captured address 0x%x, Initiator coordinates 0x%x\n", - name, rtr_mstr_if_base_addr, razwi_addr, razwi_xy); + rd_wr_flag = HL_RAZWI_WRITE; } else { if (read_razwi_regs) { razwi_addr = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI); @@ -7155,9 +7104,57 @@ static void gaudi2_razwi_rr_lbw_shared_printf_info(struct hl_device *hdev, razwi_xy = le32_to_cpu(razwi_info->lbw.rr_ar_razwi_id_reg); } - dev_err_ratelimited(hdev->dev, - "%s-RAZWI SHARED RR LBW AR error, mstr_if 0x%llx, captured address 0x%x Initiator coordinates 0x%x\n", - name, rtr_mstr_if_base_addr, razwi_addr, razwi_xy); + rd_wr_flag = HL_RAZWI_READ; + } + + hl_handle_razwi(hdev, razwi_addr, &eng_id, 1, rd_wr_flag | HL_RAZWI_LBW, event_mask); + dev_err_ratelimited(hdev->dev, + "%s-RAZWI SHARED RR LBW %s error, mstr_if 0x%llx, captured address 0x%x Initiator coordinates 0x%x\n", + name, is_write ? 
"WR" : "RD", rtr_mstr_if_base_addr, razwi_addr, + razwi_xy); +} + +static enum gaudi2_engine_id gaudi2_razwi_calc_engine_id(struct hl_device *hdev, + enum razwi_event_sources module, u8 module_idx) +{ + switch (module) { + case RAZWI_TPC: + if (module_idx == (NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES)) + return GAUDI2_DCORE0_ENGINE_ID_TPC_6; + return (((module_idx / NUM_OF_TPC_PER_DCORE) * ENGINE_ID_DCORE_OFFSET) + + (module_idx % NUM_OF_TPC_PER_DCORE) + + (GAUDI2_DCORE0_ENGINE_ID_TPC_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0)); + + case RAZWI_MME: + return ((GAUDI2_DCORE0_ENGINE_ID_MME - GAUDI2_DCORE0_ENGINE_ID_EDMA_0) + + (module_idx * ENGINE_ID_DCORE_OFFSET)); + + case RAZWI_EDMA: + return (((module_idx / NUM_OF_EDMA_PER_DCORE) * ENGINE_ID_DCORE_OFFSET) + + (module_idx % NUM_OF_EDMA_PER_DCORE)); + + case RAZWI_PDMA: + return (GAUDI2_ENGINE_ID_PDMA_0 + module_idx); + + case RAZWI_NIC: + return (GAUDI2_ENGINE_ID_NIC0_0 + (NIC_NUMBER_OF_QM_PER_MACRO * module_idx)); + + case RAZWI_DEC: + if (module_idx == 8) + return GAUDI2_PCIE_ENGINE_ID_DEC_0; + + if (module_idx == 9) + return GAUDI2_PCIE_ENGINE_ID_DEC_1; + ; + return (((module_idx / NUM_OF_DEC_PER_DCORE) * ENGINE_ID_DCORE_OFFSET) + + (module_idx % NUM_OF_DEC_PER_DCORE) + + (GAUDI2_DCORE0_ENGINE_ID_DEC_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0)); + + case RAZWI_ROT: + return GAUDI2_ENGINE_ID_ROT_0 + module_idx; + + default: + return GAUDI2_ENGINE_ID_SIZE; } } @@ -7167,10 +7164,11 @@ static void gaudi2_razwi_rr_lbw_shared_printf_info(struct hl_device *hdev, */ static void gaudi2_ack_module_razwi_event_handler(struct hl_device *hdev, enum razwi_event_sources module, u8 module_idx, - u8 module_sub_idx, struct hl_eq_razwi_info *razwi_info) + u8 module_sub_idx, struct hl_eq_razwi_info *razwi_info, + u64 *event_mask) { bool via_sft = false, read_razwi_regs = false; - u32 rtr_id, dcore_id, dcore_rtr_id, sft_id; + u32 rtr_id, dcore_id, dcore_rtr_id, sft_id, eng_id; u64 rtr_mstr_if_base_addr; u32 hbw_shrd_aw = 0, hbw_shrd_ar = 0; u32 lbw_shrd_aw = 0, lbw_shrd_ar = 0; @@ -7304,9 +7302,11 @@ dump_info: if (!hbw_shrd_aw && !hbw_shrd_ar && !lbw_shrd_aw && !lbw_shrd_ar) return; + eng_id = gaudi2_razwi_calc_engine_id(hdev, module, module_idx); if (hbw_shrd_aw) { gaudi2_razwi_rr_hbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, true, - initiator_name, read_razwi_regs, razwi_info); + initiator_name, read_razwi_regs, razwi_info, + eng_id, event_mask); /* Clear event indication */ if (read_razwi_regs) @@ -7315,7 +7315,8 @@ dump_info: if (hbw_shrd_ar) { gaudi2_razwi_rr_hbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, false, - initiator_name, read_razwi_regs, razwi_info); + initiator_name, read_razwi_regs, razwi_info, + eng_id, event_mask); /* Clear event indication */ if (read_razwi_regs) @@ -7324,7 +7325,8 @@ dump_info: if (lbw_shrd_aw) { gaudi2_razwi_rr_lbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, true, - initiator_name, read_razwi_regs, razwi_info); + initiator_name, read_razwi_regs, razwi_info, + eng_id, event_mask); /* Clear event indication */ if (read_razwi_regs) @@ -7333,7 +7335,8 @@ dump_info: if (lbw_shrd_ar) { gaudi2_razwi_rr_lbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, false, - initiator_name, read_razwi_regs, razwi_info); + initiator_name, read_razwi_regs, razwi_info, + eng_id, event_mask); /* Clear event indication */ if (read_razwi_regs) @@ -7349,38 +7352,42 @@ static void gaudi2_check_if_razwi_happened(struct hl_device *hdev) /* check all TPCs */ for (mod_idx = 0 ; mod_idx < (NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1) ; mod_idx++) { if 
(prop->tpc_enabled_mask & BIT(mod_idx)) - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, mod_idx, 0, NULL); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, mod_idx, 0, NULL, + NULL); } /* check all MMEs */ for (mod_idx = 0 ; mod_idx < (NUM_OF_MME_PER_DCORE * NUM_OF_DCORES) ; mod_idx++) for (sub_mod = MME_WAP0 ; sub_mod < MME_INITIATORS_MAX ; sub_mod++) gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mod_idx, - sub_mod, NULL); + sub_mod, NULL, NULL); /* check all EDMAs */ for (mod_idx = 0 ; mod_idx < (NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES) ; mod_idx++) if (prop->edma_enabled_mask & BIT(mod_idx)) - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, mod_idx, 0, NULL); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, mod_idx, 0, NULL, + NULL); /* check all PDMAs */ for (mod_idx = 0 ; mod_idx < NUM_OF_PDMA ; mod_idx++) - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, mod_idx, 0, NULL); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, mod_idx, 0, NULL, + NULL); /* check all NICs */ for (mod_idx = 0 ; mod_idx < NIC_NUMBER_OF_PORTS ; mod_idx++) if (hdev->nic_ports_mask & BIT(mod_idx)) gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_NIC, mod_idx >> 1, 0, - NULL); + NULL, NULL); /* check all DECs */ for (mod_idx = 0 ; mod_idx < NUMBER_OF_DEC ; mod_idx++) if (prop->decoder_enabled_mask & BIT(mod_idx)) - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, mod_idx, 0, NULL); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, mod_idx, 0, NULL, + NULL); /* check all ROTs */ for (mod_idx = 0 ; mod_idx < NUM_OF_ROT ; mod_idx++) - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL, NULL); } static const char *gaudi2_get_initiators_name(u32 rtr_id) @@ -7455,25 +7462,176 @@ static const char *gaudi2_get_initiators_name(u32 rtr_id) } } +static u16 gaudi2_get_razwi_initiators(u32 rtr_id, u16 *engines) +{ + switch (rtr_id) { + case DCORE0_RTR0: + engines[0] = GAUDI2_DCORE0_ENGINE_ID_DEC_0; + engines[1] = GAUDI2_DCORE0_ENGINE_ID_DEC_1; + engines[2] = GAUDI2_PCIE_ENGINE_ID_DEC_0; + engines[3] = GAUDI2_PCIE_ENGINE_ID_DEC_1; + engines[4] = GAUDI2_DCORE0_ENGINE_ID_TPC_6; + engines[5] = GAUDI2_ENGINE_ID_PDMA_0; + engines[6] = GAUDI2_ENGINE_ID_PDMA_1; + engines[7] = GAUDI2_ENGINE_ID_PCIE; + engines[8] = GAUDI2_DCORE0_ENGINE_ID_EDMA_0; + engines[9] = GAUDI2_DCORE1_ENGINE_ID_EDMA_0; + engines[10] = GAUDI2_ENGINE_ID_PSOC; + return 11; + + case DCORE0_RTR1: + engines[0] = GAUDI2_DCORE0_ENGINE_ID_TPC_0; + engines[1] = GAUDI2_DCORE0_ENGINE_ID_TPC_1; + return 2; + + case DCORE0_RTR2: + engines[0] = GAUDI2_DCORE0_ENGINE_ID_TPC_2; + engines[1] = GAUDI2_DCORE0_ENGINE_ID_TPC_3; + return 2; + + case DCORE0_RTR3: + engines[0] = GAUDI2_DCORE0_ENGINE_ID_TPC_4; + engines[1] = GAUDI2_DCORE0_ENGINE_ID_TPC_5; + return 2; + + case DCORE0_RTR4: + case DCORE0_RTR5: + case DCORE0_RTR6: + case DCORE0_RTR7: + engines[0] = GAUDI2_DCORE0_ENGINE_ID_MME; + return 1; + + case DCORE1_RTR0: + case DCORE1_RTR1: + case DCORE1_RTR2: + case DCORE1_RTR3: + engines[0] = GAUDI2_DCORE1_ENGINE_ID_MME; + return 1; + + case DCORE1_RTR4: + engines[0] = GAUDI2_DCORE1_ENGINE_ID_TPC_4; + engines[1] = GAUDI2_DCORE1_ENGINE_ID_TPC_5; + return 2; + + case DCORE1_RTR5: + engines[0] = GAUDI2_DCORE1_ENGINE_ID_TPC_2; + engines[1] = GAUDI2_DCORE1_ENGINE_ID_TPC_3; + return 2; + + case DCORE1_RTR6: + engines[0] = GAUDI2_DCORE1_ENGINE_ID_TPC_0; + engines[1] = GAUDI2_DCORE1_ENGINE_ID_TPC_1; + return 2; + + case 
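Editor's note: gaudi2_razwi_calc_engine_id() above maps a flat module index onto the engine-id enum with dcore-block arithmetic: divide by the per-dcore count to pick the dcore stride, take the remainder for the slot within the dcore, then add the block's base offset. A runnable sketch of the same arithmetic with made-up constants (4 dcores, 6 TPCs each, strides chosen for the demo; the real values come from the gaudi2 engine-id enum):

#include <stdio.h>

#define NUM_OF_TPC_PER_DCORE	6
#define DCORE_OFF		16	/* assumption for the demo */
#define TPC_BASE		4	/* assumption for the demo */

static int tpc_engine_id(int module_idx)
{
	return (module_idx / NUM_OF_TPC_PER_DCORE) * DCORE_OFF +	/* dcore block */
	       (module_idx % NUM_OF_TPC_PER_DCORE) +			/* slot in dcore */
	       TPC_BASE;						/* block base */
}

int main(void)
{
	/* TPC 8 = dcore 1, slot 2 -> 16 + 2 + 4 = 22 */
	printf("engine id %d\n", tpc_engine_id(8));
	return 0;
}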
DCORE1_RTR7: + engines[0] = GAUDI2_DCORE1_ENGINE_ID_DEC_0; + engines[1] = GAUDI2_DCORE1_ENGINE_ID_DEC_1; + engines[2] = GAUDI2_ENGINE_ID_NIC0_0; + engines[3] = GAUDI2_ENGINE_ID_NIC1_0; + engines[4] = GAUDI2_ENGINE_ID_NIC2_0; + engines[5] = GAUDI2_ENGINE_ID_NIC3_0; + engines[6] = GAUDI2_ENGINE_ID_NIC4_0; + engines[7] = GAUDI2_ENGINE_ID_ARC_FARM; + engines[8] = GAUDI2_ENGINE_ID_KDMA; + engines[9] = GAUDI2_DCORE0_ENGINE_ID_EDMA_1; + engines[10] = GAUDI2_DCORE1_ENGINE_ID_EDMA_1; + return 11; + + case DCORE2_RTR0: + engines[0] = GAUDI2_DCORE2_ENGINE_ID_DEC_0; + engines[1] = GAUDI2_DCORE2_ENGINE_ID_DEC_1; + engines[2] = GAUDI2_ENGINE_ID_NIC5_0; + engines[3] = GAUDI2_ENGINE_ID_NIC6_0; + engines[4] = GAUDI2_ENGINE_ID_NIC7_0; + engines[5] = GAUDI2_ENGINE_ID_NIC8_0; + engines[6] = GAUDI2_DCORE2_ENGINE_ID_EDMA_0; + engines[7] = GAUDI2_DCORE3_ENGINE_ID_EDMA_0; + engines[8] = GAUDI2_ENGINE_ID_ROT_0; + return 9; + + case DCORE2_RTR1: + engines[0] = GAUDI2_DCORE2_ENGINE_ID_TPC_4; + engines[1] = GAUDI2_DCORE2_ENGINE_ID_TPC_5; + return 2; + + case DCORE2_RTR2: + engines[0] = GAUDI2_DCORE2_ENGINE_ID_TPC_2; + engines[1] = GAUDI2_DCORE2_ENGINE_ID_TPC_3; + return 2; + + case DCORE2_RTR3: + engines[0] = GAUDI2_DCORE2_ENGINE_ID_TPC_0; + engines[1] = GAUDI2_DCORE2_ENGINE_ID_TPC_1; + return 2; + + case DCORE2_RTR4: + case DCORE2_RTR5: + case DCORE2_RTR6: + case DCORE2_RTR7: + engines[0] = GAUDI2_DCORE2_ENGINE_ID_MME; + return 1; + case DCORE3_RTR0: + case DCORE3_RTR1: + case DCORE3_RTR2: + case DCORE3_RTR3: + engines[0] = GAUDI2_DCORE3_ENGINE_ID_MME; + return 1; + case DCORE3_RTR4: + engines[0] = GAUDI2_DCORE3_ENGINE_ID_TPC_0; + engines[1] = GAUDI2_DCORE3_ENGINE_ID_TPC_1; + return 2; + case DCORE3_RTR5: + engines[0] = GAUDI2_DCORE3_ENGINE_ID_TPC_2; + engines[1] = GAUDI2_DCORE3_ENGINE_ID_TPC_3; + return 2; + case DCORE3_RTR6: + engines[0] = GAUDI2_DCORE3_ENGINE_ID_TPC_4; + engines[1] = GAUDI2_DCORE3_ENGINE_ID_TPC_5; + return 2; + case DCORE3_RTR7: + engines[0] = GAUDI2_DCORE3_ENGINE_ID_DEC_0; + engines[1] = GAUDI2_DCORE3_ENGINE_ID_DEC_1; + engines[2] = GAUDI2_ENGINE_ID_NIC9_0; + engines[3] = GAUDI2_ENGINE_ID_NIC10_0; + engines[4] = GAUDI2_ENGINE_ID_NIC11_0; + engines[5] = GAUDI2_DCORE2_ENGINE_ID_EDMA_1; + engines[6] = GAUDI2_DCORE3_ENGINE_ID_EDMA_1; + engines[7] = GAUDI2_ENGINE_ID_ROT_1; + engines[8] = GAUDI2_ENGINE_ID_ROT_0; + return 9; + default: + return 0; + } +} + static void gaudi2_razwi_unmapped_addr_hbw_printf_info(struct hl_device *hdev, u32 rtr_id, - u64 rtr_ctrl_base_addr, bool is_write) + u64 rtr_ctrl_base_addr, bool is_write, + u64 *event_mask) { + u16 engines[HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR], num_of_eng; u32 razwi_hi, razwi_lo; + u8 rd_wr_flag; + + num_of_eng = gaudi2_get_razwi_initiators(rtr_id, &engines[0]); if (is_write) { razwi_hi = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_ADDR_HI); razwi_lo = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_ADDR_LO); + rd_wr_flag = HL_RAZWI_WRITE; /* Clear set indication */ WREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_SET, 0x1); } else { razwi_hi = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_ADDR_HI); razwi_lo = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_ADDR_LO); + rd_wr_flag = HL_RAZWI_READ; /* Clear set indication */ WREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_SET, 0x1); } + hl_handle_razwi(hdev, (u64)razwi_hi << 32 | razwi_lo, &engines[0], num_of_eng, + rd_wr_flag | HL_RAZWI_HBW, event_mask); dev_err_ratelimited(hdev->dev, "RAZWI PSOC unmapped HBW %s error, rtr id %u, address %#llx\n", is_write ? 
"WR" : "RD", rtr_id, (u64)razwi_hi << 32 | razwi_lo); @@ -7483,22 +7641,31 @@ static void gaudi2_razwi_unmapped_addr_hbw_printf_info(struct hl_device *hdev, u } static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev, u32 rtr_id, - u64 rtr_ctrl_base_addr, bool is_write) + u64 rtr_ctrl_base_addr, bool is_write, + u64 *event_mask) { + u16 engines[HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR], num_of_eng; u32 razwi_addr; + u8 rd_wr_flag; + + num_of_eng = gaudi2_get_razwi_initiators(rtr_id, &engines[0]); if (is_write) { razwi_addr = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_ADDR); + rd_wr_flag = HL_RAZWI_WRITE; /* Clear set indication */ WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_SET, 0x1); } else { razwi_addr = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_ADDR); + rd_wr_flag = HL_RAZWI_READ; /* Clear set indication */ WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_SET, 0x1); } + hl_handle_razwi(hdev, razwi_addr, &engines[0], num_of_eng, rd_wr_flag | HL_RAZWI_LBW, + event_mask); dev_err_ratelimited(hdev->dev, "RAZWI PSOC unmapped LBW %s error, rtr id %u, address %#x\n", is_write ? "WR" : "RD", rtr_id, razwi_addr); @@ -7508,7 +7675,7 @@ static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev, u } /* PSOC RAZWI interrupt occurs only when trying to access a bad address */ -static void gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev) +static void gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev, u64 *event_mask) { u32 hbw_aw_set, hbw_ar_set, lbw_aw_set, lbw_ar_set, rtr_id, dcore_id, dcore_rtr_id, xy, razwi_mask_info, razwi_intr = 0; @@ -7562,19 +7729,19 @@ static void gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev) if (hbw_aw_set) gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_id, - rtr_ctrl_base_addr, true); + rtr_ctrl_base_addr, true, event_mask); if (hbw_ar_set) gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_id, - rtr_ctrl_base_addr, false); + rtr_ctrl_base_addr, false, event_mask); if (lbw_aw_set) gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_id, - rtr_ctrl_base_addr, true); + rtr_ctrl_base_addr, true, event_mask); if (lbw_ar_set) gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_id, - rtr_ctrl_base_addr, false); + rtr_ctrl_base_addr, false, event_mask); clear: /* Clear Interrupts only on pldm or if f/w doesn't handle interrupts */ @@ -7600,8 +7767,9 @@ static void _gaudi2_handle_qm_sei_err(struct hl_device *hdev, u64 qman_base) } static void gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type, - struct hl_eq_razwi_info *razwi_info) + struct hl_eq_razwi_info *razwi_info, u64 *event_mask) { + enum razwi_event_sources module; u64 qman_base; u8 index; @@ -7611,9 +7779,11 @@ static void gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type, qman_base = mmDCORE0_TPC0_QM_BASE + (index / NUM_OF_TPC_PER_DCORE) * DCORE_OFFSET + (index % NUM_OF_TPC_PER_DCORE) * DCORE_TPC_OFFSET; + module = RAZWI_TPC; break; case GAUDI2_EVENT_TPC24_AXI_ERR_RSP: qman_base = mmDCORE0_TPC6_QM_BASE; + module = RAZWI_TPC; break; case GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE: case GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE: @@ -7623,16 +7793,19 @@ static void gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type, (GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE - GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE); qman_base = mmDCORE0_MME_QM_BASE + index * DCORE_OFFSET; + module = RAZWI_MME; break; case GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP: case GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP: index = event_type - 
GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP; qman_base = mmPDMA0_QM_BASE + index * PDMA_OFFSET; + module = RAZWI_PDMA; break; case GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE: case GAUDI2_EVENT_ROTATOR1_AXI_ERROR_RESPONSE: index = event_type - GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE; qman_base = mmROT0_QM_BASE + index * ROT_OFFSET; + module = RAZWI_ROT; break; default: return; @@ -7647,7 +7820,7 @@ static void gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type, /* check if RAZWI happened */ if (razwi_info) - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, 0, 0, razwi_info); + gaudi2_ack_module_razwi_event_handler(hdev, module, 0, 0, razwi_info, event_mask); } static void gaudi2_handle_qman_err(struct hl_device *hdev, u16 event_type) @@ -7813,7 +7986,8 @@ static void gaudi2_handle_cpu_sei_err(struct hl_device *hdev) } static void gaudi2_handle_rot_err(struct hl_device *hdev, u8 rot_index, - struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause) + struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause, + u64 *event_mask) { u64 intr_cause_data = le64_to_cpu(razwi_with_intr_cause->intr_cause.intr_cause_data); int i; @@ -7825,11 +7999,12 @@ static void gaudi2_handle_rot_err(struct hl_device *hdev, u8 rot_index, /* check if RAZWI happened */ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, rot_index, 0, - &razwi_with_intr_cause->razwi_info); + &razwi_with_intr_cause->razwi_info, event_mask); } static void gaudi2_tpc_ack_interrupts(struct hl_device *hdev, u8 tpc_index, char *interrupt_name, - struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause) + struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause, + u64 *event_mask) { u64 intr_cause_data = le64_to_cpu(razwi_with_intr_cause->intr_cause.intr_cause_data); int i; @@ -7841,11 +8016,11 @@ static void gaudi2_tpc_ack_interrupts(struct hl_device *hdev, u8 tpc_index, char /* check if RAZWI happened */ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, tpc_index, 0, - &razwi_with_intr_cause->razwi_info); + &razwi_with_intr_cause->razwi_info, event_mask); } static void gaudi2_handle_dec_err(struct hl_device *hdev, u8 dec_index, const char *interrupt_name, - struct hl_eq_razwi_info *razwi_info) + struct hl_eq_razwi_info *razwi_info, u64 *event_mask) { u32 sts_addr, sts_val, sts_clr_val = 0; int i; @@ -7871,14 +8046,15 @@ static void gaudi2_handle_dec_err(struct hl_device *hdev, u8 dec_index, const ch } /* check if RAZWI happened */ - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, dec_index, 0, razwi_info); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, dec_index, 0, razwi_info, + event_mask); /* Write 1 clear errors */ WREG32(sts_addr, sts_clr_val); } static void gaudi2_handle_mme_err(struct hl_device *hdev, u8 mme_index, const char *interrupt_name, - struct hl_eq_razwi_info *razwi_info) + struct hl_eq_razwi_info *razwi_info, u64 *event_mask) { u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0; int i; @@ -7898,7 +8074,8 @@ static void gaudi2_handle_mme_err(struct hl_device *hdev, u8 mme_index, const ch /* check if RAZWI happened */ for (i = MME_WRITE ; i < MME_INITIATORS_MAX ; i++) - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, i, razwi_info); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, i, razwi_info, + event_mask); WREG32(sts_clr_addr, sts_clr_val); } @@ -7915,7 +8092,7 @@ static void gaudi2_handle_mme_sbte_err(struct hl_device *hdev, u8 mme_index, u8 } static void gaudi2_handle_mme_wap_err(struct hl_device *hdev, u8 mme_index, - struct hl_eq_razwi_info 
*razwi_info) + struct hl_eq_razwi_info *razwi_info, u64 *event_mask) { u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0; int i; @@ -7935,8 +8112,10 @@ static void gaudi2_handle_mme_wap_err(struct hl_device *hdev, u8 mme_index, } /* check if RAZWI happened on WAP0/1 */ - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP0, razwi_info); - gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP1, razwi_info); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP0, razwi_info, + event_mask); + gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP1, razwi_info, + event_mask); WREG32(sts_clr_addr, sts_clr_val); } @@ -7966,40 +8145,41 @@ static void gaudi2_handle_dma_core_event(struct hl_device *hdev, u64 intr_cause_ gaudi2_dma_core_interrupts_cause[i]); } -static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev) +static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev, u64 *event_mask) { u32 mstr_if_base_addr = mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_HBW_BASE, razwi_happened_addr; razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED; if (RREG32(razwi_happened_addr)) { gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE", true, - NULL); + NULL, GAUDI2_ENGINE_ID_PCIE, event_mask); WREG32(razwi_happened_addr, 0x1); } razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED; if (RREG32(razwi_happened_addr)) { gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE", true, - NULL); + NULL, GAUDI2_ENGINE_ID_PCIE, event_mask); WREG32(razwi_happened_addr, 0x1); } razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED; if (RREG32(razwi_happened_addr)) { gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE", true, - NULL); + NULL, GAUDI2_ENGINE_ID_PCIE, event_mask); WREG32(razwi_happened_addr, 0x1); } razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED; if (RREG32(razwi_happened_addr)) { gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE", true, - NULL); + NULL, GAUDI2_ENGINE_ID_PCIE, event_mask); WREG32(razwi_happened_addr, 0x1); } } -static void gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u64 intr_cause_data) +static void gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u64 intr_cause_data, + u64 *event_mask) { int i; @@ -8014,7 +8194,7 @@ static void gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u64 intr_cau case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK: break; case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK: - gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev); + gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev, event_mask); break; } } @@ -8047,7 +8227,8 @@ static void gaudi2_handle_hif_fatal(struct hl_device *hdev, u16 event_type, u64 } } -static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool is_pmmu) +static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool is_pmmu, + u64 *event_mask) { u32 valid, val; u64 addr; @@ -8064,6 +8245,7 @@ static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool dev_err_ratelimited(hdev->dev, "%s page fault on va 0x%llx\n", is_pmmu ? 
"PMMU" : "HMMU", addr); + hl_handle_page_fault(hdev, addr, 0, is_pmmu, event_mask); WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE), 0); } @@ -8089,7 +8271,7 @@ static void gaudi2_handle_access_error(struct hl_device *hdev, u64 mmu_base, boo } static void gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, const char *mmu_name, - u64 mmu_base, bool is_pmmu) + u64 mmu_base, bool is_pmmu, u64 *event_mask) { u32 spi_sei_cause, interrupt_clr = 0x0; int i; @@ -8102,7 +8284,7 @@ static void gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, const char mmu_name, gaudi2_mmu_spi_sei[i].cause); if (i == 0) - gaudi2_handle_page_error(hdev, mmu_base, is_pmmu); + gaudi2_handle_page_error(hdev, mmu_base, is_pmmu, event_mask); else if (i == 1) gaudi2_handle_access_error(hdev, mmu_base, is_pmmu); @@ -8118,11 +8300,10 @@ static void gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, const char WREG32(mmu_base + MMU_INTERRUPT_CLR_OFFSET, interrupt_clr); } -static bool gaudi2_handle_sm_err(struct hl_device *hdev, u8 sm_index) +static void gaudi2_handle_sm_err(struct hl_device *hdev, u8 sm_index) { u32 sei_cause_addr, sei_cause_val, sei_cause_cause, sei_cause_log; u32 cq_intr_addr, cq_intr_val, cq_intr_queue_index; - bool reset = true; int i; sei_cause_addr = mmDCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE + DCORE_OFFSET * sm_index; @@ -8147,10 +8328,6 @@ static bool gaudi2_handle_sm_err(struct hl_device *hdev, u8 sm_index) gaudi2_sm_sei_cause[i].cause_name, gaudi2_sm_sei_cause[i].log_name, sei_cause_log & gaudi2_sm_sei_cause[i].log_mask); - - /* Due to a potential H/W issue, do not reset upon BRESP errors */ - if (i == 2) - reset = false; break; } @@ -8170,11 +8347,9 @@ static bool gaudi2_handle_sm_err(struct hl_device *hdev, u8 sm_index) /* Clear CQ_INTR */ WREG32(cq_intr_addr, 0); } - - return reset; } -static void gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type) +static void gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type, u64 *event_mask) { bool is_pmmu = false; char desc[32]; @@ -8232,7 +8407,7 @@ static void gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type return; } - gaudi2_handle_mmu_spi_sei_generic(hdev, desc, mmu_base, is_pmmu); + gaudi2_handle_mmu_spi_sei_generic(hdev, desc, mmu_base, is_pmmu, event_mask); } @@ -8476,8 +8651,8 @@ static void gaudi2_print_out_of_sync_info(struct hl_device *hdev, { struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ]; - dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n", - sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci)); + dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n", + le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci)); } static void gaudi2_handle_pcie_p2p_msix(struct hl_device *hdev) @@ -8543,8 +8718,8 @@ static void gaudi2_print_cpu_pkt_failure_info(struct hl_device *hdev, struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ]; dev_warn(hdev->dev, - "FW reported sanity check failure, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n", - sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci)); + "FW reported sanity check failure, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n", + le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci)); } static void hl_arc_event_handle(struct hl_device *hdev, @@ -8573,9 +8748,9 @@ static void hl_arc_event_handle(struct hl_device *hdev, static void gaudi2_handle_eqe(struct hl_device *hdev, struct 
hl_eq_entry *eq_entry) { - u32 ctl, reset_flags = HL_DRV_RESET_HARD | HL_DRV_RESET_DELAY; struct gaudi2_device *gaudi2 = hdev->asic_specific; - bool reset_required = false, skip_reset = false; + bool reset_required = false, is_critical = false; + u32 ctl, reset_flags = HL_DRV_RESET_HARD; int index, sbte_index; u64 event_mask = 0; u16 event_type; @@ -8601,6 +8776,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; reset_required = gaudi2_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); + is_critical = eq_entry->ecc_data.is_critical; break; case GAUDI2_EVENT_TPC0_QM ... GAUDI2_EVENT_PDMA1_QM: @@ -8626,29 +8802,30 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent case GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP: case GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP: reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; - gaudi2_handle_qm_sei_err(hdev, event_type, &eq_entry->razwi_info); + gaudi2_handle_qm_sei_err(hdev, event_type, &eq_entry->razwi_info, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; case GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE: case GAUDI2_EVENT_ROTATOR1_AXI_ERROR_RESPONSE: index = event_type - GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE; - gaudi2_handle_rot_err(hdev, index, &eq_entry->razwi_with_intr_cause); - gaudi2_handle_qm_sei_err(hdev, event_type, NULL); + gaudi2_handle_rot_err(hdev, index, &eq_entry->razwi_with_intr_cause, &event_mask); + gaudi2_handle_qm_sei_err(hdev, event_type, NULL, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; case GAUDI2_EVENT_TPC0_AXI_ERR_RSP ... GAUDI2_EVENT_TPC24_AXI_ERR_RSP: index = event_type - GAUDI2_EVENT_TPC0_AXI_ERR_RSP; gaudi2_tpc_ack_interrupts(hdev, index, "AXI_ERR_RSP", - &eq_entry->razwi_with_intr_cause); - gaudi2_handle_qm_sei_err(hdev, event_type, NULL); + &eq_entry->razwi_with_intr_cause, &event_mask); + gaudi2_handle_qm_sei_err(hdev, event_type, NULL, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; case GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE ... 
GAUDI2_EVENT_DEC9_AXI_ERR_RSPONSE: index = event_type - GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE; - gaudi2_handle_dec_err(hdev, index, "AXI_ERR_RESPONSE", &eq_entry->razwi_info); + gaudi2_handle_dec_err(hdev, index, "AXI_ERR_RESPONSE", &eq_entry->razwi_info, + &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8679,7 +8856,8 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent case GAUDI2_EVENT_TPC24_KERNEL_ERR: index = (event_type - GAUDI2_EVENT_TPC0_KERNEL_ERR) / (GAUDI2_EVENT_TPC1_KERNEL_ERR - GAUDI2_EVENT_TPC0_KERNEL_ERR); - gaudi2_tpc_ack_interrupts(hdev, index, "KRN_ERR", &eq_entry->razwi_with_intr_cause); + gaudi2_tpc_ack_interrupts(hdev, index, "KRN_ERR", &eq_entry->razwi_with_intr_cause, + &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8695,7 +8873,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent case GAUDI2_EVENT_DEC9_SPI: index = (event_type - GAUDI2_EVENT_DEC0_SPI) / (GAUDI2_EVENT_DEC1_SPI - GAUDI2_EVENT_DEC0_SPI); - gaudi2_handle_dec_err(hdev, index, "SPI", &eq_entry->razwi_info); + gaudi2_handle_dec_err(hdev, index, "SPI", &eq_entry->razwi_info, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8707,8 +8885,8 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent (GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE - GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE); gaudi2_handle_mme_err(hdev, index, - "CTRL_AXI_ERROR_RESPONSE", &eq_entry->razwi_info); - gaudi2_handle_qm_sei_err(hdev, event_type, NULL); + "CTRL_AXI_ERROR_RESPONSE", &eq_entry->razwi_info, &event_mask); + gaudi2_handle_qm_sei_err(hdev, event_type, NULL, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8719,7 +8897,8 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent index = (event_type - GAUDI2_EVENT_MME0_QMAN_SW_ERROR) / (GAUDI2_EVENT_MME1_QMAN_SW_ERROR - GAUDI2_EVENT_MME0_QMAN_SW_ERROR); - gaudi2_handle_mme_err(hdev, index, "QMAN_SW_ERROR", &eq_entry->razwi_info); + gaudi2_handle_mme_err(hdev, index, "QMAN_SW_ERROR", &eq_entry->razwi_info, + &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8730,7 +8909,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent index = (event_type - GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID) / (GAUDI2_EVENT_MME1_WAP_SOURCE_RESULT_INVALID - GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID); - gaudi2_handle_mme_wap_err(hdev, index, &eq_entry->razwi_info); + gaudi2_handle_mme_wap_err(hdev, index, &eq_entry->razwi_info, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8749,7 +8928,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent case GAUDI2_EVENT_PCIE_ADDR_DEC_ERR: gaudi2_print_pcie_addr_dec_info(hdev, - le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); + le64_to_cpu(eq_entry->intr_cause.intr_cause_data), &event_mask); reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; break; @@ -8758,7 +8937,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP: case GAUDI2_EVENT_PMMU0_PAGE_FAULT_WR_PERM ... 
GAUDI2_EVENT_PMMU0_SECURITY_ERROR: case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0: - gaudi2_handle_mmu_spi_sei_err(hdev, event_type); + gaudi2_handle_mmu_spi_sei_err(hdev, event_type, &event_mask); reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8778,7 +8957,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent break; case GAUDI2_EVENT_PSOC63_RAZWI_OR_PID_MIN_MAX_INTERRUPT: - gaudi2_ack_psoc_razwi_event_handler(hdev); + gaudi2_ack_psoc_razwi_event_handler(hdev, &event_mask); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8927,7 +9106,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent case GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE ... GAUDI2_EVENT_SM3_AXI_ERROR_RESPONSE: index = event_type - GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE; - skip_reset = !gaudi2_handle_sm_err(hdev, index); + gaudi2_handle_sm_err(hdev, index); event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; @@ -8956,13 +9135,20 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; break; + case GAUDI2_EVENT_CPU_FP32_NOT_SUPPORTED: + event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; + is_critical = true; + break; + default: if (gaudi2_irq_map_table[event_type].valid) dev_err_ratelimited(hdev->dev, "Cannot find handler for event %d\n", event_type); } - if ((gaudi2_irq_map_table[event_type].reset || reset_required) && !skip_reset) + if ((gaudi2_irq_map_table[event_type].reset || reset_required) && + (hdev->hard_reset_on_fw_events || + (hdev->asic_prop.fw_security_enabled && is_critical))) goto reset_device; /* Send unmask irq only for interrupts not classified as MSG */ @@ -8975,46 +9161,84 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent return; reset_device: - if (hdev->hard_reset_on_fw_events) { - hl_device_reset(hdev, reset_flags); - event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET; + if (hdev->asic_prop.fw_security_enabled && is_critical) { + reset_flags |= HL_DRV_RESET_BYPASS_REQ_TO_FW; + event_mask |= HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE; } else { - if (!gaudi2_irq_map_table[event_type].msg) - hl_fw_unmask_irq(hdev, event_type); + reset_flags |= HL_DRV_RESET_DELAY; } + event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET; + hl_device_cond_reset(hdev, reset_flags, event_mask); +} - if (event_mask) - hl_notifier_event_send_all(hdev, event_mask); +static int gaudi2_memset_memory_chunk_using_edma_qm(struct hl_device *hdev, + struct packet_lin_dma *lin_dma_pkt, dma_addr_t pkt_dma_addr, + u32 hw_queue_id, u32 size, u64 addr, u32 val) +{ + u32 ctl, pkt_size; + int rc = 0; + + ctl = FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA); + ctl |= FIELD_PREP(GAUDI2_PKT_LIN_DMA_CTL_MEMSET_MASK, 1); + ctl |= FIELD_PREP(GAUDI2_PKT_LIN_DMA_CTL_WRCOMP_MASK, 1); + ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 1); + + lin_dma_pkt->ctl = cpu_to_le32(ctl); + lin_dma_pkt->src_addr = cpu_to_le64(val); + lin_dma_pkt->dst_addr = cpu_to_le64(addr); + lin_dma_pkt->tsize = cpu_to_le32(size); + + pkt_size = sizeof(struct packet_lin_dma); + + rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, pkt_dma_addr); + if (rc) + dev_err(hdev->dev, "Failed to send lin dma packet to H/W queue %d\n", + hw_queue_id); + + return rc; } static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, u64 val) { - struct asic_fixed_properties *prop = &hdev->asic_prop; + u32 edma_queues_id[] = {GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0, + 
GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0, + GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0, + GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0}; + u32 chunk_size, dcore, edma_idx, sob_offset, sob_addr, comp_val, + old_mmubp, mmubp, num_of_pkts, busy, pkt_size; u64 comp_addr, cur_addr = addr, end_addr = addr + size; - u32 chunk_size, busy, dcore, edma_idx, sob_offset, sob_addr, comp_val, edma_commit; - u32 old_mmubp, mmubp; - int rc = 0; + struct asic_fixed_properties *prop = &hdev->asic_prop; + void *lin_dma_pkts_arr; + dma_addr_t pkt_dma_addr; + int rc = 0, dma_num = 0; + + if (prop->edma_enabled_mask == 0) { + dev_info(hdev->dev, "non of the EDMA engines is enabled - skip dram scrubbing\n"); + return -EIO; + } sob_offset = hdev->asic_prop.first_available_user_sob[0] * 4; sob_addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset; comp_addr = CFG_BASE + sob_addr; comp_val = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1) | FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1); - - edma_commit = FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_LIN_MASK, 1) | - FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_MEM_SET_MASK, 1) | - FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_WR_COMP_EN_MASK, 1); mmubp = FIELD_PREP(ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_WR_MASK, 1) | FIELD_PREP(ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_RD_MASK, 1); - if (prop->edma_enabled_mask == 0) { - dev_info(hdev->dev, "non of the EDMA engines is enabled - skip dram scrubbing\n"); - return -EIO; - } + /* Calculate how many lin dma pkts we'll need */ + num_of_pkts = div64_u64(round_up(size, SZ_2G), SZ_2G); + pkt_size = sizeof(struct packet_lin_dma); + + lin_dma_pkts_arr = hl_asic_dma_alloc_coherent(hdev, pkt_size * num_of_pkts, + &pkt_dma_addr, GFP_KERNEL); + if (!lin_dma_pkts_arr) + return -ENOMEM; /* * set mmu bypass for the scrubbing - all ddmas are configured the same so save * only the first one to restore later + * also set the sob addr for all edma cores for completion. + * set QM as trusted to allow it to access physical address with MMU bp. 
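Editor's note: the rewritten scrubber above preallocates one packet_lin_dma per 2 GB chunk, with num_of_pkts = div64_u64(round_up(size, SZ_2G), SZ_2G). That pairing is just a ceiling division, needed because each LIN_DMA transfer is capped at SZ_2G. A quick check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

#define SZ_2G (2ULL << 30)

/* ceil(size / SZ_2G): what round_up() + div64_u64() compute above */
static uint64_t lin_dma_pkt_count(uint64_t size)
{
	return (size + SZ_2G - 1) / SZ_2G;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)lin_dma_pkt_count(1ULL << 30)); /* 1 */
	printf("%llu\n", (unsigned long long)lin_dma_pkt_count(5ULL << 30)); /* 3 */
	return 0;
}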
*/ old_mmubp = RREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP); for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { @@ -9027,17 +9251,22 @@ static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 siz WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP + edma_offset, mmubp); + WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_LO + edma_offset, + lower_32_bits(comp_addr)); + WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_HI + edma_offset, + upper_32_bits(comp_addr)); + WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_WDATA + edma_offset, + comp_val); + gaudi2_qman_set_test_mode(hdev, + edma_queues_id[dcore] + 4 * edma_idx, true); } } - while (cur_addr < end_addr) { - int dma_num = 0; + WREG32(sob_addr, 0); - WREG32(sob_addr, 0); + while (cur_addr < end_addr) { for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) { - u32 edma_offset = dcore * DCORE_OFFSET + - edma_idx * DCORE_EDMA_OFFSET; u32 edma_bit = dcore * NUM_OF_EDMA_PER_DCORE + edma_idx; if (!(prop->edma_enabled_mask & BIT(edma_bit))) @@ -9045,41 +9274,26 @@ static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 siz chunk_size = min_t(u64, SZ_2G, end_addr - cur_addr); - WREG32(mmDCORE0_EDMA0_CORE_CTX_SRC_BASE_LO + edma_offset, - lower_32_bits(val)); - WREG32(mmDCORE0_EDMA0_CORE_CTX_SRC_BASE_HI + edma_offset, - upper_32_bits(val)); - - WREG32(mmDCORE0_EDMA0_CORE_CTX_DST_BASE_LO + edma_offset, - lower_32_bits(cur_addr)); - WREG32(mmDCORE0_EDMA0_CORE_CTX_DST_BASE_HI + edma_offset, - upper_32_bits(cur_addr)); - - WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_LO + edma_offset, - lower_32_bits(comp_addr)); - WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_HI + edma_offset, - upper_32_bits(comp_addr)); - WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_WDATA + edma_offset, - comp_val); - - WREG32(mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_0 + edma_offset, - chunk_size); - WREG32(mmDCORE0_EDMA0_CORE_CTX_COMMIT + edma_offset, edma_commit); + rc = gaudi2_memset_memory_chunk_using_edma_qm(hdev, + (struct packet_lin_dma *)lin_dma_pkts_arr + dma_num, + pkt_dma_addr + dma_num * pkt_size, + edma_queues_id[dcore] + edma_idx * 4, + chunk_size, cur_addr, val); + if (rc) + goto end; dma_num++; - cur_addr += chunk_size; - if (cur_addr == end_addr) - goto poll; + break; } } -poll: - rc = hl_poll_timeout(hdev, sob_addr, busy, (busy == dma_num), 1000, 1000000); - if (rc) { - dev_err(hdev->dev, "DMA Timeout during HBM scrubbing\n"); - goto end; - } + } + + rc = hl_poll_timeout(hdev, sob_addr, busy, (busy == dma_num), 1000, 1000000); + if (rc) { + dev_err(hdev->dev, "DMA Timeout during HBM scrubbing\n"); + goto end; } end: for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { @@ -9091,10 +9305,17 @@ end: continue; WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP + edma_offset, old_mmubp); + WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_LO + edma_offset, 0); + WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_HI + edma_offset, 0); + WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_WDATA + edma_offset, 0); + gaudi2_qman_set_test_mode(hdev, + edma_queues_id[dcore] + 4 * edma_idx, false); } } WREG32(sob_addr, 0); + hl_asic_dma_free_coherent(hdev, pkt_size * num_of_pkts, lin_dma_pkts_arr, pkt_dma_addr); + return rc; } @@ -9165,6 +9386,7 @@ static void gaudi2_restore_user_sm_registers(struct hl_device *hdev) gaudi2_memset_device_lbw(hdev, cq_lbw_data_addr, size, 0); gaudi2_memset_device_lbw(hdev, cq_base_l_addr, size, 0); gaudi2_memset_device_lbw(hdev, cq_base_h_addr, size, 0); + gaudi2_memset_device_lbw(hdev, cq_size_addr, 
size, 0); cq_lbw_l_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0 + DCORE_OFFSET; cq_lbw_h_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0 + DCORE_OFFSET; @@ -9990,7 +10212,7 @@ static void gaudi2_ack_mmu_error(struct hl_device *hdev, u64 mmu_id) if (gaudi2_get_mmu_base(hdev, mmu_id, &mmu_base)) return; - gaudi2_handle_page_error(hdev, mmu_base, is_pmmu); + gaudi2_handle_page_error(hdev, mmu_base, is_pmmu, NULL); gaudi2_handle_access_error(hdev, mmu_base, is_pmmu); } @@ -10141,10 +10363,9 @@ int gaudi2_send_device_activity(struct hl_device *hdev, bool open) { struct gaudi2_device *gaudi2 = hdev->asic_specific; - if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q) || hdev->fw_major_version < 37) + if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q)) return 0; - /* TODO: add check for FW version using minor ver once it's known */ return hl_fw_send_device_activity(hdev, open); } diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2P.h b/drivers/misc/habanalabs/gaudi2/gaudi2P.h index a99c348bbf3907e953557c495a7eb8f0b0287802..b4383c199bbbab066f3e55b301e3eb80993bbd15 100644 --- a/drivers/misc/habanalabs/gaudi2/gaudi2P.h +++ b/drivers/misc/habanalabs/gaudi2/gaudi2P.h @@ -23,8 +23,6 @@ #define GAUDI2_CPU_TIMEOUT_USEC 30000000 /* 30s */ -#define GAUDI2_FPGA_CPU_TIMEOUT 100000000 /* 100s */ - #define NUMBER_OF_PDMA_QUEUES 2 #define NUMBER_OF_EDMA_QUEUES 8 #define NUMBER_OF_MME_QUEUES 4 diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_security.c b/drivers/misc/habanalabs/gaudi2/gaudi2_security.c index c6906fb142290c7d89b38c970c58a6505b87761d..768c2f3dc9003946879fe91d712a1f85e1fedfc0 100644 --- a/drivers/misc/habanalabs/gaudi2/gaudi2_security.c +++ b/drivers/misc/habanalabs/gaudi2/gaudi2_security.c @@ -1764,6 +1764,7 @@ static const struct range gaudi2_pb_nic0_qm_arc_aux0_unsecured_regs[] = { {mmNIC0_QM_ARC_AUX0_CLUSTER_NUM, mmNIC0_QM_ARC_AUX0_WAKE_UP_EVENT}, {mmNIC0_QM_ARC_AUX0_ARC_RST_REQ, mmNIC0_QM_ARC_AUX0_CID_OFFSET_7}, {mmNIC0_QM_ARC_AUX0_SCRATCHPAD_0, mmNIC0_QM_ARC_AUX0_INFLIGHT_LBU_RD_CNT}, + {mmNIC0_QM_ARC_AUX0_CBU_EARLY_BRESP_EN, mmNIC0_QM_ARC_AUX0_CBU_EARLY_BRESP_EN}, {mmNIC0_QM_ARC_AUX0_LBU_EARLY_BRESP_EN, mmNIC0_QM_ARC_AUX0_LBU_EARLY_BRESP_EN}, {mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_BASE_ADDR_0, mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_ALERT_MSG}, {mmNIC0_QM_ARC_AUX0_DCCM_Q_PUSH_FIFO_CNT, mmNIC0_QM_ARC_AUX0_QMAN_ARC_CQ_SHADOW_CI}, diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 5ef9e3ca97a69acb1dc704d7d7e4173d531cd5c4..0f083fcf81a6b11bd1a4f12cf614052ed6cd165a 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -4475,8 +4475,8 @@ static void goya_print_out_of_sync_info(struct hl_device *hdev, { struct hl_hw_queue *q = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ]; - dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n", - sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci)); + dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n", + le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci)); } static void goya_print_irq_info(struct hl_device *hdev, u16 event_type, diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h b/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h index 34406770a76a6f908d971205dbfa62a090d212b6..305b576222e6f39b26c14120a7a8e60019088fa7 100644 --- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h +++ b/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h @@ -957,6 +957,7 @@ enum 
gaudi2_async_event_id { GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG0 = 1317, GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1 = 1318, GAUDI2_EVENT_ARC_DCCM_FULL = 1319, + GAUDI2_EVENT_CPU_FP32_NOT_SUPPORTED = 1320, GAUDI2_EVENT_SIZE, }; diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h b/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h index 5bd4383c9f2cf9abd1b2e25080412d5d1eb82e76..d510cb10c883138de9a22c37d9bfddd14202d4c3 100644 --- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h +++ b/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 * - * Copyright 2018-2021 HabanaLabs, Ltd. + * Copyright 2018-2022 HabanaLabs, Ltd. * All Rights Reserved. * */ @@ -2663,6 +2663,8 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = { .msg = 1, .reset = 0, .name = "STATUS_NIC11_ENG1" }, { .fc_id = 1319, .cpu_id = 625, .valid = 1, .msg = 1, .reset = 0, .name = "ARC_DCCM_FULL" }, + { .fc_id = 1320, .cpu_id = 626, .valid = 1, + .msg = 1, .reset = 1, .name = "FP32_NOT_SUPPORTED" }, }; #endif /* __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_ */ diff --git a/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h b/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h index d232081d4e0f7ffac7242fc1968515860657b2a3..f5d497dc9bdc1798a7c19246230fed6fb264d366 100644 --- a/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h +++ b/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h @@ -20,4 +20,11 @@ #define PCI_CONFIG_ELBI_STS_MASK (PCI_CONFIG_ELBI_STS_ERR | \ PCI_CONFIG_ELBI_STS_DONE) +enum hl_revision_id { + /* PCI revision ID 0 is not legal */ + REV_ID_INVALID = 0x00, + REV_ID_A = 0x01, + REV_ID_B = 0x02, +}; + #endif /* INCLUDE_PCI_GENERAL_H_ */ diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c index 42b9adef28a3c092cb9587aa120d57d23179578e..8967940ecd1e8ae9bc6def638b909498372618de 100644 --- a/drivers/misc/hmc6352.c +++ b/drivers/misc/hmc6352.c @@ -101,8 +101,7 @@ static const struct attribute_group m_compass_gr = { .attrs = mid_att_compass }; -static int hmc6352_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int hmc6352_probe(struct i2c_client *client) { int res; @@ -132,7 +131,7 @@ static struct i2c_driver hmc6352_driver = { .driver = { .name = "hmc6352", }, - .probe = hmc6352_probe, + .probe_new = hmc6352_probe, .remove = hmc6352_remove, .id_table = hmc6352_id, }; diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c index 1cb71df966a4ca4444127fd3e74a124b188bc8aa..12108a7b9b40df7c4b879644d90e8dfe44192621 100644 --- a/drivers/misc/ics932s401.c +++ b/drivers/misc/ics932s401.c @@ -89,8 +89,7 @@ struct ics932s401_data { u8 regs[NUM_REGS]; }; -static int ics932s401_probe(struct i2c_client *client, - const struct i2c_device_id *id); +static int ics932s401_probe(struct i2c_client *client); static int ics932s401_detect(struct i2c_client *client, struct i2c_board_info *info); static void ics932s401_remove(struct i2c_client *client); @@ -106,7 +105,7 @@ static struct i2c_driver ics932s401_driver = { .driver = { .name = "ics932s401", }, - .probe = ics932s401_probe, + .probe_new = ics932s401_probe, .remove = ics932s401_remove, .id_table = ics932s401_id, .detect = ics932s401_detect, @@ -429,8 +428,7 @@ static int ics932s401_detect(struct i2c_client *client, return 0; } -static int ics932s401_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ics932s401_probe(struct i2c_client *client) { struct 
ics932s401_data *data; int err; diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c index 8ab61be79c762939a11bc5991325fcb786aa1aac..aeda2fa89e61fcc2eeee7fc78c5e418ab4477099 100644 --- a/drivers/misc/isl29003.c +++ b/drivers/misc/isl29003.c @@ -374,8 +374,7 @@ static int isl29003_init_client(struct i2c_client *client) * I2C layer */ -static int isl29003_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int isl29003_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; struct isl29003_data *data; @@ -460,7 +459,7 @@ static struct i2c_driver isl29003_driver = { .name = ISL29003_DRV_NAME, .pm = ISL29003_PM_OPS, }, - .probe = isl29003_probe, + .probe_new = isl29003_probe, .remove = isl29003_remove, .id_table = isl29003_id, }; diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c index c6f2a94f501acc58380ed51fd57bf90df8b1ffa2..3be02093368cf951e5eec2f7bda77d9097f563bc 100644 --- a/drivers/misc/isl29020.c +++ b/drivers/misc/isl29020.c @@ -151,8 +151,7 @@ static int als_set_default_config(struct i2c_client *client) return 0; } -static int isl29020_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int isl29020_probe(struct i2c_client *client) { int res; @@ -215,7 +214,7 @@ static struct i2c_driver isl29020_driver = { .name = "isl29020", .pm = ISL29020_PM_OPS, }, - .probe = isl29020_probe, + .probe_new = isl29020_probe, .remove = isl29020_remove, .id_table = isl29020_id, }; diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c index d7daa01fe7ca957b40ad7911b52cc1d370ef2dbd..7071412d6bf63ea1eb7196cdef52f01ccbe8e5e9 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c +++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c @@ -100,8 +100,7 @@ static const struct of_device_id lis3lv02d_i2c_dt_ids[] = { MODULE_DEVICE_TABLE(of, lis3lv02d_i2c_dt_ids); #endif -static int lis3lv02d_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int lis3lv02d_i2c_probe(struct i2c_client *client) { int ret = 0; struct lis3lv02d_platform_data *pdata = client->dev.platform_data; @@ -263,7 +262,7 @@ static struct i2c_driver lis3lv02d_i2c_driver = { .pm = &lis3_pm_ops, .of_match_table = of_match_ptr(lis3lv02d_i2c_dt_ids), }, - .probe = lis3lv02d_i2c_probe, + .probe_new = lis3lv02d_i2c_probe, .remove = lis3lv02d_i2c_remove, .id_table = lis3lv02d_id, }; diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 71fbf0bc845324ca311862659ebabba244822c42..6df7679d973916a2838df5c6f0a7bb993b4d220e 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -188,17 +188,20 @@ static int mei_fwver(struct mei_cl_device *cldev) return ret; } +#define GFX_MEMORY_READY_TIMEOUT 200 /* timeout in milliseconds */ + static int mei_gfx_memory_ready(struct mei_cl_device *cldev) { struct mkhi_gfx_mem_ready req = {0}; - unsigned int mode = MEI_CL_IO_TX_INTERNAL; + unsigned int mode = MEI_CL_IO_TX_INTERNAL | MEI_CL_IO_TX_BLOCKING; req.hdr.group_id = MKHI_GROUP_ID_GFX; req.hdr.command = MKHI_GFX_MEMORY_READY_CMD_REQ; req.flags = MKHI_GFX_MEM_READY_PXP_ALLOWED; dev_dbg(&cldev->dev, "Sending memory ready command\n"); - return __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0, mode); + return __mei_cl_send_timeout(cldev->cl, (u8 *)&req, sizeof(req), 0, + mode, GFX_MEMORY_READY_TIMEOUT); } static void mei_mkhi_fix(struct mei_cl_device *cldev) @@ -263,12 +266,13 @@ static void mei_gsc_mkhi_fix_ver(struct mei_cl_device *cldev) if (cldev->bus->pxp_mode == 
MEI_DEV_PXP_INIT) { ret = mei_gfx_memory_ready(cldev); - if (ret < 0) + if (ret < 0) { dev_err(&cldev->dev, "memory ready command failed %d\n", ret); - else + } else { dev_dbg(&cldev->dev, "memory ready command sent\n"); + cldev->bus->pxp_mode = MEI_DEV_PXP_SETUP; + } /* we go to reset after that */ - cldev->bus->pxp_mode = MEI_DEV_PXP_SETUP; goto out; } diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 1fbe127ff63342cf13d16f6858eb2273a3895895..4a08b624910a0a4114ba43f8b2c0dfdf6c47da1d 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -34,6 +34,26 @@ */ ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag, unsigned int mode) +{ + return __mei_cl_send_timeout(cl, buf, length, vtag, mode, MAX_SCHEDULE_TIMEOUT); +} + +/** + * __mei_cl_send_timeout - internal client send (write) + * + * @cl: host client + * @buf: buffer to send + * @length: buffer length + * @vtag: virtual tag + * @mode: sending mode + * @timeout: send timeout in milliseconds. + * effective only for blocking writes, i.e. when the MEI_CL_IO_TX_BLOCKING mode bit is set. + * set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait. + * + * Return: written size bytes or < 0 on error + */ +ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag, + unsigned int mode, unsigned long timeout) { struct mei_device *bus; struct mei_cl_cb *cb; @@ -108,7 +128,7 @@ ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag, cb->buf.size = 0; } - rets = mei_cl_write(cl, cb); + rets = mei_cl_write(cl, cb, timeout); if (mode & MEI_CL_IO_SGL && rets == 0) rets = length; diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 6c8b71ae32c840eea15e48acc2c6c00a23256871..9ddb854b8155b5476c4d618dd9441cde3fc9fdb9 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -1954,10 +1954,13 @@ err: * * @cl: host client * @cb: write callback with filled data + * @timeout: send timeout in milliseconds. + * effective only for blocking writes, i.e. when cb->blocking is set. + * set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait. * * Return: number of bytes sent on success, <0 on failure.
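+ * Note: on a blocking write timeout the client's pending tx callbacks are
+ * freed and -ETIME is returned, as something fatal is assumed to have
+ * happened on the link.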
*/ -ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) +ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout) { struct mei_device *dev; struct mei_msg_data *buf; @@ -2081,11 +2084,20 @@ out: if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) { mutex_unlock(&dev->device_lock); - rets = wait_event_interruptible(cl->tx_wait, - cl->writing_state == MEI_WRITE_COMPLETE || - (!mei_cl_is_connected(cl))); + rets = wait_event_interruptible_timeout(cl->tx_wait, + cl->writing_state == MEI_WRITE_COMPLETE || + (!mei_cl_is_connected(cl)), + msecs_to_jiffies(timeout)); mutex_lock(&dev->device_lock); + /* clean all queues on timeout as something fatal happened */ + if (rets == 0) { + rets = -ETIME; + mei_io_tx_list_free_cl(&dev->write_list, cl, NULL); + mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL); + } /* wait_event_interruptible returns -ERESTARTSYS */ + if (rets > 0) + rets = 0; if (rets) { if (signal_pending(current)) rets = -EINTR; diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index 418056fb1489ce80b634e8bf2cfd291d3ccd79f9..9052860bcfe03774f328ab389c3b6fec74082811 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h @@ -246,7 +246,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, struct list_head *cmpl_list); int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp); -ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb); +ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout); int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, struct list_head *cmpl_list); diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 930887e7e38d6810a7f3ef858948cc907b8fd868..632d4ae21e46551dc8d80754e89b038eec2b596c 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -383,7 +383,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, goto out; } - rets = mei_cl_write(cl, cb); + rets = mei_cl_write(cl, cb, MAX_SCHEDULE_TIMEOUT); out: mutex_unlock(&dev->device_lock); return rets; diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 8d8018428d9dd8a254f0a2aff7ab036e69654c40..996b70a988becab3961efc205660bc8c9ec25c88 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -379,6 +379,8 @@ void mei_cl_bus_rescan_work(struct work_struct *work); void mei_cl_bus_dev_fixup(struct mei_cl_device *dev); ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag, unsigned int mode); +ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag, + unsigned int mode, unsigned long timeout); ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag, unsigned int mode, unsigned long timeout); bool mei_cl_bus_rx_event(struct mei_cl *cl); diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c index e401a51596b9c6f1419f0c5f14d8951e18baa05f..92ab49705f64565b73b57fd89038626ec405e88d 100644 --- a/drivers/misc/ocxl/config.c +++ b/drivers/misc/ocxl/config.c @@ -193,6 +193,18 @@ static int read_dvsec_vendor(struct pci_dev *dev) return 0; } +/** + * get_dvsec_vendor0() - Find a related PCI device (function 0) + * @dev: PCI device to match + * @dev0: The PCI device (function 0) found + * @out_pos: The position of PCI device (function 0) + * + * Returns 0 on success, negative on failure.
+ * + * NOTE: On success, the reference count of dev0 is increased, + * so after using it, the caller must call pci_dev_put() to + * release the reference. + */ static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0, int *out_pos) { @@ -202,10 +214,14 @@ static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0, dev = get_function_0(dev); if (!dev) return -1; + } else { + dev = pci_dev_get(dev); } pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID); - if (!pos) + if (!pos) { + pci_dev_put(dev); return -1; + } *dev0 = dev; *out_pos = pos; return 0; @@ -222,6 +238,7 @@ int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val) pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, &reset_reload); + pci_dev_put(dev0); *val = !!(reset_reload & BIT(0)); return 0; } @@ -243,6 +260,7 @@ int ocxl_config_set_reset_reload(struct pci_dev *dev, int val) reset_reload &= ~BIT(0); pci_write_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, reset_reload); + pci_dev_put(dev0); return 0; } diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index d46dba2df5a10af576e1156516643e4423c66136..3b058654b45b43548bf5c0d81eaeb666b729cdcc 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -541,8 +541,11 @@ int ocxl_file_register_afu(struct ocxl_afu *afu) goto err_put; rc = device_register(&info->dev); - if (rc) - goto err_put; + if (rc) { + free_minor(info); + put_device(&info->dev); + return rc; + } rc = ocxl_sysfs_register_afu(info); if (rc) @@ -581,7 +584,7 @@ void ocxl_file_unregister_afu(struct ocxl_afu *afu) device_unregister(&info->dev); } -static char *ocxl_devnode(struct device *dev, umode_t *mode) +static char *ocxl_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "ocxl/%s", dev_name(dev)); } diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index d7ef61e602ede67ff7932602dd1384bb52798057..b836936e97471715bf0da41de745e80f449d22c1 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -648,6 +648,7 @@ int gru_handle_user_call_os(unsigned long cb) if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB) return -EINVAL; +again: gts = gru_find_lock_gts(cb); if (!gts) return -EINVAL; @@ -656,7 +657,11 @@ int gru_handle_user_call_os(unsigned long cb) if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) goto exit; - gru_check_context_placement(gts); + if (gru_check_context_placement(gts)) { + gru_unlock_gts(gts); + gru_unload_context(gts, 1); + goto again; + } /* * CCH may contain stale data if ts_force_cch_reload is set. @@ -874,7 +879,11 @@ int gru_set_context_option(unsigned long arg) } else { gts->ts_user_blade_id = req.val1; gts->ts_user_chiplet_id = req.val0; - gru_check_context_placement(gts); + if (gru_check_context_placement(gts)) { + gru_unlock_gts(gts); + gru_unload_context(gts, 1); + return ret; + } } break; case sco_gseg_owner: diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index 6706ef3c597760cc6cee6ec81627021fb304f61a..4eb4b945513907911584e0070ffcaf0c7b37d79b 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c @@ -716,9 +716,10 @@ static int gru_check_chiplet_assignment(struct gru_state *gru, * chiplet. Misassignment can occur if the process migrates to a different * blade or if the user changes the selected blade/chiplet.
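+ *
+ * Returns -EINVAL if the context is misassigned and must be unloaded by the
+ * caller, otherwise 0.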
*/ -void gru_check_context_placement(struct gru_thread_state *gts) +int gru_check_context_placement(struct gru_thread_state *gts) { struct gru_state *gru; + int ret = 0; /* * If the current task is the context owner, verify that the @@ -726,15 +727,23 @@ void gru_check_context_placement(struct gru_thread_state *gts) * references. Pthread apps use non-owner references to the CBRs. */ gru = gts->ts_gru; + /* + * If gru or gts->ts_tgid_owner isn't initialized properly, return + * success to indicate that the caller does not need to unload the + * gru context. The caller is responsible for their inspection and + * reinitialization if needed. + */ if (!gru || gts->ts_tgid_owner != current->tgid) - return; + return ret; if (!gru_check_chiplet_assignment(gru, gts)) { STAT(check_context_unload); - gru_unload_context(gts, 1); + ret = -EINVAL; } else if (gru_retarget_intr(gts)) { STAT(check_context_retarget_intr); } + + return ret; } @@ -934,7 +943,12 @@ again: mutex_lock(&gts->ts_ctxlock); preempt_disable(); - gru_check_context_placement(gts); + if (gru_check_context_placement(gts)) { + preempt_enable(); + mutex_unlock(&gts->ts_ctxlock); + gru_unload_context(gts, 1); + return VM_FAULT_NOPAGE; + } if (!gts->ts_gru) { STAT(load_user_context); diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index 8c52776db23417635f9a79626283dd71da1619e0..640daf1994df71b7306e6ea0f2177e58be209a39 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h @@ -632,7 +632,7 @@ extern int gru_user_flush_tlb(unsigned long arg); extern int gru_user_unload_context(unsigned long arg); extern int gru_get_exception_detail(unsigned long arg); extern int gru_set_context_option(unsigned long address); -extern void gru_check_context_placement(struct gru_thread_state *gts); +extern int gru_check_context_placement(struct gru_thread_state *gts); extern int gru_cpu_fault_map_id(void); extern struct vm_area_struct *gru_find_vma(unsigned long vaddr); extern void gru_flush_all_tlb(struct gru_state *gru); diff --git a/drivers/misc/smpro-errmon.c b/drivers/misc/smpro-errmon.c new file mode 100644 index 0000000000000000000000000000000000000000..d1431d419aa426b85782195146dfe3a74fd7326a --- /dev/null +++ b/drivers/misc/smpro-errmon.c @@ -0,0 +1,529 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Ampere Computing SoC's SMpro Error Monitoring Driver + * + * Copyright (c) 2022, Ampere Computing LLC + * + */ + +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +/* GPI RAS Error Registers */ +#define GPI_RAS_ERR 0x7E + +/* Core and L2C Error Registers */ +#define CORE_CE_ERR_CNT 0x80 +#define CORE_CE_ERR_LEN 0x81 +#define CORE_CE_ERR_DATA 0x82 +#define CORE_UE_ERR_CNT 0x83 +#define CORE_UE_ERR_LEN 0x84 +#define CORE_UE_ERR_DATA 0x85 + +/* Memory Error Registers */ +#define MEM_CE_ERR_CNT 0x90 +#define MEM_CE_ERR_LEN 0x91 +#define MEM_CE_ERR_DATA 0x92 +#define MEM_UE_ERR_CNT 0x93 +#define MEM_UE_ERR_LEN 0x94 +#define MEM_UE_ERR_DATA 0x95 + +/* RAS Error/Warning Registers */ +#define ERR_SMPRO_TYPE 0xA0 +#define ERR_PMPRO_TYPE 0xA1 +#define ERR_SMPRO_INFO_LO 0xA2 +#define ERR_SMPRO_INFO_HI 0xA3 +#define ERR_SMPRO_DATA_LO 0xA4 +#define ERR_SMPRO_DATA_HI 0xA5 +#define WARN_SMPRO_INFO_LO 0xAA +#define WARN_SMPRO_INFO_HI 0xAB +#define ERR_PMPRO_INFO_LO 0xA6 +#define ERR_PMPRO_INFO_HI 0xA7 +#define ERR_PMPRO_DATA_LO 0xA8 +#define ERR_PMPRO_DATA_HI 0xA9 +#define WARN_PMPRO_INFO_LO 0xAC +#define WARN_PMPRO_INFO_HI 0xAD + +/* PCIE Error Registers */ +#define PCIE_CE_ERR_CNT 0xC0 +#define
PCIE_CE_ERR_LEN 0xC1 +#define PCIE_CE_ERR_DATA 0xC2 +#define PCIE_UE_ERR_CNT 0xC3 +#define PCIE_UE_ERR_LEN 0xC4 +#define PCIE_UE_ERR_DATA 0xC5 + +/* Other Error Registers */ +#define OTHER_CE_ERR_CNT 0xD0 +#define OTHER_CE_ERR_LEN 0xD1 +#define OTHER_CE_ERR_DATA 0xD2 +#define OTHER_UE_ERR_CNT 0xD8 +#define OTHER_UE_ERR_LEN 0xD9 +#define OTHER_UE_ERR_DATA 0xDA + +/* Event Data Registers */ +#define VRD_WARN_FAULT_EVENT_DATA 0x78 +#define VRD_HOT_EVENT_DATA 0x79 +#define DIMM_HOT_EVENT_DATA 0x7A + +#define MAX_READ_BLOCK_LENGTH 48 + +#define RAS_SMPRO_ERR 0 +#define RAS_PMPRO_ERR 1 + +enum RAS_48BYTES_ERR_TYPES { + CORE_CE_ERR, + CORE_UE_ERR, + MEM_CE_ERR, + MEM_UE_ERR, + PCIE_CE_ERR, + PCIE_UE_ERR, + OTHER_CE_ERR, + OTHER_UE_ERR, + NUM_48BYTES_ERR_TYPE, +}; + +struct smpro_error_hdr { + u8 count; /* Number of the RAS errors */ + u8 len; /* Number of data bytes */ + u8 data; /* Start of 48-byte data */ + u8 max_cnt; /* Max num of errors */ +}; + +/* + * Addresses of the registers that hold the count, the data length and the + * data of each 48-byte error record + */ +static struct smpro_error_hdr smpro_error_table[] = { + [CORE_CE_ERR] = { + .count = CORE_CE_ERR_CNT, + .len = CORE_CE_ERR_LEN, + .data = CORE_CE_ERR_DATA, + .max_cnt = 32 + }, + [CORE_UE_ERR] = { + .count = CORE_UE_ERR_CNT, + .len = CORE_UE_ERR_LEN, + .data = CORE_UE_ERR_DATA, + .max_cnt = 32 + }, + [MEM_CE_ERR] = { + .count = MEM_CE_ERR_CNT, + .len = MEM_CE_ERR_LEN, + .data = MEM_CE_ERR_DATA, + .max_cnt = 16 + }, + [MEM_UE_ERR] = { + .count = MEM_UE_ERR_CNT, + .len = MEM_UE_ERR_LEN, + .data = MEM_UE_ERR_DATA, + .max_cnt = 16 + }, + [PCIE_CE_ERR] = { + .count = PCIE_CE_ERR_CNT, + .len = PCIE_CE_ERR_LEN, + .data = PCIE_CE_ERR_DATA, + .max_cnt = 96 + }, + [PCIE_UE_ERR] = { + .count = PCIE_UE_ERR_CNT, + .len = PCIE_UE_ERR_LEN, + .data = PCIE_UE_ERR_DATA, + .max_cnt = 96 + }, + [OTHER_CE_ERR] = { + .count = OTHER_CE_ERR_CNT, + .len = OTHER_CE_ERR_LEN, + .data = OTHER_CE_ERR_DATA, + .max_cnt = 8 + }, + [OTHER_UE_ERR] = { + .count = OTHER_UE_ERR_CNT, + .len = OTHER_UE_ERR_LEN, + .data = OTHER_UE_ERR_DATA, + .max_cnt = 8 + }, +}; + +/* + * List of SCP registers used to retrieve one type of + * RAS internal error.
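+ * There is one entry per error source: RAS_SMPRO_ERR for SMpro errors and
+ * RAS_PMPRO_ERR for PMpro errors.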
+ */ +struct smpro_int_error_hdr { + u8 type; + u8 info_l; + u8 info_h; + u8 data_l; + u8 data_h; + u8 warn_l; + u8 warn_h; +}; + +static struct smpro_int_error_hdr list_smpro_int_error_hdr[] = { + [RAS_SMPRO_ERR] = { + .type = ERR_SMPRO_TYPE, + .info_l = ERR_SMPRO_INFO_LO, + .info_h = ERR_SMPRO_INFO_HI, + .data_l = ERR_SMPRO_DATA_LO, + .data_h = ERR_SMPRO_DATA_HI, + .warn_l = WARN_SMPRO_INFO_LO, + .warn_h = WARN_SMPRO_INFO_HI, + }, + [RAS_PMPRO_ERR] = { + .type = ERR_PMPRO_TYPE, + .info_l = ERR_PMPRO_INFO_LO, + .info_h = ERR_PMPRO_INFO_HI, + .data_l = ERR_PMPRO_DATA_LO, + .data_h = ERR_PMPRO_DATA_HI, + .warn_l = WARN_PMPRO_INFO_LO, + .warn_h = WARN_PMPRO_INFO_HI, + }, +}; + +struct smpro_errmon { + struct regmap *regmap; +}; + +enum EVENT_TYPES { + VRD_WARN_FAULT_EVENT, + VRD_HOT_EVENT, + DIMM_HOT_EVENT, + NUM_EVENTS_TYPE, +}; + +/* Addresses of the event data registers */ +static u8 smpro_event_table[NUM_EVENTS_TYPE] = { + VRD_WARN_FAULT_EVENT_DATA, + VRD_HOT_EVENT_DATA, + DIMM_HOT_EVENT_DATA, +}; + +static ssize_t smpro_event_data_read(struct device *dev, + struct device_attribute *da, char *buf, + int channel) +{ + struct smpro_errmon *errmon = dev_get_drvdata(dev); + s32 event_data; + int ret; + + ret = regmap_read(errmon->regmap, smpro_event_table[channel], &event_data); + if (ret) + return ret; + /* Clear event after read */ + if (event_data != 0) + regmap_write(errmon->regmap, smpro_event_table[channel], event_data); + + return sysfs_emit(buf, "%04x\n", event_data); +} + +static ssize_t smpro_overflow_data_read(struct device *dev, struct device_attribute *da, + char *buf, int channel) +{ + struct smpro_errmon *errmon = dev_get_drvdata(dev); + struct smpro_error_hdr *err_info; + s32 err_count; + int ret; + + err_info = &smpro_error_table[channel]; + + ret = regmap_read(errmon->regmap, err_info->count, &err_count); + if (ret) + return ret; + + /* Bit 8 indicates the overflow status */ + return sysfs_emit(buf, "%d\n", (err_count & BIT(8)) ? 1 : 0); +} + +static ssize_t smpro_error_data_read(struct device *dev, struct device_attribute *da, + char *buf, int channel) +{ + struct smpro_errmon *errmon = dev_get_drvdata(dev); + unsigned char err_data[MAX_READ_BLOCK_LENGTH]; + struct smpro_error_hdr *err_info; + s32 err_count, err_length; + int ret; + + err_info = &smpro_error_table[channel]; + + ret = regmap_read(errmon->regmap, err_info->count, &err_count); + /* Error count is the low byte */ + err_count &= 0xff; + if (ret || !err_count || err_count > err_info->max_cnt) + return ret; + + ret = regmap_read(errmon->regmap, err_info->len, &err_length); + if (ret || err_length <= 0) + return ret; + + if (err_length > MAX_READ_BLOCK_LENGTH) + err_length = MAX_READ_BLOCK_LENGTH; + + memset(err_data, 0x00, MAX_READ_BLOCK_LENGTH); + ret = regmap_noinc_read(errmon->regmap, err_info->data, err_data, err_length); + if (ret < 0) + return ret; + + /* clear the error */ + ret = regmap_write(errmon->regmap, err_info->count, 0x100); + if (ret) + return ret; + /* + * The output of Core/Memory/PCIe/Others UE/CE errors follows the format + * specified in section 5.8.1 CE/UE Error Data record in + * Altra SOC BMC Interface specification.
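+ * The record is always emitted as MAX_READ_BLOCK_LENGTH (48) bytes,
+ * zero-padded beyond the length reported by the hardware.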
+ */ + return sysfs_emit(buf, "%*phN\n", MAX_READ_BLOCK_LENGTH, err_data); +} + +/* + * Output format: + * <4-byte hex value of error info><4-byte hex value of error extensive data> + * Where: + * + error info : The error information + * + error data : Extensive data (32 bits) + * Reference to section 5.10 RAS Internal Error Register Definition in + * Altra SOC BMC Interface specification + */ +static ssize_t smpro_internal_err_read(struct device *dev, struct device_attribute *da, + char *buf, int channel) +{ + struct smpro_errmon *errmon = dev_get_drvdata(dev); + struct smpro_int_error_hdr *err_info; + unsigned int err[4] = { 0 }; + unsigned int err_type; + unsigned int val; + int ret; + + /* read error status */ + ret = regmap_read(errmon->regmap, GPI_RAS_ERR, &val); + if (ret) + return ret; + + if ((channel == RAS_SMPRO_ERR && !(val & BIT(0))) || + (channel == RAS_PMPRO_ERR && !(val & BIT(1)))) + return 0; + + err_info = &list_smpro_int_error_hdr[channel]; + ret = regmap_read(errmon->regmap, err_info->type, &val); + if (ret) + return ret; + + err_type = (val & BIT(1)) ? BIT(1) : + (val & BIT(2)) ? BIT(2) : 0; + + if (!err_type) + return 0; + + ret = regmap_read(errmon->regmap, err_info->info_l, err + 1); + if (ret) + return ret; + + ret = regmap_read(errmon->regmap, err_info->info_h, err); + if (ret) + return ret; + + if (err_type & BIT(2)) { + /* Error with data type */ + ret = regmap_read(errmon->regmap, err_info->data_l, err + 3); + if (ret) + return ret; + + ret = regmap_read(errmon->regmap, err_info->data_h, err + 2); + if (ret) + return ret; + } + + /* clear the read errors */ + ret = regmap_write(errmon->regmap, err_info->type, err_type); + if (ret) + return ret; + + return sysfs_emit(buf, "%*phN\n", (int)sizeof(err), err); +} + +/* + * Output format: + * <4-byte hex value of warning info> + * Reference to section 5.10 RAS Internal Error Register Definition in + * Altra SOC BMC Interface specification + */ +static ssize_t smpro_internal_warn_read(struct device *dev, struct device_attribute *da, + char *buf, int channel) +{ + struct smpro_errmon *errmon = dev_get_drvdata(dev); + struct smpro_int_error_hdr *err_info; + unsigned int warn[2] = { 0 }; + unsigned int val; + int ret; + + /* read error status */ + ret = regmap_read(errmon->regmap, GPI_RAS_ERR, &val); + if (ret) + return ret; + + if ((channel == RAS_SMPRO_ERR && !(val & BIT(0))) || + (channel == RAS_PMPRO_ERR && !(val & BIT(1)))) + return 0; + + err_info = &list_smpro_int_error_hdr[channel]; + ret = regmap_read(errmon->regmap, err_info->type, &val); + if (ret) + return ret; + + if (!(val & BIT(0))) + return 0; + + ret = regmap_read(errmon->regmap, err_info->warn_l, warn + 1); + if (ret) + return ret; + + ret = regmap_read(errmon->regmap, err_info->warn_h, warn); + if (ret) + return ret; + + /* clear the warning */ + ret = regmap_write(errmon->regmap, err_info->type, BIT(0)); + if (ret) + return ret; + + return sysfs_emit(buf, "%*phN\n", (int)sizeof(warn), warn); +} + +#define ERROR_OVERFLOW_RO(_error, _index) \ + static ssize_t overflow_##_error##_show(struct device *dev, \ + struct device_attribute *da, \ + char *buf) \ + { \ + return smpro_overflow_data_read(dev, da, buf, _index); \ + } \ + static DEVICE_ATTR_RO(overflow_##_error) + +ERROR_OVERFLOW_RO(core_ce, CORE_CE_ERR); +ERROR_OVERFLOW_RO(core_ue, CORE_UE_ERR); +ERROR_OVERFLOW_RO(mem_ce, MEM_CE_ERR); +ERROR_OVERFLOW_RO(mem_ue, MEM_UE_ERR); +ERROR_OVERFLOW_RO(pcie_ce, PCIE_CE_ERR); +ERROR_OVERFLOW_RO(pcie_ue, PCIE_UE_ERR); +ERROR_OVERFLOW_RO(other_ce, OTHER_CE_ERR);
+ERROR_OVERFLOW_RO(other_ue, OTHER_UE_ERR); + +#define ERROR_RO(_error, _index) \ + static ssize_t error_##_error##_show(struct device *dev, \ + struct device_attribute *da, \ + char *buf) \ + { \ + return smpro_error_data_read(dev, da, buf, _index); \ + } \ + static DEVICE_ATTR_RO(error_##_error) + +ERROR_RO(core_ce, CORE_CE_ERR); +ERROR_RO(core_ue, CORE_UE_ERR); +ERROR_RO(mem_ce, MEM_CE_ERR); +ERROR_RO(mem_ue, MEM_UE_ERR); +ERROR_RO(pcie_ce, PCIE_CE_ERR); +ERROR_RO(pcie_ue, PCIE_UE_ERR); +ERROR_RO(other_ce, OTHER_CE_ERR); +ERROR_RO(other_ue, OTHER_UE_ERR); + +static ssize_t error_smpro_show(struct device *dev, struct device_attribute *da, char *buf) +{ + return smpro_internal_err_read(dev, da, buf, RAS_SMPRO_ERR); +} +static DEVICE_ATTR_RO(error_smpro); + +static ssize_t error_pmpro_show(struct device *dev, struct device_attribute *da, char *buf) +{ + return smpro_internal_err_read(dev, da, buf, RAS_PMPRO_ERR); +} +static DEVICE_ATTR_RO(error_pmpro); + +static ssize_t warn_smpro_show(struct device *dev, struct device_attribute *da, char *buf) +{ + return smpro_internal_warn_read(dev, da, buf, RAS_SMPRO_ERR); +} +static DEVICE_ATTR_RO(warn_smpro); + +static ssize_t warn_pmpro_show(struct device *dev, struct device_attribute *da, char *buf) +{ + return smpro_internal_warn_read(dev, da, buf, RAS_PMPRO_ERR); +} +static DEVICE_ATTR_RO(warn_pmpro); + +#define EVENT_RO(_event, _index) \ + static ssize_t event_##_event##_show(struct device *dev, \ + struct device_attribute *da, \ + char *buf) \ + { \ + return smpro_event_data_read(dev, da, buf, _index); \ + } \ + static DEVICE_ATTR_RO(event_##_event) + +EVENT_RO(vrd_warn_fault, VRD_WARN_FAULT_EVENT); +EVENT_RO(vrd_hot, VRD_HOT_EVENT); +EVENT_RO(dimm_hot, DIMM_HOT_EVENT); + +static struct attribute *smpro_errmon_attrs[] = { + &dev_attr_overflow_core_ce.attr, + &dev_attr_overflow_core_ue.attr, + &dev_attr_overflow_mem_ce.attr, + &dev_attr_overflow_mem_ue.attr, + &dev_attr_overflow_pcie_ce.attr, + &dev_attr_overflow_pcie_ue.attr, + &dev_attr_overflow_other_ce.attr, + &dev_attr_overflow_other_ue.attr, + &dev_attr_error_core_ce.attr, + &dev_attr_error_core_ue.attr, + &dev_attr_error_mem_ce.attr, + &dev_attr_error_mem_ue.attr, + &dev_attr_error_pcie_ce.attr, + &dev_attr_error_pcie_ue.attr, + &dev_attr_error_other_ce.attr, + &dev_attr_error_other_ue.attr, + &dev_attr_error_smpro.attr, + &dev_attr_error_pmpro.attr, + &dev_attr_warn_smpro.attr, + &dev_attr_warn_pmpro.attr, + &dev_attr_event_vrd_warn_fault.attr, + &dev_attr_event_vrd_hot.attr, + &dev_attr_event_dimm_hot.attr, + NULL +}; + +ATTRIBUTE_GROUPS(smpro_errmon); + +static int smpro_errmon_probe(struct platform_device *pdev) +{ + struct smpro_errmon *errmon; + + errmon = devm_kzalloc(&pdev->dev, sizeof(struct smpro_errmon), GFP_KERNEL); + if (!errmon) + return -ENOMEM; + + platform_set_drvdata(pdev, errmon); + + errmon->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!errmon->regmap) + return -ENODEV; + + return 0; +} + +static struct platform_driver smpro_errmon_driver = { + .probe = smpro_errmon_probe, + .driver = { + .name = "smpro-errmon", + .dev_groups = smpro_errmon_groups, + }, +}; + +module_platform_driver(smpro_errmon_driver); + +MODULE_AUTHOR("Tung Nguyen "); +MODULE_AUTHOR("Thinh Pham "); +MODULE_AUTHOR("Hoang Nguyen "); +MODULE_AUTHOR("Thu Nguyen "); +MODULE_AUTHOR("Quan Nguyen "); +MODULE_DESCRIPTION("Ampere Altra SMpro driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/smpro-misc.c b/drivers/misc/smpro-misc.c new file mode 100644 index 
0000000000000000000000000000000000000000..6c427141e51b4121267c03818d1df5d5a299a768 --- /dev/null +++ b/drivers/misc/smpro-misc.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Ampere Computing SoC's SMpro Misc Driver + * + * Copyright (c) 2022, Ampere Computing LLC + */ +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +/* Boot Stage/Progress Registers */ +#define BOOTSTAGE 0xB0 +#define BOOTSTAGE_LO 0xB1 +#define CUR_BOOTSTAGE 0xB2 +#define BOOTSTAGE_HI 0xB3 + +/* SOC State Registers */ +#define SOC_POWER_LIMIT 0xE5 + +struct smpro_misc { + struct regmap *regmap; +}; + +static ssize_t boot_progress_show(struct device *dev, struct device_attribute *da, char *buf) +{ + struct smpro_misc *misc = dev_get_drvdata(dev); + u16 boot_progress[3] = { 0 }; + u32 bootstage; + u8 boot_stage; + u8 cur_stage; + u32 reg_lo; + u32 reg; + int ret; + + /* Read current boot stage */ + ret = regmap_read(misc->regmap, CUR_BOOTSTAGE, &reg); + if (ret) + return ret; + + cur_stage = reg & 0xff; + + ret = regmap_read(misc->regmap, BOOTSTAGE, &bootstage); + if (ret) + return ret; + + boot_stage = (bootstage >> 8) & 0xff; + + if (boot_stage > cur_stage) + return -EINVAL; + + ret = regmap_read(misc->regmap, BOOTSTAGE_LO, &reg_lo); + if (!ret) + ret = regmap_read(misc->regmap, BOOTSTAGE_HI, &reg); + if (ret) + return ret; + + /* Tell the firmware to report the new boot stage next time */ + if (boot_stage < cur_stage) { + ret = regmap_write(misc->regmap, BOOTSTAGE, ((bootstage & 0xff00) | 0x1)); + if (ret) + return ret; + } + + boot_progress[0] = bootstage; + boot_progress[1] = swab16(reg); + boot_progress[2] = swab16(reg_lo); + + return sysfs_emit(buf, "%*phN\n", (int)sizeof(boot_progress), boot_progress); +} + +static DEVICE_ATTR_RO(boot_progress); + +static ssize_t soc_power_limit_show(struct device *dev, struct device_attribute *da, char *buf) +{ + struct smpro_misc *misc = dev_get_drvdata(dev); + unsigned int value; + int ret; + + ret = regmap_read(misc->regmap, SOC_POWER_LIMIT, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%d\n", value); +} + +static ssize_t soc_power_limit_store(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct smpro_misc *misc = dev_get_drvdata(dev); + unsigned long val; + s32 ret; + + ret = kstrtoul(buf, 0, &val); + if (ret) + return ret; + + ret = regmap_write(misc->regmap, SOC_POWER_LIMIT, (unsigned int)val); + if (ret) + return -EPROTO; + + return count; +} + +static DEVICE_ATTR_RW(soc_power_limit); + +static struct attribute *smpro_misc_attrs[] = { + &dev_attr_boot_progress.attr, + &dev_attr_soc_power_limit.attr, + NULL +}; + +ATTRIBUTE_GROUPS(smpro_misc); + +static int smpro_misc_probe(struct platform_device *pdev) +{ + struct smpro_misc *misc; + + misc = devm_kzalloc(&pdev->dev, sizeof(struct smpro_misc), GFP_KERNEL); + if (!misc) + return -ENOMEM; + + platform_set_drvdata(pdev, misc); + + misc->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!misc->regmap) + return -ENODEV; + + return 0; +} + +static struct platform_driver smpro_misc_driver = { + .probe = smpro_misc_probe, + .driver = { + .name = "smpro-misc", + .dev_groups = smpro_misc_groups, + }, +}; + +module_platform_driver(smpro_misc_driver); + +MODULE_AUTHOR("Tung Nguyen "); +MODULE_AUTHOR("Quan Nguyen "); +MODULE_DESCRIPTION("Ampere Altra SMpro Misc driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c index a948e95d43754ae69dafda0c47249b6051e0541c..b71dbbd737382527c02bb17dbf12c451da3f889d 100644 --- a/drivers/misc/sram-exec.c
+++ b/drivers/misc/sram-exec.c @@ -10,9 +10,9 @@ #include <linux/device.h> #include <linux/genalloc.h> #include <linux/mm.h> +#include <linux/set_memory.h> #include <linux/sram.h> -#include <asm/set_memory.h> #include "sram.h" @@ -106,10 +106,7 @@ void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src, dst_cpy = fncpy(dst, src, size); - ret = set_memory_ro((unsigned long)base, pages); - if (ret) - goto error_out; - ret = set_memory_x((unsigned long)base, pages); + ret = set_memory_rox((unsigned long)base, pages); if (ret) goto error_out; diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index 017c2f7d6287134dafd28f5266a1222a65382d38..7dd86a9858aba2a466690872065b85a6c077de4c 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -190,7 +190,7 @@ static void tifm_7xx1_switch_media(struct work_struct *work) spin_unlock_irqrestore(&fm->lock, flags); } if (sock) - tifm_free_device(&sock->dev); + put_device(&sock->dev); } spin_lock_irqsave(&fm->lock, flags); } diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c index 1652fb9b3856ce2a425dc9c1dc7780f6efde9f6a..6c62b94e0acdb3c79d461ac445363fb6f92e84e5 100644 --- a/drivers/misc/tsl2550.c +++ b/drivers/misc/tsl2550.c @@ -331,8 +331,7 @@ static int tsl2550_init_client(struct i2c_client *client) */ static struct i2c_driver tsl2550_driver; -static int tsl2550_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int tsl2550_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; struct tsl2550_data *data; @@ -438,7 +437,7 @@ static struct i2c_driver tsl2550_driver = { .of_match_table = tsl2550_of_match, .pm = TSL2550_PM_OPS, }, - .probe = tsl2550_probe, + .probe_new = tsl2550_probe, .remove = tsl2550_remove, .id_table = tsl2550_id, }; diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c index b70a013139c74becfc22e01daf7b5a8c75fc4a44..905eff1f840ed296c70bf767de330b0dac349b13 100644 --- a/drivers/misc/uacce/uacce.c +++ b/drivers/misc/uacce/uacce.c @@ -108,7 +108,7 @@ static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q) if (!(uacce->flags & UACCE_DEV_SVA)) return 0; - handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL); + handle = iommu_sva_bind_device(uacce->parent, current->mm); if (IS_ERR(handle)) return PTR_ERR(handle); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index f7767afe116b33266de506d0728b3e04f17ee57c..b4c65783960a5aa14de5d64aeea190f02a04be44 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2654,8 +2654,9 @@ static void bond_miimon_link_change(struct bonding *bond, static void bond_miimon_commit(struct bonding *bond) { - struct list_head *iter; struct slave *slave, *primary; + bool do_failover = false; + struct list_head *iter; bond_for_each_slave(bond, slave, iter) { switch (slave->link_new_state) { @@ -2699,8 +2700,9 @@ static void bond_miimon_commit(struct bonding *bond) bond_miimon_link_change(bond, slave, BOND_LINK_UP); - if (!bond->curr_active_slave || slave == primary) - goto do_failover; + if (!rcu_access_pointer(bond->curr_active_slave) || slave == primary || + slave->prio > rcu_dereference(bond->curr_active_slave)->prio) + do_failover = true; continue; @@ -2721,7 +2723,7 @@ static void bond_miimon_commit(struct bonding *bond) bond_miimon_link_change(bond, slave, BOND_LINK_DOWN); if (slave == rcu_access_pointer(bond->curr_active_slave)) - goto do_failover; + do_failover = true; continue; @@ -2732,8 +2734,9 @@ static void bond_miimon_commit(struct bonding *bond) continue; } + } -do_failover: + if (do_failover) {
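+ /* re-select the active slave once, after all link state changes are committed */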
block_netpoll_tx(); bond_select_active_slave(bond); unblock_netpoll_tx(); @@ -3531,6 +3534,7 @@ static int bond_ab_arp_inspect(struct bonding *bond) */ static void bond_ab_arp_commit(struct bonding *bond) { + bool do_failover = false; struct list_head *iter; unsigned long last_tx; struct slave *slave; @@ -3560,8 +3564,9 @@ static void bond_ab_arp_commit(struct bonding *bond) slave_info(bond->dev, slave->dev, "link status definitely up\n"); if (!rtnl_dereference(bond->curr_active_slave) || - slave == rtnl_dereference(bond->primary_slave)) - goto do_failover; + slave == rtnl_dereference(bond->primary_slave) || + slave->prio > rtnl_dereference(bond->curr_active_slave)->prio) + do_failover = true; } @@ -3580,7 +3585,7 @@ static void bond_ab_arp_commit(struct bonding *bond) if (slave == rtnl_dereference(bond->curr_active_slave)) { RCU_INIT_POINTER(bond->current_arp_slave, NULL); - goto do_failover; + do_failover = true; } continue; @@ -3604,8 +3609,9 @@ static void bond_ab_arp_commit(struct bonding *bond) slave->link_new_state); continue; } + } -do_failover: + if (do_failover) { block_netpoll_tx(); bond_select_active_slave(bond); unblock_netpoll_tx(); diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index 0aeff34e5ae17c9e12795d421d131f3b78d71f86..6d638c93977bc7744c2c44b89dd82d3d2d967f4e 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -2349,9 +2349,15 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device) if (netif_running(dev)) { int err; - err = pm_runtime_force_resume(device); - if (err) - return err; + /* For the wakeup in auto stop mode, no need to gate on the + * clock here, hardware will do this automatically. + */ + if (!(device_may_wakeup(device) && + priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)) { + err = pm_runtime_force_resume(device); + if (err) + return err; + } if (device_may_wakeup(device)) flexcan_enable_wakeup_irq(priv, false); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index f688124d6d6696e35e9043fe845a4cb12f788dcf..ef341c4254fc8b14dd3f81b0d1909100b3cde393 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -545,6 +545,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, u8 cmd_no, int channel) { struct kvaser_cmd *cmd; + size_t cmd_len; int err; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); @@ -552,6 +553,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, return -ENOMEM; cmd->header.cmd_no = cmd_no; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); if (channel < 0) { kvaser_usb_hydra_set_cmd_dest_he (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); @@ -568,7 +570,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -584,6 +586,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv, { struct kvaser_cmd *cmd; struct kvaser_usb *dev = priv->dev; + size_t cmd_len; int err; cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); @@ -591,14 +594,14 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv, return -ENOMEM; cmd->header.cmd_no = cmd_no; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, 
dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd_async(priv, cmd, - kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd_async(priv, cmd, cmd_len); if (err) kfree(cmd); @@ -742,6 +745,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev, { struct kvaser_usb_dev_card_data *card_data = &dev->card_data; struct kvaser_cmd *cmd; + size_t cmd_len; u32 value = 0; u32 mask = 0; u16 cap_cmd_res; @@ -753,13 +757,14 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev, return -ENOMEM; cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -1578,6 +1583,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, struct kvaser_usb *dev = priv->dev; struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv; struct kvaser_cmd *cmd; + size_t cmd_len; int err; if (!hydra) @@ -1588,6 +1594,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, return -ENOMEM; cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid @@ -1597,7 +1604,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, reinit_completion(&priv->get_busparams_comp); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) return err; @@ -1624,6 +1631,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, struct kvaser_cmd *cmd; struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; + size_t cmd_len; int err; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); @@ -1631,6 +1639,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, return -ENOMEM; cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); memcpy(&cmd->set_busparams_req.busparams_nominal, busparams, sizeof(cmd->set_busparams_req.busparams_nominal)); @@ -1639,7 +1648,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); @@ -1652,6 +1661,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, struct kvaser_cmd *cmd; struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; + size_t cmd_len; int err; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); @@ -1659,6 +1669,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, return -ENOMEM; cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); memcpy(&cmd->set_busparams_req.busparams_data, busparams, sizeof(cmd->set_busparams_req.busparams_data)); @@ -1676,7 +1687,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, 
kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); @@ -1804,6 +1815,7 @@ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev) static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) { struct kvaser_cmd *cmd; + size_t cmd_len; int err; u32 flags; struct kvaser_usb_dev_card_data *card_data = &dev->card_data; @@ -1813,6 +1825,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) return -ENOMEM; cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); cmd->sw_detail_req.use_ext_cmd = 1; kvaser_usb_hydra_set_cmd_dest_he (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); @@ -1820,7 +1833,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -1938,6 +1951,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; + size_t cmd_len; int err; if ((priv->can.ctrlmode & @@ -1953,6 +1967,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) return -ENOMEM; cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid @@ -1962,7 +1977,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) else cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL; - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); return err; diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 423f944cc34c36c9afbc57148282754905ea7d12..9b20c2ee6d62a3782e50cfa1b9d6ef04a60b28de 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -1996,8 +1996,7 @@ static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq) irq_create_mapping(kirq->domain, n); ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn, - IRQF_ONESHOT | IRQF_TRIGGER_FALLING, - kirq->name, kirq); + IRQF_ONESHOT, kirq->name, kirq); if (ret) goto out; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index e74c6b4061728e5bbb5d14e5d2c4f903c7c4752e..908fa89444c90681c84c371ad800a4711433e29c 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2919,9 +2919,6 @@ static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; - if ((priv->id == ID_MT7531) && mt753x_is_mac_port(port)) - config->mac_capabilities |= MAC_2500FD; - /* This driver does not make use of the speed, duplex, pause or the * advertisement in its mac_config, so it is safe to mark this driver * as non-legacy. 
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index ba4fff8690aa52861caafca72dd8b65e9ddb01b1..242b8b325504a464e1dbdb3399b3f13d2a4b434b 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -689,13 +689,12 @@ static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, /* Port 4 supports automedia if the serdes is associated with it. */ if (port == 4) { - mv88e6xxx_reg_lock(chip); err = mv88e6352_g2_scratch_port_has_serdes(chip, port); if (err < 0) dev_err(chip->dev, "p%d: failed to read scratch\n", port); if (err <= 0) - goto unlock; + return; cmode = mv88e6352_get_port4_serdes_cmode(chip); if (cmode < 0) @@ -703,8 +702,6 @@ static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, port); else mv88e6xxx_translate_cmode(cmode, supported); -unlock: - mv88e6xxx_reg_unlock(chip); } } @@ -831,7 +828,9 @@ static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port, { struct mv88e6xxx_chip *chip = ds->priv; + mv88e6xxx_reg_lock(chip); chip->info->ops->phylink_get_caps(chip, port, config); + mv88e6xxx_reg_unlock(chip); if (mv88e6xxx_phy_is_internal(ds, port)) { __set_bit(PHY_INTERFACE_MODE_INTERNAL, @@ -3307,7 +3306,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) struct phylink_config pl_config = {}; unsigned long caps; - mv88e6xxx_get_caps(ds, port, &pl_config); + chip->info->ops->phylink_get_caps(chip, port, &pl_config); caps = pl_config.mac_capabilities; diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 8671591cb750148adcd1e6284c72a9e00a4796ab..3a79ead5219ae27d1ff0ccf5a6f62f9d8ed39128 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1489,23 +1489,6 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, rx_ring->stats.xdp_drops++; } -static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first, - int rx_ring_last) -{ - while (rx_ring_first != rx_ring_last) { - struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; - - if (rx_swbd->page) { - dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, - rx_swbd->dir); - __free_page(rx_swbd->page); - rx_swbd->page = NULL; - } - enetc_bdr_idx_inc(rx_ring, &rx_ring_first); - } - rx_ring->stats.xdp_redirect_failures++; -} - static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, struct napi_struct *napi, int work_limit, struct bpf_prog *prog) @@ -1527,8 +1510,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, int orig_i, orig_cleaned_cnt; struct xdp_buff xdp_buff; struct sk_buff *skb; - int tmp_orig_i, err; u32 bd_status; + int err; rxbd = enetc_rxbd(rx_ring, i); bd_status = le32_to_cpu(rxbd->r.lstatus); @@ -1615,18 +1598,16 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, break; } - tmp_orig_i = orig_i; - - while (orig_i != i) { - enetc_flip_rx_buff(rx_ring, - &rx_ring->rx_swbd[orig_i]); - enetc_bdr_idx_inc(rx_ring, &orig_i); - } - err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); if (unlikely(err)) { - enetc_xdp_free(rx_ring, tmp_orig_i, i); + enetc_xdp_drop(rx_ring, orig_i, i); + rx_ring->stats.xdp_redirect_failures++; } else { + while (orig_i != i) { + enetc_flip_rx_buff(rx_ring, + &rx_ring->rx_swbd[orig_i]); + enetc_bdr_idx_inc(rx_ring, &orig_i); + } xdp_redirect_frm_cnt++; rx_ring->stats.xdp_redirect++; } diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 
5528b0af82ae984e83296ffe93e72cac917ef7fd..644f3c96373029af02ea9c8cec801c04e2329228 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1674,6 +1674,14 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) * bridging applications. */ skb = build_skb(page_address(page), PAGE_SIZE); + if (unlikely(!skb)) { + page_pool_recycle_direct(rxq->page_pool, page); + ndev->stats.rx_dropped++; + + netdev_err_once(ndev, "build_skb failed!\n"); + goto rx_processing_done; + } + skb_reserve(skb, data_start); skb_put(skb, pkt_len - sub_len); skb_mark_for_recycle(skb); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 97290fc0fddd89b1423a1f5d5e360ed0739eeaf3..3c0c35ecea10c716dc7146adee8fcbc358530f1a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -7525,7 +7525,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - u32 reg, msgbuf[3]; + u32 reg, msgbuf[3] = {}; u8 *addr = (u8 *)(&msgbuf[1]); /* process all the same items cleared in a function level reset */ diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 1e7e7071f64d25b5c886eedd7af8118ad4a8dfd6..df3e26c0cf01ac79000128139508c412c6100492 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -94,6 +94,8 @@ struct igc_ring { u8 queue_index; /* logical index of the ring*/ u8 reg_idx; /* physical index of the ring */ bool launchtime_enable; /* true if LaunchTime is enabled */ + ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */ + ktime_t last_ff_cycle; /* Last cycle with an active first flag */ u32 start_time; u32 end_time; @@ -182,6 +184,7 @@ struct igc_adapter { ktime_t base_time; ktime_t cycle_time; + bool qbv_enable; /* OS defined structs */ struct pci_dev *pdev; diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index f7311aeb293b9af7680c9751e63a2f2f31a53807..a7b22639cfcd9b6b5c94865b5029d0bf66fcd6af 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -321,6 +321,8 @@ #define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ #define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define IGC_ADVTXD_TSN_CNTX_FIRST 0x00000080 + /* Transmit Control */ #define IGC_TCTL_EN 0x00000002 /* enable Tx */ #define IGC_TCTL_PSP 0x00000008 /* pad short packets */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 1586e1e435c6f2f6e97adfa04736265a693f61f7..44b1740dc09834888b2946793057e250c76313bc 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1000,25 +1000,118 @@ static int igc_write_mc_addr_list(struct net_device *netdev) return netdev_mc_count(netdev); } -static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime) +static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, + bool *first_flag, bool *insert_empty) { + struct igc_adapter *adapter = netdev_priv(ring->netdev); ktime_t cycle_time = adapter->cycle_time; ktime_t base_time = adapter->base_time; + ktime_t now = ktime_get_clocktai(); + ktime_t baset_est, end_of_cycle; u32 launchtime; + s64 n; - /* FIXME: when using ETF together with taprio, we may have 
a - * case where 'delta' is larger than the cycle_time, this may - * cause problems if we don't read the current value of - * IGC_BASET, as the value writen into the launchtime - * descriptor field may be misinterpreted. + n = div64_s64(ktime_sub_ns(now, base_time), cycle_time); + + baset_est = ktime_add_ns(base_time, cycle_time * (n)); + end_of_cycle = ktime_add_ns(baset_est, cycle_time); + + if (ktime_compare(txtime, end_of_cycle) >= 0) { + if (baset_est != ring->last_ff_cycle) { + *first_flag = true; + ring->last_ff_cycle = baset_est; + + if (ktime_compare(txtime, ring->last_tx_cycle) > 0) + *insert_empty = true; + } + } + + /* Introduce a window at the end of the cycle in which packets + * may not honor their launchtime. The 5 usec window was chosen + * to account for the software updating the tail pointer while + * packets are being DMA'ed into the packet buffer. */ - div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime); + if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) + netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", + txtime); + + ring->last_tx_cycle = end_of_cycle; + + launchtime = ktime_sub_ns(txtime, baset_est); + if (launchtime > 0) + div_s64_rem(launchtime, cycle_time, &launchtime); + else + launchtime = 0; return cpu_to_le32(launchtime); } +static int igc_init_empty_frame(struct igc_ring *ring, + struct igc_tx_buffer *buffer, + struct sk_buff *skb) +{ + unsigned int size; + dma_addr_t dma; + + size = skb_headlen(skb); + + dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + return -ENOMEM; + } + + buffer->skb = skb; + buffer->protocol = 0; + buffer->bytecount = skb->len; + buffer->gso_segs = 1; + buffer->time_stamp = jiffies; + dma_unmap_len_set(buffer, len, skb->len); + dma_unmap_addr_set(buffer, dma, dma); + + return 0; +} + +static int igc_init_tx_empty_descriptor(struct igc_ring *ring, + struct sk_buff *skb, + struct igc_tx_buffer *first) +{ + union igc_adv_tx_desc *desc; + u32 cmd_type, olinfo_status; + int err; + + if (!igc_desc_unused(ring)) + return -EBUSY; + + err = igc_init_empty_frame(ring, first, skb); + if (err) + return err; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + first->bytecount; + olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + + desc = IGC_TX_DESC(ring, ring->next_to_use); + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); + + netdev_tx_sent_queue(txring_txq(ring), skb->len); + + first->next_to_watch = desc; + + ring->next_to_use++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; + + return 0; +} + +#define IGC_EMPTY_FRAME_SIZE 60 + static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, - struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag, u32 vlan_macip_lens, u32 type_tucmd, u32 mss_l4len_idx) { @@ -1037,26 +1130,17 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) mss_l4len_idx |= tx_ring->reg_idx << 4; + if (first_flag) + mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST; + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); - - /* We assume there is always a valid Tx time available. 
Invalid times - * should have been handled by the upper layers. - */ - if (tx_ring->launchtime_enable) { - struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); - ktime_t txtime = first->skb->tstamp; - - skb_txtime_consumed(first->skb); - context_desc->launch_time = igc_tx_launchtime(adapter, - txtime); - } else { - context_desc->launch_time = 0; - } + context_desc->launch_time = launch_time; } -static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) +static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; @@ -1096,7 +1180,8 @@ no_csum: vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; - igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, 0); } static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) @@ -1320,6 +1405,7 @@ dma_error: static int igc_tso(struct igc_ring *tx_ring, struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag, u8 *hdr_len) { u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; @@ -1406,8 +1492,8 @@ static int igc_tso(struct igc_ring *tx_ring, vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; - igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, - type_tucmd, mss_l4len_idx); + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, mss_l4len_idx); return 1; } @@ -1415,11 +1501,14 @@ static int igc_tso(struct igc_ring *tx_ring, static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, struct igc_ring *tx_ring) { + bool first_flag = false, insert_empty = false; u16 count = TXD_USE_COUNT(skb_headlen(skb)); __be16 protocol = vlan_get_protocol(skb); struct igc_tx_buffer *first; + __le32 launch_time = 0; u32 tx_flags = 0; unsigned short f; + ktime_t txtime; u8 hdr_len = 0; int tso = 0; @@ -1433,11 +1522,40 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, count += TXD_USE_COUNT(skb_frag_size( &skb_shinfo(skb)->frags[f])); - if (igc_maybe_stop_tx(tx_ring, count + 3)) { + if (igc_maybe_stop_tx(tx_ring, count + 5)) { /* this is a hard error */ return NETDEV_TX_BUSY; } + if (!tx_ring->launchtime_enable) + goto done; + + txtime = skb->tstamp; + skb->tstamp = ktime_set(0, 0); + launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty); + + if (insert_empty) { + struct igc_tx_buffer *empty_info; + struct sk_buff *empty; + void *data; + + empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); + if (!empty) + goto done; + + data = skb_put(empty, IGC_EMPTY_FRAME_SIZE); + memset(data, 0, IGC_EMPTY_FRAME_SIZE); + + igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); + + if (igc_init_tx_empty_descriptor(tx_ring, + empty, + empty_info) < 0) + dev_kfree_skb_any(empty); + } + +done: /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; first->type = IGC_TX_BUFFER_TYPE_SKB; @@ -1474,11 +1592,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, first->tx_flags = tx_flags; first->protocol = protocol; - tso = igc_tso(tx_ring, first, &hdr_len); + tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len); if (tso < 0) goto out_drop; else if (!tso) - igc_tx_csum(tx_ring, first); 
+ igc_tx_csum(tx_ring, first, launch_time, first_flag); igc_tx_map(tx_ring, first, hdr_len); @@ -5925,10 +6043,16 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, bool queue_configured[IGC_MAX_TX_QUEUES] = { }; u32 start_time = 0, end_time = 0; size_t n; + int i; + + adapter->qbv_enable = qopt->enable; if (!qopt->enable) return igc_tsn_clear_schedule(adapter); + if (qopt->base_time < 0) + return -ERANGE; + if (adapter->base_time) return -EALREADY; @@ -5940,10 +6064,24 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, for (n = 0; n < qopt->num_entries; n++) { struct tc_taprio_sched_entry *e = &qopt->entries[n]; - int i; end_time += e->interval; + /* If any of the conditions below are true, we need to manually + * control the end time of the cycle. + * 1. Qbv users can specify a cycle time that is not equal + * to the total GCL intervals. Hence, recalculation is + * necessary here to exclude the time interval that + * exceeds the cycle time. + * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, + * once the end of the list is reached, it will switch + * to the END_OF_CYCLE state and leave the gates in the + * same state until the next cycle is started. + */ + if (end_time > adapter->cycle_time || + n + 1 == qopt->num_entries) + end_time = adapter->cycle_time; + for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i]; @@ -5964,6 +6102,18 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, start_time += e->interval; } + /* Check whether a queue gets configured. + * If not, set the start and end time to be end time. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + if (!queue_configured[i]) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = end_time; + ring->end_time = end_time; + } + } + return 0; } diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c index f975ed807da1e8276ae99f94ee95cc3858c4aa68..bb10d7b65232a68dfb729ff28ca084a3e5e43373 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.c +++ b/drivers/net/ethernet/intel/igc/igc_tsn.c @@ -36,7 +36,7 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) { unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED; - if (adapter->base_time) + if (adapter->qbv_enable) new_flags |= IGC_FLAG_TSN_QBV_ENABLED; if (is_any_launchtime(adapter)) @@ -140,15 +140,8 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) wr32(IGC_STQT(i), ring->start_time); wr32(IGC_ENDQT(i), ring->end_time); - if (adapter->base_time) { - /* If we have a base_time we are in "taprio" - * mode and we need to be strict about the - * cycles: only transmit a packet if it can be - * completed during that cycle. 
- */ - txqctl |= IGC_TXQCTL_STRICT_CYCLE | - IGC_TXQCTL_STRICT_END; - } + txqctl |= IGC_TXQCTL_STRICT_CYCLE | + IGC_TXQCTL_STRICT_END; if (ring->launchtime_enable) txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c index 895bfff550d23d2e1092e631258bf497b3e8755f..e0b206247f2eb5bec4c914c77c2fbdb005d1c5ae 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c @@ -83,6 +83,8 @@ static void vcap_debugfs_show_rule_keyfield(struct vcap_control *vctrl, hex = true; break; case VCAP_FIELD_U128: + value = data->u128.value; + mask = data->u128.mask; if (key == VCAP_KF_L3_IP6_SIP || key == VCAP_KF_L3_IP6_DIP) { u8 nvalue[16], nmask[16]; diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 690b69cae4e3a8a25d16f39487bb0dcedac2beab..e708c2d049839f27b578dab501dd47c5b9061bc4 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -671,8 +671,7 @@ free_q: return err; } -int mana_gd_destroy_dma_region(struct gdma_context *gc, - gdma_obj_handle_t dma_region_handle) +int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle) { struct gdma_destroy_dma_region_req req = {}; struct gdma_general_resp resp = {}; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 8073d7a90a2645a0956d3d42a669825ee6ba6e89..c5687d94ea88506e408fe7a89067f2ddbf005c35 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3912,6 +3912,7 @@ abort_with_slices: myri10ge_free_slices(mgp); abort_with_firmware: + kfree(mgp->msix_vectors); myri10ge_dummy_rdma(mgp, 0); abort_with_ioremap: diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 2314cf55e82132457ec56f69872e2f768050f2fc..09053373288fe7899fa4213fae490f185c250bbd 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2509,7 +2509,7 @@ static int nfp_net_read_caps(struct nfp_net *nn) { /* Get some of the read-only fields from the BAR */ nn->cap = nn_readl(nn, NFP_NET_CFG_CAP); - nn->cap_w1 = nn_readq(nn, NFP_NET_CFG_CAP_WORD1); + nn->cap_w1 = nn_readl(nn, NFP_NET_CFG_CAP_WORD1); nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU); /* ABI 4.x and ctrl vNIC always use chained metadata, in other cases diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index eecd52ed1ed21869c073ed74db8e192866e026cf..f4d434c379e7c4ea2f9cacc82ef19b9eb969d825 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -1159,10 +1159,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Failed to register net device\n"); - goto err_out_mdio_unregister; + goto err_out_phy_disconnect; } return 0; +err_out_phy_disconnect: + phy_disconnect(dev->phydev); err_out_mdio_unregister: mdiobus_unregister(lp->mii_bus); err_out_mdio: @@ -1186,6 +1188,7 @@ static void r6040_remove_one(struct pci_dev *pdev) struct r6040_private *lp = netdev_priv(dev); unregister_netdev(dev); + phy_disconnect(dev->phydev); mdiobus_unregister(lp->mii_bus); mdiobus_free(lp->mii_bus); 
netif_napi_del(&lp->napi); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 33f723a9f471b7d7cc29b159a4338a9c9e007933..b4e0fc7f65bdfed65e376351f0bf7b30cb3734c6 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2903,12 +2903,12 @@ static int ravb_remove(struct platform_device *pdev) priv->desc_bat_dma); /* Set reset mode */ ravb_write(ndev, CCC_OPC_RESET, CCC); - pm_runtime_put_sync(&pdev->dev); unregister_netdev(ndev); if (info->nc_queues) netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); ravb_mdio_release(priv); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); reset_control_assert(priv->rstc); free_netdev(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ec64b65dee34f6f44f850ff4e4c2588ec53ee450..c6951c976f5ddb2b67dc760bbb433c871c3efab4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7099,6 +7099,7 @@ int stmmac_dvr_probe(struct device *device, priv->wq = create_singlethread_workqueue("stmmac_wq"); if (!priv->wq) { dev_err(priv->device, "failed to create workqueue\n"); + ret = -ENOMEM; goto error_wq_init; } diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 9decb0c7961bbe5f244f9164faf55ba439ae4120..ecbde83b52436c88d676bf8b97e7cf1348b679a7 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2878,7 +2878,6 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP static int am65_cpsw_nuss_suspend(struct device *dev) { struct am65_cpsw_common *common = dev_get_drvdata(dev); @@ -2964,10 +2963,9 @@ static int am65_cpsw_nuss_resume(struct device *dev) return 0; } -#endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops am65_cpsw_nuss_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(am65_cpsw_nuss_suspend, am65_cpsw_nuss_resume) + SYSTEM_SLEEP_PM_OPS(am65_cpsw_nuss_suspend, am65_cpsw_nuss_resume) }; static struct platform_driver am65_cpsw_nuss_driver = { diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c index cbabca167a0785d136f59c11935f4c934cf8ad51..dde272586e80b5b2b7b0068a782af9cf6d96d8e0 100644 --- a/drivers/net/ipvlan/ipvtap.c +++ b/drivers/net/ipvlan/ipvtap.c @@ -30,9 +30,9 @@ static dev_t ipvtap_major; static struct cdev ipvtap_cdev; -static const void *ipvtap_net_namespace(struct device *d) +static const void *ipvtap_net_namespace(const struct device *d) { - struct net_device *dev = to_net_dev(d->parent); + const struct net_device *dev = to_net_dev(d->parent); return dev_net(dev); } diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 937f5b1f04ffcb31bf0d34fe46349000b0ba7fd6..bf8ac7a3ded751df2673236a5fad7fb9ee24af28 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -2593,7 +2593,7 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) const struct macsec_ops *ops; struct macsec_context ctx; struct macsec_dev *macsec; - int ret; + int ret = 0; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; @@ -2606,28 +2606,36 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) macsec_genl_offload_policy, NULL)) return -EINVAL; + rtnl_lock(); + dev = get_dev_from_nl(genl_info_net(info), attrs); - if (IS_ERR(dev)) - return PTR_ERR(dev); + if (IS_ERR(dev)) { + ret = 
PTR_ERR(dev); + goto out; + } macsec = macsec_priv(dev); - if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) - return -EINVAL; + if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) { + ret = -EINVAL; + goto out; + } offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); if (macsec->offload == offload) - return 0; + goto out; /* Check if the offloading mode is supported by the underlying layers */ if (offload != MACSEC_OFFLOAD_OFF && - !macsec_check_offload(offload, macsec)) - return -EOPNOTSUPP; + !macsec_check_offload(offload, macsec)) { + ret = -EOPNOTSUPP; + goto out; + } /* Check if the net device is busy. */ - if (netif_running(dev)) - return -EBUSY; - - rtnl_lock(); + if (netif_running(dev)) { + ret = -EBUSY; + goto out; + } prev_offload = macsec->offload; macsec->offload = offload; @@ -2662,7 +2670,7 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) rollback: macsec->offload = prev_offload; - +out: rtnl_unlock(); return ret; } diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index d1f435788e9021277f4e904db5f240128d22c38b..031344239f27143506998685f2c01162d87b10f4 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -35,9 +35,9 @@ struct macvtap_dev { */ static dev_t macvtap_major; -static const void *macvtap_net_namespace(struct device *d) +static const void *macvtap_net_namespace(const struct device *d) { - struct net_device *dev = to_net_dev(d->parent); + const struct net_device *dev = to_net_dev(d->parent); return dev_net(dev); } diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c index 7cd103fd34ef76f3561592a82bbe1c5972b316d8..9f9eaf896047c1a8ed5a5f3b8f18d4a69014fe55 100644 --- a/drivers/net/mctp/mctp-serial.c +++ b/drivers/net/mctp/mctp-serial.c @@ -35,6 +35,8 @@ #define BYTE_FRAME 0x7e #define BYTE_ESC 0x7d +#define FCS_INIT 0xffff + static DEFINE_IDA(mctp_serial_ida); enum mctp_serial_state { @@ -123,7 +125,7 @@ static void mctp_serial_tx_work(struct work_struct *work) buf[2] = dev->txlen; if (!dev->txpos) - dev->txfcs = crc_ccitt(0, buf + 1, 2); + dev->txfcs = crc_ccitt(FCS_INIT, buf + 1, 2); txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos); if (txlen <= 0) { @@ -303,7 +305,7 @@ static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c) case 1: if (c == MCTP_SERIAL_VERSION) { dev->rxpos++; - dev->rxfcs = crc_ccitt_byte(0, c); + dev->rxfcs = crc_ccitt_byte(FCS_INIT, c); } else { dev->rxstate = STATE_ERR; } diff --git a/drivers/net/wireguard/timers.c b/drivers/net/wireguard/timers.c index b5706b6718b19ec01877ef028b03051c46a99125..53d8a57a0dfa391b386e3ef2ca4ca3b0cf13d7f7 100644 --- a/drivers/net/wireguard/timers.c +++ b/drivers/net/wireguard/timers.c @@ -46,7 +46,7 @@ static void wg_expired_retransmit_handshake(struct timer_list *timer) if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) { pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n", peer->device->dev->name, peer->internal_id, - &peer->endpoint.addr, MAX_TIMER_HANDSHAKES + 2); + &peer->endpoint.addr, (int)MAX_TIMER_HANDSHAKES + 2); del_timer(&peer->timer_send_keepalive); /* We drop all packets without a keypair and don't try again, @@ -64,7 +64,7 @@ static void wg_expired_retransmit_handshake(struct timer_list *timer) ++peer->timer_handshake_attempts; pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d seconds, retrying (try %d)\n", peer->device->dev->name, peer->internal_id, - &peer->endpoint.addr, REKEY_TIMEOUT, + &peer->endpoint.addr, 
(int)REKEY_TIMEOUT, peer->timer_handshake_attempts + 1); /* We clear the endpoint address src address, in case this is @@ -94,7 +94,7 @@ static void wg_expired_new_handshake(struct timer_list *timer) pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n", peer->device->dev->name, peer->internal_id, - &peer->endpoint.addr, KEEPALIVE_TIMEOUT + REKEY_TIMEOUT); + &peer->endpoint.addr, (int)(KEEPALIVE_TIMEOUT + REKEY_TIMEOUT)); /* We clear the endpoint address src address, in case this is the cause * of trouble. */ @@ -126,7 +126,7 @@ static void wg_queued_expired_zero_key_material(struct work_struct *work) pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n", peer->device->dev->name, peer->internal_id, - &peer->endpoint.addr, REJECT_AFTER_TIME * 3); + &peer->endpoint.addr, (int)REJECT_AFTER_TIME * 3); wg_noise_handshake_clear(&peer->handshake); wg_noise_keypairs_clear(&peer->keypairs); wg_peer_put(peer); diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index d9f6367b9993dd4b1880cce6625b8f6e2d985af1..f0cac19005527afc3b0049eee2d0fd45cddf629b 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -1295,6 +1295,8 @@ static int pn533_poll_dep_complete(struct pn533 *dev, void *arg, if (IS_ERR(resp)) return PTR_ERR(resp); + memset(&nfc_target, 0, sizeof(struct nfc_target)); + rsp = (struct pn533_cmd_jump_dep_response *)resp->data; rc = rsp->status & PN533_CMD_RET_MASK; @@ -1926,6 +1928,8 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, dev_dbg(dev->dev, "Creating new target\n"); + memset(&nfc_target, 0, sizeof(struct nfc_target)); + nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK; nfc_target.nfcid1_len = 10; memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index e26b085a007aea529c997c75e18e50bc4821e796..f72fc3bd07c3489221835fc5722e669f05c8c39d 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4599,9 +4599,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_remove_namespaces); -static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env) +static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct nvme_ctrl *ctrl = + const struct nvme_ctrl *ctrl = container_of(dev, struct nvme_ctrl, ctrl_device); struct nvmf_ctrl_options *opts = ctrl->opts; int ret; diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index b69b89166b6b96f131aa3c55b56ae7572475973e..8cedc1ef496c70937ce2d7fd264ff4e09e28d11d 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1537,6 +1537,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid) queue->sock->sk->sk_rcvtimeo = 10 * HZ; queue->sock->sk->sk_allocation = GFP_ATOMIC; + queue->sock->sk->sk_use_task_frag = false; nvme_tcp_set_queue_io_cpu(queue); queue->request = NULL; queue->data_remaining = 0; diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index ec8a49c040031205dbe69a2b4f7ae9a6a639f63d..755f551426b55247737f4ed81530829e15d89ee2 100644 --- a/drivers/nvmem/Kconfig +++ b/drivers/nvmem/Kconfig @@ -164,7 +164,7 @@ config NVMEM_MICROCHIP_OTPC depends on ARCH_AT91 || COMPILE_TEST help This driver enable the OTP controller available on Microchip SAMA7G5 - SoCs. It controlls the access to the OTP memory connected to it. + SoCs. 
It controls the access to the OTP memory connected to it. config NVMEM_MTK_EFUSE tristate "Mediatek SoCs EFUSE support" diff --git a/drivers/nvmem/stm32-romem.c b/drivers/nvmem/stm32-romem.c index 354be526897ff027d259747f38d3b8c355d64012..d1d03c2ad0816bf78704f7fe30784f59661bdf62 100644 --- a/drivers/nvmem/stm32-romem.c +++ b/drivers/nvmem/stm32-romem.c @@ -19,19 +19,18 @@ #define STM32_SMC_WRITE_SHADOW 0x03 #define STM32_SMC_READ_OTP 0x04 -/* shadow registers offest */ +/* shadow registers offset */ #define STM32MP15_BSEC_DATA0 0x200 -/* 32 (x 32-bits) lower shadow registers */ -#define STM32MP15_BSEC_NUM_LOWER 32 - struct stm32_romem_cfg { int size; + u8 lower; }; struct stm32_romem_priv { void __iomem *base; struct nvmem_config cfg; + u8 lower; }; static int stm32_romem_read(void *context, unsigned int offset, void *buf, @@ -85,7 +84,7 @@ static int stm32_bsec_read(void *context, unsigned int offset, void *buf, for (i = roffset; (i < roffset + rbytes); i += 4) { u32 otp = i >> 2; - if (otp < STM32MP15_BSEC_NUM_LOWER) { + if (otp < priv->lower) { /* read lower data from shadow registers */ val = readl_relaxed( priv->base + STM32MP15_BSEC_DATA0 + i); @@ -133,6 +132,9 @@ static int stm32_bsec_write(void *context, unsigned int offset, void *buf, } } + if (offset + bytes >= priv->lower * 4) + dev_warn(dev, "Update of upper OTPs with ECC protection (word programming, only once)\n"); + return 0; } @@ -158,6 +160,9 @@ static int stm32_romem_probe(struct platform_device *pdev) priv->cfg.dev = dev; priv->cfg.priv = priv; priv->cfg.owner = THIS_MODULE; + priv->cfg.type = NVMEM_TYPE_OTP; + + priv->lower = 0; cfg = (const struct stm32_romem_cfg *) of_match_device(dev->driver->of_match_table, dev)->data; @@ -167,6 +172,7 @@ static int stm32_romem_probe(struct platform_device *pdev) priv->cfg.reg_read = stm32_romem_read; } else { priv->cfg.size = cfg->size; + priv->lower = cfg->lower; priv->cfg.reg_read = stm32_bsec_read; priv->cfg.reg_write = stm32_bsec_write; } @@ -174,8 +180,17 @@ static int stm32_romem_probe(struct platform_device *pdev) return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &priv->cfg)); } +/* + * STM32MP15 BSEC OTP regions: 4096 OTP bits (with 3072 effective bits) + * => 96 x 32-bits data words + * - Lower: 1K bits, 2:1 redundancy, incremental bit programming + * => 32 (x 32-bits) lower shadow registers = words 0 to 31 + * - Upper: 2K bits, ECC protection, word programming only + * => 64 (x 32-bits) = words 32 to 95 + */ static const struct stm32_romem_cfg stm32mp15_bsec_cfg = { - .size = 384, /* 96 x 32-bits data words */ + .size = 384, + .lower = 32, }; static const struct of_device_id stm32_romem_of_match[] = { diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c index 4fdbdccebda16e8ab4069c5883d0b72349d6cc5c..29b1d87a3c51554b53229160c4dc2db806747c89 100644 --- a/drivers/nvmem/u-boot-env.c +++ b/drivers/nvmem/u-boot-env.c @@ -16,6 +16,7 @@ enum u_boot_env_format { U_BOOT_FORMAT_SINGLE, U_BOOT_FORMAT_REDUNDANT, + U_BOOT_FORMAT_BROADCOM, }; struct u_boot_env { @@ -40,6 +41,13 @@ struct u_boot_env_image_redundant { uint8_t data[]; } __packed; +struct u_boot_env_image_broadcom { + __le32 magic; + __le32 len; + __le32 crc32; + uint8_t data[0]; +} __packed; + static int u_boot_env_read(void *context, unsigned int offset, void *val, size_t bytes) { @@ -138,6 +146,11 @@ static int u_boot_env_parse(struct u_boot_env *priv) crc32_data_offset = offsetof(struct u_boot_env_image_redundant, data); data_offset = offsetof(struct u_boot_env_image_redundant, data); break; + case 
U_BOOT_FORMAT_BROADCOM: + crc32_offset = offsetof(struct u_boot_env_image_broadcom, crc32); + crc32_data_offset = offsetof(struct u_boot_env_image_broadcom, data); + data_offset = offsetof(struct u_boot_env_image_broadcom, data); + break; } crc32 = le32_to_cpu(*(__le32 *)(buf + crc32_offset)); crc32_data_len = priv->mtd->size - crc32_data_offset; @@ -202,6 +215,7 @@ static const struct of_device_id u_boot_env_of_match_table[] = { { .compatible = "u-boot,env", .data = (void *)U_BOOT_FORMAT_SINGLE, }, { .compatible = "u-boot,env-redundant-bool", .data = (void *)U_BOOT_FORMAT_REDUNDANT, }, { .compatible = "u-boot,env-redundant-count", .data = (void *)U_BOOT_FORMAT_REDUNDANT, }, + { .compatible = "brcm,env", .data = (void *)U_BOOT_FORMAT_BROADCOM, }, {}, }; diff --git a/drivers/of/device.c b/drivers/of/device.c index 8cefe5a7d04e2e8143975562395ced6c0ec9fd62..c674a13c305587ee5584618e69f2fe3d15d13b22 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -332,10 +332,10 @@ EXPORT_SYMBOL_GPL(of_device_modalias); /** * of_device_uevent - Display OF related uevent information - * @dev: Device to apply DMA configuration - * @env: Kernel object's userspace event reference + * @dev: Device to display the uevent information for + * @env: Kernel object's userspace event reference to fill up */ -void of_device_uevent(struct device *dev, struct kobj_uevent_env *env) +void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env) { const char *compat, *type; struct alias_prop *app; diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 7b571a631639702817cd550ad599896fc3bfb2ae..b2272bccf85c95ac68c24eb530c743fe21d1223e 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -1173,26 +1173,6 @@ int __init early_init_dt_scan_chosen(char *cmdline) if (p != NULL && l > 0) strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE)); - /* - * CONFIG_CMDLINE is meant to be a default in case nothing else - * managed to set the command line, unless CONFIG_CMDLINE_FORCE - * is set in which case we override whatever was found earlier. - */ -#ifdef CONFIG_CMDLINE -#if defined(CONFIG_CMDLINE_EXTEND) - strlcat(cmdline, " ", COMMAND_LINE_SIZE); - strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#elif defined(CONFIG_CMDLINE_FORCE) - strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#else - /* No arguments from boot loader, use kernel's cmdl*/ - if (!((char *)cmdline)[0]) - strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#endif -#endif /* CONFIG_CMDLINE */ - - pr_debug("Command line is: %s\n", (char *)cmdline); - rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l); if (rng_seed && l > 0) { add_bootloader_randomness(rng_seed, l); @@ -1297,6 +1277,26 @@ void __init early_init_dt_scan_nodes(void) if (rc) pr_warn("No chosen node found, continuing without\n"); + /* + * CONFIG_CMDLINE is meant to be a default in case nothing else + * managed to set the command line, unless CONFIG_CMDLINE_FORCE + * is set in which case we override whatever was found earlier. 
+ */ +#ifdef CONFIG_CMDLINE +#if defined(CONFIG_CMDLINE_EXTEND) + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + strlcat(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); +#elif defined(CONFIG_CMDLINE_FORCE) + strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); +#else + /* No arguments from boot loader, use kernel's cmdl */ + if (!boot_command_line[0]) + strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); +#endif +#endif /* CONFIG_CMDLINE */ + + pr_debug("Command line is: %s\n", boot_command_line); + /* Setup memory, calling early_init_dt_add_memory_arch */ early_init_dt_scan_memory(); diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 2bac44f09554b146fdc5a20435f2fcd60a585ca7..e9bf5236ed892765f33a1ae4e8ff6c8b0ab53aca 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -730,6 +730,7 @@ struct irq_domain *of_msi_get_domain(struct device *dev, return NULL; } +EXPORT_SYMBOL_GPL(of_msi_get_domain); /** * of_msi_configure - Set the msi_domain field of a device diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index d4be9d2ee74d9254b7d9a57797d2460c646060ad..8bdc5e043831cdbab4117a999d6127ca7f356dcb 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -137,6 +137,9 @@ static int start_task(void) /* Create the work queue and queue the LED task */ led_wq = create_singlethread_workqueue("led_wq"); + if (!led_wq) + return -ENOMEM; + queue_delayed_work(led_wq, &led_task, 0); return 0; diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 708c7529647fd12dcd923063a32a8f03417a4c88..3c230ca3de5844f9e768178617506c3e9442d3c5 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -350,6 +350,11 @@ bool pcie_cap_has_lnkctl(const struct pci_dev *dev) type == PCI_EXP_TYPE_PCIE_BRIDGE; } +bool pcie_cap_has_lnkctl2(const struct pci_dev *dev) +{ + return pcie_cap_has_lnkctl(dev) && pcie_cap_version(dev) > 1; +} + static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) { return pcie_downstream_port(dev) && @@ -390,10 +395,11 @@ static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) return pcie_cap_has_rtctl(dev); case PCI_EXP_DEVCAP2: case PCI_EXP_DEVCTL2: + return pcie_cap_version(dev) > 1; case PCI_EXP_LNKCAP2: case PCI_EXP_LNKCTL2: case PCI_EXP_LNKSTA2: - return pcie_cap_version(dev) > 1; + return pcie_cap_has_lnkctl2(dev); default: return false; } diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index c967ad6e262678790583bb3137ff273ec31a16c3..f9cc2e10b676a96a0a61701751fc6b12791405fc 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -382,6 +382,9 @@ int pci_enable_pasid(struct pci_dev *pdev, int features) if (!pasid) return -EINVAL; + if (!pci_acs_path_enabled(pdev, NULL, PCI_ACS_RR | PCI_ACS_UF)) + return -EINVAL; + pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported); supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV; diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 3cef835b375fd6a6adaacd1384987a5f905e9c46..83ae838ceb5f0a3ea93750ea51568eb83bd91764 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -197,6 +197,10 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res, max = avail.end; + /* Don't bother if available space isn't large enough */ + if (size > max - min_used + 1) + continue; + /* Ok, try it out.. 
*/ ret = allocate_resource(r, res, size, min_used, max, align, alignf, alignf_data); diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index a82f845cc4b521c9b5410ab226d51a585ab1a261..cc83a8925ce03ba3bfee3a9937a72d7961a3f2a9 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index f3c462130627640b3a1fa444964d2d95331dd2b5..a0d2713f0e8896ef3936b0774707604ab1e1203b 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -222,6 +222,15 @@ config PCIE_ARTPEC6_EP Enables support for the PCIe controller in the ARTPEC-6 SoC to work in endpoint mode. This uses the DesignWare core. +config PCIE_BT1 + tristate "Baikal-T1 PCIe controller" + depends on MIPS_BAIKAL_T1 || COMPILE_TEST + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + help + Enables support for the PCIe controller in the Baikal-T1 SoC to work + in host mode. It's based on the Synopsys DWC PCIe v4.60a IP-core. + config PCIE_ROCKCHIP_DW_HOST bool "Rockchip DesignWare PCIe controller" select PCIE_DW diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index 8ba7b67f5e50a9ab268728cf8563c5143e4f1f55..bf5c311875a1ee388ed6ce1dbc7c66029724a207 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_PCIE_DW) += pcie-designware.o obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o +obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 2616585ca5f8aa0982775c06eeebd1062c918976..1dde5c579edc82a0d38925b45c078a48ba390619 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -952,12 +952,6 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp) } } - ret = imx6_pcie_deassert_core_reset(imx6_pcie); - if (ret < 0) { - dev_err(dev, "pcie deassert core reset failed: %d\n", ret); - goto err_phy_off; - } - if (imx6_pcie->phy) { ret = phy_power_on(imx6_pcie->phy); if (ret) { @@ -965,6 +959,13 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp) goto err_phy_off; } } + + ret = imx6_pcie_deassert_core_reset(imx6_pcie); + if (ret < 0) { + dev_err(dev, "pcie deassert core reset failed: %d\n", ret); + goto err_phy_off; + } + imx6_setup_phy_mpll(imx6_pcie); return 0; diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c index 879b8692f96a515ac8b85d149ab409dc4506b73c..ed5fb492fe084fbb4f20dc004a275834d57087c5 100644 --- a/drivers/pci/controller/dwc/pci-layerscape.c +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index dc469ef8e99b3b2e3cd625bd83a4ad4ecba5cb4a..5c999e15c357f0a598d631d094afb3818dd708d0 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -21,7 +21,6 @@ #include #include #include -#include #include "pcie-designware.h" 
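Editor's note: the PCIE_BT1 Kconfig and Makefile entries above wire up the new pcie-bt1.c driver added below. One detail worth unpacking before reading it: the Baikal-T1 MMIO space only tolerates dword-sized accesses, so the driver's bt1_pcie_read_mmio()/bt1_pcie_write_mmio() emulate 1- and 2-byte accesses with an aligned 32-bit read plus shift/mask, and a read-modify-write cycle for sub-dword stores. A user-space model of that arithmetic (a backing variable stands in for readl()/writel(); the names and the -1 error code are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* One dword of "device memory"; readl()/writel() stand-ins for the model. */
static uint32_t csr = 0x11223344;

static uint32_t readl_m(uintptr_t a)              { (void)a; return csr; }
static void     writel_m(uint32_t v, uintptr_t a) { (void)a; csr = v; }

static int read_sub(uintptr_t addr, int size, uint32_t *val)
{
	uintptr_t ofs = addr & 0x3;

	if (addr & (size - 1))          /* same IS_ALIGNED() rule as the driver */
		return -1;

	*val = readl_m(addr - ofs) >> ofs * 8;
	if (size < 4)
		*val &= (1u << size * 8) - 1;
	return 0;
}

static int write_sub(uintptr_t addr, int size, uint32_t val)
{
	uintptr_t ofs = addr & 0x3;
	uint32_t tmp, mask;

	if (addr & (size - 1))
		return -1;

	if (size == 4) {
		writel_m(val, addr);
		return 0;
	}
	mask = (1u << size * 8) - 1;    /* read-modify-write for sub-dwords */
	tmp = readl_m(addr - ofs) & ~(mask << ofs * 8);
	writel_m(tmp | (val & mask) << ofs * 8, addr - ofs);
	return 0;
}

int main(void)
{
	uint32_t v;

	read_sub(2, 1, &v);                     /* byte at offset 2 */
	printf("0x%02x\n", (unsigned)v);        /* -> 0x22 */
	write_sub(2, 2, 0xbeef);                /* halfword at offset 2 */
	printf("0x%08x\n", (unsigned)csr);      /* -> 0xbeef3344 */
	return 0;
}

The same windowed-register idea shows up in bt1_pcie_write_dbi2() below, which flips the BT1_CCU_PCIE_DBI2_MODE syscon bit so that the one DBI window temporarily addresses the shadow (DBI2) registers.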
diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c new file mode 100644 index 0000000000000000000000000000000000000000..3346770e6654794ffd5886f978a9d8e6d22c0c05 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-bt1.c @@ -0,0 +1,643 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Vadim Vlasov + * Serge Semin + * + * Baikal-T1 PCIe controller driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" + +/* Baikal-T1 System CCU control registers */ +#define BT1_CCU_PCIE_CLKC 0x140 +#define BT1_CCU_PCIE_REQ_PCS_CLK BIT(16) +#define BT1_CCU_PCIE_REQ_MAC_CLK BIT(17) +#define BT1_CCU_PCIE_REQ_PIPE_CLK BIT(18) + +#define BT1_CCU_PCIE_RSTC 0x144 +#define BT1_CCU_PCIE_REQ_LINK_RST BIT(13) +#define BT1_CCU_PCIE_REQ_SMLH_RST BIT(14) +#define BT1_CCU_PCIE_REQ_PHY_RST BIT(16) +#define BT1_CCU_PCIE_REQ_CORE_RST BIT(24) +#define BT1_CCU_PCIE_REQ_STICKY_RST BIT(26) +#define BT1_CCU_PCIE_REQ_NSTICKY_RST BIT(27) + +#define BT1_CCU_PCIE_PMSC 0x148 +#define BT1_CCU_PCIE_LTSSM_STATE_MASK GENMASK(5, 0) +#define BT1_CCU_PCIE_LTSSM_DET_QUIET 0x00 +#define BT1_CCU_PCIE_LTSSM_DET_ACT 0x01 +#define BT1_CCU_PCIE_LTSSM_POLL_ACT 0x02 +#define BT1_CCU_PCIE_LTSSM_POLL_COMP 0x03 +#define BT1_CCU_PCIE_LTSSM_POLL_CONF 0x04 +#define BT1_CCU_PCIE_LTSSM_PRE_DET_QUIET 0x05 +#define BT1_CCU_PCIE_LTSSM_DET_WAIT 0x06 +#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_START 0x07 +#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_ACEPT 0x08 +#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_WAIT 0x09 +#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_ACEPT 0x0a +#define BT1_CCU_PCIE_LTSSM_CFG_COMPLETE 0x0b +#define BT1_CCU_PCIE_LTSSM_CFG_IDLE 0x0c +#define BT1_CCU_PCIE_LTSSM_RCVR_LOCK 0x0d +#define BT1_CCU_PCIE_LTSSM_RCVR_SPEED 0x0e +#define BT1_CCU_PCIE_LTSSM_RCVR_RCVRCFG 0x0f +#define BT1_CCU_PCIE_LTSSM_RCVR_IDLE 0x10 +#define BT1_CCU_PCIE_LTSSM_L0 0x11 +#define BT1_CCU_PCIE_LTSSM_L0S 0x12 +#define BT1_CCU_PCIE_LTSSM_L123_SEND_IDLE 0x13 +#define BT1_CCU_PCIE_LTSSM_L1_IDLE 0x14 +#define BT1_CCU_PCIE_LTSSM_L2_IDLE 0x15 +#define BT1_CCU_PCIE_LTSSM_L2_WAKE 0x16 +#define BT1_CCU_PCIE_LTSSM_DIS_ENTRY 0x17 +#define BT1_CCU_PCIE_LTSSM_DIS_IDLE 0x18 +#define BT1_CCU_PCIE_LTSSM_DISABLE 0x19 +#define BT1_CCU_PCIE_LTSSM_LPBK_ENTRY 0x1a +#define BT1_CCU_PCIE_LTSSM_LPBK_ACTIVE 0x1b +#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT 0x1c +#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT_TOUT 0x1d +#define BT1_CCU_PCIE_LTSSM_HOT_RST_ENTRY 0x1e +#define BT1_CCU_PCIE_LTSSM_HOT_RST 0x1f +#define BT1_CCU_PCIE_LTSSM_RCVR_EQ0 0x20 +#define BT1_CCU_PCIE_LTSSM_RCVR_EQ1 0x21 +#define BT1_CCU_PCIE_LTSSM_RCVR_EQ2 0x22 +#define BT1_CCU_PCIE_LTSSM_RCVR_EQ3 0x23 +#define BT1_CCU_PCIE_SMLH_LINKUP BIT(6) +#define BT1_CCU_PCIE_RDLH_LINKUP BIT(7) +#define BT1_CCU_PCIE_PM_LINKSTATE_L0S BIT(8) +#define BT1_CCU_PCIE_PM_LINKSTATE_L1 BIT(9) +#define BT1_CCU_PCIE_PM_LINKSTATE_L2 BIT(10) +#define BT1_CCU_PCIE_L1_PENDING BIT(12) +#define BT1_CCU_PCIE_REQ_EXIT_L1 BIT(14) +#define BT1_CCU_PCIE_LTSSM_RCVR_EQ BIT(15) +#define BT1_CCU_PCIE_PM_DSTAT_MASK GENMASK(18, 16) +#define BT1_CCU_PCIE_PM_PME_EN BIT(20) +#define BT1_CCU_PCIE_PM_PME_STATUS BIT(21) +#define BT1_CCU_PCIE_AUX_PM_EN BIT(22) +#define BT1_CCU_PCIE_AUX_PWR_DET BIT(23) +#define BT1_CCU_PCIE_WAKE_DET BIT(24) +#define BT1_CCU_PCIE_TURNOFF_REQ BIT(30) +#define BT1_CCU_PCIE_TURNOFF_ACK BIT(31) + +#define BT1_CCU_PCIE_GENC 0x14c +#define BT1_CCU_PCIE_LTSSM_EN BIT(1) 
+#define BT1_CCU_PCIE_DBI2_MODE BIT(2) +#define BT1_CCU_PCIE_MGMT_EN BIT(3) +#define BT1_CCU_PCIE_RXLANE_FLIP_EN BIT(16) +#define BT1_CCU_PCIE_TXLANE_FLIP_EN BIT(17) +#define BT1_CCU_PCIE_SLV_XFER_PEND BIT(24) +#define BT1_CCU_PCIE_RCV_XFER_PEND BIT(25) +#define BT1_CCU_PCIE_DBI_XFER_PEND BIT(26) +#define BT1_CCU_PCIE_DMA_XFER_PEND BIT(27) + +#define BT1_CCU_PCIE_LTSSM_LINKUP(_pmsc) \ +({ \ + int __state = FIELD_GET(BT1_CCU_PCIE_LTSSM_STATE_MASK, _pmsc); \ + __state >= BT1_CCU_PCIE_LTSSM_L0 && __state <= BT1_CCU_PCIE_LTSSM_L2_WAKE; \ +}) + +/* Baikal-T1 PCIe specific control registers */ +#define BT1_PCIE_AXI2MGM_LANENUM 0xd04 +#define BT1_PCIE_AXI2MGM_LANESEL_MASK GENMASK(3, 0) + +#define BT1_PCIE_AXI2MGM_ADDRCTL 0xd08 +#define BT1_PCIE_AXI2MGM_PHYREG_ADDR_MASK GENMASK(20, 0) +#define BT1_PCIE_AXI2MGM_READ_FLAG BIT(29) +#define BT1_PCIE_AXI2MGM_DONE BIT(30) +#define BT1_PCIE_AXI2MGM_BUSY BIT(31) + +#define BT1_PCIE_AXI2MGM_WRITEDATA 0xd0c +#define BT1_PCIE_AXI2MGM_WDATA GENMASK(15, 0) + +#define BT1_PCIE_AXI2MGM_READDATA 0xd10 +#define BT1_PCIE_AXI2MGM_RDATA GENMASK(15, 0) + +/* Generic Baikal-T1 PCIe interface resources */ +#define BT1_PCIE_NUM_APP_CLKS ARRAY_SIZE(bt1_pcie_app_clks) +#define BT1_PCIE_NUM_CORE_CLKS ARRAY_SIZE(bt1_pcie_core_clks) +#define BT1_PCIE_NUM_APP_RSTS ARRAY_SIZE(bt1_pcie_app_rsts) +#define BT1_PCIE_NUM_CORE_RSTS ARRAY_SIZE(bt1_pcie_core_rsts) + +/* PCIe bus setup delays and timeouts */ +#define BT1_PCIE_RST_DELAY_MS 100 +#define BT1_PCIE_RUN_DELAY_US 100 +#define BT1_PCIE_REQ_DELAY_US 1 +#define BT1_PCIE_REQ_TIMEOUT_US 1000 +#define BT1_PCIE_LNK_DELAY_US 1000 +#define BT1_PCIE_LNK_TIMEOUT_US 1000000 + +static const enum dw_pcie_app_clk bt1_pcie_app_clks[] = { + DW_PCIE_DBI_CLK, DW_PCIE_MSTR_CLK, DW_PCIE_SLV_CLK, +}; + +static const enum dw_pcie_core_clk bt1_pcie_core_clks[] = { + DW_PCIE_REF_CLK, +}; + +static const enum dw_pcie_app_rst bt1_pcie_app_rsts[] = { + DW_PCIE_MSTR_RST, DW_PCIE_SLV_RST, +}; + +static const enum dw_pcie_core_rst bt1_pcie_core_rsts[] = { + DW_PCIE_NON_STICKY_RST, DW_PCIE_STICKY_RST, DW_PCIE_CORE_RST, + DW_PCIE_PIPE_RST, DW_PCIE_PHY_RST, DW_PCIE_HOT_RST, DW_PCIE_PWR_RST, +}; + +struct bt1_pcie { + struct dw_pcie dw; + struct platform_device *pdev; + struct regmap *sys_regs; +}; +#define to_bt1_pcie(_dw) container_of(_dw, struct bt1_pcie, dw) + +/* + * Baikal-T1 MMIO space must be read/written by the dword-aligned + * instructions. Note the methods are optimized to have the dword operations + * performed with minimum overhead as the most frequently used ones. 
+ */ +static int bt1_pcie_read_mmio(void __iomem *addr, int size, u32 *val) +{ + unsigned int ofs = (uintptr_t)addr & 0x3; + + if (!IS_ALIGNED((uintptr_t)addr, size)) + return -EINVAL; + + *val = readl(addr - ofs) >> ofs * BITS_PER_BYTE; + if (size == 4) { + return 0; + } else if (size == 2) { + *val &= 0xffff; + return 0; + } else if (size == 1) { + *val &= 0xff; + return 0; + } + + return -EINVAL; +} + +static int bt1_pcie_write_mmio(void __iomem *addr, int size, u32 val) +{ + unsigned int ofs = (uintptr_t)addr & 0x3; + u32 tmp, mask; + + if (!IS_ALIGNED((uintptr_t)addr, size)) + return -EINVAL; + + if (size == 4) { + writel(val, addr); + return 0; + } else if (size == 2 || size == 1) { + mask = GENMASK(size * BITS_PER_BYTE - 1, 0); + tmp = readl(addr - ofs) & ~(mask << ofs * BITS_PER_BYTE); + tmp |= (val & mask) << ofs * BITS_PER_BYTE; + writel(tmp, addr - ofs); + return 0; + } + + return -EINVAL; +} + +static u32 bt1_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size) +{ + int ret; + u32 val; + + ret = bt1_pcie_read_mmio(base + reg, size, &val); + if (ret) { + dev_err(pci->dev, "Read DBI address failed\n"); + return ~0U; + } + + return val; +} + +static void bt1_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size, u32 val) +{ + int ret; + + ret = bt1_pcie_write_mmio(base + reg, size, val); + if (ret) + dev_err(pci->dev, "Write DBI address failed\n"); +} + +static void bt1_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size, u32 val) +{ + struct bt1_pcie *btpci = to_bt1_pcie(pci); + int ret; + + regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, + BT1_CCU_PCIE_DBI2_MODE, BT1_CCU_PCIE_DBI2_MODE); + + ret = bt1_pcie_write_mmio(base + reg, size, val); + if (ret) + dev_err(pci->dev, "Write DBI2 address failed\n"); + + regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, + BT1_CCU_PCIE_DBI2_MODE, 0); +} + +static int bt1_pcie_start_link(struct dw_pcie *pci) +{ + struct bt1_pcie *btpci = to_bt1_pcie(pci); + u32 val; + int ret; + + /* + * Enable LTSSM and make sure it was able to establish both PHY and + * data links. This procedure shall work fine to reach 2.5 GT/s speed. + */ + regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, + BT1_CCU_PCIE_LTSSM_EN, BT1_CCU_PCIE_LTSSM_EN); + + ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val, + (val & BT1_CCU_PCIE_SMLH_LINKUP), + BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US); + if (ret) { + dev_err(pci->dev, "LTSSM failed to set PHY link up\n"); + return ret; + } + + ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val, + (val & BT1_CCU_PCIE_RDLH_LINKUP), + BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US); + if (ret) { + dev_err(pci->dev, "LTSSM failed to set data link up\n"); + return ret; + } + + /* + * Activate direct speed change after the link is established in an + * attempt to reach a higher bus performance (up to Gen.3 - 8.0 GT/s). + * This is required at least to get 8.0 GT/s speed. 
+ */ + val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); + val |= PORT_LOGIC_SPEED_CHANGE; + dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); + + ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val, + BT1_CCU_PCIE_LTSSM_LINKUP(val), + BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US); + if (ret) + dev_err(pci->dev, "LTSSM failed to get into L0 state\n"); + + return ret; +} + +static void bt1_pcie_stop_link(struct dw_pcie *pci) +{ + struct bt1_pcie *btpci = to_bt1_pcie(pci); + + regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, + BT1_CCU_PCIE_LTSSM_EN, 0); +} + +static const struct dw_pcie_ops bt1_pcie_ops = { + .read_dbi = bt1_pcie_read_dbi, + .write_dbi = bt1_pcie_write_dbi, + .write_dbi2 = bt1_pcie_write_dbi2, + .start_link = bt1_pcie_start_link, + .stop_link = bt1_pcie_stop_link, +}; + +static struct pci_ops bt1_pci_ops = { + .map_bus = dw_pcie_own_conf_map_bus, + .read = pci_generic_config_read32, + .write = pci_generic_config_write32, +}; + +static int bt1_pcie_get_resources(struct bt1_pcie *btpci) +{ + struct device *dev = btpci->dw.dev; + int i; + + /* DBI access is supposed to be performed by the dword-aligned IOs */ + btpci->dw.pp.bridge->ops = &bt1_pci_ops; + + /* These CSRs are in MMIO so we won't check the regmap-methods status */ + btpci->sys_regs = + syscon_regmap_lookup_by_phandle(dev->of_node, "baikal,bt1-syscon"); + if (IS_ERR(btpci->sys_regs)) + return dev_err_probe(dev, PTR_ERR(btpci->sys_regs), + "Failed to get syscon\n"); + + /* Make sure all the required resources have been specified */ + for (i = 0; i < BT1_PCIE_NUM_APP_CLKS; i++) { + if (!btpci->dw.app_clks[bt1_pcie_app_clks[i]].clk) { + dev_err(dev, "App clocks set is incomplete\n"); + return -ENOENT; + } + } + + for (i = 0; i < BT1_PCIE_NUM_CORE_CLKS; i++) { + if (!btpci->dw.core_clks[bt1_pcie_core_clks[i]].clk) { + dev_err(dev, "Core clocks set is incomplete\n"); + return -ENOENT; + } + } + + for (i = 0; i < BT1_PCIE_NUM_APP_RSTS; i++) { + if (!btpci->dw.app_rsts[bt1_pcie_app_rsts[i]].rstc) { + dev_err(dev, "App resets set is incomplete\n"); + return -ENOENT; + } + } + + for (i = 0; i < BT1_PCIE_NUM_CORE_RSTS; i++) { + if (!btpci->dw.core_rsts[bt1_pcie_core_rsts[i]].rstc) { + dev_err(dev, "Core resets set is incomplete\n"); + return -ENOENT; + } + } + + return 0; +} + +static void bt1_pcie_full_stop_bus(struct bt1_pcie *btpci, bool init) +{ + struct device *dev = btpci->dw.dev; + struct dw_pcie *pci = &btpci->dw; + int ret; + + /* Disable LTSSM for sure */ + regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, + BT1_CCU_PCIE_LTSSM_EN, 0); + + /* + * Application reset controls are trigger-based so assert the core + * resets only. + */ + ret = reset_control_bulk_assert(DW_PCIE_NUM_CORE_RSTS, pci->core_rsts); + if (ret) + dev_err(dev, "Failed to assert core resets\n"); + + /* + * Clocks are disabled by default at least in accordance with the clk + * enable counter value on init stage. + */ + if (!init) { + clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks); + + clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks); + } + + /* The peripheral devices are unavailable anyway so reset them too */ + gpiod_set_value_cansleep(pci->pe_rst, 1); + + /* Make sure all the resets are settled */ + msleep(BT1_PCIE_RST_DELAY_MS); +} + +/* + * Implements the cold reset procedure in accordance with the reference manual + * and available PM signals. 
+ */ +static int bt1_pcie_cold_start_bus(struct bt1_pcie *btpci) +{ + struct device *dev = btpci->dw.dev; + struct dw_pcie *pci = &btpci->dw; + u32 val; + int ret; + + /* First get out of the Power/Hot reset state */ + ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PWR_RST].rstc); + if (ret) { + dev_err(dev, "Failed to deassert power reset\n"); + return ret; + } + + ret = reset_control_deassert(pci->core_rsts[DW_PCIE_HOT_RST].rstc); + if (ret) { + dev_err(dev, "Failed to deassert hot reset\n"); + goto err_assert_pwr_rst; + } + + /* Wait for the PM-core to stop requesting the PHY reset */ + ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val, + !(val & BT1_CCU_PCIE_REQ_PHY_RST), + BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US); + if (ret) { + dev_err(dev, "Timed out waiting for PM to stop PHY resetting\n"); + goto err_assert_hot_rst; + } + + ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PHY_RST].rstc); + if (ret) { + dev_err(dev, "Failed to deassert PHY reset\n"); + goto err_assert_hot_rst; + } + + /* Clocks can be now enabled, but the ref one is crucial at this stage */ + ret = clk_bulk_prepare_enable(DW_PCIE_NUM_APP_CLKS, pci->app_clks); + if (ret) { + dev_err(dev, "Failed to enable app clocks\n"); + goto err_assert_phy_rst; + } + + ret = clk_bulk_prepare_enable(DW_PCIE_NUM_CORE_CLKS, pci->core_clks); + if (ret) { + dev_err(dev, "Failed to enable ref clocks\n"); + goto err_disable_app_clk; + } + + /* Wait for the PM to stop requesting the controller core reset */ + ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val, + !(val & BT1_CCU_PCIE_REQ_CORE_RST), + BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US); + if (ret) { + dev_err(dev, "Timed out waiting for PM to stop core resetting\n"); + goto err_disable_core_clk; + } + + /* PCS-PIPE interface and controller core can be now activated */ + ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc); + if (ret) { + dev_err(dev, "Failed to deassert PIPE reset\n"); + goto err_disable_core_clk; + } + + ret = reset_control_deassert(pci->core_rsts[DW_PCIE_CORE_RST].rstc); + if (ret) { + dev_err(dev, "Failed to deassert core reset\n"); + goto err_assert_pipe_rst; + } + + /* It's recommended to reset the core and application logic together */ + ret = reset_control_bulk_reset(DW_PCIE_NUM_APP_RSTS, pci->app_rsts); + if (ret) { + dev_err(dev, "Failed to reset app domain\n"); + goto err_assert_core_rst; + } + + /* Sticky/Non-sticky CSR flags can be now unreset too */ + ret = reset_control_deassert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc); + if (ret) { + dev_err(dev, "Failed to deassert sticky reset\n"); + goto err_assert_core_rst; + } + + ret = reset_control_deassert(pci->core_rsts[DW_PCIE_NON_STICKY_RST].rstc); + if (ret) { + dev_err(dev, "Failed to deassert non-sticky reset\n"); + goto err_assert_sticky_rst; + } + + /* Activate the PCIe bus peripheral devices */ + gpiod_set_value_cansleep(pci->pe_rst, 0); + + /* Make sure the state is settled (LTSSM is still disabled though) */ + usleep_range(BT1_PCIE_RUN_DELAY_US, BT1_PCIE_RUN_DELAY_US + 100); + + return 0; + +err_assert_sticky_rst: + reset_control_assert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc); + +err_assert_core_rst: + reset_control_assert(pci->core_rsts[DW_PCIE_CORE_RST].rstc); + +err_assert_pipe_rst: + reset_control_assert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc); + +err_disable_core_clk: + clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks); + +err_disable_app_clk: + clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, 
+
+err_assert_phy_rst:
+	reset_control_assert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
+
+err_assert_hot_rst:
+	reset_control_assert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
+
+err_assert_pwr_rst:
+	reset_control_assert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
+
+	return ret;
+}
+
+static int bt1_pcie_host_init(struct dw_pcie_rp *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct bt1_pcie *btpci = to_bt1_pcie(pci);
+	int ret;
+
+	ret = bt1_pcie_get_resources(btpci);
+	if (ret)
+		return ret;
+
+	bt1_pcie_full_stop_bus(btpci, true);
+
+	return bt1_pcie_cold_start_bus(btpci);
+}
+
+static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct bt1_pcie *btpci = to_bt1_pcie(pci);
+
+	bt1_pcie_full_stop_bus(btpci, false);
+}
+
+static const struct dw_pcie_host_ops bt1_pcie_host_ops = {
+	.host_init = bt1_pcie_host_init,
+	.host_deinit = bt1_pcie_host_deinit,
+};
+
+static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev)
+{
+	struct bt1_pcie *btpci;
+
+	btpci = devm_kzalloc(&pdev->dev, sizeof(*btpci), GFP_KERNEL);
+	if (!btpci)
+		return ERR_PTR(-ENOMEM);
+
+	btpci->pdev = pdev;
+
+	platform_set_drvdata(pdev, btpci);
+
+	return btpci;
+}
+
+static int bt1_pcie_add_port(struct bt1_pcie *btpci)
+{
+	struct device *dev = &btpci->pdev->dev;
+	int ret;
+
+	btpci->dw.version = DW_PCIE_VER_460A;
+	btpci->dw.dev = dev;
+	btpci->dw.ops = &bt1_pcie_ops;
+
+	btpci->dw.pp.num_vectors = MAX_MSI_IRQS;
+	btpci->dw.pp.ops = &bt1_pcie_host_ops;
+
+	dw_pcie_cap_set(&btpci->dw, REQ_RES);
+
+	ret = dw_pcie_host_init(&btpci->dw.pp);
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to initialize DWC PCIe host\n");
+
+	return 0;
+}
+
+static void bt1_pcie_del_port(struct bt1_pcie *btpci)
+{
+	dw_pcie_host_deinit(&btpci->dw.pp);
+}
+
+static int bt1_pcie_probe(struct platform_device *pdev)
+{
+	struct bt1_pcie *btpci;
+
+	btpci = bt1_pcie_create_data(pdev);
+	if (IS_ERR(btpci))
+		return PTR_ERR(btpci);
+
+	return bt1_pcie_add_port(btpci);
+}
+
+static int bt1_pcie_remove(struct platform_device *pdev)
+{
+	struct bt1_pcie *btpci = platform_get_drvdata(pdev);
+
+	bt1_pcie_del_port(btpci);
+
+	return 0;
+}
+
+static const struct of_device_id bt1_pcie_of_match[] = {
+	{ .compatible = "baikal,bt1-pcie" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, bt1_pcie_of_match);
+
+static struct platform_driver bt1_pcie_driver = {
+	.probe = bt1_pcie_probe,
+	.remove = bt1_pcie_remove,
+	.driver = {
+		.name = "bt1-pcie",
+		.of_match_table = bt1_pcie_of_match,
+	},
+};
+module_platform_driver(bt1_pcie_driver);
+
+MODULE_AUTHOR("Serge Semin ");
+MODULE_DESCRIPTION("Baikal-T1 PCIe driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 83ddb190292e4f5569254d735851f51ad4ef986d..d06654895ebae11886ea8c1ddaff00bfd407b266 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -13,8 +13,6 @@
 #include
 #include
 
-#include "../../pci.h"
-
 void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
 {
 	struct pci_epc *epc = ep->epc;
@@ -171,8 +169,8 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
 		return -EINVAL;
 	}
 
-	ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, type,
-				       cpu_addr, bar);
+	ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
+					  cpu_addr, bar);
 	if (ret < 0) {
 		dev_err(pci->dev, "Failed to program IB window\n");
 		return ret;
@@ -643,7 +641,7 @@ static unsigned int
dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap) int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - unsigned int offset; + unsigned int offset, ptm_cap_base; unsigned int nbars; u8 hdr_type; u32 reg; @@ -659,6 +657,7 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) } offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); + ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM); dw_pcie_dbi_ro_wr_en(pci); @@ -671,6 +670,22 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); } + /* + * PTM responder capability can be disabled only after disabling + * PTM root capability. + */ + if (ptm_cap_base) { + dw_pcie_dbi_ro_wr_en(pci); + reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP); + reg &= ~PCI_PTM_CAP_ROOT; + dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg); + + reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP); + reg &= ~(PCI_PTM_CAP_RES | PCI_PTM_GRANULARITY_MASK); + dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg); + dw_pcie_dbi_ro_wr_dis(pci); + } + dw_pcie_setup(pci); dw_pcie_dbi_ro_wr_dis(pci); @@ -694,23 +709,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) INIT_LIST_HEAD(&ep->func_list); - if (!pci->dbi_base) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - } - - if (!pci->dbi_base2) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); - if (!res) { - pci->dbi_base2 = pci->dbi_base + SZ_4K; - } else { - pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pci->dbi_base2)) - return PTR_ERR(pci->dbi_base2); - } - } + ret = dw_pcie_get_resources(pci); + if (ret) + return ret; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); if (!res) @@ -739,9 +740,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) return -ENOMEM; ep->outbound_addr = addr; - if (pci->link_gen < 1) - pci->link_gen = of_pci_get_max_link_speed(np); - epc = devm_pci_epc_create(dev, &epc_ops); if (IS_ERR(epc)) { dev_err(dev, "Failed to create epc device\n"); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 39f3b37d4033ceb17471cbb6a5e76833deacc8db..3ab6ae3712c428a53ff0540c0998fbfc55d5cd06 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -16,7 +16,6 @@ #include #include -#include "../../pci.h" #include "pcie-designware.h" static struct pci_ops dw_pcie_ops; @@ -395,6 +394,10 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) raw_spin_lock_init(&pp->lock); + ret = dw_pcie_get_resources(pci); + if (ret) + return ret; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); if (res) { pp->cfg0_size = resource_size(res); @@ -408,13 +411,6 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) return -ENODEV; } - if (!pci->dbi_base) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - } - bridge = devm_pci_alloc_host_bridge(dev, 0); if (!bridge) return -ENOMEM; @@ -429,9 +425,6 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) pp->io_base = pci_pio_to_address(win->res->start); } - if (pci->link_gen < 1) - pci->link_gen = of_pci_get_max_link_speed(np); - /* Set default bus ops */ 
bridge->ops = &dw_pcie_ops; bridge->child_ops = &dw_child_pcie_ops; @@ -643,12 +636,15 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) } /* - * Ensure all outbound windows are disabled before proceeding with - * the MEM/IO ranges setups. + * Ensure all out/inbound windows are disabled before proceeding with + * the MEM/IO (dma-)ranges setups. */ for (i = 0; i < pci->num_ob_windows; i++) dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i); + for (i = 0; i < pci->num_ib_windows; i++) + dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i); + i = 0; resource_list_for_each_entry(entry, &pp->bridge->windows) { if (resource_type(entry->res) != IORESOURCE_MEM) @@ -685,9 +681,32 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) } if (pci->num_ob_windows <= i) - dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n", + dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n", pci->num_ob_windows); + i = 0; + resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) { + if (resource_type(entry->res) != IORESOURCE_MEM) + continue; + + if (pci->num_ib_windows <= i) + break; + + ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM, + entry->res->start, + entry->res->start - entry->offset, + resource_size(entry->res)); + if (ret) { + dev_err(pci->dev, "Failed to set DMA range %pr\n", + entry->res); + return ret; + } + } + + if (pci->num_ib_windows <= i) + dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n", + pci->num_ib_windows); + return 0; } diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index c6725c519a479976f34c4dbb2973eec974fab66b..6d5d619ab2e9412e0c4cd4376433ed67daba7927 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -10,7 +10,10 @@ #include #include +#include #include +#include +#include #include #include #include @@ -19,6 +22,148 @@ #include "../../pci.h" #include "pcie-designware.h" +static const char * const dw_pcie_app_clks[DW_PCIE_NUM_APP_CLKS] = { + [DW_PCIE_DBI_CLK] = "dbi", + [DW_PCIE_MSTR_CLK] = "mstr", + [DW_PCIE_SLV_CLK] = "slv", +}; + +static const char * const dw_pcie_core_clks[DW_PCIE_NUM_CORE_CLKS] = { + [DW_PCIE_PIPE_CLK] = "pipe", + [DW_PCIE_CORE_CLK] = "core", + [DW_PCIE_AUX_CLK] = "aux", + [DW_PCIE_REF_CLK] = "ref", +}; + +static const char * const dw_pcie_app_rsts[DW_PCIE_NUM_APP_RSTS] = { + [DW_PCIE_DBI_RST] = "dbi", + [DW_PCIE_MSTR_RST] = "mstr", + [DW_PCIE_SLV_RST] = "slv", +}; + +static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = { + [DW_PCIE_NON_STICKY_RST] = "non-sticky", + [DW_PCIE_STICKY_RST] = "sticky", + [DW_PCIE_CORE_RST] = "core", + [DW_PCIE_PIPE_RST] = "pipe", + [DW_PCIE_PHY_RST] = "phy", + [DW_PCIE_HOT_RST] = "hot", + [DW_PCIE_PWR_RST] = "pwr", +}; + +static int dw_pcie_get_clocks(struct dw_pcie *pci) +{ + int i, ret; + + for (i = 0; i < DW_PCIE_NUM_APP_CLKS; i++) + pci->app_clks[i].id = dw_pcie_app_clks[i]; + + for (i = 0; i < DW_PCIE_NUM_CORE_CLKS; i++) + pci->core_clks[i].id = dw_pcie_core_clks[i]; + + ret = devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_APP_CLKS, + pci->app_clks); + if (ret) + return ret; + + return devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_CORE_CLKS, + pci->core_clks); +} + +static int dw_pcie_get_resets(struct dw_pcie *pci) +{ + int i, ret; + + for (i = 0; i < DW_PCIE_NUM_APP_RSTS; i++) + pci->app_rsts[i].id = dw_pcie_app_rsts[i]; + + for (i = 0; i < DW_PCIE_NUM_CORE_RSTS; i++) + pci->core_rsts[i].id = dw_pcie_core_rsts[i]; + + ret = 
devm_reset_control_bulk_get_optional_shared(pci->dev, + DW_PCIE_NUM_APP_RSTS, + pci->app_rsts); + if (ret) + return ret; + + ret = devm_reset_control_bulk_get_optional_exclusive(pci->dev, + DW_PCIE_NUM_CORE_RSTS, + pci->core_rsts); + if (ret) + return ret; + + pci->pe_rst = devm_gpiod_get_optional(pci->dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(pci->pe_rst)) + return PTR_ERR(pci->pe_rst); + + return 0; +} + +int dw_pcie_get_resources(struct dw_pcie *pci) +{ + struct platform_device *pdev = to_platform_device(pci->dev); + struct device_node *np = dev_of_node(pci->dev); + struct resource *res; + int ret; + + if (!pci->dbi_base) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + } + + /* DBI2 is mainly useful for the endpoint controller */ + if (!pci->dbi_base2) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); + if (res) { + pci->dbi_base2 = devm_pci_remap_cfg_resource(pci->dev, res); + if (IS_ERR(pci->dbi_base2)) + return PTR_ERR(pci->dbi_base2); + } else { + pci->dbi_base2 = pci->dbi_base + SZ_4K; + } + } + + /* For non-unrolled iATU/eDMA platforms this range will be ignored */ + if (!pci->atu_base) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); + if (res) { + pci->atu_size = resource_size(res); + pci->atu_base = devm_ioremap_resource(pci->dev, res); + if (IS_ERR(pci->atu_base)) + return PTR_ERR(pci->atu_base); + } else { + pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; + } + } + + /* Set a default value suitable for at most 8 in and 8 out windows */ + if (!pci->atu_size) + pci->atu_size = SZ_4K; + + /* LLDD is supposed to manually switch the clocks and resets state */ + if (dw_pcie_cap_is(pci, REQ_RES)) { + ret = dw_pcie_get_clocks(pci); + if (ret) + return ret; + + ret = dw_pcie_get_resets(pci); + if (ret) + return ret; + } + + if (pci->link_gen < 1) + pci->link_gen = of_pci_get_max_link_speed(np); + + of_property_read_u32(np, "num-lanes", &pci->num_lanes); + + if (of_property_read_bool(np, "snps,enable-cdm-check")) + dw_pcie_cap_set(pci, CDM_CHECK); + + return 0; +} + void dw_pcie_version_detect(struct dw_pcie *pci) { u32 ver; @@ -211,7 +356,7 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val) static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir, u32 index) { - if (pci->iatu_unroll_enabled) + if (dw_pcie_cap_is(pci, IATU_UNROLL)) return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index); dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index); @@ -393,8 +538,60 @@ static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val); } -int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, - int type, u64 cpu_addr, u8 bar) +int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, + u64 cpu_addr, u64 pci_addr, u64 size) +{ + u64 limit_addr = pci_addr + size - 1; + u32 retries, val; + + if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) || + !IS_ALIGNED(cpu_addr, pci->region_align) || + !IS_ALIGNED(pci_addr, pci->region_align) || !size) { + return -EINVAL; + } + + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_BASE, + lower_32_bits(pci_addr)); + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_BASE, + upper_32_bits(pci_addr)); + + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LIMIT, + lower_32_bits(limit_addr)); + if 
(dw_pcie_ver_is_ge(pci, 460A)) + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_LIMIT, + upper_32_bits(limit_addr)); + + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET, + lower_32_bits(cpu_addr)); + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET, + upper_32_bits(cpu_addr)); + + val = type; + if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) && + dw_pcie_ver_is_ge(pci, 460A)) + val |= PCIE_ATU_INCREASE_REGION_SIZE; + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, val); + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE); + + /* + * Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. + */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2); + if (val & PCIE_ATU_ENABLE) + return 0; + + mdelay(LINK_WAIT_IATU); + } + + dev_err(pci->dev, "Inbound iATU is not being enabled\n"); + + return -ETIMEDOUT; +} + +int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, + int type, u64 cpu_addr, u8 bar) { u32 retries, val; @@ -448,7 +645,7 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci) } if (retries >= LINK_WAIT_MAX_RETRIES) { - dev_err(pci->dev, "Phy link never came up\n"); + dev_info(pci->dev, "Phy link never came up\n"); return -ETIMEDOUT; } @@ -522,26 +719,21 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen) } -static bool dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) -{ - u32 val; - - val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); - if (val == 0xffffffff) - return true; - - return false; -} - -static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci) +void dw_pcie_iatu_detect(struct dw_pcie *pci) { int max_region, ob, ib; u32 val, min, dir; u64 max; - if (pci->iatu_unroll_enabled) { + val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); + if (val == 0xFFFFFFFF) { + dw_pcie_cap_set(pci, IATU_UNROLL); + max_region = min((int)pci->atu_size / 512, 256); } else { + pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE; + pci->atu_size = PCIE_ATU_VIEWPORT_SIZE; + dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF); max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1; } @@ -583,46 +775,15 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci) pci->num_ib_windows = ib; pci->region_align = 1 << fls(min); pci->region_limit = (max << 32) | (SZ_4G - 1); -} -void dw_pcie_iatu_detect(struct dw_pcie *pci) -{ - struct platform_device *pdev = to_platform_device(pci->dev); - - pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci); - if (pci->iatu_unroll_enabled) { - if (!pci->atu_base) { - struct resource *res = - platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); - if (res) { - pci->atu_size = resource_size(res); - pci->atu_base = devm_ioremap_resource(pci->dev, res); - } - if (!pci->atu_base || IS_ERR(pci->atu_base)) - pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; - } - - if (!pci->atu_size) - /* Pick a minimal default, enough for 8 in and 8 out windows */ - pci->atu_size = SZ_4K; - } else { - pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE; - pci->atu_size = PCIE_ATU_VIEWPORT_SIZE; - } - - dw_pcie_iatu_detect_regions(pci); - - dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ? - "enabled" : "disabled"); - - dev_info(pci->dev, "iATU regions: %u ob, %u ib, align %uK, limit %lluG\n", + dev_info(pci->dev, "iATU: unroll %s, %u ob, %u ib, align %uK, limit %lluG\n", + dw_pcie_cap_is(pci, IATU_UNROLL) ? 
"T" : "F", pci->num_ob_windows, pci->num_ib_windows, pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G); } void dw_pcie_setup(struct dw_pcie *pci) { - struct device_node *np = pci->dev->of_node; u32 val; if (pci->link_gen > 0) @@ -641,7 +802,7 @@ void dw_pcie_setup(struct dw_pcie *pci) if (pci->n_fts[1]) { val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); val &= ~PORT_LOGIC_N_FTS_MASK; - val |= pci->n_fts[pci->link_gen - 1]; + val |= pci->n_fts[1]; dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); } @@ -650,14 +811,13 @@ void dw_pcie_setup(struct dw_pcie *pci) val |= PORT_LINK_DLL_LINK_EN; dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); - if (of_property_read_bool(np, "snps,enable-cdm-check")) { + if (dw_pcie_cap_is(pci, CDM_CHECK)) { val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | PCIE_PL_CHK_REG_CHK_REG_START; dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); } - of_property_read_u32(np, "num-lanes", &pci->num_lanes); if (!pci->num_lanes) { dev_dbg(pci->dev, "Using h/w default number of lanes\n"); return; diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index a871ae7eb59eca4cf2ded8ab4368e4227f3c5755..393dfb931df6cc66258b47d02683ccec511efa22 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -12,10 +12,14 @@ #define _PCIE_DESIGNWARE_H #include +#include +#include #include +#include #include #include #include +#include #include #include @@ -43,6 +47,17 @@ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \ __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, >=)) +/* DWC PCIe controller capabilities */ +#define DW_PCIE_CAP_REQ_RES 0 +#define DW_PCIE_CAP_IATU_UNROLL 1 +#define DW_PCIE_CAP_CDM_CHECK 2 + +#define dw_pcie_cap_is(_pci, _cap) \ + test_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps) + +#define dw_pcie_cap_set(_pci, _cap) \ + set_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps) + /* Parameters for the waiting for link up routine */ #define LINK_WAIT_MAX_RETRIES 10 #define LINK_WAIT_USLEEP_MIN 90000 @@ -222,6 +237,39 @@ enum dw_pcie_device_mode { DW_PCIE_RC_TYPE, }; +enum dw_pcie_app_clk { + DW_PCIE_DBI_CLK, + DW_PCIE_MSTR_CLK, + DW_PCIE_SLV_CLK, + DW_PCIE_NUM_APP_CLKS +}; + +enum dw_pcie_core_clk { + DW_PCIE_PIPE_CLK, + DW_PCIE_CORE_CLK, + DW_PCIE_AUX_CLK, + DW_PCIE_REF_CLK, + DW_PCIE_NUM_CORE_CLKS +}; + +enum dw_pcie_app_rst { + DW_PCIE_DBI_RST, + DW_PCIE_MSTR_RST, + DW_PCIE_SLV_RST, + DW_PCIE_NUM_APP_RSTS +}; + +enum dw_pcie_core_rst { + DW_PCIE_NON_STICKY_RST, + DW_PCIE_STICKY_RST, + DW_PCIE_CORE_RST, + DW_PCIE_PIPE_RST, + DW_PCIE_PHY_RST, + DW_PCIE_HOT_RST, + DW_PCIE_PWR_RST, + DW_PCIE_NUM_CORE_RSTS +}; + struct dw_pcie_host_ops { int (*host_init)(struct dw_pcie_rp *pp); void (*host_deinit)(struct dw_pcie_rp *pp); @@ -317,10 +365,15 @@ struct dw_pcie { const struct dw_pcie_ops *ops; u32 version; u32 type; + unsigned long caps; int num_lanes; int link_gen; u8 n_fts[2]; - bool iatu_unroll_enabled: 1; + struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS]; + struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS]; + struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS]; + struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS]; + struct gpio_desc *pe_rst; }; #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) @@ -328,6 +381,8 @@ struct dw_pcie { #define to_dw_pcie_from_ep(endpoint) \ container_of((endpoint), struct dw_pcie, ep) +int dw_pcie_get_resources(struct 
dw_pcie *pci); + void dw_pcie_version_detect(struct dw_pcie *pci); u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap); @@ -346,8 +401,10 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, u64 cpu_addr, u64 pci_addr, u64 size); int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, int type, u64 cpu_addr, u64 pci_addr, u64 size); -int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, - int type, u64 cpu_addr, u8 bar); +int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, + u64 cpu_addr, u64 pci_addr, u64 size); +int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, + int type, u64 cpu_addr, u8 bar); void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index); void dw_pcie_setup(struct dw_pcie *pci); void dw_pcie_iatu_detect(struct dw_pcie *pci); diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index e2b80f10030d0dbd5312f4a7078c91710ac4f936..43c27812dd6d4d84cd4a72e1f91b0a795fb92700 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -10,11 +10,11 @@ #include #include +#include #include #include #include #include -#include #include #include #include @@ -60,7 +60,7 @@ struct histb_pcie { struct reset_control *sys_reset; struct reset_control *bus_reset; void __iomem *ctrl; - int reset_gpio; + struct gpio_desc *reset_gpio; struct regulator *vpcie; }; @@ -212,8 +212,8 @@ static void histb_pcie_host_disable(struct histb_pcie *hipcie) clk_disable_unprepare(hipcie->sys_clk); clk_disable_unprepare(hipcie->bus_clk); - if (gpio_is_valid(hipcie->reset_gpio)) - gpio_set_value_cansleep(hipcie->reset_gpio, 0); + if (hipcie->reset_gpio) + gpiod_set_value_cansleep(hipcie->reset_gpio, 1); if (hipcie->vpcie) regulator_disable(hipcie->vpcie); @@ -235,8 +235,8 @@ static int histb_pcie_host_enable(struct dw_pcie_rp *pp) } } - if (gpio_is_valid(hipcie->reset_gpio)) - gpio_set_value_cansleep(hipcie->reset_gpio, 1); + if (hipcie->reset_gpio) + gpiod_set_value_cansleep(hipcie->reset_gpio, 0); ret = clk_prepare_enable(hipcie->bus_clk); if (ret) { @@ -298,10 +298,7 @@ static int histb_pcie_probe(struct platform_device *pdev) struct histb_pcie *hipcie; struct dw_pcie *pci; struct dw_pcie_rp *pp; - struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; - enum of_gpio_flags of_flags; - unsigned long flag = GPIOF_DIR_OUT; int ret; hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL); @@ -336,17 +333,19 @@ static int histb_pcie_probe(struct platform_device *pdev) hipcie->vpcie = NULL; } - hipcie->reset_gpio = of_get_named_gpio_flags(np, - "reset-gpios", 0, &of_flags); - if (of_flags & OF_GPIO_ACTIVE_LOW) - flag |= GPIOF_ACTIVE_LOW; - if (gpio_is_valid(hipcie->reset_gpio)) { - ret = devm_gpio_request_one(dev, hipcie->reset_gpio, - flag, "PCIe device power control"); - if (ret) { - dev_err(dev, "unable to request gpio\n"); - return ret; - } + hipcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_HIGH); + ret = PTR_ERR_OR_ZERO(hipcie->reset_gpio); + if (ret) { + dev_err(dev, "unable to request reset gpio: %d\n", ret); + return ret; + } + + ret = gpiod_set_consumer_name(hipcie->reset_gpio, + "PCIe device power control"); + if (ret) { + dev_err(dev, "unable to set reset gpio name: %d\n", ret); + return ret; } hipcie->aux_clk = devm_clk_get(dev, "aux"); diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index 
6d0d1b759ca24a8a85f042090a25e6f1951aa36b..19b32839ea2610ae7cbee6d5870a5582632691e0 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -268,6 +269,10 @@ static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep) if (ret) goto err_disable_clk; + ret = phy_set_mode_ext(pcie_ep->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_EP); + if (ret) + goto err_phy_exit; + ret = phy_power_on(pcie_ep->phy); if (ret) goto err_phy_exit; diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index f711acacaeaf8f163a399a8922a72685863b223c..77e5dc7b88ad4b86c0af2a9df764c6e8e47b7851 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -22,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -223,6 +225,7 @@ struct qcom_pcie { union qcom_pcie_resources res; struct phy *phy; struct gpio_desc *reset; + struct icc_path *icc_mem; const struct qcom_pcie_cfg *cfg; }; @@ -1236,7 +1239,7 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) ret = reset_control_assert(res->pci_reset); if (ret < 0) { - dev_err(dev, "cannot deassert pci reset\n"); + dev_err(dev, "cannot assert pci reset\n"); goto err_disable_clocks; } @@ -1497,6 +1500,10 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp) if (ret) return ret; + ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); + if (ret) + goto err_deinit; + ret = phy_power_on(pcie->phy); if (ret) goto err_deinit; @@ -1639,6 +1646,74 @@ static const struct dw_pcie_ops dw_pcie_ops = { .start_link = qcom_pcie_start_link, }; +static int qcom_pcie_icc_init(struct qcom_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + int ret; + + pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem"); + if (IS_ERR(pcie->icc_mem)) + return PTR_ERR(pcie->icc_mem); + + /* + * Some Qualcomm platforms require interconnect bandwidth constraints + * to be set before enabling interconnect clocks. + * + * Set an initial peak bandwidth corresponding to single-lane Gen 1 + * for the pcie-mem path. + */ + ret = icc_set_bw(pcie->icc_mem, 0, MBps_to_icc(250)); + if (ret) { + dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n", + ret); + return ret; + } + + return 0; +} + +static void qcom_pcie_icc_update(struct qcom_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + u32 offset, status, bw; + int speed, width; + int ret; + + if (!pcie->icc_mem) + return; + + offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); + + /* Only update constraints if link is up. 
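+	 * PCI_EXP_LNKSTA_DLLLA below is the Data Link Layer Link Active flag
+	 * of the Link Status register.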
*/ + if (!(status & PCI_EXP_LNKSTA_DLLLA)) + return; + + speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status); + width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status); + + switch (speed) { + case 1: + bw = MBps_to_icc(250); + break; + case 2: + bw = MBps_to_icc(500); + break; + default: + WARN_ON_ONCE(1); + fallthrough; + case 3: + bw = MBps_to_icc(985); + break; + } + + ret = icc_set_bw(pcie->icc_mem, 0, width * bw); + if (ret) { + dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n", + ret); + } +} + static int qcom_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1699,6 +1774,10 @@ static int qcom_pcie_probe(struct platform_device *pdev) goto err_pm_runtime_put; } + ret = qcom_pcie_icc_init(pcie); + if (ret) + goto err_pm_runtime_put; + ret = pcie->cfg->ops->get_resources(pcie); if (ret) goto err_pm_runtime_put; @@ -1717,6 +1796,8 @@ static int qcom_pcie_probe(struct platform_device *pdev) goto err_phy_exit; } + qcom_pcie_icc_update(pcie); + return 0; err_phy_exit: diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 1b6b437823d22f2fb6770493da737448ed5a58e5..02d78a12b6e72591a6534ce4c5f4f69107b5a85c 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index ba36bbc5897d0023fcbb901ed08bba049602451f..513d8edf3a5cffb19d6a15697f25dd793d39c0cc 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -1859,20 +1859,18 @@ static int advk_pcie_probe(struct platform_device *pdev) return ret; } - pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node, - "reset-gpios", 0, - GPIOD_OUT_LOW, - "pcie1-reset"); + pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); ret = PTR_ERR_OR_ZERO(pcie->reset_gpio); if (ret) { - if (ret == -ENOENT) { - pcie->reset_gpio = NULL; - } else { - if (ret != -EPROBE_DEFER) - dev_err(dev, "Failed to get reset-gpio: %i\n", - ret); - return ret; - } + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get reset-gpio: %i\n", ret); + return ret; + } + + ret = gpiod_set_consumer_name(pcie->reset_gpio, "pcie1-reset"); + if (ret) { + dev_err(dev, "Failed to set reset gpio name: %d\n", ret); + return ret; } ret = of_pci_get_max_link_speed(dev->of_node); diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c index 0cfd9d5a497c9d7c8cbbef2200f9d495b835a1fd..ecd3009df586d19bd82b031769a9be3d76d27ec0 100644 --- a/drivers/pci/controller/pci-ftpci100.c +++ b/drivers/pci/controller/pci-ftpci100.c @@ -553,7 +553,7 @@ static const struct of_device_id faraday_pci_of_match[] = { static struct platform_driver faraday_pci_driver = { .driver = { .name = "ftpci100", - .of_match_table = of_match_ptr(faraday_pci_of_match), + .of_match_table = faraday_pci_of_match, .suppress_bind_attrs = true, }, .probe = faraday_pci_probe, diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index 1ced73726a2671a4ae8bf851871c86bc52044180..1dc209f6f53aefa9e013f5290630ff52d21009a7 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -11,14 +11,15 @@ #include #include #include -#include +#include #include +#include +#include #include #include #include #include #include -#include #include #include @@ -1261,9 +1262,8 @@ static int 
mvebu_pcie_parse_port(struct mvebu_pcie *pcie, struct mvebu_pcie_port *port, struct device_node *child) { struct device *dev = &pcie->pdev->dev; - enum of_gpio_flags flags; u32 slot_power_limit; - int reset_gpio, ret; + int ret; u32 num_lanes; port->pcie = pcie; @@ -1327,40 +1327,24 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, port->name, child); } - reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags); - if (reset_gpio == -EPROBE_DEFER) { - ret = reset_gpio; + port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset", + port->name); + if (!port->reset_name) { + ret = -ENOMEM; goto err; } - if (gpio_is_valid(reset_gpio)) { - unsigned long gpio_flags; - - port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset", - port->name); - if (!port->reset_name) { - ret = -ENOMEM; + port->reset_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(child), + "reset", GPIOD_OUT_HIGH, + port->name); + ret = PTR_ERR_OR_ZERO(port->reset_gpio); + if (ret) { + if (ret != -ENOENT) goto err; - } - - if (flags & OF_GPIO_ACTIVE_LOW) { - dev_info(dev, "%pOF: reset gpio is active low\n", - child); - gpio_flags = GPIOF_ACTIVE_LOW | - GPIOF_OUT_INIT_LOW; - } else { - gpio_flags = GPIOF_OUT_INIT_HIGH; - } - - ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags, - port->reset_name); - if (ret) { - if (ret == -EPROBE_DEFER) - goto err; - goto skip; - } - - port->reset_gpio = gpio_to_desc(reset_gpio); + /* reset gpio is optional */ + port->reset_gpio = NULL; + devm_kfree(dev, port->reset_name); + port->reset_name = NULL; } slot_power_limit = of_pci_get_slot_power_limit(child, diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 8e323e93be91576b3bb988c57696e37d858c4cbd..929f9363e94bec7120b9f83238d964975fc4fe5f 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -2202,10 +2202,11 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) * and in this case fall back to using AFI per port register * to toggle PERST# SFIO line. 
*/ - rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port, - "reset-gpios", 0, - GPIOD_OUT_LOW, - label); + rp->reset_gpio = devm_fwnode_gpiod_get(dev, + of_fwnode_handle(port), + "reset", + GPIOD_OUT_LOW, + label); if (IS_ERR(rp->reset_gpio)) { if (PTR_ERR(rp->reset_gpio) == -ENOENT) { rp->reset_gpio = NULL; diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c index 154a5398633ccb3a539a377583e02c2c0c2a4684..ca44b0c83d1bffbccf078d0182e6bbddcd02ee9b 100644 --- a/drivers/pci/controller/pci-v3-semi.c +++ b/drivers/pci/controller/pci-v3-semi.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -902,7 +901,7 @@ static const struct of_device_id v3_pci_of_match[] = { static struct platform_driver v3_pci_driver = { .driver = { .name = "pci-v3-semi", - .of_match_table = of_match_ptr(v3_pci_of_match), + .of_match_table = v3_pci_of_match, .suppress_bind_attrs = true, }, .probe = v3_pci_probe, diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c index bfa259781b692dc4792332e8474d79df8ad0dd12..d7987b281f7991581f30c61f6aba363d43535eae 100644 --- a/drivers/pci/controller/pci-xgene-msi.c +++ b/drivers/pci/controller/pci-xgene-msi.c @@ -8,9 +8,9 @@ */ #include #include +#include #include #include -#include #include #include #include diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index 549d3bd6d1c27c8869ca1e41eab5f3400ad3fdce..887b4941ff32f58f83d1da11e8217345ec0cb5e9 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c index 7b1d3ebc34ecd72ee0f7c914e1a71afd432c43d6..65e8a20cc44263451f26564afacb56123b60b967 100644 --- a/drivers/pci/controller/pcie-altera-msi.c +++ b/drivers/pci/controller/pcie-altera-msi.c @@ -9,11 +9,11 @@ #include #include +#include #include #include #include #include -#include #include #include #include diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 521acd632f1ac8d65f7e63d5729271049236da4d..edf283e2b5dd017cce84488be0bcea3c51cba959 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -52,6 +53,8 @@ #define PCIE_RC_DL_MDIO_RD_DATA 0x1108 #define PCIE_MISC_MISC_CTRL 0x4008 +#define PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK 0x80 +#define PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK 0x400 #define PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK 0x1000 #define PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK 0x2000 #define PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK 0x300000 @@ -302,42 +305,34 @@ static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd) /* negative return value indicates error */ static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val) { - int tries; u32 data; + int err; writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ), base + PCIE_RC_DL_MDIO_ADDR); readl(base + PCIE_RC_DL_MDIO_ADDR); - - data = readl(base + PCIE_RC_DL_MDIO_RD_DATA); - for (tries = 0; !MDIO_RD_DONE(data) && tries < 10; tries++) { - udelay(10); - data = readl(base + PCIE_RC_DL_MDIO_RD_DATA); - } - + err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_RD_DATA, data, + MDIO_RD_DONE(data), 10, 100); *val = FIELD_GET(MDIO_DATA_MASK, data); - return 
MDIO_RD_DONE(data) ? 0 : -EIO;
+
+	return err;
 }
 
 /* negative return value indicates error */
 static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
 				u8 regad, u16 wrdata)
 {
-	int tries;
 	u32 data;
+	int err;
 
 	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE),
 	       base + PCIE_RC_DL_MDIO_ADDR);
 	readl(base + PCIE_RC_DL_MDIO_ADDR);
 	writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
 
-	data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
-	for (tries = 0; !MDIO_WT_DONE(data) && tries < 10; tries++) {
-		udelay(10);
-		data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
-	}
-
-	return MDIO_WT_DONE(data) ? 0 : -EIO;
+	/* MDIO_WT_DONE() tests a bit above bit 15, so poll all 32 bits */
+	err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
+					MDIO_WT_DONE(data), 10, 100);
+	return err;
 }
 
 /*
@@ -445,7 +440,8 @@ static struct irq_chip brcm_msi_irq_chip = {
 
 static struct msi_domain_info brcm_msi_domain_info = {
 	/* Multi MSI is supported by the controller, but not by this driver */
-	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		  MSI_FLAG_MULTI_PCI_MSI),
 	.chip = &brcm_msi_irq_chip,
 };
 
@@ -505,21 +501,23 @@ static struct irq_chip brcm_msi_bottom_irq_chip = {
 	.irq_ack = brcm_msi_ack_irq,
 };
 
-static int brcm_msi_alloc(struct brcm_msi *msi)
+static int brcm_msi_alloc(struct brcm_msi *msi, unsigned int nr_irqs)
 {
 	int hwirq;
 
 	mutex_lock(&msi->lock);
-	hwirq = bitmap_find_free_region(msi->used, msi->nr, 0);
+	hwirq = bitmap_find_free_region(msi->used, msi->nr,
+					order_base_2(nr_irqs));
 	mutex_unlock(&msi->lock);
 
 	return hwirq;
 }
 
-static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq)
+static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq,
+			  unsigned int nr_irqs)
 {
 	mutex_lock(&msi->lock);
-	bitmap_release_region(msi->used, hwirq, 0);
+	bitmap_release_region(msi->used, hwirq, order_base_2(nr_irqs));
 	mutex_unlock(&msi->lock);
 }
 
@@ -527,16 +525,17 @@ static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 				 unsigned int nr_irqs, void *args)
 {
 	struct brcm_msi *msi = domain->host_data;
-	int hwirq;
+	int hwirq, i;
 
-	hwirq = brcm_msi_alloc(msi);
+	hwirq = brcm_msi_alloc(msi, nr_irqs);
 
 	if (hwirq < 0)
 		return hwirq;
 
-	irq_domain_set_info(domain, virq, (irq_hw_number_t)hwirq,
-			    &brcm_msi_bottom_irq_chip, domain->host_data,
-			    handle_edge_irq, NULL, NULL);
+	for (i = 0; i < nr_irqs; i++)
+		irq_domain_set_info(domain, virq + i, hwirq + i,
+				    &brcm_msi_bottom_irq_chip, domain->host_data,
+				    handle_edge_irq, NULL, NULL);
 	return 0;
 }
 
@@ -546,7 +545,7 @@ static void brcm_irq_domain_free(struct irq_domain *domain,
 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 	struct brcm_msi *msi = irq_data_get_irq_chip_data(d);
 
-	brcm_msi_free(msi, d->hwirq);
+	brcm_msi_free(msi, d->hwirq, nr_irqs);
 }
 
 static const struct irq_domain_ops msi_domain_ops = {
@@ -726,7 +725,7 @@ static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
 	return base + DATA_ADDR(pcie);
 }
 
-static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
+static void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
 {
 	u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
 	u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
@@ -736,7 +735,7 @@ static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie,
 	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
 }
 
-static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
+static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie
*pcie, u32 val) { u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK; u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT; @@ -746,7 +745,7 @@ static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); } -static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val) +static void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val) { if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n")) return; @@ -757,7 +756,7 @@ static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val) reset_control_deassert(pcie->perst_reset); } -static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val) +static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val) { u32 tmp; @@ -767,7 +766,7 @@ static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val) writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL); } -static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val) +static void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val) { u32 tmp; @@ -776,7 +775,7 @@ static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val) writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); } -static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie, +static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie, u64 *rc_bar2_size, u64 *rc_bar2_offset) { @@ -903,11 +902,16 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) else burst = 0x2; /* 512 bytes */ - /* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */ + /* + * Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN, + * RCB_MPS_MODE, RCB_64B_MODE + */ tmp = readl(base + PCIE_MISC_MISC_CTRL); u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK); u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK); u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK); + u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK); + u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK); writel(tmp, base + PCIE_MISC_MISC_CTRL); ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size, @@ -1033,8 +1037,15 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie) pcie->perst_set(pcie, 0); /* - * Give the RC/EP time to wake up, before trying to configure RC. - * Intermittently check status for link-up, up to a total of 100ms. + * Wait for 100ms after PERST# deassertion; see PCIe CEM specification + * sections 2.2, PCIe r5.0, 6.6.1. + */ + msleep(100); + + /* + * Give the RC/EP even more time to wake up, before trying to + * configure RC. Intermittently check status for link-up, up to a + * total of 100ms. 
*/ for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5) msleep(5); diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c index 538115246c799ceef5df93af12915dbc977c1dc1..4142a73e611d111553d83d8fbff788d2b1e55a5a 100644 --- a/drivers/pci/controller/pcie-iproc-platform.c +++ b/drivers/pci/controller/pcie-iproc-platform.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index 2519201b0e51c4cc8d477136ab0b2ce46638cca1..83029bdfd88444966da8215a2d4a2120d254a15f 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c index 7263d175b5adb7338ac77034aa1e5e1dea2856a4..0ebf7015e9afd79c4de63522e1825dede6fdae8a 100644 --- a/drivers/pci/controller/pcie-microchip-host.c +++ b/drivers/pci/controller/pcie-microchip-host.c @@ -9,10 +9,10 @@ #include #include +#include #include #include #include -#include #include #include #include diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c index 4bd1abf26008f7205f9a19b4772de97a1ea16d40..ee7aad09d6277ef121ec806a4674e7f83fdef135 100644 --- a/drivers/pci/controller/pcie-mt7621.c +++ b/drivers/pci/controller/pcie-mt7621.c @@ -466,7 +466,8 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host) } static const struct soc_device_attribute mt7621_pcie_quirks_match[] = { - { .soc_id = "mt7621", .revision = "E2" } + { .soc_id = "mt7621", .revision = "E2" }, + { /* sentinel */ } }; static int mt7621_pcie_probe(struct platform_device *pdev) diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 7352b5ff8d35981e8b3093f8dbe5e395938db00a..c96c0f454570ce06d8dcde3f5b6426382fc86524 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c index e4ab48041eb6d48c953d2704af53214ef5814241..4a787a941674bc77f54bfd7ed1428527cd7f2337 100644 --- a/drivers/pci/controller/pcie-xilinx-cpm.c +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 40d070e54ad2e4b8ff2744d8ee00b5f5b85d6e0d..176686bdb15c18e5dc951fdc315d00a1bd2142fc 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include @@ -474,15 +473,15 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, for (i = 0; i < nr_irqs; i++) { irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip, - domain->host_data, handle_simple_irq, - NULL, NULL); + domain->host_data, handle_simple_irq, + NULL, NULL); } mutex_unlock(&msi->lock); return 0; } static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs) + unsigned int nr_irqs) { struct irq_data *data = irq_domain_get_irq_data(domain, virq); struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); @@ -722,7 +721,6 @@ static int 
nwl_pcie_bridge_init(struct nwl_pcie *pcie)
 	/* Enable all misc interrupts */
 	nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
 
-	/* Disable all legacy interrupts */
 	nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index e06e9f4fc50f72b40c72a8a7fb1105b5a8fca1bb..769eedeb8802a248d104d206a84e200d5c3ca326 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -719,6 +719,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	resource_size_t offset[2] = {0};
 	resource_size_t membar2_offset = 0x2000;
 	struct pci_bus *child;
+	struct pci_dev *dev;
 	int ret;
 
 	/*
@@ -859,8 +860,25 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	pci_scan_child_bus(vmd->bus);
 	vmd_domain_reset(vmd);
 
-	list_for_each_entry(child, &vmd->bus->children, node)
-		pci_reset_bus(child->self);
+
+	/*
+	 * When Intel VMD is enabled, the OS does not discover the Root Ports
+	 * owned by Intel VMD within the MMCFG space. pci_reset_bus() applies
+	 * a reset to the parent of the PCI device supplied as an argument.
+	 * This is why we pass a child device, so the reset can be triggered
+	 * at the Intel bridge level and propagated to all the children in
+	 * the hierarchy.
+	 */
+	list_for_each_entry(child, &vmd->bus->children, node) {
+		if (!list_empty(&child->devices)) {
+			dev = list_first_entry(&child->devices,
+					       struct pci_dev, bus_list);
+			ret = pci_reset_bus(dev);
+			if (ret)
+				pci_warn(dev, "can't reset device: %d\n", ret);
+
+			break;
+		}
+	}
+
 	pci_assign_unassigned_bus_resources(vmd->bus);
 
 	/*
@@ -980,6 +998,11 @@ static int vmd_resume(struct device *dev)
 	struct vmd_dev *vmd = pci_get_drvdata(pdev);
 	int err, i;
 
+	if (vmd->irq_domain)
+		vmd_set_msi_remapping(vmd, true);
+	else
+		vmd_set_msi_remapping(vmd, false);
+
 	for (i = 0; i < vmd->msix_count; i++) {
 		err = devm_request_irq(dev, vmd->irqs[i].virq,
 				       vmd_irq, IRQF_NO_THREAD,
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index e402f05068a5340f6609ab097b71b24f2775e087..66d9ab2886468d02a32eb40353ad5bb24909f3ac 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -29,6 +29,9 @@
 #define PCI_DOE_FLAG_CANCEL	0
 #define PCI_DOE_FLAG_DEAD	1
 
+/* Max data object length is 2^18 dwords */
+#define PCI_DOE_MAX_LENGTH	(1 << 18)
+
 /**
  * struct pci_doe_mb - State for a single DOE mailbox
  *
@@ -107,6 +110,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
 {
 	struct pci_dev *pdev = doe_mb->pdev;
 	int offset = doe_mb->cap_offset;
+	size_t length;
 	u32 val;
 	int i;
 
@@ -123,15 +127,20 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
 	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
 		return -EIO;
 
+	/* Length is 2 DW of header + length of payload in DW */
+	length = 2 + task->request_pl_sz / sizeof(u32);
+	if (length > PCI_DOE_MAX_LENGTH)
+		return -EIO;
+	if (length == PCI_DOE_MAX_LENGTH)
+		length = 0;
+
 	/* Write DOE Header */
 	val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
 		FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
 	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
-	/* Length is 2 DW of header + length of payload in DW */
 	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
 			       FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
-					  2 + task->request_pl_sz /
-						sizeof(u32)));
+					  length));
 	for (i = 0; i < task->request_pl_sz / sizeof(u32); i++)
 		pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
 				       task->request_pl[i]);
@@ -178,7 +187,10 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct
pci_doe_task *tas pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0); length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val); - if (length > SZ_1M || length < 2) + /* A value of 0x0 indicates max data object length */ + if (!length) + length = PCI_DOE_MAX_LENGTH; + if (length < 2) return -EIO; /* First 2 dwords have already been read */ diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig index 295a033ee9a2730eae5c500a4f5eb937f324b716..9fd5608868718df40011f408c9c4d8d9940ab796 100644 --- a/drivers/pci/endpoint/functions/Kconfig +++ b/drivers/pci/endpoint/functions/Kconfig @@ -27,13 +27,13 @@ config PCI_EPF_NTB If in doubt, say "N" to disable Endpoint NTB driver. config PCI_EPF_VNTB - tristate "PCI Endpoint NTB driver" - depends on PCI_ENDPOINT - depends on NTB - select CONFIGFS_FS - help - Select this configuration option to enable the Non-Transparent - Bridge (NTB) driver for PCIe Endpoint. NTB driver implements NTB - between PCI Root Port and PCIe Endpoint. + tristate "PCI Endpoint NTB driver" + depends on PCI_ENDPOINT + depends on NTB + select CONFIGFS_FS + help + Select this configuration option to enable the Non-Transparent + Bridge (NTB) driver for PCIe Endpoint. NTB driver implements NTB + between PCI Root Port and PCIe Endpoint. - If in doubt, say "N" to disable Endpoint NTB driver. + If in doubt, say "N" to disable Endpoint NTB driver. diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 36b1801a061b7c007839f1dae35cdb3ffe51c358..55283d2379a6a42cd151213cd5eef892ed7406d4 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -979,7 +979,7 @@ static int pci_epf_test_bind(struct pci_epf *epf) if (ret) epf_test->dma_supported = false; - if (linkup_notifier) { + if (linkup_notifier || core_init_notifier) { epf->nb.notifier_call = pci_epf_test_notifier; pci_epc_register_notifier(epc, &epf->nb); } else { diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c index 0ea85e1d292ece294055d3dce6f064cfe505823c..04698e7995a54c7f22e7c597d9b93f21b4cff7ea 100644 --- a/drivers/pci/endpoint/functions/pci-epf-vntb.c +++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c @@ -11,7 +11,7 @@ * Author: Kishon Vijay Abraham I */ -/** +/* * +------------+ +---------------------------------------+ * | | | | * +------------+ | +--------------+ @@ -99,20 +99,20 @@ enum epf_ntb_bar { * NTB Driver NTB Driver */ struct epf_ntb_ctrl { - u32 command; - u32 argument; - u16 command_status; - u16 link_status; - u32 topology; - u64 addr; - u64 size; - u32 num_mws; - u32 reserved; - u32 spad_offset; - u32 spad_count; - u32 db_entry_size; - u32 db_data[MAX_DB_COUNT]; - u32 db_offset[MAX_DB_COUNT]; + u32 command; + u32 argument; + u16 command_status; + u16 link_status; + u32 topology; + u64 addr; + u64 size; + u32 num_mws; + u32 reserved; + u32 spad_offset; + u32 spad_count; + u32 db_entry_size; + u32 db_data[MAX_DB_COUNT]; + u32 db_offset[MAX_DB_COUNT]; } __packed; struct epf_ntb { @@ -136,8 +136,7 @@ struct epf_ntb { struct epf_ntb_ctrl *reg; - phys_addr_t epf_db_phy; - void __iomem *epf_db; + u32 *epf_db; phys_addr_t vpci_mw_phy[MAX_MW]; void __iomem *vpci_mw_addr[MAX_MW]; @@ -156,12 +155,14 @@ static struct pci_epf_header epf_ntb_header = { }; /** - * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host + * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host (VHOST) * @ntb: NTB device that 
facilitates communication between HOST and VHOST * @link_up: true or false indicating Link is UP or Down * * Once NTB function in HOST invoke ntb_link_enable(), - * this NTB function driver will trigger a link event to vhost. + * this NTB function driver will trigger a link event to VHOST. + * + * Returns: Zero for success, or an error code in case of failure */ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up) { @@ -175,9 +176,9 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up) } /** - * epf_ntb_configure_mw() - Configure the Outbound Address Space for vhost - * to access the memory window of host - * @ntb: NTB device that facilitates communication between host and vhost + * epf_ntb_configure_mw() - Configure the Outbound Address Space for VHOST + * to access the memory window of HOST + * @ntb: NTB device that facilitates communication between HOST and VHOST * @mw: Index of the memory window (either 0, 1, 2 or 3) * * EP Outbound Window @@ -194,7 +195,9 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up) * | | | | * | | | | * +--------+ +-----------+ - * VHost PCI EP + * VHOST PCI EP + * + * Returns: Zero for success, or an error code in case of failure */ static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw) { @@ -219,7 +222,7 @@ static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw) /** * epf_ntb_teardown_mw() - Teardown the configured OB ATU - * @ntb: NTB device that facilitates communication between HOST and vHOST + * @ntb: NTB device that facilitates communication between HOST and VHOST * @mw: Index of the memory window (either 0, 1, 2 or 3) * * Teardown the configured OB ATU configured in epf_ntb_configure_mw() using @@ -234,12 +237,12 @@ static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw) } /** - * epf_ntb_cmd_handler() - Handle commands provided by the NTB Host + * epf_ntb_cmd_handler() - Handle commands provided by the NTB HOST * @work: work_struct for the epf_ntb_epc * * Workqueue function that gets invoked for the two epf_ntb_epc * periodically (once every 5ms) to see if it has received any commands - * from NTB host. The host can send commands to configure doorbell or + * from NTB HOST. The HOST can send commands to configure doorbell or * configure memory window or to update link status. */ static void epf_ntb_cmd_handler(struct work_struct *work) @@ -254,12 +257,10 @@ static void epf_ntb_cmd_handler(struct work_struct *work) ntb = container_of(work, struct epf_ntb, cmd_handler.work); for (i = 1; i < ntb->db_count; i++) { - if (readl(ntb->epf_db + i * 4)) { - if (readl(ntb->epf_db + i * 4)) - ntb->db |= 1 << (i - 1); - + if (ntb->epf_db[i]) { + ntb->db |= 1 << (i - 1); ntb_db_event(&ntb->ntb, i); - writel(0, ntb->epf_db + i * 4); + ntb->epf_db[i] = 0; } } @@ -321,8 +322,8 @@ reset_handler: /** * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR - * @ntb_epc: EPC associated with one of the HOST which holds peer's outbound - * address. + * @ntb: EPC associated with one of the HOST which holds peer's outbound + * address. * * Clear BAR0 of EP CONTROLLER 1 which contains the HOST1's config and * self scratchpad region (removes inbound ATU configuration). While BAR0 is @@ -331,8 +332,10 @@ reset_handler: * used for self scratchpad from epf_ntb_bar[BAR_CONFIG]. * * Please note the self scratchpad region and config region is combined to - * a single region and mapped using the same BAR. Also note HOST2's peer - * scratchpad is HOST1's self scratchpad. + * a single region and mapped using the same BAR. 
Also note VHOST's peer + * scratchpad is HOST's self scratchpad. + * + * Returns: void */ static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb) { @@ -347,13 +350,15 @@ static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb) /** * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR - * @ntb: NTB device that facilitates communication between HOST and vHOST + * @ntb: NTB device that facilitates communication between HOST and VHOST * - * Map BAR0 of EP CONTROLLER 1 which contains the HOST1's config and + * Map BAR0 of EP CONTROLLER which contains the VHOST's config and * self scratchpad region. * * Please note the self scratchpad region and config region is combined to * a single region and mapped using the same BAR. + * + * Returns: Zero for success, or an error code in case of failure */ static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb) { @@ -380,7 +385,7 @@ static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb) /** * epf_ntb_config_spad_bar_free() - Free the physical memory associated with * config + scratchpad region - * @ntb: NTB device that facilitates communication between HOST and vHOST + * @ntb: NTB device that facilitates communication between HOST and VHOST */ static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb) { @@ -393,11 +398,13 @@ static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb) /** * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad * region - * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @ntb: NTB device that facilitates communication between HOST and VHOST * * Allocate the Local Memory mentioned in the above diagram. The size of * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION * is obtained from "spad-count" configfs entry. + * + * Returns: Zero for success, or an error code in case of failure */ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb) { @@ -424,7 +431,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb) spad_count = ntb->spad_count; ctrl_size = sizeof(struct epf_ntb_ctrl); - spad_size = 2 * spad_count * 4; + spad_size = 2 * spad_count * sizeof(u32); if (!align) { ctrl_size = roundup_pow_of_two(ctrl_size); @@ -454,7 +461,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb) ctrl->num_mws = ntb->num_mws; ntb->spad_size = spad_size; - ctrl->db_entry_size = 4; + ctrl->db_entry_size = sizeof(u32); for (i = 0; i < ntb->db_count; i++) { ntb->reg->db_data[i] = 1 + i; @@ -465,11 +472,13 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb) } /** - * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capaiblity - * @ntb: NTB device that facilitates communication between HOST and vHOST + * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability + * @ntb: NTB device that facilitates communication between HOST and VHOST * * Configure MSI/MSI-X capability for each interface with number of * interrupts equal to "db_count" configfs entry. 
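 *
 * The doorbells themselves are polled rather than interrupt driven on
 * this side: the peer writes db_data[i] (initialized to i + 1 in
 * epf_ntb_config_spad_bar_alloc() above) into the doorbell BAR, and
 * epf_ntb_cmd_handler() scans the backing u32 array. A condensed sketch
 * of that loop, for orientation only:
 *
 *   for (i = 1; i < ntb->db_count; i++) {
 *           if (ntb->epf_db[i]) {
 *                   ntb->db |= 1 << (i - 1);
 *                   ntb_db_event(&ntb->ntb, i);
 *                   ntb->epf_db[i] = 0;
 *           }
 *   }
 *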
+ * + * Returns: Zero for success, or an error code in case of failure */ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb) { @@ -511,7 +520,9 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb) /** * epf_ntb_db_bar_init() - Configure Doorbell window BARs - * @ntb: NTB device that facilitates communication between HOST and vHOST + * @ntb: NTB device that facilitates communication between HOST and VHOST + * + * Returns: Zero for success, or an error code in case of failure */ static int epf_ntb_db_bar_init(struct epf_ntb *ntb) { @@ -522,7 +533,7 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb) struct pci_epf_bar *epf_bar; void __iomem *mw_addr; enum pci_barno barno; - size_t size = 4 * ntb->db_count; + size_t size = sizeof(u32) * ntb->db_count; epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, @@ -557,7 +568,7 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb) return ret; err_alloc_peer_mem: - pci_epc_mem_free_addr(ntb->epf->epc, epf_bar->phys_addr, mw_addr, epf_bar->size); + pci_epf_free_space(ntb->epf, mw_addr, barno, 0); return -1; } @@ -566,7 +577,7 @@ static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws); /** * epf_ntb_db_bar_clear() - Clear doorbell BAR and free memory * allocated in peer's outbound address space - * @ntb: NTB device that facilitates communication between HOST and vHOST + * @ntb: NTB device that facilitates communication between HOST and VHOST */ static void epf_ntb_db_bar_clear(struct epf_ntb *ntb) { @@ -582,8 +593,9 @@ static void epf_ntb_db_bar_clear(struct epf_ntb *ntb) /** * epf_ntb_mw_bar_init() - Configure Memory window BARs - * @ntb: NTB device that facilitates communication between HOST and vHOST + * @ntb: NTB device that facilitates communication between HOST and VHOST * + * Returns: Zero for success, or an error code in case of failure */ static int epf_ntb_mw_bar_init(struct epf_ntb *ntb) { @@ -639,7 +651,7 @@ err_alloc_mem: /** * epf_ntb_mw_bar_clear() - Clear Memory window BARs - * @ntb: NTB device that facilitates communication between HOST and vHOST + * @ntb: NTB device that facilitates communication between HOST and VHOST */ static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws) { @@ -662,7 +674,7 @@ static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws) /** * epf_ntb_epc_destroy() - Cleanup NTB EPC interface - * @ntb: NTB device that facilitates communication between HOST and vHOST + * @ntb: NTB device that facilitates communication between HOST and VHOST * * Wrapper for epf_ntb_epc_destroy_interface() to cleanup all the NTB interfaces */ @@ -675,7 +687,9 @@ static void epf_ntb_epc_destroy(struct epf_ntb *ntb) /** * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB * constructs (scratchpad region, doorbell, memorywindow) - * @ntb: NTB device that facilitates communication between HOST and vHOST + * @ntb: NTB device that facilitates communication between HOST and VHOST + * + * Returns: Zero for success, or an error code in case of failure */ static int epf_ntb_init_epc_bar(struct epf_ntb *ntb) { @@ -716,11 +730,13 @@ static int epf_ntb_init_epc_bar(struct epf_ntb *ntb) /** * epf_ntb_epc_init() - Initialize NTB interface - * @ntb: NTB device that facilitates communication between HOST and vHOST2 + * @ntb: NTB device that facilitates communication between HOST and VHOST * * Wrapper to initialize a particular EPC interface and start the workqueue - * to check for commands from host. 
This function will write to the + * to check for commands from HOST. This function will write to the * EP controller HW for configuring it. + * + * Returns: Zero for success, or an error code in case of failure */ static int epf_ntb_epc_init(struct epf_ntb *ntb) { @@ -787,7 +803,7 @@ err_config_interrupt: /** * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces - * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @ntb: NTB device that facilitates communication between HOST and VHOST * * Wrapper to cleanup all NTB interfaces. */ @@ -951,6 +967,8 @@ static const struct config_item_type ntb_group_type = { * * Add configfs directory specific to NTB. This directory will hold * NTB specific properties like db_count, spad_count, num_mws etc., + * + * Returns: Pointer to config_group */ static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf, struct config_group *group) @@ -1101,11 +1119,11 @@ static int vntb_epf_link_enable(struct ntb_dev *ntb, static u32 vntb_epf_spad_read(struct ntb_dev *ndev, int idx) { struct epf_ntb *ntb = ntb_ndev(ndev); - int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * 4; + int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * sizeof(u32); u32 val; - void __iomem *base = ntb->reg; + void __iomem *base = (void __iomem *)ntb->reg; - val = readl(base + off + ct + idx * 4); + val = readl(base + off + ct + idx * sizeof(u32)); return val; } @@ -1113,10 +1131,10 @@ static int vntb_epf_spad_write(struct ntb_dev *ndev, int idx, u32 val) { struct epf_ntb *ntb = ntb_ndev(ndev); struct epf_ntb_ctrl *ctrl = ntb->reg; - int off = ctrl->spad_offset, ct = ctrl->spad_count * 4; - void __iomem *base = ntb->reg; + int off = ctrl->spad_offset, ct = ctrl->spad_count * sizeof(u32); + void __iomem *base = (void __iomem *)ntb->reg; - writel(val, base + off + ct + idx * 4); + writel(val, base + off + ct + idx * sizeof(u32)); return 0; } @@ -1125,10 +1143,10 @@ static u32 vntb_epf_peer_spad_read(struct ntb_dev *ndev, int pidx, int idx) struct epf_ntb *ntb = ntb_ndev(ndev); struct epf_ntb_ctrl *ctrl = ntb->reg; int off = ctrl->spad_offset; - void __iomem *base = ntb->reg; + void __iomem *base = (void __iomem *)ntb->reg; u32 val; - val = readl(base + off + idx * 4); + val = readl(base + off + idx * sizeof(u32)); return val; } @@ -1137,9 +1155,9 @@ static int vntb_epf_peer_spad_write(struct ntb_dev *ndev, int pidx, int idx, u32 struct epf_ntb *ntb = ntb_ndev(ndev); struct epf_ntb_ctrl *ctrl = ntb->reg; int off = ctrl->spad_offset; - void __iomem *base = ntb->reg; + void __iomem *base = (void __iomem *)ntb->reg; - writel(val, base + off + idx * 4); + writel(val, base + off + idx * sizeof(u32)); return 0; } @@ -1292,6 +1310,8 @@ static struct pci_driver vntb_pci_driver = { * Invoked when a primary interface or secondary interface is bound to EPC * device. This function will succeed only when EPC is bound to both the * interfaces. + * + * Returns: Zero for success, or an error code in case of failure */ static int epf_ntb_bind(struct pci_epf *epf) { @@ -1377,6 +1397,8 @@ static struct pci_epf_ops epf_ntb_ops = { * * Probe NTB function driver when endpoint function bus detects a NTB * endpoint function. 
+ * + * Returns: Zero for success, or an error code in case of failure */ static int epf_ntb_probe(struct pci_epf *epf) { diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index 3bc9273d0a08293ca47d564b795be46326f31ea7..2542196e8c3d95ab5b5a92dbec50b3cb63054c09 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -724,7 +724,6 @@ void pci_epc_destroy(struct pci_epc *epc) { pci_ep_cfs_remove_epc_group(epc->group); device_unregister(&epc->dev); - kfree(epc); } EXPORT_SYMBOL_GPL(pci_epc_destroy); @@ -746,6 +745,11 @@ void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc) } EXPORT_SYMBOL_GPL(devm_pci_epc_destroy); +static void pci_epc_release(struct device *dev) +{ + kfree(to_pci_epc(dev)); +} + /** * __pci_epc_create() - create a new endpoint controller (EPC) device * @dev: device that is creating the new EPC @@ -779,6 +783,7 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, device_initialize(&epc->dev); epc->dev.class = pci_epc_class; epc->dev.parent = dev; + epc->dev.release = pci_epc_release; epc->ops = ops; ret = dev_set_name(&epc->dev, "%s", dev_name(dev)); diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig index 840a84bb5ee26e54ad6ec4abab52de5e43053715..48113b210cf9384fff963de10799c88366bcdb36 100644 --- a/drivers/pci/hotplug/Kconfig +++ b/drivers/pci/hotplug/Kconfig @@ -6,11 +6,14 @@ menuconfig HOTPLUG_PCI bool "Support for PCI Hotplug" depends on PCI && SYSFS + default y if USB4 help Say Y here if you have a motherboard with a PCI Hotplug controller. This allows you to add and remove PCI cards while the machine is powered up and running. + Thunderbolt/USB4 PCIe tunneling depends on native PCIe hotplug. + When in doubt, say N. if HOTPLUG_PCI diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO index 88f217c82b4ffd1ae4f9d8671ccce372fcbadba7..fdb8dd6ea24dc8bb6afdfe1c767d3b36f6d52080 100644 --- a/drivers/pci/hotplug/TODO +++ b/drivers/pci/hotplug/TODO @@ -58,9 +58,6 @@ shpchp: pciehp with commit 82a9e79ef132 ("PCI: pciehp: remove hpc_ops"). Clarify if there was a specific reason not to apply the same change to shpchp. -* The ->get_mode1_ECC_cap callback in shpchp_hpc_ops is never invoked. - Why was it introduced? Can it be removed? - * The hardirq handler shpc_isr() queues events on a workqueue. It can be simplified by converting it to threaded IRQ handling. Use pciehp as a template. diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 6efa3d8db9a561907b4a7bfc7d1bc334a7fc7044..5b1f271c6034be045aa446f7e5aacb2d6e3f2e2a 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -411,6 +411,14 @@ static void check_hotplug_bridge(struct acpiphp_slot *slot, struct pci_dev *dev) if (dev->is_hotplug_bridge) return; + /* + * In the PCIe case, only Root Ports and Downstream Ports are capable of + * accommodating hotplug devices, so avoid marking Upstream Ports as + * "hotplug bridges". 
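 * (Topologically, new devices can only appear below a Root Port or a
 * switch Downstream Port; an Upstream Port always faces the Root
 * Complex, so reserving hotplug resources for it would be wasted.)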
+ */ + if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM) + return; + list_for_each_entry(func, &slot->funcs, sibling) { if (PCI_FUNC(dev->devfn) == func->function) { dev->is_hotplug_bridge = 1; diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 040ae076ec0e98b0f4731ec0e275514dbd75e2c8..10e9670eea0b06fafb82e945d23d0cbe6e4a9b31 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -811,7 +811,9 @@ static void pcie_enable_notification(struct controller *ctrl) else cmd |= PCI_EXP_SLTCTL_PDCE; if (!pciehp_poll_mode) - cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE; + cmd |= PCI_EXP_SLTCTL_HPIE; + if (!pciehp_poll_mode && !NO_CMD_CMPL(ctrl)) + cmd |= PCI_EXP_SLTCTL_CCIE; mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | PCI_EXP_SLTCTL_PFDE | diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index 6e85885b554c5da58d7b7e732488407fa113b4d2..3a97f455336e6729ec6a448b75a8f768789250ec 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h @@ -311,7 +311,6 @@ struct hpc_ops { int (*get_latch_status)(struct slot *slot, u8 *status); int (*get_adapter_status)(struct slot *slot, u8 *status); int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed); - int (*get_mode1_ECC_cap)(struct slot *slot, u8 *mode); int (*get_prog_int)(struct slot *slot, u8 *prog_int); int (*query_power_fault)(struct slot *slot); void (*green_led_on)(struct slot *slot); diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index bd7557ca49108de988fbfa3451ca85393cd03b42..48e4daefc44af42c8bb58339df3d4c1f35f844b0 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c @@ -489,23 +489,6 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value) return retval; } -static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode) -{ - int retval = 0; - struct controller *ctrl = slot->ctrl; - u16 sec_bus_status = shpc_readw(ctrl, SEC_BUS_CONFIG); - u8 pi = shpc_readb(ctrl, PROG_INTERFACE); - - if (pi == 2) { - *mode = (sec_bus_status & 0x0100) >> 8; - } else { - retval = -1; - } - - ctrl_dbg(ctrl, "Mode 1 ECC cap = %d\n", *mode); - return retval; -} - static int hpc_query_power_fault(struct slot *slot) { struct controller *ctrl = slot->ctrl; @@ -900,7 +883,6 @@ static const struct hpc_ops shpchp_hpc_ops = { .get_adapter_status = hpc_get_adapter_status, .get_adapter_speed = hpc_get_adapter_speed, - .get_mode1_ECC_cap = hpc_get_mode1_ECC_cap, .get_prog_int = hpc_get_prog_int, .query_power_fault = hpc_query_power_fault, diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c index 12ecd0aaa28d6dbd331f2da7c8aaae4c3ffe22da..0050e8f6814ed655ee06a87252ed468d22963722 100644 --- a/drivers/pci/irq.c +++ b/drivers/pci/irq.c @@ -44,6 +44,8 @@ int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler, va_start(ap, fmt); devname = kvasprintf(GFP_KERNEL, fmt, ap); va_end(ap); + if (!devname) + return -ENOMEM; ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn, irqflags, devname, dev_id); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index a46fec776ad77b12220358ff2269b9214bd4c924..068d6745bf98cb5f8dc38b46c28dacfa26944a51 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -67,7 +67,7 @@ static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context, unsigned long long uid; acpi_status status; - status = acpi_evaluate_integer(handle, "_UID", NULL, &uid); + 
status = acpi_evaluate_integer(handle, METHOD_NAME__UID, NULL, &uid); if (ACPI_FAILURE(status) || uid != *segment) return AE_CTRL_DEPTH; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 107d77f3c846708f0eac6c857b757502e2cc6de0..a2ceeacc33eb6e50bb5c6fd27a9019f2bb002ee2 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -646,7 +646,7 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state) return 0; } -static int pci_legacy_suspend_late(struct device *dev, pm_message_t state) +static int pci_legacy_suspend_late(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); @@ -848,7 +848,7 @@ static int pci_pm_suspend_noirq(struct device *dev) return 0; if (pci_has_legacy_pm_support(pci_dev)) - return pci_legacy_suspend_late(dev, PMSG_SUSPEND); + return pci_legacy_suspend_late(dev); if (!pm) { pci_save_state(pci_dev); @@ -1060,7 +1060,7 @@ static int pci_pm_freeze_noirq(struct device *dev) const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (pci_has_legacy_pm_support(pci_dev)) - return pci_legacy_suspend_late(dev, PMSG_FREEZE); + return pci_legacy_suspend_late(dev); if (pm && pm->freeze_noirq) { int error; @@ -1179,7 +1179,7 @@ static int pci_pm_poweroff_noirq(struct device *dev) return 0; if (pci_has_legacy_pm_support(pci_dev)) - return pci_legacy_suspend_late(dev, PMSG_HIBERNATE); + return pci_legacy_suspend_late(dev); if (!pm) { pci_fixup_device(pci_fixup_suspend_late, pci_dev); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 6c250eb214e80f4962c4298e4918507d9a2bc11a..dd0d9d9bc5097e567c7eeb50b101b0822d0b2589 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1182,11 +1182,9 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) sysfs_bin_attr_init(res_attr); if (write_combine) { - pdev->res_attr_wc[num] = res_attr; sprintf(res_attr_name, "resource%d_wc", num); res_attr->mmap = pci_mmap_resource_wc; } else { - pdev->res_attr[num] = res_attr; sprintf(res_attr_name, "resource%d", num); if (pci_resource_flags(pdev, num) & IORESOURCE_IO) { res_attr->read = pci_read_resource_io; @@ -1204,10 +1202,17 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) res_attr->size = pci_resource_len(pdev, num); res_attr->private = (void *)(unsigned long)num; retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr); - if (retval) + if (retval) { kfree(res_attr); + return retval; + } + + if (write_combine) + pdev->res_attr_wc[num] = res_attr; + else + pdev->res_attr[num] = res_attr; - return retval; + return 0; } /** diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 2127aba3550b5d611970bc8d3dccd7d5a60fa301..fba95486caaf2fad52bea9e7176b7ec21c77121d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -6447,6 +6447,8 @@ bool pci_device_is_present(struct pci_dev *pdev) { u32 v; + /* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */ + pdev = pci_physfn(pdev); if (pci_dev_is_disconnected(pdev)) return false; return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); @@ -6743,60 +6745,70 @@ static void pci_no_domains(void) } #ifdef CONFIG_PCI_DOMAINS_GENERIC -static atomic_t __domain_nr = ATOMIC_INIT(-1); +static DEFINE_IDA(pci_domain_nr_static_ida); +static DEFINE_IDA(pci_domain_nr_dynamic_ida); -static int pci_get_new_domain_nr(void) +static void of_pci_reserve_static_domain_nr(void) { - return atomic_inc_return(&__domain_nr); + struct device_node *np; + int domain_nr; + + for_each_node_by_type(np, "pci") { + 
domain_nr = of_get_pci_domain_nr(np); + if (domain_nr < 0) + continue; + /* + * Permanently allocate domain_nr in dynamic_ida + * to prevent it from dynamic allocation. + */ + ida_alloc_range(&pci_domain_nr_dynamic_ida, + domain_nr, domain_nr, GFP_KERNEL); + } } static int of_pci_bus_find_domain_nr(struct device *parent) { - static int use_dt_domains = -1; - int domain = -1; + static bool static_domains_reserved = false; + int domain_nr; - if (parent) - domain = of_get_pci_domain_nr(parent->of_node); + /* On the first call scan device tree for static allocations. */ + if (!static_domains_reserved) { + of_pci_reserve_static_domain_nr(); + static_domains_reserved = true; + } + + if (parent) { + /* + * If domain is in DT, allocate it in static IDA. This + * prevents duplicate static allocations in case of errors + * in DT. + */ + domain_nr = of_get_pci_domain_nr(parent->of_node); + if (domain_nr >= 0) + return ida_alloc_range(&pci_domain_nr_static_ida, + domain_nr, domain_nr, + GFP_KERNEL); + } /* - * Check DT domain and use_dt_domains values. - * - * If DT domain property is valid (domain >= 0) and - * use_dt_domains != 0, the DT assignment is valid since this means - * we have not previously allocated a domain number by using - * pci_get_new_domain_nr(); we should also update use_dt_domains to - * 1, to indicate that we have just assigned a domain number from - * DT. - * - * If DT domain property value is not valid (ie domain < 0), and we - * have not previously assigned a domain number from DT - * (use_dt_domains != 1) we should assign a domain number by - * using the: - * - * pci_get_new_domain_nr() - * - * API and update the use_dt_domains value to keep track of method we - * are using to assign domain numbers (use_dt_domains = 0). - * - * All other combinations imply we have a platform that is trying - * to mix domain numbers obtained from DT and pci_get_new_domain_nr(), - * which is a recipe for domain mishandling and it is prevented by - * invalidating the domain value (domain = -1) and printing a - * corresponding error. + * If domain was not specified in DT, choose a free ID from dynamic + * allocations. All domain numbers from DT are permanently in + * dynamic allocations to prevent assigning them to other DT nodes + * without static domain. */ - if (domain >= 0 && use_dt_domains) { - use_dt_domains = 1; - } else if (domain < 0 && use_dt_domains != 1) { - use_dt_domains = 0; - domain = pci_get_new_domain_nr(); - } else { - if (parent) - pr_err("Node %pOF has ", parent->of_node); - pr_err("Inconsistent \"linux,pci-domain\" property in DT\n"); - domain = -1; - } + return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL); +} - return domain; +static void of_pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent) +{ + if (bus->domain_nr < 0) + return; + + /* Release domain from IDA where it was allocated. */ + if (of_get_pci_domain_nr(parent->of_node) == bus->domain_nr) + ida_free(&pci_domain_nr_static_ida, bus->domain_nr); + else + ida_free(&pci_domain_nr_dynamic_ida, bus->domain_nr); } int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent) @@ -6804,6 +6816,13 @@ int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent) return acpi_disabled ? 
of_pci_bus_find_domain_nr(parent) : acpi_pci_bus_find_domain_nr(bus); } + +void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent) +{ + if (!acpi_disabled) + return; + of_pci_bus_release_domain_nr(bus, parent); +} #endif /** diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index b1ebb7ab8805170d224b15b292be52b092090457..9ed3b5550043267725f963d12a5dc03a9b469c03 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -15,6 +15,7 @@ extern const unsigned char pcie_link_speed[]; extern bool pci_early_dump; bool pcie_cap_has_lnkctl(const struct pci_dev *dev); +bool pcie_cap_has_lnkctl2(const struct pci_dev *dev); bool pcie_cap_has_rtctl(const struct pci_dev *dev); /* Functions internal to the PCI core code */ diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 788ac8df3f9dbf9891ceb612af909e7b07dc7805..228652a59f27e0c29ecef7fb337596653dd9e0b4 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -4,6 +4,7 @@ # config PCIEPORTBUS bool "PCI Express Port Bus support" + default y if USB4 help This enables PCI Express Port Bus support. Users can then enable support for Native Hot-Plug, Advanced Error Reporting, Power @@ -15,9 +16,12 @@ config PCIEPORTBUS config HOTPLUG_PCI_PCIE bool "PCI Express Hotplug driver" depends on HOTPLUG_PCI && PCIEPORTBUS + default y if USB4 help - Say Y here if you have a motherboard that supports PCI Express Native - Hotplug + Say Y here if you have a motherboard that supports PCIe native + hotplug. + + Thunderbolt/USB4 PCIe tunneling depends on native PCIe hotplug. When in doubt, say N. diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index 5783a2f79e6a2eda315003f6f5cfe1ba510b585f..8de4ed5f98f145065b6c2678f7e09dd184d09a24 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile @@ -2,7 +2,7 @@ # # Makefile for PCI Express features and port driver -pcieportdrv-y := portdrv_core.o portdrv_pci.o rcec.o +pcieportdrv-y := portdrv.o rcec.o obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv.c similarity index 68% rename from drivers/pci/pcie/portdrv_core.c rename to drivers/pci/pcie/portdrv.c index 1ac7fec47d6fb9452641aed61abb95024dfe47c8..2cc2e60bcb3962bd38f5e1c28dabe1854d36a2e5 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Purpose: PCI Express Port Bus Driver's Core Functions + * Purpose: PCI Express Port Bus Driver * * Copyright (C) 2004 Intel * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) */ +#include +#include #include #include #include @@ -19,6 +21,15 @@ #include "../pci.h" #include "portdrv.h" +/* + * The PCIe Capability Interrupt Message Number (PCIe r3.1, sec 7.8.2) must + * be one of the first 32 MSI-X entries. Per PCI r3.0, sec 6.8.3.1, MSI + * supports a maximum of 32 vectors per function. 
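 * As a consequence, the vector setup below never asks for more than
 * PCIE_PORT_MAX_MSI_ENTRIES vectors, and an Interrupt Message Number at
 * or above that limit cannot be used for port services.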
+ */ +#define PCIE_PORT_MAX_MSI_ENTRIES 32 + +#define get_descriptor_id(type, service) (((type - 4) << 8) | service) + struct portdrv_service_data { struct pcie_port_service_driver *drv; struct device *dev; @@ -209,6 +220,8 @@ static int get_port_device_capability(struct pci_dev *dev) int services = 0; if (dev->is_hotplug_bridge && + (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) && (pcie_ports_native || host->native_pcie_hotplug)) { services |= PCIE_PORT_SERVICE_HP; @@ -221,7 +234,9 @@ static int get_port_device_capability(struct pci_dev *dev) } #ifdef CONFIG_PCIEAER - if (dev->aer_cap && pci_aer_available() && + if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC) && + dev->aer_cap && pci_aer_available() && (pcie_ports_native || host->native_aer)) services |= PCIE_PORT_SERVICE_AER; #endif @@ -308,7 +323,7 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq) * Allocate the port extension structure and register services associated with * the port. */ -int pcie_port_device_register(struct pci_dev *dev) +static int pcie_port_device_register(struct pci_dev *dev) { int status, capabilities, i, nr_service; int irqs[PCIE_PORT_DEVICE_MAXSERVICES]; @@ -362,7 +377,7 @@ error_disable: typedef int (*pcie_callback_t)(struct pcie_device *); -int pcie_port_device_iter(struct device *dev, void *data) +static int pcie_port_device_iter(struct device *dev, void *data) { struct pcie_port_service_driver *service_driver; size_t offset = *(size_t *)data; @@ -382,13 +397,13 @@ int pcie_port_device_iter(struct device *dev, void *data) * pcie_port_device_suspend - suspend port services associated with a PCIe port * @dev: PCI Express port to handle */ -int pcie_port_device_suspend(struct device *dev) +static int pcie_port_device_suspend(struct device *dev) { size_t off = offsetof(struct pcie_port_service_driver, suspend); return device_for_each_child(dev, &off, pcie_port_device_iter); } -int pcie_port_device_resume_noirq(struct device *dev) +static int pcie_port_device_resume_noirq(struct device *dev) { size_t off = offsetof(struct pcie_port_service_driver, resume_noirq); return device_for_each_child(dev, &off, pcie_port_device_iter); @@ -398,7 +413,7 @@ int pcie_port_device_resume_noirq(struct device *dev) * pcie_port_device_resume - resume port services associated with a PCIe port * @dev: PCI Express port to handle */ -int pcie_port_device_resume(struct device *dev) +static int pcie_port_device_resume(struct device *dev) { size_t off = offsetof(struct pcie_port_service_driver, resume); return device_for_each_child(dev, &off, pcie_port_device_iter); @@ -408,7 +423,7 @@ int pcie_port_device_resume(struct device *dev) * pcie_port_device_runtime_suspend - runtime suspend port services * @dev: PCI Express port to handle */ -int pcie_port_device_runtime_suspend(struct device *dev) +static int pcie_port_device_runtime_suspend(struct device *dev) { size_t off = offsetof(struct pcie_port_service_driver, runtime_suspend); return device_for_each_child(dev, &off, pcie_port_device_iter); @@ -418,7 +433,7 @@ int pcie_port_device_runtime_suspend(struct device *dev) * pcie_port_device_runtime_resume - runtime resume port services * @dev: PCI Express port to handle */ -int pcie_port_device_runtime_resume(struct device *dev) +static int pcie_port_device_runtime_resume(struct device *dev) { size_t off = offsetof(struct pcie_port_service_driver, runtime_resume); return device_for_each_child(dev, &off, 
pcie_port_device_iter); @@ -482,7 +497,7 @@ EXPORT_SYMBOL_GPL(pcie_port_find_device); * Remove PCI Express port service devices associated with given port and * disable MSI-X or MSI for the port. */ -void pcie_port_device_remove(struct pci_dev *dev) +static void pcie_port_device_remove(struct pci_dev *dev) { device_for_each_child(&dev->dev, NULL, remove_iter); pci_free_irq_vectors(dev); @@ -573,7 +588,6 @@ int pcie_port_service_register(struct pcie_port_service_driver *new) return driver_register(&new->driver); } -EXPORT_SYMBOL(pcie_port_service_register); /** * pcie_port_service_unregister - unregister PCI Express port service driver @@ -583,4 +597,235 @@ void pcie_port_service_unregister(struct pcie_port_service_driver *drv) { driver_unregister(&drv->driver); } -EXPORT_SYMBOL(pcie_port_service_unregister); + +/* If this switch is set, PCIe port native services should not be enabled. */ +bool pcie_ports_disabled; + +/* + * If the user specified "pcie_ports=native", use the PCIe services regardless + * of whether the platform has given us permission. On ACPI systems, this + * means we ignore _OSC. + */ +bool pcie_ports_native; + +/* + * If the user specified "pcie_ports=dpc-native", use the Linux DPC PCIe + * service even if the platform hasn't given us permission. + */ +bool pcie_ports_dpc_native; + +static int __init pcie_port_setup(char *str) +{ + if (!strncmp(str, "compat", 6)) + pcie_ports_disabled = true; + else if (!strncmp(str, "native", 6)) + pcie_ports_native = true; + else if (!strncmp(str, "dpc-native", 10)) + pcie_ports_dpc_native = true; + + return 1; +} +__setup("pcie_ports=", pcie_port_setup); + +/* global data */ + +#ifdef CONFIG_PM +static int pcie_port_runtime_suspend(struct device *dev) +{ + if (!to_pci_dev(dev)->bridge_d3) + return -EBUSY; + + return pcie_port_device_runtime_suspend(dev); +} + +static int pcie_port_runtime_idle(struct device *dev) +{ + /* + * Assume the PCI core has set bridge_d3 whenever it thinks the port + * should be good to go to D3. Everything else, including moving + * the port to D3, is handled by the PCI core. + */ + return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY; +} + +static const struct dev_pm_ops pcie_portdrv_pm_ops = { + .suspend = pcie_port_device_suspend, + .resume_noirq = pcie_port_device_resume_noirq, + .resume = pcie_port_device_resume, + .freeze = pcie_port_device_suspend, + .thaw = pcie_port_device_resume, + .poweroff = pcie_port_device_suspend, + .restore_noirq = pcie_port_device_resume_noirq, + .restore = pcie_port_device_resume, + .runtime_suspend = pcie_port_runtime_suspend, + .runtime_resume = pcie_port_device_runtime_resume, + .runtime_idle = pcie_port_runtime_idle, +}; + +#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) + +#else /* !PM */ + +#define PCIE_PORTDRV_PM_OPS NULL +#endif /* !PM */ + +/* + * pcie_portdrv_probe - Probe PCI-Express port devices + * @dev: PCI-Express port device being probed + * + * If detected invokes the pcie_port_device_register() method for + * this port device. 
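 * On success the port is also set up for runtime PM where
 * pci_bridge_d3_possible() allows it, with a 100 ms autosuspend delay so
 * that sporadic config space accesses (lspci and friends) do not bounce
 * the port in and out of D3.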
+ * + */ +static int pcie_portdrv_probe(struct pci_dev *dev, + const struct pci_device_id *id) +{ + int type = pci_pcie_type(dev); + int status; + + if (!pci_is_pcie(dev) || + ((type != PCI_EXP_TYPE_ROOT_PORT) && + (type != PCI_EXP_TYPE_UPSTREAM) && + (type != PCI_EXP_TYPE_DOWNSTREAM) && + (type != PCI_EXP_TYPE_RC_EC))) + return -ENODEV; + + if (type == PCI_EXP_TYPE_RC_EC) + pcie_link_rcec(dev); + + status = pcie_port_device_register(dev); + if (status) + return status; + + pci_save_state(dev); + + dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE | + DPM_FLAG_SMART_SUSPEND); + + if (pci_bridge_d3_possible(dev)) { + /* + * Keep the port resumed 100ms to make sure things like + * config space accesses from userspace (lspci) will not + * cause the port to repeatedly suspend and resume. + */ + pm_runtime_set_autosuspend_delay(&dev->dev, 100); + pm_runtime_use_autosuspend(&dev->dev); + pm_runtime_mark_last_busy(&dev->dev); + pm_runtime_put_autosuspend(&dev->dev); + pm_runtime_allow(&dev->dev); + } + + return 0; +} + +static void pcie_portdrv_remove(struct pci_dev *dev) +{ + if (pci_bridge_d3_possible(dev)) { + pm_runtime_forbid(&dev->dev); + pm_runtime_get_noresume(&dev->dev); + pm_runtime_dont_use_autosuspend(&dev->dev); + } + + pcie_port_device_remove(dev); +} + +static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev, + pci_channel_state_t error) +{ + if (error == pci_channel_io_frozen) + return PCI_ERS_RESULT_NEED_RESET; + return PCI_ERS_RESULT_CAN_RECOVER; +} + +static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev) +{ + size_t off = offsetof(struct pcie_port_service_driver, slot_reset); + device_for_each_child(&dev->dev, &off, pcie_port_device_iter); + + pci_restore_state(dev); + pci_save_state(dev); + return PCI_ERS_RESULT_RECOVERED; +} + +static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev) +{ + return PCI_ERS_RESULT_RECOVERED; +} + +/* + * LINUX Device Driver Model + */ +static const struct pci_device_id port_pci_ids[] = { + /* handle any PCI-Express port */ + { PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL, ~0) }, + /* subtractive decode PCI-to-PCI bridge, class type is 060401h */ + { PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE, ~0) }, + /* handle any Root Complex Event Collector */ + { PCI_DEVICE_CLASS(((PCI_CLASS_SYSTEM_RCEC << 8) | 0x00), ~0) }, + { }, +}; + +static const struct pci_error_handlers pcie_portdrv_err_handler = { + .error_detected = pcie_portdrv_error_detected, + .slot_reset = pcie_portdrv_slot_reset, + .mmio_enabled = pcie_portdrv_mmio_enabled, +}; + +static struct pci_driver pcie_portdriver = { + .name = "pcieport", + .id_table = &port_pci_ids[0], + + .probe = pcie_portdrv_probe, + .remove = pcie_portdrv_remove, + .shutdown = pcie_portdrv_remove, + + .err_handler = &pcie_portdrv_err_handler, + + .driver_managed_dma = true, + + .driver.pm = PCIE_PORTDRV_PM_OPS, +}; + +static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d) +{ + pr_notice("%s detected: will not use MSI for PCIe PME signaling\n", + d->ident); + pcie_pme_disable_msi(); + return 0; +} + +static const struct dmi_system_id pcie_portdrv_dmi_table[] __initconst = { + /* + * Boxes that should not use MSI for PCIe PME signaling. 
+ */ + { + .callback = dmi_pcie_pme_disable_msi, + .ident = "MSI Wind U-100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, + "MICRO-STAR INTERNATIONAL CO., LTD"), + DMI_MATCH(DMI_PRODUCT_NAME, "U-100"), + }, + }, + {} +}; + +static void __init pcie_init_services(void) +{ + pcie_aer_init(); + pcie_pme_init(); + pcie_dpc_init(); + pcie_hp_init(); +} + +static int __init pcie_portdrv_init(void) +{ + if (pcie_ports_disabled) + return -EACCES; + + pcie_init_services(); + dmi_check_system(pcie_portdrv_dmi_table); + + return pci_register_driver(&pcie_portdriver); +} +device_initcall(pcie_portdrv_init); diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 0ef4bf5f811d9d74ee134f1041fc1538fc0f838c..58a2b1a1cae4c03c60416cf72f909c33fecf6d90 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -98,26 +98,7 @@ struct pcie_port_service_driver { int pcie_port_service_register(struct pcie_port_service_driver *new); void pcie_port_service_unregister(struct pcie_port_service_driver *new); -/* - * The PCIe Capability Interrupt Message Number (PCIe r3.1, sec 7.8.2) must - * be one of the first 32 MSI-X entries. Per PCI r3.0, sec 6.8.3.1, MSI - * supports a maximum of 32 vectors per function. - */ -#define PCIE_PORT_MAX_MSI_ENTRIES 32 - -#define get_descriptor_id(type, service) (((type - 4) << 8) | service) - extern struct bus_type pcie_port_bus_type; -int pcie_port_device_register(struct pci_dev *dev); -int pcie_port_device_iter(struct device *dev, void *data); -#ifdef CONFIG_PM -int pcie_port_device_suspend(struct device *dev); -int pcie_port_device_resume_noirq(struct device *dev); -int pcie_port_device_resume(struct device *dev); -int pcie_port_device_runtime_suspend(struct device *dev); -int pcie_port_device_runtime_resume(struct device *dev); -#endif -void pcie_port_device_remove(struct pci_dev *dev); struct pci_dev; diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c deleted file mode 100644 index 7f8788a970ae18f1be9db6d7d2e863b0c82435bb..0000000000000000000000000000000000000000 --- a/drivers/pci/pcie/portdrv_pci.c +++ /dev/null @@ -1,252 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Purpose: PCI Express Port Bus Driver - * Author: Tom Nguyen - * - * Copyright (C) 2004 Intel - * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" -#include "portdrv.h" - -/* If this switch is set, PCIe port native services should not be enabled. */ -bool pcie_ports_disabled; - -/* - * If the user specified "pcie_ports=native", use the PCIe services regardless - * of whether the platform has given us permission. On ACPI systems, this - * means we ignore _OSC. - */ -bool pcie_ports_native; - -/* - * If the user specified "pcie_ports=dpc-native", use the Linux DPC PCIe - * service even if the platform hasn't given us permission. 
- */ -bool pcie_ports_dpc_native; - -static int __init pcie_port_setup(char *str) -{ - if (!strncmp(str, "compat", 6)) - pcie_ports_disabled = true; - else if (!strncmp(str, "native", 6)) - pcie_ports_native = true; - else if (!strncmp(str, "dpc-native", 10)) - pcie_ports_dpc_native = true; - - return 1; -} -__setup("pcie_ports=", pcie_port_setup); - -/* global data */ - -#ifdef CONFIG_PM -static int pcie_port_runtime_suspend(struct device *dev) -{ - if (!to_pci_dev(dev)->bridge_d3) - return -EBUSY; - - return pcie_port_device_runtime_suspend(dev); -} - -static int pcie_port_runtime_idle(struct device *dev) -{ - /* - * Assume the PCI core has set bridge_d3 whenever it thinks the port - * should be good to go to D3. Everything else, including moving - * the port to D3, is handled by the PCI core. - */ - return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY; -} - -static const struct dev_pm_ops pcie_portdrv_pm_ops = { - .suspend = pcie_port_device_suspend, - .resume_noirq = pcie_port_device_resume_noirq, - .resume = pcie_port_device_resume, - .freeze = pcie_port_device_suspend, - .thaw = pcie_port_device_resume, - .poweroff = pcie_port_device_suspend, - .restore_noirq = pcie_port_device_resume_noirq, - .restore = pcie_port_device_resume, - .runtime_suspend = pcie_port_runtime_suspend, - .runtime_resume = pcie_port_device_runtime_resume, - .runtime_idle = pcie_port_runtime_idle, -}; - -#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) - -#else /* !PM */ - -#define PCIE_PORTDRV_PM_OPS NULL -#endif /* !PM */ - -/* - * pcie_portdrv_probe - Probe PCI-Express port devices - * @dev: PCI-Express port device being probed - * - * If detected invokes the pcie_port_device_register() method for - * this port device. - * - */ -static int pcie_portdrv_probe(struct pci_dev *dev, - const struct pci_device_id *id) -{ - int type = pci_pcie_type(dev); - int status; - - if (!pci_is_pcie(dev) || - ((type != PCI_EXP_TYPE_ROOT_PORT) && - (type != PCI_EXP_TYPE_UPSTREAM) && - (type != PCI_EXP_TYPE_DOWNSTREAM) && - (type != PCI_EXP_TYPE_RC_EC))) - return -ENODEV; - - if (type == PCI_EXP_TYPE_RC_EC) - pcie_link_rcec(dev); - - status = pcie_port_device_register(dev); - if (status) - return status; - - pci_save_state(dev); - - dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE | - DPM_FLAG_SMART_SUSPEND); - - if (pci_bridge_d3_possible(dev)) { - /* - * Keep the port resumed 100ms to make sure things like - * config space accesses from userspace (lspci) will not - * cause the port to repeatedly suspend and resume. 
- */ - pm_runtime_set_autosuspend_delay(&dev->dev, 100); - pm_runtime_use_autosuspend(&dev->dev); - pm_runtime_mark_last_busy(&dev->dev); - pm_runtime_put_autosuspend(&dev->dev); - pm_runtime_allow(&dev->dev); - } - - return 0; -} - -static void pcie_portdrv_remove(struct pci_dev *dev) -{ - if (pci_bridge_d3_possible(dev)) { - pm_runtime_forbid(&dev->dev); - pm_runtime_get_noresume(&dev->dev); - pm_runtime_dont_use_autosuspend(&dev->dev); - } - - pcie_port_device_remove(dev); -} - -static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev, - pci_channel_state_t error) -{ - if (error == pci_channel_io_frozen) - return PCI_ERS_RESULT_NEED_RESET; - return PCI_ERS_RESULT_CAN_RECOVER; -} - -static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev) -{ - size_t off = offsetof(struct pcie_port_service_driver, slot_reset); - device_for_each_child(&dev->dev, &off, pcie_port_device_iter); - - pci_restore_state(dev); - pci_save_state(dev); - return PCI_ERS_RESULT_RECOVERED; -} - -static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev) -{ - return PCI_ERS_RESULT_RECOVERED; -} - -/* - * LINUX Device Driver Model - */ -static const struct pci_device_id port_pci_ids[] = { - /* handle any PCI-Express port */ - { PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL, ~0) }, - /* subtractive decode PCI-to-PCI bridge, class type is 060401h */ - { PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE, ~0) }, - /* handle any Root Complex Event Collector */ - { PCI_DEVICE_CLASS(((PCI_CLASS_SYSTEM_RCEC << 8) | 0x00), ~0) }, - { }, -}; - -static const struct pci_error_handlers pcie_portdrv_err_handler = { - .error_detected = pcie_portdrv_error_detected, - .slot_reset = pcie_portdrv_slot_reset, - .mmio_enabled = pcie_portdrv_mmio_enabled, -}; - -static struct pci_driver pcie_portdriver = { - .name = "pcieport", - .id_table = &port_pci_ids[0], - - .probe = pcie_portdrv_probe, - .remove = pcie_portdrv_remove, - .shutdown = pcie_portdrv_remove, - - .err_handler = &pcie_portdrv_err_handler, - - .driver_managed_dma = true, - - .driver.pm = PCIE_PORTDRV_PM_OPS, -}; - -static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d) -{ - pr_notice("%s detected: will not use MSI for PCIe PME signaling\n", - d->ident); - pcie_pme_disable_msi(); - return 0; -} - -static const struct dmi_system_id pcie_portdrv_dmi_table[] __initconst = { - /* - * Boxes that should not use MSI for PCIe PME signaling. 
- */ - { - .callback = dmi_pcie_pme_disable_msi, - .ident = "MSI Wind U-100", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "MICRO-STAR INTERNATIONAL CO., LTD"), - DMI_MATCH(DMI_PRODUCT_NAME, "U-100"), - }, - }, - {} -}; - -static void __init pcie_init_services(void) -{ - pcie_aer_init(); - pcie_pme_init(); - pcie_dpc_init(); - pcie_hp_init(); -} - -static int __init pcie_portdrv_init(void) -{ - if (pcie_ports_disabled) - return -EACCES; - - pcie_init_services(); - dmi_check_system(pcie_portdrv_dmi_table); - - return pci_register_driver(&pcie_portdriver); -} -device_initcall(pcie_portdrv_init); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 334fd91f01e1e6e528105cae7230339066c8aea1..1779582fb5007cd1ba0b054e78c545d0d4f644c3 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -904,6 +904,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) bus->domain_nr = pci_bus_find_domain_nr(bus, parent); else bus->domain_nr = bridge->domain_nr; + if (bus->domain_nr < 0) { + err = bus->domain_nr; + goto free; + } #endif b = pci_find_bus(pci_domain_nr(bus), bridge->busnr); @@ -1028,6 +1032,9 @@ unregister: device_del(&bridge->dev); free: +#ifdef CONFIG_PCI_DOMAINS_GENERIC + pci_bus_release_domain_nr(bus, parent); +#endif kfree(bus); return err; } @@ -1889,9 +1896,6 @@ int pci_setup_device(struct pci_dev *dev) dev->broken_intx_masking = pci_intx_mask_broken(dev); - /* Clear errors left from system firmware */ - pci_write_config_word(dev, PCI_STATUS, 0xffff); - switch (dev->hdr_type) { /* header type */ case PCI_HEADER_TYPE_NORMAL: /* standard header */ if (class == PCI_CLASS_BRIDGE_PCI) diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 4c54c75050dc12179b53d82425699715d7ad3597..0145aef1b9301823e3a406611353b596036e8a77 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -160,6 +160,12 @@ void pci_remove_root_bus(struct pci_bus *bus) pci_remove_bus(bus); host_bridge->bus = NULL; +#ifdef CONFIG_PCI_DOMAINS_GENERIC + /* Release domain_nr if it was dynamically allocated */ + if (host_bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET) + pci_bus_release_domain_nr(bus, host_bridge->dev.parent); +#endif + /* remove the host bridge */ device_del(&host_bridge->dev); } diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index f70197154a36256ce59dcaf31d22faa09cb5fcaf..e3224e49c43fd687bc0b4a5572f83ac62fec6cd7 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c @@ -810,10 +810,10 @@ int pcmcia_reset_card(struct pcmcia_socket *skt) EXPORT_SYMBOL(pcmcia_reset_card); -static int pcmcia_socket_uevent(struct device *dev, +static int pcmcia_socket_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct pcmcia_socket *s = container_of(dev, struct pcmcia_socket, dev); + const struct pcmcia_socket *s = container_of(dev, struct pcmcia_socket, dev); if (add_uevent_var(env, "SOCKET_NO=%u", s->sock)) return -ENOMEM; diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 3852c18362f53ec2630a7f10c5a6d6e6baf59a9c..f6507efe2a5846d8180b1baf1047424fd65303bb 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -20,6 +20,7 @@ #include #include +#include #include #include @@ -47,6 +48,8 @@ static const struct attribute_group *riscv_pmu_attr_groups[] = { * per_cpu in case of harts with different pmu counters */ static union sbi_pmu_ctr_info *pmu_ctr_list; +static bool riscv_pmu_use_irq; +static unsigned int riscv_pmu_irq_num; static unsigned int riscv_pmu_irq; struct sbi_pmu_event_data { @@ -580,7 +583,7 @@ 
static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS); event = cpu_hw_evt->events[fidx]; if (!event) { - csr_clear(CSR_SIP, SIP_LCOFIP); + csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); return IRQ_NONE; } @@ -588,13 +591,13 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) pmu_sbi_stop_hw_ctrs(pmu); /* Overflow status register should only be read after counter are stopped */ - overflow = csr_read(CSR_SSCOUNTOVF); + ALT_SBI_PMU_OVERFLOW(overflow); /* * Overflow interrupt pending bit should only be cleared after stopping * all the counters to avoid any race condition. */ - csr_clear(CSR_SIP, SIP_LCOFIP); + csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); /* No overflow bit is set */ if (!overflow) @@ -661,10 +664,10 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node) /* Stop all the counters so that they can be enabled from perf */ pmu_sbi_stop_all(pmu); - if (riscv_isa_extension_available(NULL, SSCOFPMF)) { + if (riscv_pmu_use_irq) { cpu_hw_evt->irq = riscv_pmu_irq; - csr_clear(CSR_IP, BIT(RV_IRQ_PMU)); - csr_set(CSR_IE, BIT(RV_IRQ_PMU)); + csr_clear(CSR_IP, BIT(riscv_pmu_irq_num)); + csr_set(CSR_IE, BIT(riscv_pmu_irq_num)); enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE); } @@ -673,9 +676,9 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node) static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node) { - if (riscv_isa_extension_available(NULL, SSCOFPMF)) { + if (riscv_pmu_use_irq) { disable_percpu_irq(riscv_pmu_irq); - csr_clear(CSR_IE, BIT(RV_IRQ_PMU)); + csr_clear(CSR_IE, BIT(riscv_pmu_irq_num)); } /* Disable all counters access for user mode now */ @@ -691,7 +694,18 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde struct device_node *cpu, *child; struct irq_domain *domain = NULL; - if (!riscv_isa_extension_available(NULL, SSCOFPMF)) + if (riscv_isa_extension_available(NULL, SSCOFPMF)) { + riscv_pmu_irq_num = RV_IRQ_PMU; + riscv_pmu_use_irq = true; + } else if (IS_ENABLED(CONFIG_ERRATA_THEAD_PMU) && + riscv_cached_mvendorid(0) == THEAD_VENDOR_ID && + riscv_cached_marchid(0) == 0 && + riscv_cached_mimpid(0) == 0) { + riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU; + riscv_pmu_use_irq = true; + } + + if (!riscv_pmu_use_irq) return -EOPNOTSUPP; for_each_of_cpu_node(cpu) { @@ -713,7 +727,7 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde return -ENODEV; } - riscv_pmu_irq = irq_create_mapping(domain, RV_IRQ_PMU); + riscv_pmu_irq = irq_create_mapping(domain, riscv_pmu_irq_num); if (!riscv_pmu_irq) { pr_err("Failed to map PMU interrupt for node\n"); return -ENODEV; diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index 3a3831f6059a3f5e9733902a16ad947f34697ed6..5472db9e87ef88af6f743233d529fd29284cc111 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c @@ -120,6 +120,7 @@ struct sun4i_usb_phy_cfg { u8 phyctl_offset; bool dedicated_clocks; bool phy0_dual_route; + bool needs_phy2_siddq; int missing_phys; }; @@ -289,6 +290,50 @@ static int sun4i_usb_phy_init(struct phy *_phy) return ret; } + /* Some PHYs on some SoCs need the help of PHY2 to work. 
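 * On those SoCs the SIDDQ bit gating the other PHYs lives in PHY2's PMU
 * register block (REG_HCI_PHY_CTL), so PHY2's clocks and reset must be
 * brought up, and the bit cleared there, before any other PHY is
 * initialized.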
*/ + if (data->cfg->needs_phy2_siddq && phy->index != 2) { + struct sun4i_usb_phy *phy2 = &data->phys[2]; + + ret = clk_prepare_enable(phy2->clk); + if (ret) { + reset_control_assert(phy->reset); + clk_disable_unprepare(phy->clk2); + clk_disable_unprepare(phy->clk); + return ret; + } + + ret = reset_control_deassert(phy2->reset); + if (ret) { + clk_disable_unprepare(phy2->clk); + reset_control_assert(phy->reset); + clk_disable_unprepare(phy->clk2); + clk_disable_unprepare(phy->clk); + return ret; + } + + /* + * This extra clock is just needed to access the + * REG_HCI_PHY_CTL PMU register for PHY2. + */ + ret = clk_prepare_enable(phy2->clk2); + if (ret) { + reset_control_assert(phy2->reset); + clk_disable_unprepare(phy2->clk); + reset_control_assert(phy->reset); + clk_disable_unprepare(phy->clk2); + clk_disable_unprepare(phy->clk); + return ret; + } + + if (phy2->pmu && data->cfg->hci_phy_ctl_clear) { + val = readl(phy2->pmu + REG_HCI_PHY_CTL); + val &= ~data->cfg->hci_phy_ctl_clear; + writel(val, phy2->pmu + REG_HCI_PHY_CTL); + } + + clk_disable_unprepare(phy->clk2); + } + if (phy->pmu && data->cfg->hci_phy_ctl_clear) { val = readl(phy->pmu + REG_HCI_PHY_CTL); val &= ~data->cfg->hci_phy_ctl_clear; @@ -354,6 +399,13 @@ static int sun4i_usb_phy_exit(struct phy *_phy) data->phy0_init = false; } + if (data->cfg->needs_phy2_siddq && phy->index != 2) { + struct sun4i_usb_phy *phy2 = &data->phys[2]; + + clk_disable_unprepare(phy2->clk); + reset_control_assert(phy2->reset); + } + sun4i_usb_phy_passby(phy, 0); reset_control_assert(phy->reset); clk_disable_unprepare(phy->clk2); @@ -785,6 +837,13 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) dev_err(dev, "failed to get clock %s\n", name); return PTR_ERR(phy->clk2); } + } else { + snprintf(name, sizeof(name), "pmu%d_clk", i); + phy->clk2 = devm_clk_get_optional(dev, name); + if (IS_ERR(phy->clk2)) { + dev_err(dev, "failed to get clock %s\n", name); + return PTR_ERR(phy->clk2); + } } snprintf(name, sizeof(name), "usb%d_reset", i); @@ -973,6 +1032,17 @@ static const struct sun4i_usb_phy_cfg sun50i_h6_cfg = { .missing_phys = BIT(1) | BIT(2), }; +static const struct sun4i_usb_phy_cfg sun50i_h616_cfg = { + .num_phys = 4, + .type = sun50i_h6_phy, + .disc_thresh = 3, + .phyctl_offset = REG_PHYCTL_A33, + .dedicated_clocks = true, + .phy0_dual_route = true, + .hci_phy_ctl_clear = PHY_CTL_SIDDQ, + .needs_phy2_siddq = true, +}; + static const struct of_device_id sun4i_usb_phy_of_match[] = { { .compatible = "allwinner,sun4i-a10-usb-phy", .data = &sun4i_a10_cfg }, { .compatible = "allwinner,sun5i-a13-usb-phy", .data = &sun5i_a13_cfg }, @@ -988,6 +1058,7 @@ static const struct of_device_id sun4i_usb_phy_of_match[] = { { .compatible = "allwinner,sun50i-a64-usb-phy", .data = &sun50i_a64_cfg}, { .compatible = "allwinner,sun50i-h6-usb-phy", .data = &sun50i_h6_cfg }, + { .compatible = "allwinner,sun50i-h616-usb-phy", .data = &sun50i_h616_cfg }, { }, }; MODULE_DEVICE_TABLE(of, sun4i_usb_phy_of_match); diff --git a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c index 3900f165085153d5c9d0b8954a6344a464d75445..36eab95271b2dcdea9e336c56c400c9f20223696 100644 --- a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c +++ b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c @@ -70,11 +70,19 @@ #define SUN6I_DPHY_ANA0_REG 0x4c #define SUN6I_DPHY_ANA0_REG_PWS BIT(31) +#define SUN6I_DPHY_ANA0_REG_PWEND BIT(30) +#define SUN6I_DPHY_ANA0_REG_PWENC BIT(29) #define SUN6I_DPHY_ANA0_REG_DMPC BIT(28) #define SUN6I_DPHY_ANA0_REG_DMPD(n) (((n) 
& 0xf) << 24) +#define SUN6I_DPHY_ANA0_REG_SRXDT(n) (((n) & 0xf) << 20) +#define SUN6I_DPHY_ANA0_REG_SRXCK(n) (((n) & 0xf) << 16) +#define SUN6I_DPHY_ANA0_REG_SDIV2 BIT(15) #define SUN6I_DPHY_ANA0_REG_SLV(n) (((n) & 7) << 12) #define SUN6I_DPHY_ANA0_REG_DEN(n) (((n) & 0xf) << 8) +#define SUN6I_DPHY_ANA0_REG_PLR(n) (((n) & 0xf) << 4) #define SUN6I_DPHY_ANA0_REG_SFB(n) (((n) & 3) << 2) +#define SUN6I_DPHY_ANA0_REG_RSD BIT(1) +#define SUN6I_DPHY_ANA0_REG_SELSCK BIT(0) #define SUN6I_DPHY_ANA1_REG 0x50 #define SUN6I_DPHY_ANA1_REG_VTTMODE BIT(31) @@ -97,8 +105,13 @@ #define SUN6I_DPHY_ANA3_EN_LDOR BIT(18) #define SUN6I_DPHY_ANA4_REG 0x5c +#define SUN6I_DPHY_ANA4_REG_EN_MIPI BIT(31) +#define SUN6I_DPHY_ANA4_REG_EN_COMTEST BIT(30) +#define SUN6I_DPHY_ANA4_REG_COMTEST(n) (((n) & 3) << 28) +#define SUN6I_DPHY_ANA4_REG_IB(n) (((n) & 3) << 25) #define SUN6I_DPHY_ANA4_REG_DMPLVC BIT(24) #define SUN6I_DPHY_ANA4_REG_DMPLVD(n) (((n) & 0xf) << 20) +#define SUN6I_DPHY_ANA4_REG_VTT_SET(n) (((n) & 0x7) << 17) #define SUN6I_DPHY_ANA4_REG_CKDV(n) (((n) & 0x1f) << 12) #define SUN6I_DPHY_ANA4_REG_TMSC(n) (((n) & 3) << 10) #define SUN6I_DPHY_ANA4_REG_TMSD(n) (((n) & 3) << 8) @@ -109,11 +122,68 @@ #define SUN6I_DPHY_DBG5_REG 0xf4 +#define SUN50I_DPHY_TX_SLEW_REG0 0xf8 +#define SUN50I_DPHY_TX_SLEW_REG1 0xfc +#define SUN50I_DPHY_TX_SLEW_REG2 0x100 + +#define SUN50I_DPHY_PLL_REG0 0x104 +#define SUN50I_DPHY_PLL_REG0_CP36_EN BIT(23) +#define SUN50I_DPHY_PLL_REG0_LDO_EN BIT(22) +#define SUN50I_DPHY_PLL_REG0_EN_LVS BIT(21) +#define SUN50I_DPHY_PLL_REG0_PLL_EN BIT(20) +#define SUN50I_DPHY_PLL_REG0_P(n) (((n) & 0xf) << 16) +#define SUN50I_DPHY_PLL_REG0_N(n) (((n) & 0xff) << 8) +#define SUN50I_DPHY_PLL_REG0_NDET BIT(7) +#define SUN50I_DPHY_PLL_REG0_TDIV BIT(6) +#define SUN50I_DPHY_PLL_REG0_M0(n) (((n) & 3) << 4) +#define SUN50I_DPHY_PLL_REG0_M1(n) ((n) & 0xf) + +#define SUN50I_DPHY_PLL_REG1 0x108 +#define SUN50I_DPHY_PLL_REG1_UNLOCK_MDSEL(n) (((n) & 3) << 14) +#define SUN50I_DPHY_PLL_REG1_LOCKMDSEL BIT(13) +#define SUN50I_DPHY_PLL_REG1_LOCKDET_EN BIT(12) +#define SUN50I_DPHY_PLL_REG1_VSETA(n) (((n) & 0x7) << 9) +#define SUN50I_DPHY_PLL_REG1_VSETD(n) (((n) & 0x7) << 6) +#define SUN50I_DPHY_PLL_REG1_LPF_SW BIT(5) +#define SUN50I_DPHY_PLL_REG1_ICP_SEL(n) (((n) & 3) << 3) +#define SUN50I_DPHY_PLL_REG1_ATEST_SEL(n) (((n) & 3) << 1) +#define SUN50I_DPHY_PLL_REG1_TEST_EN BIT(0) + +#define SUN50I_DPHY_PLL_REG2 0x10c +#define SUN50I_DPHY_PLL_REG2_SDM_EN BIT(31) +#define SUN50I_DPHY_PLL_REG2_FF_EN BIT(30) +#define SUN50I_DPHY_PLL_REG2_SS_EN BIT(29) +#define SUN50I_DPHY_PLL_REG2_SS_FRAC(n) (((n) & 0x1ff) << 20) +#define SUN50I_DPHY_PLL_REG2_SS_INT(n) (((n) & 0xff) << 12) +#define SUN50I_DPHY_PLL_REG2_FRAC(n) ((n) & 0xfff) + +#define SUN50I_COMBO_PHY_REG0 0x110 +#define SUN50I_COMBO_PHY_REG0_EN_TEST_COMBOLDO BIT(5) +#define SUN50I_COMBO_PHY_REG0_EN_TEST_0P8 BIT(4) +#define SUN50I_COMBO_PHY_REG0_EN_MIPI BIT(3) +#define SUN50I_COMBO_PHY_REG0_EN_LVDS BIT(2) +#define SUN50I_COMBO_PHY_REG0_EN_COMBOLDO BIT(1) +#define SUN50I_COMBO_PHY_REG0_EN_CP BIT(0) + +#define SUN50I_COMBO_PHY_REG1 0x114 +#define SUN50I_COMBO_PHY_REG2_REG_VREF1P6(n) (((n) & 0x7) << 4) +#define SUN50I_COMBO_PHY_REG2_REG_VREF0P8(n) ((n) & 0x7) + +#define SUN50I_COMBO_PHY_REG2 0x118 +#define SUN50I_COMBO_PHY_REG2_HS_STOP_DLY(n) ((n) & 0xff) + enum sun6i_dphy_direction { SUN6I_DPHY_DIRECTION_TX, SUN6I_DPHY_DIRECTION_RX, }; +struct sun6i_dphy; + +struct sun6i_dphy_variant { + void (*tx_power_on)(struct sun6i_dphy *dphy); + bool rx_supported; +}; + struct sun6i_dphy { struct clk 
*bus_clk; struct clk *mod_clk; @@ -123,6 +193,7 @@ struct sun6i_dphy { struct phy *phy; struct phy_configure_opts_mipi_dphy config; + const struct sun6i_dphy_variant *variant; enum sun6i_dphy_direction direction; }; @@ -151,37 +222,10 @@ static int sun6i_dphy_configure(struct phy *phy, union phy_configure_opts *opts) return 0; } -static int sun6i_dphy_tx_power_on(struct sun6i_dphy *dphy) +static void sun6i_a31_mipi_dphy_tx_power_on(struct sun6i_dphy *dphy) { u8 lanes_mask = GENMASK(dphy->config.lanes - 1, 0); - regmap_write(dphy->regs, SUN6I_DPHY_TX_CTL_REG, - SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT); - - regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME0_REG, - SUN6I_DPHY_TX_TIME0_LP_CLK_DIV(14) | - SUN6I_DPHY_TX_TIME0_HS_PREPARE(6) | - SUN6I_DPHY_TX_TIME0_HS_TRAIL(10)); - - regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME1_REG, - SUN6I_DPHY_TX_TIME1_CLK_PREPARE(7) | - SUN6I_DPHY_TX_TIME1_CLK_ZERO(50) | - SUN6I_DPHY_TX_TIME1_CLK_PRE(3) | - SUN6I_DPHY_TX_TIME1_CLK_POST(10)); - - regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME2_REG, - SUN6I_DPHY_TX_TIME2_CLK_TRAIL(30)); - - regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME3_REG, 0); - - regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME4_REG, - SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(3) | - SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(3)); - - regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG, - SUN6I_DPHY_GCTL_LANE_NUM(dphy->config.lanes) | - SUN6I_DPHY_GCTL_EN); - regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG, SUN6I_DPHY_ANA0_REG_PWS | SUN6I_DPHY_ANA0_REG_DMPC | @@ -213,6 +257,106 @@ static int sun6i_dphy_tx_power_on(struct sun6i_dphy *dphy) SUN6I_DPHY_ANA3_EN_LDOC | SUN6I_DPHY_ANA3_EN_LDOD); udelay(1); +} + +static void sun50i_a100_mipi_dphy_tx_power_on(struct sun6i_dphy *dphy) +{ + unsigned long mipi_symbol_rate = dphy->config.hs_clk_rate; + unsigned int div, n; + + regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG, + SUN6I_DPHY_ANA4_REG_IB(2) | + SUN6I_DPHY_ANA4_REG_DMPLVD(4) | + SUN6I_DPHY_ANA4_REG_VTT_SET(3) | + SUN6I_DPHY_ANA4_REG_CKDV(3) | + SUN6I_DPHY_ANA4_REG_TMSD(1) | + SUN6I_DPHY_ANA4_REG_TMSC(1) | + SUN6I_DPHY_ANA4_REG_TXPUSD(2) | + SUN6I_DPHY_ANA4_REG_TXPUSC(3) | + SUN6I_DPHY_ANA4_REG_TXDNSD(2) | + SUN6I_DPHY_ANA4_REG_TXDNSC(3)); + + regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA2_REG, + SUN6I_DPHY_ANA2_EN_CK_CPU, + SUN6I_DPHY_ANA2_EN_CK_CPU); + + regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA2_REG, + SUN6I_DPHY_ANA2_REG_ENIB, + SUN6I_DPHY_ANA2_REG_ENIB); + + regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG, + SUN6I_DPHY_ANA3_EN_LDOR | + SUN6I_DPHY_ANA3_EN_LDOC | + SUN6I_DPHY_ANA3_EN_LDOD); + + regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG, + SUN6I_DPHY_ANA0_REG_PLR(4) | + SUN6I_DPHY_ANA0_REG_SFB(1)); + + regmap_write(dphy->regs, SUN50I_COMBO_PHY_REG0, + SUN50I_COMBO_PHY_REG0_EN_CP); + + /* Choose a divider to limit the VCO frequency to around 2 GHz. */ + div = 16 >> order_base_2(DIV_ROUND_UP(mipi_symbol_rate, 264000000)); + n = mipi_symbol_rate * div / 24000000; + + regmap_write(dphy->regs, SUN50I_DPHY_PLL_REG0, + SUN50I_DPHY_PLL_REG0_CP36_EN | + SUN50I_DPHY_PLL_REG0_LDO_EN | + SUN50I_DPHY_PLL_REG0_EN_LVS | + SUN50I_DPHY_PLL_REG0_PLL_EN | + SUN50I_DPHY_PLL_REG0_NDET | + SUN50I_DPHY_PLL_REG0_P((div - 1) % 8) | + SUN50I_DPHY_PLL_REG0_N(n) | + SUN50I_DPHY_PLL_REG0_M0((div - 1) / 8) | + SUN50I_DPHY_PLL_REG0_M1(2)); + + /* Disable sigma-delta modulation. 
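+ * With sigma-delta (and spread-spectrum) modulation disabled the PLL runs in
+ * pure integer-N mode, so the div/n pair computed above fully determines the
+ * output. As an illustration, assuming hs_clk_rate = 300 MHz:
+ * DIV_ROUND_UP(300 MHz, 264 MHz) = 2, order_base_2(2) = 1, div = 16 >> 1 = 8,
+ * so the VCO runs at 300 MHz * 8 = 2.4 GHz and n = 300 MHz * 8 / 24 MHz = 100
+ * against the 24 MHz reference; REG0 then takes P = (div - 1) % 8 = 7 and
+ * M0 = (div - 1) / 8 = 0.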
*/ + regmap_write(dphy->regs, SUN50I_DPHY_PLL_REG2, 0); + + regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA4_REG, + SUN6I_DPHY_ANA4_REG_EN_MIPI, + SUN6I_DPHY_ANA4_REG_EN_MIPI); + + regmap_update_bits(dphy->regs, SUN50I_COMBO_PHY_REG0, + SUN50I_COMBO_PHY_REG0_EN_MIPI | + SUN50I_COMBO_PHY_REG0_EN_COMBOLDO, + SUN50I_COMBO_PHY_REG0_EN_MIPI | + SUN50I_COMBO_PHY_REG0_EN_COMBOLDO); + + regmap_write(dphy->regs, SUN50I_COMBO_PHY_REG2, + SUN50I_COMBO_PHY_REG2_HS_STOP_DLY(20)); + udelay(1); +} + +static int sun6i_dphy_tx_power_on(struct sun6i_dphy *dphy) +{ + u8 lanes_mask = GENMASK(dphy->config.lanes - 1, 0); + + regmap_write(dphy->regs, SUN6I_DPHY_TX_CTL_REG, + SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT); + + regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME0_REG, + SUN6I_DPHY_TX_TIME0_LP_CLK_DIV(14) | + SUN6I_DPHY_TX_TIME0_HS_PREPARE(6) | + SUN6I_DPHY_TX_TIME0_HS_TRAIL(10)); + + regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME1_REG, + SUN6I_DPHY_TX_TIME1_CLK_PREPARE(7) | + SUN6I_DPHY_TX_TIME1_CLK_ZERO(50) | + SUN6I_DPHY_TX_TIME1_CLK_PRE(3) | + SUN6I_DPHY_TX_TIME1_CLK_POST(10)); + + regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME2_REG, + SUN6I_DPHY_TX_TIME2_CLK_TRAIL(30)); + + regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME3_REG, 0); + + regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME4_REG, + SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(3) | + SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(3)); + + dphy->variant->tx_power_on(dphy); regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA3_REG, SUN6I_DPHY_ANA3_EN_VTTC | @@ -239,6 +383,10 @@ static int sun6i_dphy_tx_power_on(struct sun6i_dphy *dphy) SUN6I_DPHY_ANA2_EN_P2S_CPU_MASK, SUN6I_DPHY_ANA2_EN_P2S_CPU(lanes_mask)); + regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG, + SUN6I_DPHY_GCTL_LANE_NUM(dphy->config.lanes) | + SUN6I_DPHY_GCTL_EN); + return 0; } @@ -393,7 +541,7 @@ static const struct regmap_config sun6i_dphy_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, - .max_register = SUN6I_DPHY_DBG5_REG, + .max_register = SUN50I_COMBO_PHY_REG2, .name = "mipi-dphy", }; @@ -409,6 +557,10 @@ static int sun6i_dphy_probe(struct platform_device *pdev) if (!dphy) return -ENOMEM; + dphy->variant = device_get_match_data(&pdev->dev); + if (!dphy->variant) + return -EINVAL; + regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) { dev_err(&pdev->dev, "Couldn't map the DPHY encoder registers\n"); @@ -445,8 +597,14 @@ static int sun6i_dphy_probe(struct platform_device *pdev) ret = of_property_read_string(pdev->dev.of_node, "allwinner,direction", &direction); - if (!ret && !strncmp(direction, "rx", 2)) + if (!ret && !strncmp(direction, "rx", 2)) { + if (!dphy->variant->rx_supported) { + dev_err(&pdev->dev, "RX not supported on this variant\n"); + return -EOPNOTSUPP; + } + dphy->direction = SUN6I_DPHY_DIRECTION_RX; + } phy_set_drvdata(dphy->phy, dphy); phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate); @@ -454,8 +612,24 @@ static int sun6i_dphy_probe(struct platform_device *pdev) return PTR_ERR_OR_ZERO(phy_provider); } +static const struct sun6i_dphy_variant sun6i_a31_mipi_dphy_variant = { + .tx_power_on = sun6i_a31_mipi_dphy_tx_power_on, + .rx_supported = true, +}; + +static const struct sun6i_dphy_variant sun50i_a100_mipi_dphy_variant = { + .tx_power_on = sun50i_a100_mipi_dphy_tx_power_on, +}; + static const struct of_device_id sun6i_dphy_of_table[] = { - { .compatible = "allwinner,sun6i-a31-mipi-dphy" }, + { + .compatible = "allwinner,sun6i-a31-mipi-dphy", + .data = &sun6i_a31_mipi_dphy_variant, + }, + { + .compatible = "allwinner,sun50i-a100-mipi-dphy", + .data = 
&sun50i_a100_mipi_dphy_variant, + }, { } }; MODULE_DEVICE_TABLE(of, sun6i_dphy_of_table); diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c index d2524b70ea16101cf9463153b48226896922230e..76cf4280d7ed943d8f85b9dbf7e001ff7159a67f 100644 --- a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c +++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c @@ -18,14 +18,14 @@ #define PIARBCTL_CAM 0x00 #define PIARBCTL_SPLITTER 0x04 #define PIARBCTL_MISC 0x08 -#define PIARBCTL_MISC_SECURE_MASK 0x80000000 -#define PIARBCTL_MISC_USB_SELECT_MASK 0x40000000 -#define PIARBCTL_MISC_USB_4G_SDRAM_MASK 0x20000000 -#define PIARBCTL_MISC_USB_PRIORITY_MASK 0x000f0000 -#define PIARBCTL_MISC_USB_MEM_PAGE_MASK 0x0000f000 -#define PIARBCTL_MISC_CAM1_MEM_PAGE_MASK 0x00000f00 -#define PIARBCTL_MISC_CAM0_MEM_PAGE_MASK 0x000000f0 -#define PIARBCTL_MISC_SATA_PRIORITY_MASK 0x0000000f +#define PIARBCTL_MISC_SATA_PRIORITY_MASK GENMASK(3, 0) +#define PIARBCTL_MISC_CAM0_MEM_PAGE_MASK GENMASK(7, 4) +#define PIARBCTL_MISC_CAM1_MEM_PAGE_MASK GENMASK(11, 8) +#define PIARBCTL_MISC_USB_MEM_PAGE_MASK GENMASK(15, 12) +#define PIARBCTL_MISC_USB_PRIORITY_MASK GENMASK(19, 16) +#define PIARBCTL_MISC_USB_4G_SDRAM_MASK BIT(29) +#define PIARBCTL_MISC_USB_SELECT_MASK BIT(30) +#define PIARBCTL_MISC_SECURE_MASK BIT(31) #define PIARBCTL_MISC_USB_ONLY_MASK \ (PIARBCTL_MISC_USB_SELECT_MASK | \ @@ -35,46 +35,47 @@ /* Register definitions for the USB CTRL block */ #define USB_CTRL_SETUP 0x00 -#define USB_CTRL_SETUP_STRAP_IPP_SEL_MASK 0x02000000 -#define USB_CTRL_SETUP_SCB2_EN_MASK 0x00008000 -#define USB_CTRL_SETUP_tca_drv_sel_MASK 0x01000000 -#define USB_CTRL_SETUP_SCB1_EN_MASK 0x00004000 -#define USB_CTRL_SETUP_SOFT_SHUTDOWN_MASK 0x00000200 -#define USB_CTRL_SETUP_IPP_MASK 0x00000020 -#define USB_CTRL_SETUP_IOC_MASK 0x00000010 +#define USB_CTRL_SETUP_IOC_MASK BIT(4) +#define USB_CTRL_SETUP_IPP_MASK BIT(5) +#define USB_CTRL_SETUP_SOFT_SHUTDOWN_MASK BIT(9) +#define USB_CTRL_SETUP_SCB1_EN_MASK BIT(14) +#define USB_CTRL_SETUP_SCB2_EN_MASK BIT(15) +#define USB_CTRL_SETUP_tca_drv_sel_MASK BIT(24) +#define USB_CTRL_SETUP_STRAP_IPP_SEL_MASK BIT(25) #define USB_CTRL_USB_PM 0x04 -#define USB_CTRL_USB_PM_USB_PWRDN_MASK 0x80000000 -#define USB_CTRL_USB_PM_SOFT_RESET_MASK 0x40000000 -#define USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK 0x00800000 -#define USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK 0x00400000 -#define USB_CTRL_USB_PM_XHC_PME_EN_MASK 0x00000010 -#define USB_CTRL_USB_PM_XHC_S2_CLK_SWITCH_EN_MASK 0x00000008 +#define USB_CTRL_USB_PM_XHC_S2_CLK_SWITCH_EN_MASK BIT(3) +#define USB_CTRL_USB_PM_XHC_PME_EN_MASK BIT(4) +#define USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK BIT(22) +#define USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK BIT(23) +#define USB_CTRL_USB_PM_SOFT_RESET_MASK BIT(30) +#define USB_CTRL_USB_PM_USB_PWRDN_MASK BIT(31) #define USB_CTRL_USB_PM_STATUS 0x08 #define USB_CTRL_USB_DEVICE_CTL1 0x10 -#define USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK 0x00000003 +#define USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK GENMASK(1, 0) #define USB_CTRL_TEST_PORT_CTL 0x30 -#define USB_CTRL_TEST_PORT_CTL_TPOUT_SEL_MASK 0x000000ff +#define USB_CTRL_TEST_PORT_CTL_TPOUT_SEL_MASK GENMASK(7, 0) #define USB_CTRL_TEST_PORT_CTL_TPOUT_SEL_PME_GEN_MASK 0x0000002e #define USB_CTRL_TP_DIAG1 0x34 -#define USB_CTLR_TP_DIAG1_wake_MASK 0x00000002 +#define USB_CTLR_TP_DIAG1_wake_MASK BIT(1) #define USB_CTRL_CTLR_CSHCR 0x50 -#define USB_CTRL_CTLR_CSHCR_ctl_pme_en_MASK 0x00040000 +#define USB_CTRL_CTLR_CSHCR_ctl_pme_en_MASK BIT(18) /* Register 
definitions for the USB_PHY block in 7211b0 */ #define USB_PHY_PLL_CTL 0x00 -#define USB_PHY_PLL_CTL_PLL_RESETB_MASK 0x40000000 +#define USB_PHY_PLL_CTL_PLL_SUSPEND_MASK BIT(27) +#define USB_PHY_PLL_CTL_PLL_RESETB_MASK BIT(30) #define USB_PHY_PLL_LDO_CTL 0x08 -#define USB_PHY_PLL_LDO_CTL_AFE_CORERDY_MASK 0x00000004 -#define USB_PHY_PLL_LDO_CTL_AFE_LDO_PWRDWNB_MASK 0x00000002 -#define USB_PHY_PLL_LDO_CTL_AFE_BG_PWRDWNB_MASK 0x00000001 +#define USB_PHY_PLL_LDO_CTL_AFE_BG_PWRDWNB_MASK BIT(0) +#define USB_PHY_PLL_LDO_CTL_AFE_LDO_PWRDWNB_MASK BIT(1) +#define USB_PHY_PLL_LDO_CTL_AFE_CORERDY_MASK BIT(2) #define USB_PHY_UTMI_CTL_1 0x04 -#define USB_PHY_UTMI_CTL_1_POWER_UP_FSM_EN_MASK 0x00000800 -#define USB_PHY_UTMI_CTL_1_PHY_MODE_MASK 0x0000000c +#define USB_PHY_UTMI_CTL_1_PHY_MODE_MASK GENMASK(3, 2) #define USB_PHY_UTMI_CTL_1_PHY_MODE_SHIFT 2 +#define USB_PHY_UTMI_CTL_1_POWER_UP_FSM_EN_MASK BIT(11) #define USB_PHY_IDDQ 0x1c -#define USB_PHY_IDDQ_phy_iddq_MASK 0x00000001 +#define USB_PHY_IDDQ_phy_iddq_MASK BIT(0) #define USB_PHY_STATUS 0x20 -#define USB_PHY_STATUS_pll_lock_MASK 0x00000001 +#define USB_PHY_STATUS_pll_lock_MASK BIT(0) /* Register definitions for the MDIO registers in the DWC2 block of * the 7211b0. @@ -86,7 +87,7 @@ /* Register definitions for the BDC EC block in 7211b0 */ #define BDC_EC_AXIRDA 0x0c -#define BDC_EC_AXIRDA_RTS_MASK 0xf0000000 +#define BDC_EC_AXIRDA_RTS_MASK GENMASK(31, 28) #define BDC_EC_AXIRDA_RTS_SHIFT 28 @@ -195,10 +196,10 @@ static void usb_init_common(struct brcm_usb_init_params *params) if (USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE)) { reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1)); reg &= ~USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE); - reg |= params->mode; + reg |= params->port_mode; brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1)); } - switch (params->mode) { + switch (params->supported_port_modes) { case USB_CTLR_MODE_HOST: USB_CTRL_UNSET(ctrl, USB_PM, BDC_SOFT_RESETB); break; @@ -259,6 +260,11 @@ static void usb_init_common_7211b0(struct brcm_usb_init_params *params) brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1); } + /* Disable PLL auto suspend */ + reg = brcm_usb_readl(usb_phy + USB_PHY_PLL_CTL); + reg |= USB_PHY_PLL_CTL_PLL_SUSPEND_MASK; + brcm_usb_writel(reg, usb_phy + USB_PHY_PLL_CTL); + /* Init the PHY */ reg = USB_PHY_PLL_LDO_CTL_AFE_CORERDY_MASK | USB_PHY_PLL_LDO_CTL_AFE_LDO_PWRDWNB_MASK | @@ -276,7 +282,7 @@ static void usb_init_common_7211b0(struct brcm_usb_init_params *params) /* Set the PHY_MODE */ reg = brcm_usb_readl(usb_phy + USB_PHY_UTMI_CTL_1); reg &= ~USB_PHY_UTMI_CTL_1_PHY_MODE_MASK; - reg |= params->mode << USB_PHY_UTMI_CTL_1_PHY_MODE_SHIFT; + reg |= params->supported_port_modes << USB_PHY_UTMI_CTL_1_PHY_MODE_SHIFT; brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1); usb_init_common(params); @@ -286,7 +292,7 @@ static void usb_init_common_7211b0(struct brcm_usb_init_params *params) * the default "Read Transaction Size" of 6 (1024 bytes). * Set it to 4 (256 bytes). 
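* Equivalently, the read-modify-write below is
* reg = (reg & ~GENMASK(31, 28)) | (0x4 << BDC_EC_AXIRDA_RTS_SHIFT),
* so only the RTS field changes.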
*/ - if ((params->mode != USB_CTLR_MODE_HOST) && bdc_ec) { + if ((params->supported_port_modes != USB_CTLR_MODE_HOST) && bdc_ec) { reg = brcm_usb_readl(bdc_ec + BDC_EC_AXIRDA); reg &= ~BDC_EC_AXIRDA_RTS_MASK; reg |= (0x4 << BDC_EC_AXIRDA_RTS_SHIFT); @@ -331,13 +337,12 @@ static void usb_uninit_common_7216(struct brcm_usb_init_params *params) pr_debug("%s\n", __func__); - if (!params->wake_enabled) { - USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN); - + if (params->wake_enabled) { /* Switch to using slower clock during suspend to save power */ USB_CTRL_SET(ctrl, USB_PM, XHC_S2_CLK_SWITCH_EN); - } else { usb_wake_enable_7216(params, true); + } else { + USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN); } } @@ -385,7 +390,7 @@ static int usb_get_dual_select(struct brcm_usb_init_params *params) return reg; } -static void usb_set_dual_select(struct brcm_usb_init_params *params, int mode) +static void usb_set_dual_select(struct brcm_usb_init_params *params) { void __iomem *ctrl = params->regs[BRCM_REGS_CTRL]; u32 reg; @@ -394,7 +399,7 @@ static void usb_set_dual_select(struct brcm_usb_init_params *params, int mode) reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1)); reg &= ~USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE); - reg |= mode; + reg |= params->port_mode; brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1)); } @@ -425,7 +430,6 @@ void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params) params->family_name = "7216"; params->ops = &bcm7216_ops; - params->suspend_with_clocks = true; } void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params) @@ -435,5 +439,4 @@ void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params) params->family_name = "7211"; params->ops = &bcm7211b0_ops; - params->suspend_with_clocks = true; } diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c index dddcbd3cd5f3da10ed2f6cdf89bf51d53d36da4f..a1ca83308f98d365c12eb29698b0cea4589d85d1 100644 --- a/drivers/phy/broadcom/phy-brcm-usb-init.c +++ b/drivers/phy/broadcom/phy-brcm-usb-init.c @@ -21,57 +21,57 @@ /* Register definitions for the USB CTRL block */ #define USB_CTRL_SETUP 0x00 -#define USB_CTRL_SETUP_IOC_MASK 0x00000010 -#define USB_CTRL_SETUP_IPP_MASK 0x00000020 -#define USB_CTRL_SETUP_BABO_MASK 0x00000001 -#define USB_CTRL_SETUP_FNHW_MASK 0x00000002 -#define USB_CTRL_SETUP_FNBO_MASK 0x00000004 -#define USB_CTRL_SETUP_WABO_MASK 0x00000008 -#define USB_CTRL_SETUP_SCB_CLIENT_SWAP_MASK 0x00002000 /* option */ -#define USB_CTRL_SETUP_SCB1_EN_MASK 0x00004000 /* option */ -#define USB_CTRL_SETUP_SCB2_EN_MASK 0x00008000 /* option */ -#define USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK 0X00020000 /* option */ -#define USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK 0x00010000 /* option */ -#define USB_CTRL_SETUP_STRAP_IPP_SEL_MASK 0x02000000 /* option */ -#define USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK 0x04000000 /* option */ -#define USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK 0x08000000 /* opt */ -#define USB_CTRL_SETUP_OC3_DISABLE_MASK 0xc0000000 /* option */ +#define USB_CTRL_SETUP_BABO_MASK BIT(0) +#define USB_CTRL_SETUP_FNHW_MASK BIT(1) +#define USB_CTRL_SETUP_FNBO_MASK BIT(2) +#define USB_CTRL_SETUP_WABO_MASK BIT(3) +#define USB_CTRL_SETUP_IOC_MASK BIT(4) +#define USB_CTRL_SETUP_IPP_MASK BIT(5) +#define USB_CTRL_SETUP_SCB_CLIENT_SWAP_MASK BIT(13) /* option */ +#define USB_CTRL_SETUP_SCB1_EN_MASK BIT(14) /* option */ +#define USB_CTRL_SETUP_SCB2_EN_MASK BIT(15) /* option */ +#define USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK BIT(17) /* option */ +#define 
USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK BIT(16) /* option */ +#define USB_CTRL_SETUP_STRAP_IPP_SEL_MASK BIT(25) /* option */ +#define USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK BIT(26) /* option */ +#define USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK BIT(27) /* opt */ +#define USB_CTRL_SETUP_OC3_DISABLE_MASK GENMASK(31, 30) /* option */ #define USB_CTRL_PLL_CTL 0x04 -#define USB_CTRL_PLL_CTL_PLL_SUSPEND_EN_MASK 0x08000000 -#define USB_CTRL_PLL_CTL_PLL_RESETB_MASK 0x40000000 -#define USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK 0x80000000 /* option */ +#define USB_CTRL_PLL_CTL_PLL_SUSPEND_EN_MASK BIT(27) +#define USB_CTRL_PLL_CTL_PLL_RESETB_MASK BIT(30) +#define USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK BIT(31) /* option */ #define USB_CTRL_EBRIDGE 0x0c -#define USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK 0x00020000 /* option */ -#define USB_CTRL_EBRIDGE_EBR_SCB_SIZE_MASK 0x00000f80 /* option */ +#define USB_CTRL_EBRIDGE_EBR_SCB_SIZE_MASK GENMASK(11, 7) /* option */ +#define USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK BIT(17) /* option */ #define USB_CTRL_OBRIDGE 0x10 -#define USB_CTRL_OBRIDGE_LS_KEEP_ALIVE_MASK 0x08000000 +#define USB_CTRL_OBRIDGE_LS_KEEP_ALIVE_MASK BIT(27) #define USB_CTRL_MDIO 0x14 #define USB_CTRL_MDIO2 0x18 #define USB_CTRL_UTMI_CTL_1 0x2c -#define USB_CTRL_UTMI_CTL_1_POWER_UP_FSM_EN_MASK 0x00000800 -#define USB_CTRL_UTMI_CTL_1_POWER_UP_FSM_EN_P1_MASK 0x08000000 +#define USB_CTRL_UTMI_CTL_1_POWER_UP_FSM_EN_MASK BIT(11) +#define USB_CTRL_UTMI_CTL_1_POWER_UP_FSM_EN_P1_MASK BIT(27) #define USB_CTRL_USB_PM 0x34 -#define USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK 0x00800000 /* option */ -#define USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK 0x00400000 /* option */ -#define USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK 0x40000000 /* option */ -#define USB_CTRL_USB_PM_USB_PWRDN_MASK 0x80000000 /* option */ -#define USB_CTRL_USB_PM_SOFT_RESET_MASK 0x40000000 /* option */ -#define USB_CTRL_USB_PM_USB20_HC_RESETB_MASK 0x30000000 /* option */ -#define USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK 0x00300000 /* option */ -#define USB_CTRL_USB_PM_RMTWKUP_EN_MASK 0x00000001 +#define USB_CTRL_USB_PM_RMTWKUP_EN_MASK BIT(0) +#define USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK GENMASK(21, 20) /* option */ +#define USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK BIT(22) /* option */ +#define USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK BIT(23) /* option */ +#define USB_CTRL_USB_PM_USB20_HC_RESETB_MASK GENMASK(29, 28) /* option */ +#define USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK BIT(30) /* option */ +#define USB_CTRL_USB_PM_SOFT_RESET_MASK BIT(30) /* option */ +#define USB_CTRL_USB_PM_USB_PWRDN_MASK BIT(31) /* option */ #define USB_CTRL_USB_PM_STATUS 0x38 #define USB_CTRL_USB30_CTL1 0x60 -#define USB_CTRL_USB30_CTL1_PHY3_PLL_SEQ_START_MASK 0x00000010 -#define USB_CTRL_USB30_CTL1_PHY3_RESETB_MASK 0x00010000 -#define USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK 0x00020000 /* option */ -#define USB_CTRL_USB30_CTL1_USB3_IOC_MASK 0x10000000 /* option */ -#define USB_CTRL_USB30_CTL1_USB3_IPP_MASK 0x20000000 /* option */ +#define USB_CTRL_USB30_CTL1_PHY3_PLL_SEQ_START_MASK BIT(4) +#define USB_CTRL_USB30_CTL1_PHY3_RESETB_MASK BIT(16) +#define USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK BIT(17) /* option */ +#define USB_CTRL_USB30_CTL1_USB3_IOC_MASK BIT(28) /* option */ +#define USB_CTRL_USB30_CTL1_USB3_IPP_MASK BIT(29) /* option */ #define USB_CTRL_USB30_PCTL 0x70 -#define USB_CTRL_USB30_PCTL_PHY3_SOFT_RESETB_MASK 0x00000002 -#define USB_CTRL_USB30_PCTL_PHY3_IDDQ_OVERRIDE_MASK 0x00008000 -#define USB_CTRL_USB30_PCTL_PHY3_SOFT_RESETB_P1_MASK 0x00020000 +#define 
USB_CTRL_USB30_PCTL_PHY3_SOFT_RESETB_MASK BIT(1) +#define USB_CTRL_USB30_PCTL_PHY3_IDDQ_OVERRIDE_MASK BIT(15) +#define USB_CTRL_USB30_PCTL_PHY3_SOFT_RESETB_P1_MASK BIT(17) #define USB_CTRL_USB_DEVICE_CTL1 0x90 -#define USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK 0x00000003 /* option */ +#define USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK GENMASK(1, 0) /* option */ /* Register definitions for the XHCI EC block */ #define USB_XHCI_EC_IRAADR 0x658 @@ -876,11 +876,11 @@ static void usb_init_common(struct brcm_usb_init_params *params) reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1)); reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE); - reg |= params->mode; + reg |= params->port_mode; brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1)); } if (USB_CTRL_MASK_FAMILY(params, USB_PM, BDC_SOFT_RESETB)) { - switch (params->mode) { + switch (params->supported_port_modes) { case USB_CTLR_MODE_HOST: USB_CTRL_UNSET_FAMILY(params, USB_PM, BDC_SOFT_RESETB); break; @@ -891,7 +891,7 @@ static void usb_init_common(struct brcm_usb_init_params *params) } } if (USB_CTRL_MASK_FAMILY(params, SETUP, CC_DRD_MODE_ENABLE)) { - if (params->mode == USB_CTLR_MODE_TYPEC_PD) + if (params->supported_port_modes == USB_CTLR_MODE_TYPEC_PD) USB_CTRL_SET_FAMILY(params, SETUP, CC_DRD_MODE_ENABLE); else USB_CTRL_UNSET_FAMILY(params, SETUP, @@ -1000,7 +1000,7 @@ static int usb_get_dual_select(struct brcm_usb_init_params *params) return reg; } -static void usb_set_dual_select(struct brcm_usb_init_params *params, int mode) +static void usb_set_dual_select(struct brcm_usb_init_params *params) { void __iomem *ctrl = params->regs[BRCM_REGS_CTRL]; u32 reg; @@ -1011,7 +1011,7 @@ static void usb_set_dual_select(struct brcm_usb_init_params *params, int mode) reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1)); reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE); - reg |= mode; + reg |= params->port_mode; brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1)); } } diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h index 1ccb5ddab865c362571fc70c6048e8a9b6109eee..f9fbf8fb80e54b236253fcfcd6440c0d1eeb14d3 100644 --- a/drivers/phy/broadcom/phy-brcm-usb-init.h +++ b/drivers/phy/broadcom/phy-brcm-usb-init.h @@ -45,14 +45,15 @@ struct brcm_usb_init_ops { void (*uninit_eohci)(struct brcm_usb_init_params *params); void (*uninit_xhci)(struct brcm_usb_init_params *params); int (*get_dual_select)(struct brcm_usb_init_params *params); - void (*set_dual_select)(struct brcm_usb_init_params *params, int mode); + void (*set_dual_select)(struct brcm_usb_init_params *params); }; struct brcm_usb_init_params { void __iomem *regs[BRCM_REGS_MAX]; int ioc; int ipp; - int mode; + int supported_port_modes; + int port_mode; u32 family_id; u32 product_id; int selected_family; @@ -61,7 +62,6 @@ struct brcm_usb_init_params { const struct brcm_usb_init_ops *ops; struct regmap *syscon_piarbctl; bool wake_enabled; - bool suspend_with_clocks; }; void brcm_usb_dvr_init_4908(struct brcm_usb_init_params *params); @@ -153,11 +153,10 @@ static inline int brcm_usb_get_dual_select(struct brcm_usb_init_params *ini) return 0; } -static inline void brcm_usb_set_dual_select(struct brcm_usb_init_params *ini, - int mode) +static inline void brcm_usb_set_dual_select(struct brcm_usb_init_params *ini) { if (ini->ops->set_dual_select) - ini->ops->set_dual_select(ini, mode); + ini->ops->set_dual_select(ini); } #endif /* _USB_BRCM_COMMON_INIT_H */ diff --git a/drivers/phy/broadcom/phy-brcm-usb.c 
b/drivers/phy/broadcom/phy-brcm-usb.c index 2cb3779fcdf8246cadfae38be26114c5b29798b2..4de39999f43d39e4d2323e67cbab885dc50e78e4 100644 --- a/drivers/phy/broadcom/phy-brcm-usb.c +++ b/drivers/phy/broadcom/phy-brcm-usb.c @@ -102,9 +102,9 @@ static int brcm_pm_notifier(struct notifier_block *notifier, static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id) { - struct phy *gphy = dev_id; + struct device *dev = dev_id; - pm_wakeup_event(&gphy->dev, 0); + pm_wakeup_event(dev, 0); return IRQ_HANDLED; } @@ -233,7 +233,7 @@ static ssize_t dr_mode_show(struct device *dev, return sprintf(buf, "%s\n", value_to_name(&brcm_dr_mode_to_name[0], ARRAY_SIZE(brcm_dr_mode_to_name), - priv->ini.mode)); + priv->ini.supported_port_modes)); } static DEVICE_ATTR_RO(dr_mode); @@ -249,7 +249,8 @@ static ssize_t dual_select_store(struct device *dev, res = name_to_value(&brcm_dual_mode_to_name[0], ARRAY_SIZE(brcm_dual_mode_to_name), buf, &value); if (!res) { - brcm_usb_set_dual_select(&priv->ini, value); + priv->ini.port_mode = value; + brcm_usb_set_dual_select(&priv->ini); res = len; } mutex_unlock(&sysfs_lock); @@ -445,13 +446,13 @@ static int brcm_usb_phy_dvr_init(struct platform_device *pdev, priv->suspend_clk = NULL; } - priv->wake_irq = platform_get_irq_byname(pdev, "wake"); + priv->wake_irq = platform_get_irq_byname_optional(pdev, "wake"); if (priv->wake_irq < 0) - priv->wake_irq = platform_get_irq_byname(pdev, "wakeup"); + priv->wake_irq = platform_get_irq_byname_optional(pdev, "wakeup"); if (priv->wake_irq >= 0) { err = devm_request_irq(dev, priv->wake_irq, brcm_usb_phy_wake_isr, 0, - dev_name(dev), gphy); + dev_name(dev), dev); if (err < 0) return err; device_set_wakeup_capable(dev, 1); @@ -495,13 +496,16 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) of_property_read_u32(dn, "brcm,ipp", &priv->ini.ipp); of_property_read_u32(dn, "brcm,ioc", &priv->ini.ioc); - priv->ini.mode = USB_CTLR_MODE_HOST; + priv->ini.supported_port_modes = USB_CTLR_MODE_HOST; err = of_property_read_string(dn, "dr_mode", &mode); if (err == 0) { name_to_value(&brcm_dr_mode_to_name[0], ARRAY_SIZE(brcm_dr_mode_to_name), - mode, &priv->ini.mode); + mode, &priv->ini.supported_port_modes); } + /* Default port_mode to supported port_modes */ + priv->ini.port_mode = priv->ini.supported_port_modes; + if (of_property_read_bool(dn, "brcm,has-xhci")) priv->has_xhci = true; if (of_property_read_bool(dn, "brcm,has-eohci")) @@ -539,7 +543,7 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) * Create sysfs entries for mode. * Remove "dual_select" attribute if not in dual mode */ - if (priv->ini.mode != USB_CTLR_MODE_DRD) + if (priv->ini.supported_port_modes != USB_CTLR_MODE_DRD) brcm_usb_phy_attrs[1] = NULL; err = sysfs_create_group(&dev->kobj, &brcm_usb_phy_group); if (err) @@ -598,7 +602,7 @@ static int brcm_usb_phy_suspend(struct device *dev) * and newer XHCI->2.0-clks/3.0-clks. 
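* With suspend_with_clocks removed, wake_enabled alone now decides: when wake
* is enabled the clocks are left running so the PHY can detect wake events,
* and resume() below correspondingly skips its clk_prepare_enable() calls to
* keep the prepare/enable counts balanced.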
*/ - if (!priv->ini.suspend_with_clocks) { + if (!priv->ini.wake_enabled) { if (priv->phys[BRCM_USB_PHY_3_0].inited) clk_disable_unprepare(priv->usb_30_clk); if (priv->phys[BRCM_USB_PHY_2_0].inited || @@ -615,8 +619,10 @@ static int brcm_usb_phy_resume(struct device *dev) { struct brcm_usb_phy_data *priv = dev_get_drvdata(dev); - clk_prepare_enable(priv->usb_20_clk); - clk_prepare_enable(priv->usb_30_clk); + if (!priv->ini.wake_enabled) { + clk_prepare_enable(priv->usb_20_clk); + clk_prepare_enable(priv->usb_30_clk); + } brcm_usb_init_ipp(&priv->ini); /* diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c index c93286483b4259178a5d46ef0d73f708af564825..7585e8080b77d58247aa94f5020f2aa98bcc9c30 100644 --- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c +++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -31,12 +32,10 @@ #define IMX8MM_PCIE_PHY_CMN_REG065 0x194 #define ANA_AUX_RX_TERM (BIT(7) | BIT(4)) #define ANA_AUX_TX_LVL GENMASK(3, 0) -#define IMX8MM_PCIE_PHY_CMN_REG75 0x1D4 -#define PCIE_PHY_CMN_REG75_PLL_DONE 0x3 +#define IMX8MM_PCIE_PHY_CMN_REG075 0x1D4 +#define ANA_PLL_DONE 0x3 #define PCIE_PHY_TRSV_REG5 0x414 -#define PCIE_PHY_TRSV_REG5_GEN1_DEEMP 0x2D #define PCIE_PHY_TRSV_REG6 0x418 -#define PCIE_PHY_TRSV_REG6_GEN2_DEEMP 0xF #define IMX8MM_GPR_PCIE_REF_CLK_SEL GENMASK(25, 24) #define IMX8MM_GPR_PCIE_REF_CLK_PLL FIELD_PREP(IMX8MM_GPR_PCIE_REF_CLK_SEL, 0x3) @@ -47,16 +46,28 @@ #define IMX8MM_GPR_PCIE_SSC_EN BIT(16) #define IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE BIT(9) +enum imx8_pcie_phy_type { + IMX8MM, + IMX8MP, +}; + +struct imx8_pcie_phy_drvdata { + const char *gpr; + enum imx8_pcie_phy_type variant; +}; + struct imx8_pcie_phy { void __iomem *base; struct clk *clk; struct phy *phy; struct regmap *iomuxc_gpr; + struct reset_control *perst; struct reset_control *reset; u32 refclk_pad_mode; u32 tx_deemph_gen1; u32 tx_deemph_gen2; bool clkreq_unused; + const struct imx8_pcie_phy_drvdata *drvdata; }; static int imx8_pcie_phy_power_on(struct phy *phy) @@ -65,34 +76,22 @@ static int imx8_pcie_phy_power_on(struct phy *phy) u32 val, pad_mode; struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy); - reset_control_assert(imx8_phy->reset); - pad_mode = imx8_phy->refclk_pad_mode; - /* Set AUX_EN_OVERRIDE 1'b0, when the CLKREQ# isn't hooked */ - regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, - IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE, - imx8_phy->clkreq_unused ? - 0 : IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE); - regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, - IMX8MM_GPR_PCIE_AUX_EN, - IMX8MM_GPR_PCIE_AUX_EN); - regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, - IMX8MM_GPR_PCIE_POWER_OFF, 0); - regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, - IMX8MM_GPR_PCIE_SSC_EN, 0); - - regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, - IMX8MM_GPR_PCIE_REF_CLK_SEL, - pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ? - IMX8MM_GPR_PCIE_REF_CLK_EXT : - IMX8MM_GPR_PCIE_REF_CLK_PLL); - usleep_range(100, 200); - - /* Do the PHY common block reset */ - regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, - IMX8MM_GPR_PCIE_CMN_RST, - IMX8MM_GPR_PCIE_CMN_RST); - usleep_range(200, 500); + switch (imx8_phy->drvdata->variant) { + case IMX8MM: + reset_control_assert(imx8_phy->reset); + + /* Tune PHY de-emphasis setting to pass PCIe compliance. 
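+ * The gen1/gen2 values are presumably the optional fsl,tx-deemph-gen1 and
+ * fsl,tx-deemph-gen2 device-tree properties read at probe time (not shown in
+ * this hunk); a value of zero skips the write and keeps the reset default.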
*/ + if (imx8_phy->tx_deemph_gen1) + writel(imx8_phy->tx_deemph_gen1, + imx8_phy->base + PCIE_PHY_TRSV_REG5); + if (imx8_phy->tx_deemph_gen2) + writel(imx8_phy->tx_deemph_gen2, + imx8_phy->base + PCIE_PHY_TRSV_REG6); + break; + case IMX8MP: /* Do nothing. */ + break; + } if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT || pad_mode == IMX8_PCIE_REFCLK_PAD_UNUSED) { @@ -120,20 +119,44 @@ static int imx8_pcie_phy_power_on(struct phy *phy) imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG065); } - /* Tune PHY de-emphasis setting to pass PCIe compliance. */ - if (imx8_phy->tx_deemph_gen1) - writel(imx8_phy->tx_deemph_gen1, - imx8_phy->base + PCIE_PHY_TRSV_REG5); - if (imx8_phy->tx_deemph_gen2) - writel(imx8_phy->tx_deemph_gen2, - imx8_phy->base + PCIE_PHY_TRSV_REG6); + /* Set AUX_EN_OVERRIDE 1'b0, when the CLKREQ# isn't hooked */ + regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, + IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE, + imx8_phy->clkreq_unused ? + 0 : IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE); + regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, + IMX8MM_GPR_PCIE_AUX_EN, + IMX8MM_GPR_PCIE_AUX_EN); + regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, + IMX8MM_GPR_PCIE_POWER_OFF, 0); + regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, + IMX8MM_GPR_PCIE_SSC_EN, 0); + + regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, + IMX8MM_GPR_PCIE_REF_CLK_SEL, + pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ? + IMX8MM_GPR_PCIE_REF_CLK_EXT : + IMX8MM_GPR_PCIE_REF_CLK_PLL); + usleep_range(100, 200); + + /* Do the PHY common block reset */ + regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, + IMX8MM_GPR_PCIE_CMN_RST, + IMX8MM_GPR_PCIE_CMN_RST); - reset_control_deassert(imx8_phy->reset); + switch (imx8_phy->drvdata->variant) { + case IMX8MP: + reset_control_deassert(imx8_phy->perst); + fallthrough; + case IMX8MM: + reset_control_deassert(imx8_phy->reset); + usleep_range(200, 500); + break; + } /* Polling to check the phy is ready or not. 
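* ANA_PLL_DONE (0x3) in CMN_REG075 indicates PLL lock; readl_poll_timeout()
* re-reads the register every 10 us and returns -ETIMEDOUT after 20 ms.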
*/ - ret = readl_poll_timeout(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG75, - val, val == PCIE_PHY_CMN_REG75_PLL_DONE, - 10, 20000); + ret = readl_poll_timeout(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG075, + val, val == ANA_PLL_DONE, 10, 20000); return ret; } @@ -160,6 +183,23 @@ static const struct phy_ops imx8_pcie_phy_ops = { .owner = THIS_MODULE, }; +static const struct imx8_pcie_phy_drvdata imx8mm_drvdata = { + .gpr = "fsl,imx8mm-iomuxc-gpr", + .variant = IMX8MM, +}; + +static const struct imx8_pcie_phy_drvdata imx8mp_drvdata = { + .gpr = "fsl,imx8mp-iomuxc-gpr", + .variant = IMX8MP, +}; + +static const struct of_device_id imx8_pcie_phy_of_match[] = { + {.compatible = "fsl,imx8mm-pcie-phy", .data = &imx8mm_drvdata, }, + {.compatible = "fsl,imx8mp-pcie-phy", .data = &imx8mp_drvdata, }, + { }, +}; +MODULE_DEVICE_TABLE(of, imx8_pcie_phy_of_match); + static int imx8_pcie_phy_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; @@ -172,6 +212,8 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev) if (!imx8_phy) return -ENOMEM; + imx8_phy->drvdata = of_device_get_match_data(dev); + /* get PHY refclk pad mode */ of_property_read_u32(np, "fsl,refclk-pad-mode", &imx8_phy->refclk_pad_mode); @@ -197,7 +239,7 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev) /* Grab GPR config register range */ imx8_phy->iomuxc_gpr = - syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); + syscon_regmap_lookup_by_compatible(imx8_phy->drvdata->gpr); if (IS_ERR(imx8_phy->iomuxc_gpr)) { dev_err(dev, "unable to find iomuxc registers\n"); return PTR_ERR(imx8_phy->iomuxc_gpr); @@ -209,6 +251,14 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev) return PTR_ERR(imx8_phy->reset); } + if (imx8_phy->drvdata->variant == IMX8MP) { + imx8_phy->perst = + devm_reset_control_get_exclusive(dev, "perst"); + if (IS_ERR(imx8_phy->perst)) + return dev_err_probe(dev, PTR_ERR(imx8_phy->perst), + "Failed to get PCIE PHY PERST control\n"); + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); imx8_phy->base = devm_ioremap_resource(dev, res); if (IS_ERR(imx8_phy->base)) @@ -225,12 +275,6 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev) return PTR_ERR_OR_ZERO(phy_provider); } -static const struct of_device_id imx8_pcie_phy_of_match[] = { - {.compatible = "fsl,imx8mm-pcie-phy",}, - { }, -}; -MODULE_DEVICE_TABLE(of, imx8_pcie_phy_of_match); - static struct platform_driver imx8_pcie_phy_driver = { .probe = imx8_pcie_phy_probe, .driver = { diff --git a/drivers/phy/marvell/phy-mmp3-hsic.c b/drivers/phy/marvell/phy-mmp3-hsic.c index 7cccf01848d8d5657c8ae809348b1a0859f99473..f2537fdcc3ab4dfebf62e830e58cc4f0094ea0b8 100644 --- a/drivers/phy/marvell/phy-mmp3-hsic.c +++ b/drivers/phy/marvell/phy-mmp3-hsic.c @@ -41,12 +41,10 @@ static int mmp3_hsic_phy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct phy_provider *provider; - struct resource *resource; void __iomem *base; struct phy *phy; - resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, resource); + base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c index 67712c77d806f2658e87ebd80fbe701c2463989a..d641b345afa35cf8392fb335c8214749a4efbf07 100644 --- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c +++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c @@ -826,6 +826,9 @@
mvebu_a3700_comphy_usb3_power_on(struct mvebu_a3700_comphy_lane *lane) if (ret) return ret; + /* COMPHY register reset (cleared automatically) */ + comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST); + /* * 0. Set PHY OTG Control(0x5d034), bit 4, Power up OTG module The * register belong to UTMI module, so it is set in UTMI phy driver. diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig index 5c98850f5a360a38c48598f7d6b9093d2888da18..eb9ddc685b3852cd26d1df17889fed94e0d33fba 100644 --- a/drivers/phy/qualcomm/Kconfig +++ b/drivers/phy/qualcomm/Kconfig @@ -54,6 +54,7 @@ config PHY_QCOM_QMP tristate "Qualcomm QMP PHY Driver" depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST) select GENERIC_PHY + select MFD_SYSCON help Enable this to support the QMP PHY transceiver that is used with controllers such as PCIe, UFS, and USB on Qualcomm chips. diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c index ba9d761ec49a75b39f12d183a9a94fc7b16e3834..77052c66cf70bc3f586bec526553b3c5a55c2fef 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ -20,7 +20,7 @@ #include #include -#include +#include #include "phy-qcom-qmp.h" @@ -63,17 +63,10 @@ #define CLAMP_EN BIT(0) /* enables i/o clamp_n */ #define PHY_INIT_COMPLETE_TIMEOUT 10000 -#define POWER_DOWN_DELAY_US_MIN 10 -#define POWER_DOWN_DELAY_US_MAX 11 struct qmp_phy_init_tbl { unsigned int offset; unsigned int val; - /* - * register part of layout ? - * if yes, then offset gives index in the reg-layout - */ - bool in_layout; /* * mask of lanes for which this register is written * for cases when second lane needs different values @@ -88,14 +81,6 @@ struct qmp_phy_init_tbl { .lane_mask = 0xff, \ } -#define QMP_PHY_INIT_CFG_L(o, v) \ - { \ - .offset = o, \ - .val = v, \ - .in_layout = true, \ - .lane_mask = 0xff, \ - } - #define QMP_PHY_INIT_CFG_LANE(o, v, l) \ { \ .offset = o, \ @@ -121,6 +106,7 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_SW_RESET] = 0x00, [QPHY_START_CTRL] = 0x08, [QPHY_PCS_STATUS] = 0x174, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04, [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8, [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc, [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170, @@ -810,13 +796,24 @@ static const u8 qmp_dp_v5_voltage_swing_hbr_rbr[4][4] = { { 0x3f, 0xff, 0xff, 0xff } }; -struct qmp_phy; +struct qmp_combo; + +struct qmp_combo_offsets { + u16 com; + u16 txa; + u16 rxa; + u16 txb; + u16 rxb; + u16 usb3_serdes; + u16 usb3_pcs_misc; + u16 usb3_pcs; + u16 usb3_pcs_usb; + u16 dp_serdes; + u16 dp_dp_phy; +}; -/* struct qmp_phy_cfg - per-PHY initialization config */ struct qmp_phy_cfg { - /* phy-type - PCIE/UFS/USB */ - unsigned int type; - int lanes; + const struct qmp_combo_offsets *offsets; /* Init sequence for PHY blocks - serdes, tx, rx, pcs */ const struct qmp_phy_init_tbl *serdes_tbl; @@ -830,6 +827,11 @@ struct qmp_phy_cfg { const struct qmp_phy_init_tbl *pcs_usb_tbl; int pcs_usb_tbl_num; + const struct qmp_phy_init_tbl *dp_serdes_tbl; + int dp_serdes_tbl_num; + const struct qmp_phy_init_tbl *dp_tx_tbl; + int dp_tx_tbl_num; + /* Init sequence for DP PHY block link rates */ const struct qmp_phy_init_tbl *serdes_tbl_rbr; int serdes_tbl_rbr_num; @@ -847,10 +849,10 @@ struct qmp_phy_cfg { const u8 (*pre_emphasis_hbr3_hbr2)[4][4]; /* DP PHY callbacks */ - int (*configure_dp_phy)(struct qmp_phy *qphy); - void (*configure_dp_tx)(struct qmp_phy *qphy); - int (*calibrate_dp_phy)(struct 
qmp_phy *qphy); - void (*dp_aux_init)(struct qmp_phy *qphy); + int (*configure_dp_phy)(struct qmp_combo *qmp); + void (*configure_dp_tx)(struct qmp_combo *qmp); + int (*calibrate_dp_phy)(struct qmp_combo *qmp); + void (*dp_aux_init)(struct qmp_combo *qmp); /* clock ids to be requested */ const char * const *clk_list; @@ -865,50 +867,21 @@ struct qmp_phy_cfg { /* array of registers with different offsets */ const unsigned int *regs; - unsigned int start_ctrl; - unsigned int pwrdn_ctrl; - /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */ - unsigned int phy_status; - /* true, if PHY needs delay after POWER_DOWN */ bool has_pwrdn_delay; - /* power_down delay in usec */ - int pwrdn_delay_min; - int pwrdn_delay_max; /* Offset from PCS to PCS_USB region */ unsigned int pcs_usb_offset; }; -struct qmp_phy_combo_cfg { - const struct qmp_phy_cfg *usb_cfg; - const struct qmp_phy_cfg *dp_cfg; -}; +struct qmp_combo { + struct device *dev; -/** - * struct qmp_phy - per-lane phy descriptor - * - * @phy: generic phy - * @cfg: phy specific configuration - * @serdes: iomapped memory space for phy's serdes (i.e. PLL) - * @tx: iomapped memory space for lane's tx - * @rx: iomapped memory space for lane's rx - * @pcs: iomapped memory space for lane's pcs - * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs) - * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs) - * @pcs_misc: iomapped memory space for lane's pcs_misc - * @pcs_usb: iomapped memory space for lane's pcs_usb - * @pipe_clk: pipe clock - * @qmp: QMP phy to which this lane belongs - * @mode: current PHY mode - * @dp_aux_cfg: Display port aux config - * @dp_opts: Display port optional config - * @dp_clks: Display port clocks - */ -struct qmp_phy { - struct phy *phy; const struct qmp_phy_cfg *cfg; + + void __iomem *com; + void __iomem *serdes; void __iomem *tx; void __iomem *rx; @@ -917,62 +890,43 @@ struct qmp_phy { void __iomem *rx2; void __iomem *pcs_misc; void __iomem *pcs_usb; - struct clk *pipe_clk; - struct qcom_qmp *qmp; - enum phy_mode mode; - unsigned int dp_aux_cfg; - struct phy_configure_opts_dp dp_opts; - struct qmp_phy_dp_clks *dp_clks; -}; -struct qmp_phy_dp_clks { - struct qmp_phy *qphy; - struct clk_hw dp_link_hw; - struct clk_hw dp_pixel_hw; -}; - -/** - * struct qcom_qmp - structure holding QMP phy block attributes - * - * @dev: device - * @dp_com: iomapped memory space for phy's dp_com control block - * - * @clks: array of clocks required by phy - * @resets: array of resets required by phy - * @vregs: regulator supplies bulk data - * - * @phys: array of per-lane phy descriptors - * @phy_mutex: mutex lock for PHY common block initialization - * @init_count: phy common block initialization count - * @ufs_reset: optional UFS PHY reset handle - */ -struct qcom_qmp { - struct device *dev; - void __iomem *dp_com; + void __iomem *dp_serdes; + void __iomem *dp_tx; + void __iomem *dp_tx2; + void __iomem *dp_dp_phy; + struct clk *pipe_clk; struct clk_bulk_data *clks; struct reset_control_bulk_data *resets; struct regulator_bulk_data *vregs; - struct qmp_phy **phys; - struct mutex phy_mutex; int init_count; - struct reset_control *ufs_reset; + struct phy *usb_phy; + enum phy_mode mode; + + struct phy *dp_phy; + unsigned int dp_aux_cfg; + struct phy_configure_opts_dp dp_opts; + + struct clk_fixed_rate pipe_clk_fixed; + struct clk_hw dp_link_hw; + struct clk_hw dp_pixel_hw; }; -static void qcom_qmp_v3_phy_dp_aux_init(struct qmp_phy *qphy); -static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy 
*qphy); -static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy); -static int qcom_qmp_v3_dp_phy_calibrate(struct qmp_phy *qphy); +static void qmp_v3_dp_aux_init(struct qmp_combo *qmp); +static void qmp_v3_configure_dp_tx(struct qmp_combo *qmp); +static int qmp_v3_configure_dp_phy(struct qmp_combo *qmp); +static int qmp_v3_calibrate_dp_phy(struct qmp_combo *qmp); -static void qcom_qmp_v4_phy_dp_aux_init(struct qmp_phy *qphy); -static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy); -static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy); -static int qcom_qmp_v4_dp_phy_calibrate(struct qmp_phy *qphy); +static void qmp_v4_dp_aux_init(struct qmp_combo *qmp); +static void qmp_v4_configure_dp_tx(struct qmp_combo *qmp); +static int qmp_v4_configure_dp_phy(struct qmp_combo *qmp); +static int qmp_v4_calibrate_dp_phy(struct qmp_combo *qmp); -static int qcom_qmp_v5_phy_configure_dp_phy(struct qmp_phy *qphy); +static int qmp_v5_configure_dp_phy(struct qmp_combo *qmp); static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val) { @@ -1004,7 +958,7 @@ static const char * const qmp_v3_phy_clk_l[] = { }; static const char * const qmp_v4_phy_clk_l[] = { - "aux", "ref_clk_src", "ref", "com_aux", + "aux", "ref", "com_aux", }; /* the primary usb3 phy on sm8250 doesn't have a ref clock */ @@ -1021,10 +975,21 @@ static const char * const sc7180_usb3phy_reset_l[] = { "phy", }; -static const struct qmp_phy_cfg sc7180_usb3phy_cfg = { - .type = PHY_TYPE_USB3, - .lanes = 2, +static const struct qmp_combo_offsets qmp_combo_offsets_v5 = { + .com = 0x0000, + .txa = 0x0400, + .rxa = 0x0600, + .txb = 0x0a00, + .rxb = 0x0c00, + .usb3_serdes = 0x1000, + .usb3_pcs_misc = 0x1200, + .usb3_pcs = 0x1400, + .usb3_pcs_usb = 0x1700, + .dp_serdes = 0x2000, + .dp_dp_phy = 0x2200, +}; +static const struct qmp_phy_cfg sc7180_usb3dpphy_cfg = { .serdes_tbl = qmp_v3_usb3_serdes_tbl, .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl), .tx_tbl = qmp_v3_usb3_tx_tbl, @@ -1033,31 +998,11 @@ static const struct qmp_phy_cfg sc7180_usb3phy_cfg = { .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl), .pcs_tbl = qmp_v3_usb3_pcs_tbl, .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl), - .clk_list = qmp_v3_phy_clk_l, - .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), - .reset_list = sc7180_usb3phy_reset_l, - .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l), - .vreg_list = qmp_phy_vreg_l, - .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), - .regs = qmp_v3_usb3phy_regs_layout, - - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, -}; -static const struct qmp_phy_cfg sc7180_dpphy_cfg = { - .type = PHY_TYPE_DP, - .lanes = 2, - - .serdes_tbl = qmp_v3_dp_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl), - .tx_tbl = qmp_v3_dp_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(qmp_v3_dp_tx_tbl), + .dp_serdes_tbl = qmp_v3_dp_serdes_tbl, + .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl), + .dp_tx_tbl = qmp_v3_dp_tx_tbl, + .dp_tx_tbl_num = ARRAY_SIZE(qmp_v3_dp_tx_tbl), .serdes_tbl_rbr = qmp_v3_dp_serdes_tbl_rbr, .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_rbr), @@ -1073,6 +1018,11 @@ static const struct qmp_phy_cfg sc7180_dpphy_cfg = { .swing_hbr3_hbr2 = &qmp_dp_v3_voltage_swing_hbr3_hbr2, .pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2, + .dp_aux_init = qmp_v3_dp_aux_init, + .configure_dp_tx = qmp_v3_configure_dp_tx, + .configure_dp_phy = 
qmp_v3_configure_dp_phy, + .calibrate_dp_phy = qmp_v3_calibrate_dp_phy, + .clk_list = qmp_v3_phy_clk_l, .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), .reset_list = sc7180_usb3phy_reset_l, @@ -1081,21 +1031,10 @@ static const struct qmp_phy_cfg sc7180_dpphy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = qmp_v3_usb3phy_regs_layout, - .dp_aux_init = qcom_qmp_v3_phy_dp_aux_init, - .configure_dp_tx = qcom_qmp_v3_phy_configure_dp_tx, - .configure_dp_phy = qcom_qmp_v3_phy_configure_dp_phy, - .calibrate_dp_phy = qcom_qmp_v3_dp_phy_calibrate, -}; - -static const struct qmp_phy_combo_cfg sc7180_usb3dpphy_cfg = { - .usb_cfg = &sc7180_usb3phy_cfg, - .dp_cfg = &sc7180_dpphy_cfg, + .has_pwrdn_delay = true, }; -static const struct qmp_phy_cfg sdm845_usb3phy_cfg = { - .type = PHY_TYPE_USB3, - .lanes = 2, - +static const struct qmp_phy_cfg sdm845_usb3dpphy_cfg = { .serdes_tbl = qmp_v3_usb3_serdes_tbl, .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl), .tx_tbl = qmp_v3_usb3_tx_tbl, @@ -1104,6 +1043,31 @@ static const struct qmp_phy_cfg sdm845_usb3phy_cfg = { .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl), .pcs_tbl = qmp_v3_usb3_pcs_tbl, .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl), + + .dp_serdes_tbl = qmp_v3_dp_serdes_tbl, + .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl), + .dp_tx_tbl = qmp_v3_dp_tx_tbl, + .dp_tx_tbl_num = ARRAY_SIZE(qmp_v3_dp_tx_tbl), + + .serdes_tbl_rbr = qmp_v3_dp_serdes_tbl_rbr, + .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_rbr), + .serdes_tbl_hbr = qmp_v3_dp_serdes_tbl_hbr, + .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr), + .serdes_tbl_hbr2 = qmp_v3_dp_serdes_tbl_hbr2, + .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr2), + .serdes_tbl_hbr3 = qmp_v3_dp_serdes_tbl_hbr3, + .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr3), + + .swing_hbr_rbr = &qmp_dp_v3_voltage_swing_hbr_rbr, + .pre_emphasis_hbr_rbr = &qmp_dp_v3_pre_emphasis_hbr_rbr, + .swing_hbr3_hbr2 = &qmp_dp_v3_voltage_swing_hbr3_hbr2, + .pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2, + + .dp_aux_init = qmp_v3_dp_aux_init, + .configure_dp_tx = qmp_v3_configure_dp_tx, + .configure_dp_phy = qmp_v3_configure_dp_phy, + .calibrate_dp_phy = qmp_v3_calibrate_dp_phy, + .clk_list = qmp_v3_phy_clk_l, .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), .reset_list = msm8996_usb3phy_reset_l, @@ -1112,24 +1076,10 @@ static const struct qmp_phy_cfg sdm845_usb3phy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = qmp_v3_usb3phy_regs_layout, - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, }; -static const struct qmp_phy_combo_cfg sdm845_usb3dpphy_cfg = { - .usb_cfg = &sdm845_usb3phy_cfg, - .dp_cfg = &sc7180_dpphy_cfg, -}; - -static const struct qmp_phy_cfg sm8150_usb3phy_cfg = { - .type = PHY_TYPE_USB3, - .lanes = 2, - +static const struct qmp_phy_cfg sc8180x_usb3dpphy_cfg = { .serdes_tbl = sm8150_usb3_serdes_tbl, .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl), .tx_tbl = sm8150_usb3_tx_tbl, @@ -1140,33 +1090,11 @@ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = { .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_tbl), .pcs_usb_tbl = sm8150_usb3_pcs_usb_tbl, .pcs_usb_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_usb_tbl), - .clk_list = qmp_v4_phy_clk_l, - .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), - .reset_list = msm8996_usb3phy_reset_l, - .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), - .vreg_list = qmp_phy_vreg_l, - 
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), - .regs = qmp_v4_usb3phy_regs_layout, - .pcs_usb_offset = 0x300, - - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, -}; - -static const struct qmp_phy_cfg sc8180x_dpphy_cfg = { - .type = PHY_TYPE_DP, - .lanes = 2, - - .serdes_tbl = qmp_v4_dp_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl), - .tx_tbl = qmp_v4_dp_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(qmp_v4_dp_tx_tbl), + .dp_serdes_tbl = qmp_v4_dp_serdes_tbl, + .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl), + .dp_tx_tbl = qmp_v4_dp_tx_tbl, + .dp_tx_tbl_num = ARRAY_SIZE(qmp_v4_dp_tx_tbl), .serdes_tbl_rbr = qmp_v4_dp_serdes_tbl_rbr, .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr), @@ -1182,28 +1110,25 @@ static const struct qmp_phy_cfg sc8180x_dpphy_cfg = { .swing_hbr3_hbr2 = &qmp_dp_v3_voltage_swing_hbr3_hbr2, .pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2, - .clk_list = qmp_v3_phy_clk_l, - .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), - .reset_list = sc7180_usb3phy_reset_l, - .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l), + .dp_aux_init = qmp_v4_dp_aux_init, + .configure_dp_tx = qmp_v4_configure_dp_tx, + .configure_dp_phy = qmp_v4_configure_dp_phy, + .calibrate_dp_phy = qmp_v4_calibrate_dp_phy, + + .clk_list = qmp_v4_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), .vreg_list = qmp_phy_vreg_l, .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), - .regs = qmp_v3_usb3phy_regs_layout, - - .dp_aux_init = qcom_qmp_v4_phy_dp_aux_init, - .configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx, - .configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy, - .calibrate_dp_phy = qcom_qmp_v4_dp_phy_calibrate, -}; + .regs = qmp_v4_usb3phy_regs_layout, + .pcs_usb_offset = 0x300, -static const struct qmp_phy_combo_cfg sc8180x_usb3dpphy_cfg = { - .usb_cfg = &sm8150_usb3phy_cfg, - .dp_cfg = &sc8180x_dpphy_cfg, + .has_pwrdn_delay = true, }; -static const struct qmp_phy_cfg sc8280xp_usb43dp_usb_cfg = { - .type = PHY_TYPE_USB3, - .lanes = 2, +static const struct qmp_phy_cfg sc8280xp_usb43dpphy_cfg = { + .offsets = &qmp_combo_offsets_v5, .serdes_tbl = sc8280xp_usb43dp_serdes_tbl, .serdes_tbl_num = ARRAY_SIZE(sc8280xp_usb43dp_serdes_tbl), @@ -1213,32 +1138,11 @@ static const struct qmp_phy_cfg sc8280xp_usb43dp_usb_cfg = { .rx_tbl_num = ARRAY_SIZE(sc8280xp_usb43dp_rx_tbl), .pcs_tbl = sc8280xp_usb43dp_pcs_tbl, .pcs_tbl_num = ARRAY_SIZE(sc8280xp_usb43dp_pcs_tbl), - .clk_list = qmp_v4_phy_clk_l, - .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), - .reset_list = msm8996_usb3phy_reset_l, - .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), - .vreg_list = qmp_phy_vreg_l, - .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), - .regs = qmp_v4_usb3phy_regs_layout, - .pcs_usb_offset = 0x300, - - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, -}; - -static const struct qmp_phy_cfg sc8280xp_usb43dp_dp_cfg = { - .type = PHY_TYPE_DP, - .lanes = 2, - .serdes_tbl = qmp_v5_dp_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(qmp_v5_dp_serdes_tbl), - .tx_tbl = qmp_v5_5nm_dp_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(qmp_v5_5nm_dp_tx_tbl), + .dp_serdes_tbl = qmp_v5_dp_serdes_tbl, + .dp_serdes_tbl_num = 
ARRAY_SIZE(qmp_v5_dp_serdes_tbl), + .dp_tx_tbl = qmp_v5_5nm_dp_tx_tbl, + .dp_tx_tbl_num = ARRAY_SIZE(qmp_v5_5nm_dp_tx_tbl), .serdes_tbl_rbr = qmp_v4_dp_serdes_tbl_rbr, .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr), @@ -1254,6 +1158,11 @@ static const struct qmp_phy_cfg sc8280xp_usb43dp_dp_cfg = { .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2, .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2, + .dp_aux_init = qmp_v4_dp_aux_init, + .configure_dp_tx = qmp_v4_configure_dp_tx, + .configure_dp_phy = qmp_v5_configure_dp_phy, + .calibrate_dp_phy = qmp_v4_calibrate_dp_phy, + .clk_list = qmp_v4_phy_clk_l, .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), .reset_list = msm8996_usb3phy_reset_l, @@ -1261,22 +1170,9 @@ static const struct qmp_phy_cfg sc8280xp_usb43dp_dp_cfg = { .vreg_list = qmp_phy_vreg_l, .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = qmp_v4_usb3phy_regs_layout, - - .dp_aux_init = qcom_qmp_v4_phy_dp_aux_init, - .configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx, - .configure_dp_phy = qcom_qmp_v5_phy_configure_dp_phy, - .calibrate_dp_phy = qcom_qmp_v4_dp_phy_calibrate, -}; - -static const struct qmp_phy_combo_cfg sc8280xp_usb43dpphy_combo_cfg = { - .usb_cfg = &sc8280xp_usb43dp_usb_cfg, - .dp_cfg = &sc8280xp_usb43dp_dp_cfg, }; -static const struct qmp_phy_cfg sm8250_usb3phy_cfg = { - .type = PHY_TYPE_USB3, - .lanes = 2, - +static const struct qmp_phy_cfg sm8250_usb3dpphy_cfg = { .serdes_tbl = sm8150_usb3_serdes_tbl, .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl), .tx_tbl = sm8250_usb3_tx_tbl, @@ -1287,32 +1183,11 @@ static const struct qmp_phy_cfg sm8250_usb3phy_cfg = { .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_tbl), .pcs_usb_tbl = sm8250_usb3_pcs_usb_tbl, .pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_usb_tbl), - .clk_list = qmp_v4_sm8250_usbphy_clk_l, - .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l), - .reset_list = msm8996_usb3phy_reset_l, - .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), - .vreg_list = qmp_phy_vreg_l, - .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), - .regs = qmp_v4_usb3phy_regs_layout, - .pcs_usb_offset = 0x300, - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, -}; - -static const struct qmp_phy_cfg sm8250_dpphy_cfg = { - .type = PHY_TYPE_DP, - .lanes = 2, - - .serdes_tbl = qmp_v4_dp_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl), - .tx_tbl = qmp_v4_dp_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(qmp_v4_dp_tx_tbl), + .dp_serdes_tbl = qmp_v4_dp_serdes_tbl, + .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl), + .dp_tx_tbl = qmp_v4_dp_tx_tbl, + .dp_tx_tbl_num = ARRAY_SIZE(qmp_v4_dp_tx_tbl), .serdes_tbl_rbr = qmp_v4_dp_serdes_tbl_rbr, .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr), @@ -1328,27 +1203,24 @@ static const struct qmp_phy_cfg sm8250_dpphy_cfg = { .swing_hbr3_hbr2 = &qmp_dp_v3_voltage_swing_hbr3_hbr2, .pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2, - .clk_list = qmp_v4_phy_clk_l, - .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), + .dp_aux_init = qmp_v4_dp_aux_init, + .configure_dp_tx = qmp_v4_configure_dp_tx, + .configure_dp_phy = qmp_v4_configure_dp_phy, + .calibrate_dp_phy = qmp_v4_calibrate_dp_phy, + + .clk_list = qmp_v4_sm8250_usbphy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l), .reset_list = msm8996_usb3phy_reset_l, .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), .vreg_list = qmp_phy_vreg_l, .num_vregs = 
ARRAY_SIZE(qmp_phy_vreg_l), .regs = qmp_v4_usb3phy_regs_layout, + .pcs_usb_offset = 0x300, - .dp_aux_init = qcom_qmp_v4_phy_dp_aux_init, - .configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx, - .configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy, - .calibrate_dp_phy = qcom_qmp_v4_dp_phy_calibrate, -}; - -static const struct qmp_phy_combo_cfg sm8250_usb3dpphy_cfg = { - .usb_cfg = &sm8250_usb3phy_cfg, - .dp_cfg = &sm8250_dpphy_cfg, + .has_pwrdn_delay = true, }; static void qmp_combo_configure_lane(void __iomem *base, - const unsigned int *regs, const struct qmp_phy_init_tbl tbl[], int num, u8 lane_mask) @@ -1363,110 +1235,98 @@ static void qmp_combo_configure_lane(void __iomem *base, if (!(t->lane_mask & lane_mask)) continue; - if (t->in_layout) - writel(t->val, base + regs[t->offset]); - else - writel(t->val, base + t->offset); + writel(t->val, base + t->offset); } } static void qmp_combo_configure(void __iomem *base, - const unsigned int *regs, const struct qmp_phy_init_tbl tbl[], int num) { - qmp_combo_configure_lane(base, regs, tbl, num, 0xff); + qmp_combo_configure_lane(base, tbl, num, 0xff); } -static int qmp_combo_serdes_init(struct qmp_phy *qphy) +static int qmp_combo_dp_serdes_init(struct qmp_combo *qmp) { - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *serdes = qphy->serdes; - const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; - const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl; - int serdes_tbl_num = cfg->serdes_tbl_num; - - qmp_combo_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num); - - if (cfg->type == PHY_TYPE_DP) { - switch (dp_opts->link_rate) { - case 1620: - qmp_combo_configure(serdes, cfg->regs, - cfg->serdes_tbl_rbr, - cfg->serdes_tbl_rbr_num); - break; - case 2700: - qmp_combo_configure(serdes, cfg->regs, - cfg->serdes_tbl_hbr, - cfg->serdes_tbl_hbr_num); - break; - case 5400: - qmp_combo_configure(serdes, cfg->regs, - cfg->serdes_tbl_hbr2, - cfg->serdes_tbl_hbr2_num); - break; - case 8100: - qmp_combo_configure(serdes, cfg->regs, - cfg->serdes_tbl_hbr3, - cfg->serdes_tbl_hbr3_num); - break; - default: - /* Other link rates aren't supported */ - return -EINVAL; - } + const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *serdes = qmp->dp_serdes; + const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts; + + qmp_combo_configure(serdes, cfg->dp_serdes_tbl, cfg->dp_serdes_tbl_num); + + switch (dp_opts->link_rate) { + case 1620: + qmp_combo_configure(serdes, cfg->serdes_tbl_rbr, + cfg->serdes_tbl_rbr_num); + break; + case 2700: + qmp_combo_configure(serdes, cfg->serdes_tbl_hbr, + cfg->serdes_tbl_hbr_num); + break; + case 5400: + qmp_combo_configure(serdes, cfg->serdes_tbl_hbr2, + cfg->serdes_tbl_hbr2_num); + break; + case 8100: + qmp_combo_configure(serdes, cfg->serdes_tbl_hbr3, + cfg->serdes_tbl_hbr3_num); + break; + default: + /* Other link rates aren't supported */ + return -EINVAL; } return 0; } -static void qcom_qmp_v3_phy_dp_aux_init(struct qmp_phy *qphy) +static void qmp_v3_dp_aux_init(struct qmp_combo *qmp) { writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN, - qphy->pcs + QSERDES_DP_PHY_PD_CTL); + qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL); /* Turn on BIAS current for PHY/PLL */ writel(QSERDES_V3_COM_BIAS_EN | QSERDES_V3_COM_BIAS_EN_MUX | QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL, - qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN); + qmp->dp_serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN); - writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + 
QSERDES_DP_PHY_PD_CTL); + writel(DP_PHY_PD_CTL_PSR_PWRDN, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL); writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | DP_PHY_PD_CTL_LANE_0_1_PWRDN | DP_PHY_PD_CTL_LANE_2_3_PWRDN | DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN, - qphy->pcs + QSERDES_DP_PHY_PD_CTL); + qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL); writel(QSERDES_V3_COM_BIAS_EN | QSERDES_V3_COM_BIAS_EN_MUX | QSERDES_V3_COM_CLKBUF_R_EN | QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL | QSERDES_V3_COM_CLKBUF_RX_DRIVE_L, - qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN); - - writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG0); - writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1); - writel(0x24, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2); - writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG3); - writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG4); - writel(0x26, qphy->pcs + QSERDES_DP_PHY_AUX_CFG5); - writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG6); - writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG7); - writel(0xbb, qphy->pcs + QSERDES_DP_PHY_AUX_CFG8); - writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG9); - qphy->dp_aux_cfg = 0; + qmp->dp_serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN); + + writel(0x00, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG0); + writel(0x13, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG1); + writel(0x24, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG2); + writel(0x00, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG3); + writel(0x0a, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG4); + writel(0x26, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG5); + writel(0x0a, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG6); + writel(0x03, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG7); + writel(0xbb, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG8); + writel(0x03, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG9); + qmp->dp_aux_cfg = 0; writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK | PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK | PHY_AUX_REQ_ERR_MASK, - qphy->pcs + QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK); + qmp->dp_dp_phy + QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK); } -static int qmp_combo_configure_dp_swing(struct qmp_phy *qphy, +static int qmp_combo_configure_dp_swing(struct qmp_combo *qmp, unsigned int drv_lvl_reg, unsigned int emp_post_reg) { - const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; - const struct qmp_phy_cfg *cfg = qphy->cfg; + const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts; + const struct qmp_phy_cfg *cfg = qmp->cfg; unsigned int v_level = 0, p_level = 0; u8 voltage_swing_cfg, pre_emphasis_cfg; int i; @@ -1492,20 +1352,20 @@ static int qmp_combo_configure_dp_swing(struct qmp_phy *qphy, voltage_swing_cfg |= DP_PHY_TXn_TX_DRV_LVL_MUX_EN; pre_emphasis_cfg |= DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN; - writel(voltage_swing_cfg, qphy->tx + drv_lvl_reg); - writel(pre_emphasis_cfg, qphy->tx + emp_post_reg); - writel(voltage_swing_cfg, qphy->tx2 + drv_lvl_reg); - writel(pre_emphasis_cfg, qphy->tx2 + emp_post_reg); + writel(voltage_swing_cfg, qmp->dp_tx + drv_lvl_reg); + writel(pre_emphasis_cfg, qmp->dp_tx + emp_post_reg); + writel(voltage_swing_cfg, qmp->dp_tx2 + drv_lvl_reg); + writel(pre_emphasis_cfg, qmp->dp_tx2 + emp_post_reg); return 0; } -static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy) +static void qmp_v3_configure_dp_tx(struct qmp_combo *qmp) { - const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; + const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts; u32 bias_en, drvr_en; - if (qmp_combo_configure_dp_swing(qphy, QSERDES_V3_TX_TX_DRV_LVL, + if (qmp_combo_configure_dp_swing(qmp, 
QSERDES_V3_TX_TX_DRV_LVL, QSERDES_V3_TX_TX_EMP_POST1_LVL) < 0) return; @@ -1517,13 +1377,13 @@ static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy) drvr_en = 0x10; } - writel(drvr_en, qphy->tx + QSERDES_V3_TX_HIGHZ_DRVR_EN); - writel(bias_en, qphy->tx + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN); - writel(drvr_en, qphy->tx2 + QSERDES_V3_TX_HIGHZ_DRVR_EN); - writel(bias_en, qphy->tx2 + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN); + writel(drvr_en, qmp->dp_tx + QSERDES_V3_TX_HIGHZ_DRVR_EN); + writel(bias_en, qmp->dp_tx + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN); + writel(drvr_en, qmp->dp_tx2 + QSERDES_V3_TX_HIGHZ_DRVR_EN); + writel(bias_en, qmp->dp_tx2 + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN); } -static bool qmp_combo_configure_dp_mode(struct qmp_phy *qphy) +static bool qmp_combo_configure_dp_mode(struct qmp_combo *qmp) { u32 val; bool reverse = false; @@ -1543,27 +1403,26 @@ static bool qmp_combo_configure_dp_mode(struct qmp_phy *qphy) * if (lane_cnt == 4 || orientation == ORIENTATION_CC1) * val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN; * if (orientation == ORIENTATION_CC2) - * writel(0x4c, qphy->pcs + QSERDES_V3_DP_PHY_MODE); + * writel(0x4c, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_MODE); */ val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN; - writel(val, qphy->pcs + QSERDES_DP_PHY_PD_CTL); + writel(val, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL); - writel(0x5c, qphy->pcs + QSERDES_DP_PHY_MODE); + writel(0x5c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE); return reverse; } -static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy) +static int qmp_v3_configure_dp_phy(struct qmp_combo *qmp) { - const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks; - const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; + const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts; u32 phy_vco_div, status; unsigned long pixel_freq; - qmp_combo_configure_dp_mode(qphy); + qmp_combo_configure_dp_mode(qmp); - writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL); - writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL); + writel(0x05, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL); + writel(0x05, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL); switch (dp_opts->link_rate) { case 1620: @@ -1586,40 +1445,40 @@ static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy) /* Other link rates aren't supported */ return -EINVAL; } - writel(phy_vco_div, qphy->pcs + QSERDES_V3_DP_PHY_VCO_DIV); + writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_VCO_DIV); - clk_set_rate(dp_clks->dp_link_hw.clk, dp_opts->link_rate * 100000); - clk_set_rate(dp_clks->dp_pixel_hw.clk, pixel_freq); + clk_set_rate(qmp->dp_link_hw.clk, dp_opts->link_rate * 100000); + clk_set_rate(qmp->dp_pixel_hw.clk, pixel_freq); - writel(0x04, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2); - writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG); - writel(0x05, qphy->pcs + QSERDES_DP_PHY_CFG); - writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG); - writel(0x09, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x04, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG2); + writel(0x01, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); + writel(0x05, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); + writel(0x01, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); + writel(0x09, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); - writel(0x20, qphy->serdes + QSERDES_V3_COM_RESETSM_CNTRL); + writel(0x20, qmp->dp_serdes + QSERDES_V3_COM_RESETSM_CNTRL); - if (readl_poll_timeout(qphy->serdes + QSERDES_V3_COM_C_READY_STATUS, + if (readl_poll_timeout(qmp->dp_serdes + QSERDES_V3_COM_C_READY_STATUS, status, ((status & BIT(0)) > 0), 500, 10000)) return -ETIMEDOUT; 
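
Note on the clk_set_rate() call above: the link-clock rate comes straight from dp_opts->link_rate, which struct phy_configure_opts_dp carries in Mb/s. With 8b/10b coding the DP link symbol clock is the bit rate divided by ten, hence the driver's `link_rate * 100000` (1620 Mb/s RBR maps onto the 162000000 Hz case accepted by the link-clock determine_rate callback further down). A minimal stand-alone sketch of that mapping; the function and harness names here are invented for illustration, not driver code:

	#include <stdio.h>

	/*
	 * Illustrative only: dp_opts->link_rate is given in Mb/s, and the
	 * 8b/10b link symbol clock is bitrate / 10, i.e. link_rate * 100000 Hz,
	 * matching the rate the driver hands to clk_set_rate().
	 */
	static unsigned long dp_link_clk_hz(unsigned int link_rate_mbps)
	{
		return (unsigned long)link_rate_mbps * 100000UL;
	}

	int main(void)
	{
		/* RBR, HBR, HBR2, HBR3: the four rates the switch accepts */
		static const unsigned int rates[] = { 1620, 2700, 5400, 8100 };

		for (int i = 0; i < 4; i++)
			printf("%u Mb/s -> %lu Hz link clock\n",
			       rates[i], dp_link_clk_hz(rates[i]));
		return 0;
	}
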
- writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); - if (readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS, + if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V3_DP_PHY_STATUS, status, ((status & BIT(1)) > 0), 500, 10000)) return -ETIMEDOUT; - writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x18, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); udelay(2000); - writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); - return readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS, + return readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V3_DP_PHY_STATUS, status, ((status & BIT(1)) > 0), 500, @@ -1630,76 +1489,75 @@ static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy) * We need to calibrate the aux setting here as many times * as the caller tries */ -static int qcom_qmp_v3_dp_phy_calibrate(struct qmp_phy *qphy) +static int qmp_v3_calibrate_dp_phy(struct qmp_combo *qmp) { static const u8 cfg1_settings[] = { 0x13, 0x23, 0x1d }; u8 val; - qphy->dp_aux_cfg++; - qphy->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings); - val = cfg1_settings[qphy->dp_aux_cfg]; + qmp->dp_aux_cfg++; + qmp->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings); + val = cfg1_settings[qmp->dp_aux_cfg]; - writel(val, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1); + writel(val, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG1); return 0; } -static void qcom_qmp_v4_phy_dp_aux_init(struct qmp_phy *qphy) +static void qmp_v4_dp_aux_init(struct qmp_combo *qmp) { writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_PSR_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN, - qphy->pcs + QSERDES_DP_PHY_PD_CTL); + qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL); /* Turn on BIAS current for PHY/PLL */ - writel(0x17, qphy->serdes + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN); - - writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG0); - writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1); - writel(0xa4, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2); - writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG3); - writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG4); - writel(0x26, qphy->pcs + QSERDES_DP_PHY_AUX_CFG5); - writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG6); - writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG7); - writel(0xb7, qphy->pcs + QSERDES_DP_PHY_AUX_CFG8); - writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG9); - qphy->dp_aux_cfg = 0; + writel(0x17, qmp->dp_serdes + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN); + + writel(0x00, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG0); + writel(0x13, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG1); + writel(0xa4, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG2); + writel(0x00, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG3); + writel(0x0a, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG4); + writel(0x26, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG5); + writel(0x0a, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG6); + writel(0x03, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG7); + writel(0xb7, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG8); + writel(0x03, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG9); + qmp->dp_aux_cfg = 0; writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK | PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK | PHY_AUX_REQ_ERR_MASK, - qphy->pcs + QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK); + qmp->dp_dp_phy + QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK); } -static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy) +static void qmp_v4_configure_dp_tx(struct qmp_combo *qmp) { /* Program default values before writing proper values */ - writel(0x27, qphy->tx + QSERDES_V4_TX_TX_DRV_LVL); - writel(0x27, 
qphy->tx2 + QSERDES_V4_TX_TX_DRV_LVL); + writel(0x27, qmp->dp_tx + QSERDES_V4_TX_TX_DRV_LVL); + writel(0x27, qmp->dp_tx2 + QSERDES_V4_TX_TX_DRV_LVL); - writel(0x20, qphy->tx + QSERDES_V4_TX_TX_EMP_POST1_LVL); - writel(0x20, qphy->tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL); + writel(0x20, qmp->dp_tx + QSERDES_V4_TX_TX_EMP_POST1_LVL); + writel(0x20, qmp->dp_tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL); - qmp_combo_configure_dp_swing(qphy, QSERDES_V4_TX_TX_DRV_LVL, + qmp_combo_configure_dp_swing(qmp, QSERDES_V4_TX_TX_DRV_LVL, QSERDES_V4_TX_TX_EMP_POST1_LVL); } -static int qcom_qmp_v45_phy_configure_dp_phy(struct qmp_phy *qphy) +static int qmp_v45_configure_dp_phy(struct qmp_combo *qmp) { - const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks; - const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; + const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts; u32 phy_vco_div, status; unsigned long pixel_freq; - writel(0x0f, qphy->pcs + QSERDES_V4_DP_PHY_CFG_1); + writel(0x0f, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_CFG_1); - qmp_combo_configure_dp_mode(qphy); + qmp_combo_configure_dp_mode(qmp); - writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1); - writel(0xa4, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2); + writel(0x13, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG1); + writel(0xa4, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG2); - writel(0x05, qphy->pcs + QSERDES_V4_DP_PHY_TX0_TX1_LANE_CTL); - writel(0x05, qphy->pcs + QSERDES_V4_DP_PHY_TX2_TX3_LANE_CTL); + writel(0x05, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_TX0_TX1_LANE_CTL); + writel(0x05, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_TX2_TX3_LANE_CTL); switch (dp_opts->link_rate) { case 1620: @@ -1722,49 +1580,49 @@ static int qcom_qmp_v45_phy_configure_dp_phy(struct qmp_phy *qphy) /* Other link rates aren't supported */ return -EINVAL; } - writel(phy_vco_div, qphy->pcs + QSERDES_V4_DP_PHY_VCO_DIV); + writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_VCO_DIV); - clk_set_rate(dp_clks->dp_link_hw.clk, dp_opts->link_rate * 100000); - clk_set_rate(dp_clks->dp_pixel_hw.clk, pixel_freq); + clk_set_rate(qmp->dp_link_hw.clk, dp_opts->link_rate * 100000); + clk_set_rate(qmp->dp_pixel_hw.clk, pixel_freq); - writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG); - writel(0x05, qphy->pcs + QSERDES_DP_PHY_CFG); - writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG); - writel(0x09, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x01, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); + writel(0x05, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); + writel(0x01, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); + writel(0x09, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); - writel(0x20, qphy->serdes + QSERDES_V4_COM_RESETSM_CNTRL); + writel(0x20, qmp->dp_serdes + QSERDES_V4_COM_RESETSM_CNTRL); - if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_C_READY_STATUS, + if (readl_poll_timeout(qmp->dp_serdes + QSERDES_V4_COM_C_READY_STATUS, status, ((status & BIT(0)) > 0), 500, 10000)) return -ETIMEDOUT; - if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_CMN_STATUS, + if (readl_poll_timeout(qmp->dp_serdes + QSERDES_V4_COM_CMN_STATUS, status, ((status & BIT(0)) > 0), 500, 10000)) return -ETIMEDOUT; - if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_CMN_STATUS, + if (readl_poll_timeout(qmp->dp_serdes + QSERDES_V4_COM_CMN_STATUS, status, ((status & BIT(1)) > 0), 500, 10000)) return -ETIMEDOUT; - writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); - if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS, + if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V4_DP_PHY_STATUS, status, ((status & BIT(0)) > 0), 
500, 10000)) return -ETIMEDOUT; - if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS, + if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V4_DP_PHY_STATUS, status, ((status & BIT(1)) > 0), 500, @@ -1774,15 +1632,15 @@ static int qcom_qmp_v45_phy_configure_dp_phy(struct qmp_phy *qphy) return 0; } -static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy) +static int qmp_v4_configure_dp_phy(struct qmp_combo *qmp) { - const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; + const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts; u32 bias0_en, drvr0_en, bias1_en, drvr1_en; bool reverse = false; u32 status; int ret; - ret = qcom_qmp_v45_phy_configure_dp_phy(qphy); + ret = qmp_v45_configure_dp_phy(qmp); if (ret < 0) return ret; @@ -1808,43 +1666,43 @@ static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy) drvr1_en = 0x10; } - writel(drvr0_en, qphy->tx + QSERDES_V4_TX_HIGHZ_DRVR_EN); - writel(bias0_en, qphy->tx + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN); - writel(drvr1_en, qphy->tx2 + QSERDES_V4_TX_HIGHZ_DRVR_EN); - writel(bias1_en, qphy->tx2 + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN); + writel(drvr0_en, qmp->dp_tx + QSERDES_V4_TX_HIGHZ_DRVR_EN); + writel(bias0_en, qmp->dp_tx + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN); + writel(drvr1_en, qmp->dp_tx2 + QSERDES_V4_TX_HIGHZ_DRVR_EN); + writel(bias1_en, qmp->dp_tx2 + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN); - writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x18, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); udelay(2000); - writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); - if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS, + if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V4_DP_PHY_STATUS, status, ((status & BIT(1)) > 0), 500, 10000)) return -ETIMEDOUT; - writel(0x0a, qphy->tx + QSERDES_V4_TX_TX_POL_INV); - writel(0x0a, qphy->tx2 + QSERDES_V4_TX_TX_POL_INV); + writel(0x0a, qmp->dp_tx + QSERDES_V4_TX_TX_POL_INV); + writel(0x0a, qmp->dp_tx2 + QSERDES_V4_TX_TX_POL_INV); - writel(0x27, qphy->tx + QSERDES_V4_TX_TX_DRV_LVL); - writel(0x27, qphy->tx2 + QSERDES_V4_TX_TX_DRV_LVL); + writel(0x27, qmp->dp_tx + QSERDES_V4_TX_TX_DRV_LVL); + writel(0x27, qmp->dp_tx2 + QSERDES_V4_TX_TX_DRV_LVL); - writel(0x20, qphy->tx + QSERDES_V4_TX_TX_EMP_POST1_LVL); - writel(0x20, qphy->tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL); + writel(0x20, qmp->dp_tx + QSERDES_V4_TX_TX_EMP_POST1_LVL); + writel(0x20, qmp->dp_tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL); return 0; } -static int qcom_qmp_v5_phy_configure_dp_phy(struct qmp_phy *qphy) +static int qmp_v5_configure_dp_phy(struct qmp_combo *qmp) { - const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; + const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts; u32 bias0_en, drvr0_en, bias1_en, drvr1_en; bool reverse = false; u32 status; int ret; - ret = qcom_qmp_v45_phy_configure_dp_phy(qphy); + ret = qmp_v45_configure_dp_phy(qmp); if (ret < 0) return ret; @@ -1865,30 +1723,30 @@ static int qcom_qmp_v5_phy_configure_dp_phy(struct qmp_phy *qphy) drvr1_en = 0x10; } - writel(drvr0_en, qphy->tx + QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN); - writel(bias0_en, qphy->tx + QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN); - writel(drvr1_en, qphy->tx2 + QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN); - writel(bias1_en, qphy->tx2 + QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN); + writel(drvr0_en, qmp->dp_tx + QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN); + writel(bias0_en, qmp->dp_tx + QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN); + writel(drvr1_en, qmp->dp_tx2 + QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN); + writel(bias1_en, 
qmp->dp_tx2 + QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN); - writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x18, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); udelay(2000); - writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG); + writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG); - if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS, + if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V4_DP_PHY_STATUS, status, ((status & BIT(1)) > 0), 500, 10000)) return -ETIMEDOUT; - writel(0x0a, qphy->tx + QSERDES_V5_5NM_TX_TX_POL_INV); - writel(0x0a, qphy->tx2 + QSERDES_V5_5NM_TX_TX_POL_INV); + writel(0x0a, qmp->dp_tx + QSERDES_V5_5NM_TX_TX_POL_INV); + writel(0x0a, qmp->dp_tx2 + QSERDES_V5_5NM_TX_TX_POL_INV); - writel(0x27, qphy->tx + QSERDES_V5_5NM_TX_TX_DRV_LVL); - writel(0x27, qphy->tx2 + QSERDES_V5_5NM_TX_TX_DRV_LVL); + writel(0x27, qmp->dp_tx + QSERDES_V5_5NM_TX_TX_DRV_LVL); + writel(0x27, qmp->dp_tx2 + QSERDES_V5_5NM_TX_TX_DRV_LVL); - writel(0x20, qphy->tx + QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL); - writel(0x20, qphy->tx2 + QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL); + writel(0x20, qmp->dp_tx + QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL); + writel(0x20, qmp->dp_tx2 + QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL); return 0; } @@ -1897,52 +1755,50 @@ static int qcom_qmp_v5_phy_configure_dp_phy(struct qmp_phy *qphy) * We need to calibrate the aux setting here as many times * as the caller tries */ -static int qcom_qmp_v4_dp_phy_calibrate(struct qmp_phy *qphy) +static int qmp_v4_calibrate_dp_phy(struct qmp_combo *qmp) { static const u8 cfg1_settings[] = { 0x20, 0x13, 0x23, 0x1d }; u8 val; - qphy->dp_aux_cfg++; - qphy->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings); - val = cfg1_settings[qphy->dp_aux_cfg]; + qmp->dp_aux_cfg++; + qmp->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings); + val = cfg1_settings[qmp->dp_aux_cfg]; - writel(val, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1); + writel(val, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG1); return 0; } -static int qcom_qmp_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts) +static int qmp_combo_dp_configure(struct phy *phy, union phy_configure_opts *opts) { const struct phy_configure_opts_dp *dp_opts = &opts->dp; - struct qmp_phy *qphy = phy_get_drvdata(phy); - const struct qmp_phy_cfg *cfg = qphy->cfg; + struct qmp_combo *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; - memcpy(&qphy->dp_opts, dp_opts, sizeof(*dp_opts)); - if (qphy->dp_opts.set_voltages) { - cfg->configure_dp_tx(qphy); - qphy->dp_opts.set_voltages = 0; + memcpy(&qmp->dp_opts, dp_opts, sizeof(*dp_opts)); + if (qmp->dp_opts.set_voltages) { + cfg->configure_dp_tx(qmp); + qmp->dp_opts.set_voltages = 0; } return 0; } -static int qcom_qmp_dp_phy_calibrate(struct phy *phy) +static int qmp_combo_dp_calibrate(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - const struct qmp_phy_cfg *cfg = qphy->cfg; + struct qmp_combo *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; if (cfg->calibrate_dp_phy) - return cfg->calibrate_dp_phy(qphy); + return cfg->calibrate_dp_phy(qmp); return 0; } -static int qmp_combo_com_init(struct qmp_phy *qphy) +static int qmp_combo_com_init(struct qmp_combo *qmp) { - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *pcs = qphy->pcs; - void __iomem *dp_com = qmp->dp_com; + const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *com = qmp->com; int ret; mutex_lock(&qmp->phy_mutex); @@ -1951,7 +1807,6 @@ static int qmp_combo_com_init(struct qmp_phy *qphy) return 0; } - /* turn on regulator supplies */ ret = 
regulator_bulk_enable(cfg->num_vregs, qmp->vregs); if (ret) { dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); @@ -1974,33 +1829,28 @@ static int qmp_combo_com_init(struct qmp_phy *qphy) if (ret) goto err_assert_reset; - qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL, SW_PWRDN); + qphy_setbits(com, QPHY_V3_DP_COM_POWER_DOWN_CTRL, SW_PWRDN); /* override hardware control for reset of qmp phy */ - qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL, + qphy_setbits(com, QPHY_V3_DP_COM_RESET_OVRD_CTRL, SW_DPPHY_RESET_MUX | SW_DPPHY_RESET | SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET); /* Default type-c orientation, i.e CC1 */ - qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02); + qphy_setbits(com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02); - qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL, USB3_MODE | DP_MODE); + qphy_setbits(com, QPHY_V3_DP_COM_PHY_MODE_CTRL, USB3_MODE | DP_MODE); /* bring both QMP USB and QMP DP PHYs PCS block out of reset */ - qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL, + qphy_clrbits(com, QPHY_V3_DP_COM_RESET_OVRD_CTRL, SW_DPPHY_RESET_MUX | SW_DPPHY_RESET | SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET); - qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03); - qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET); + qphy_clrbits(com, QPHY_V3_DP_COM_SWI_CTRL, 0x03); + qphy_clrbits(com, QPHY_V3_DP_COM_SW_RESET, SW_RESET); - if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) - qphy_setbits(pcs, - cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], - cfg->pwrdn_ctrl); - else - qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, - cfg->pwrdn_ctrl); + qphy_setbits(qmp->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], + SW_PWRDN); mutex_unlock(&qmp->phy_mutex); @@ -2016,10 +1866,9 @@ err_unlock: return ret; } -static int qmp_combo_com_exit(struct qmp_phy *qphy) +static int qmp_combo_com_exit(struct qmp_combo *qmp) { - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qphy->cfg; + const struct qmp_phy_cfg *cfg = qmp->cfg; mutex_lock(&qmp->phy_mutex); if (--qmp->init_count) { @@ -2027,8 +1876,6 @@ static int qmp_combo_com_exit(struct qmp_phy *qphy) return 0; } - reset_control_assert(qmp->ufs_reset); - reset_control_bulk_assert(cfg->num_resets, qmp->resets); clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks); @@ -2040,183 +1887,201 @@ static int qmp_combo_com_exit(struct qmp_phy *qphy) return 0; } -static int qmp_combo_init(struct phy *phy) +static int qmp_combo_dp_init(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qphy->cfg; + struct qmp_combo *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; int ret; - dev_vdbg(qmp->dev, "Initializing QMP phy\n"); - ret = qmp_combo_com_init(qphy); + ret = qmp_combo_com_init(qmp); if (ret) return ret; - if (cfg->type == PHY_TYPE_DP) - cfg->dp_aux_init(qphy); + cfg->dp_aux_init(qmp); + + return 0; +} + +static int qmp_combo_dp_exit(struct phy *phy) +{ + struct qmp_combo *qmp = phy_get_drvdata(phy); + + qmp_combo_com_exit(qmp); return 0; } -static int qmp_combo_power_on(struct phy *phy) +static int qmp_combo_dp_power_on(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *tx = qphy->tx; - void __iomem *rx = qphy->rx; - void __iomem *pcs = qphy->pcs; + struct qmp_combo *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *tx = qmp->dp_tx; + void __iomem *tx2 = qmp->dp_tx2; + + 
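
Worth noting at this point: both the USB and DP phy_ops funnel through qmp_combo_com_init() and qmp_combo_com_exit() above, which serialize on phy_mutex and keep an init_count so the shared common block (regulators, clocks, resets, DP_COM setup) is powered up only for the first user and torn down only with the last. A rough user-space model of that refcounting; every name here is invented for illustration:

	#include <pthread.h>
	#include <stdio.h>

	/*
	 * Toy model of the shared-block refcounting; the driver does the
	 * same dance with qmp->phy_mutex and qmp->init_count.
	 */
	static pthread_mutex_t phy_mutex = PTHREAD_MUTEX_INITIALIZER;
	static int init_count;

	static void com_init(void)
	{
		pthread_mutex_lock(&phy_mutex);
		if (init_count++ == 0)
			printf("first user: regulators on, resets deasserted\n");
		pthread_mutex_unlock(&phy_mutex);
	}

	static void com_exit(void)
	{
		pthread_mutex_lock(&phy_mutex);
		if (--init_count == 0)
			printf("last user: clocks and regulators off\n");
		pthread_mutex_unlock(&phy_mutex);
	}

	int main(void)
	{
		com_init();	/* USB half comes up, powers the common block */
		com_init();	/* DP half: block already up, refcount only */
		com_exit();	/* DP half down: USB still holds a reference */
		com_exit();	/* last user gone: common block powered down */
		return 0;
	}
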
qmp_combo_dp_serdes_init(qmp); + + qmp_combo_configure_lane(tx, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 1); + qmp_combo_configure_lane(tx2, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 2); + + /* Configure special DP tx tunings */ + cfg->configure_dp_tx(qmp); + + /* Configure link rate, swing, etc. */ + cfg->configure_dp_phy(qmp); + + return 0; +} + +static int qmp_combo_dp_power_off(struct phy *phy) +{ + struct qmp_combo *qmp = phy_get_drvdata(phy); + + /* Assert DP PHY power down */ + writel(DP_PHY_PD_CTL_PSR_PWRDN, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL); + + return 0; +} + +static int qmp_combo_usb_power_on(struct phy *phy) +{ + struct qmp_combo *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *serdes = qmp->serdes; + void __iomem *tx = qmp->tx; + void __iomem *rx = qmp->rx; + void __iomem *tx2 = qmp->tx2; + void __iomem *rx2 = qmp->rx2; + void __iomem *pcs = qmp->pcs; void __iomem *status; - unsigned int mask, val, ready; + unsigned int val; int ret; - qmp_combo_serdes_init(qphy); + qmp_combo_configure(serdes, cfg->serdes_tbl, cfg->serdes_tbl_num); - ret = clk_prepare_enable(qphy->pipe_clk); + ret = clk_prepare_enable(qmp->pipe_clk); if (ret) { dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret); return ret; } /* Tx, Rx, and PCS configurations */ - qmp_combo_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1); + qmp_combo_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); + qmp_combo_configure_lane(tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2); - if (cfg->lanes >= 2) { - qmp_combo_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl, - cfg->tx_tbl_num, 2); - } + qmp_combo_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); + qmp_combo_configure_lane(rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2); - /* Configure special DP tx tunings */ - if (cfg->type == PHY_TYPE_DP) - cfg->configure_dp_tx(qphy); - - qmp_combo_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1); - - if (cfg->lanes >= 2) { - qmp_combo_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl, - cfg->rx_tbl_num, 2); - } - - /* Configure link rate, swing, etc. 
*/ - if (cfg->type == PHY_TYPE_DP) - cfg->configure_dp_phy(qphy); - else - qmp_combo_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num); - - ret = reset_control_deassert(qmp->ufs_reset); - if (ret) - goto err_disable_pipe_clk; + qmp_combo_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); if (cfg->has_pwrdn_delay) - usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max); + usleep_range(10, 20); - if (cfg->type != PHY_TYPE_DP) { - /* Pull PHY out of reset state */ - qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); - /* start SerDes and Phy-Coding-Sublayer */ - qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + /* Pull PHY out of reset state */ + qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); - status = pcs + cfg->regs[QPHY_PCS_STATUS]; - mask = cfg->phy_status; - ready = 0; + /* start SerDes and Phy-Coding-Sublayer */ + qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], SERDES_START | PCS_START); - ret = readl_poll_timeout(status, val, (val & mask) == ready, 10, - PHY_INIT_COMPLETE_TIMEOUT); - if (ret) { - dev_err(qmp->dev, "phy initialization timed-out\n"); - goto err_disable_pipe_clk; - } + status = pcs + cfg->regs[QPHY_PCS_STATUS]; + ret = readl_poll_timeout(status, val, !(val & PHYSTATUS), 200, + PHY_INIT_COMPLETE_TIMEOUT); + if (ret) { + dev_err(qmp->dev, "phy initialization timed-out\n"); + goto err_disable_pipe_clk; } + return 0; err_disable_pipe_clk: - clk_disable_unprepare(qphy->pipe_clk); + clk_disable_unprepare(qmp->pipe_clk); return ret; } -static int qmp_combo_power_off(struct phy *phy) +static int qmp_combo_usb_power_off(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - const struct qmp_phy_cfg *cfg = qphy->cfg; + struct qmp_combo *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; - clk_disable_unprepare(qphy->pipe_clk); + clk_disable_unprepare(qmp->pipe_clk); - if (cfg->type == PHY_TYPE_DP) { - /* Assert DP PHY power down */ - writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_DP_PHY_PD_CTL); - } else { - /* PHY reset */ - qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); - - /* stop SerDes and Phy-Coding-Sublayer */ - qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); - - /* Put PHY into POWER DOWN state: active low */ - if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) { - qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], - cfg->pwrdn_ctrl); - } else { - qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, - cfg->pwrdn_ctrl); - } - } + /* PHY reset */ + qphy_setbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); - return 0; -} - -static int qmp_combo_exit(struct phy *phy) -{ - struct qmp_phy *qphy = phy_get_drvdata(phy); + /* stop SerDes and Phy-Coding-Sublayer */ + qphy_clrbits(qmp->pcs, cfg->regs[QPHY_START_CTRL], + SERDES_START | PCS_START); - qmp_combo_com_exit(qphy); + /* Put PHY into POWER DOWN state: active low */ + qphy_clrbits(qmp->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], + SW_PWRDN); return 0; } -static int qmp_combo_enable(struct phy *phy) +static int qmp_combo_usb_init(struct phy *phy) { + struct qmp_combo *qmp = phy_get_drvdata(phy); int ret; - ret = qmp_combo_init(phy); + ret = qmp_combo_com_init(qmp); if (ret) return ret; - ret = qmp_combo_power_on(phy); + ret = qmp_combo_usb_power_on(phy); if (ret) - qmp_combo_exit(phy); + qmp_combo_com_exit(qmp); return ret; } -static int qmp_combo_disable(struct phy *phy) +static int qmp_combo_usb_exit(struct phy *phy) { + struct qmp_combo *qmp = phy_get_drvdata(phy); int ret; - ret = 
qmp_combo_power_off(phy); + ret = qmp_combo_usb_power_off(phy); if (ret) return ret; - return qmp_combo_exit(phy); + + return qmp_combo_com_exit(qmp); } -static int qmp_combo_set_mode(struct phy *phy, enum phy_mode mode, int submode) +static int qmp_combo_usb_set_mode(struct phy *phy, enum phy_mode mode, int submode) { - struct qmp_phy *qphy = phy_get_drvdata(phy); + struct qmp_combo *qmp = phy_get_drvdata(phy); - qphy->mode = mode; + qmp->mode = mode; return 0; } -static void qmp_combo_enable_autonomous_mode(struct qmp_phy *qphy) +static const struct phy_ops qmp_combo_usb_phy_ops = { + .init = qmp_combo_usb_init, + .exit = qmp_combo_usb_exit, + .set_mode = qmp_combo_usb_set_mode, + .owner = THIS_MODULE, +}; + +static const struct phy_ops qmp_combo_dp_phy_ops = { + .init = qmp_combo_dp_init, + .configure = qmp_combo_dp_configure, + .power_on = qmp_combo_dp_power_on, + .calibrate = qmp_combo_dp_calibrate, + .power_off = qmp_combo_dp_power_off, + .exit = qmp_combo_dp_exit, + .owner = THIS_MODULE, +}; + +static void qmp_combo_enable_autonomous_mode(struct qmp_combo *qmp) { - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs; - void __iomem *pcs_misc = qphy->pcs_misc; + const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *pcs_usb = qmp->pcs_usb ?: qmp->pcs; + void __iomem *pcs_misc = qmp->pcs_misc; u32 intr_mask; - if (qphy->mode == PHY_MODE_USB_HOST_SS || - qphy->mode == PHY_MODE_USB_DEVICE_SS) + if (qmp->mode == PHY_MODE_USB_HOST_SS || + qmp->mode == PHY_MODE_USB_DEVICE_SS) intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN; else intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL; @@ -2237,11 +2102,11 @@ static void qmp_combo_enable_autonomous_mode(struct qmp_phy *qphy) qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN); } -static void qmp_combo_disable_autonomous_mode(struct qmp_phy *qphy) +static void qmp_combo_disable_autonomous_mode(struct qmp_combo *qmp) { - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs; - void __iomem *pcs_misc = qphy->pcs_misc; + const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *pcs_usb = qmp->pcs_usb ?: qmp->pcs; + void __iomem *pcs_misc = qmp->pcs_misc; /* Disable i/o clamp_n on resume for normal mode */ if (pcs_misc) @@ -2257,24 +2122,19 @@ static void qmp_combo_disable_autonomous_mode(struct qmp_phy *qphy) static int __maybe_unused qmp_combo_runtime_suspend(struct device *dev) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); - struct qmp_phy *qphy = qmp->phys[0]; - const struct qmp_phy_cfg *cfg = qphy->cfg; + struct qmp_combo *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; - dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qphy->mode); - - /* Supported only for USB3 PHY and luckily USB3 is the first phy */ - if (cfg->type != PHY_TYPE_USB3) - return 0; + dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode); if (!qmp->init_count) { dev_vdbg(dev, "PHY not initialized, bailing out\n"); return 0; } - qmp_combo_enable_autonomous_mode(qphy); + qmp_combo_enable_autonomous_mode(qmp); - clk_disable_unprepare(qphy->pipe_clk); + clk_disable_unprepare(qmp->pipe_clk); clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks); return 0; @@ -2282,16 +2142,11 @@ static int __maybe_unused qmp_combo_runtime_suspend(struct device *dev) static int __maybe_unused qmp_combo_runtime_resume(struct device *dev) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); - struct qmp_phy *qphy = qmp->phys[0]; - const struct qmp_phy_cfg *cfg = qphy->cfg; + struct 
qmp_combo *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; int ret = 0; - dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qphy->mode); - - /* Supported only for USB3 PHY and luckily USB3 is the first phy */ - if (cfg->type != PHY_TYPE_USB3) - return 0; + dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode); if (!qmp->init_count) { dev_vdbg(dev, "PHY not initialized, bailing out\n"); @@ -2302,21 +2157,27 @@ static int __maybe_unused qmp_combo_runtime_resume(struct device *dev) if (ret) return ret; - ret = clk_prepare_enable(qphy->pipe_clk); + ret = clk_prepare_enable(qmp->pipe_clk); if (ret) { dev_err(dev, "pipe_clk enable failed, err=%d\n", ret); clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks); return ret; } - qmp_combo_disable_autonomous_mode(qphy); + qmp_combo_disable_autonomous_mode(qmp); return 0; } -static int qmp_combo_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg) +static const struct dev_pm_ops qmp_combo_pm_ops = { + SET_RUNTIME_PM_OPS(qmp_combo_runtime_suspend, + qmp_combo_runtime_resume, NULL) +}; + +static int qmp_combo_vreg_init(struct qmp_combo *qmp) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; int num = cfg->num_vregs; int ret, i; @@ -2346,9 +2207,10 @@ static int qmp_combo_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg return 0; } -static int qmp_combo_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg) +static int qmp_combo_reset_init(struct qmp_combo *qmp) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; int i; int ret; @@ -2367,9 +2229,10 @@ static int qmp_combo_reset_init(struct device *dev, const struct qmp_phy_cfg *cf return 0; } -static int qmp_combo_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg) +static int qmp_combo_clk_init(struct qmp_combo *qmp) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; int num = cfg->num_clks; int i; @@ -2406,41 +2269,21 @@ static void phy_clk_release_provider(void *res) * clk | +-------+ | +-----+ * +---------------+ */ -static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np) +static int phy_pipe_clk_register(struct qmp_combo *qmp, struct device_node *np) { - struct clk_fixed_rate *fixed; + struct clk_fixed_rate *fixed = &qmp->pipe_clk_fixed; struct clk_init_data init = { }; - int ret; - - ret = of_property_read_string(np, "clock-output-names", &init.name); - if (ret) { - dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np); - return ret; - } - - fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL); - if (!fixed) - return -ENOMEM; + char name[64]; + snprintf(name, sizeof(name), "%s::pipe_clk", dev_name(qmp->dev)); + init.name = name; init.ops = &clk_fixed_rate_ops; /* controllers using QMP phys use 125MHz pipe clock interface */ fixed->fixed_rate = 125000000; fixed->hw.init = &init; - ret = devm_clk_hw_register(qmp->dev, &fixed->hw); - if (ret) - return ret; - - ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw); - if (ret) - return ret; - - /* - * Roll a devm action because the clock provider is the child node, but - * the child node is not actually a device. 
- */ - return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np); + return devm_clk_hw_register(qmp->dev, &fixed->hw); } /* @@ -2492,8 +2335,7 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np) * for DP pixel clock * */ -static int qcom_qmp_dp_pixel_clk_determine_rate(struct clk_hw *hw, - struct clk_rate_request *req) +static int qmp_dp_pixel_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { switch (req->rate) { case 1620000000UL / 2: @@ -2505,16 +2347,13 @@ static int qcom_qmp_dp_pixel_clk_determine_rate(struct clk_hw *hw, } } -static unsigned long -qcom_qmp_dp_pixel_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +static unsigned long qmp_dp_pixel_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { - const struct qmp_phy_dp_clks *dp_clks; - const struct qmp_phy *qphy; + const struct qmp_combo *qmp; const struct phy_configure_opts_dp *dp_opts; - dp_clks = container_of(hw, struct qmp_phy_dp_clks, dp_pixel_hw); - qphy = dp_clks->qphy; - dp_opts = &qphy->dp_opts; + qmp = container_of(hw, struct qmp_combo, dp_pixel_hw); + dp_opts = &qmp->dp_opts; switch (dp_opts->link_rate) { case 1620: @@ -2530,13 +2369,12 @@ qcom_qmp_dp_pixel_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) } } -static const struct clk_ops qcom_qmp_dp_pixel_clk_ops = { - .determine_rate = qcom_qmp_dp_pixel_clk_determine_rate, - .recalc_rate = qcom_qmp_dp_pixel_clk_recalc_rate, +static const struct clk_ops qmp_dp_pixel_clk_ops = { + .determine_rate = qmp_dp_pixel_clk_determine_rate, + .recalc_rate = qmp_dp_pixel_clk_recalc_rate, }; -static int qcom_qmp_dp_link_clk_determine_rate(struct clk_hw *hw, - struct clk_rate_request *req) +static int qmp_dp_link_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { switch (req->rate) { case 162000000: @@ -2549,16 +2387,13 @@ static int qcom_qmp_dp_link_clk_determine_rate(struct clk_hw *hw, } } -static unsigned long -qcom_qmp_dp_link_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +static unsigned long qmp_dp_link_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { - const struct qmp_phy_dp_clks *dp_clks; - const struct qmp_phy *qphy; + const struct qmp_combo *qmp; const struct phy_configure_opts_dp *dp_opts; - dp_clks = container_of(hw, struct qmp_phy_dp_clks, dp_link_hw); - qphy = dp_clks->qphy; - dp_opts = &qphy->dp_opts; + qmp = container_of(hw, struct qmp_combo, dp_link_hw); + dp_opts = &qmp->dp_opts; switch (dp_opts->link_rate) { case 1620: @@ -2571,15 +2406,14 @@ qcom_qmp_dp_link_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) } } -static const struct clk_ops qcom_qmp_dp_link_clk_ops = { - .determine_rate = qcom_qmp_dp_link_clk_determine_rate, - .recalc_rate = qcom_qmp_dp_link_clk_recalc_rate, +static const struct clk_ops qmp_dp_link_clk_ops = { + .determine_rate = qmp_dp_link_clk_determine_rate, + .recalc_rate = qmp_dp_link_clk_recalc_rate, }; -static struct clk_hw * -qcom_qmp_dp_clks_hw_get(struct of_phandle_args *clkspec, void *data) +static struct clk_hw *qmp_dp_clks_hw_get(struct of_phandle_args *clkspec, void *data) { - struct qmp_phy_dp_clks *dp_clks = data; + struct qmp_combo *qmp = data; unsigned int idx = clkspec->args[0]; if (idx >= 2) { @@ -2588,43 +2422,76 @@ qcom_qmp_dp_clks_hw_get(struct of_phandle_args *clkspec, void *data) } if (idx == 0) - return &dp_clks->dp_link_hw; + return &qmp->dp_link_hw; - return &dp_clks->dp_pixel_hw; + return &qmp->dp_pixel_hw; } -static int phy_dp_clks_register(struct qcom_qmp 
*qmp, struct qmp_phy *qphy, - struct device_node *np) +static int phy_dp_clks_register(struct qmp_combo *qmp, struct device_node *np) { struct clk_init_data init = { }; - struct qmp_phy_dp_clks *dp_clks; char name[64]; int ret; - dp_clks = devm_kzalloc(qmp->dev, sizeof(*dp_clks), GFP_KERNEL); - if (!dp_clks) - return -ENOMEM; - - dp_clks->qphy = qphy; - qphy->dp_clks = dp_clks; - snprintf(name, sizeof(name), "%s::link_clk", dev_name(qmp->dev)); - init.ops = &qcom_qmp_dp_link_clk_ops; + init.ops = &qmp_dp_link_clk_ops; init.name = name; - dp_clks->dp_link_hw.init = &init; - ret = devm_clk_hw_register(qmp->dev, &dp_clks->dp_link_hw); + qmp->dp_link_hw.init = &init; + ret = devm_clk_hw_register(qmp->dev, &qmp->dp_link_hw); if (ret) return ret; snprintf(name, sizeof(name), "%s::vco_div_clk", dev_name(qmp->dev)); - init.ops = &qcom_qmp_dp_pixel_clk_ops; + init.ops = &qmp_dp_pixel_clk_ops; init.name = name; - dp_clks->dp_pixel_hw.init = &init; - ret = devm_clk_hw_register(qmp->dev, &dp_clks->dp_pixel_hw); + qmp->dp_pixel_hw.init = &init; + ret = devm_clk_hw_register(qmp->dev, &qmp->dp_pixel_hw); + if (ret) + return ret; + + return 0; +} + +static struct clk_hw *qmp_combo_clk_hw_get(struct of_phandle_args *clkspec, void *data) +{ + struct qmp_combo *qmp = data; + + switch (clkspec->args[0]) { + case QMP_USB43DP_USB3_PIPE_CLK: + return &qmp->pipe_clk_fixed.hw; + case QMP_USB43DP_DP_LINK_CLK: + return &qmp->dp_link_hw; + case QMP_USB43DP_DP_VCO_DIV_CLK: + return &qmp->dp_pixel_hw; + } + + return ERR_PTR(-EINVAL); +} + +static int qmp_combo_register_clocks(struct qmp_combo *qmp, struct device_node *usb_np, + struct device_node *dp_np) +{ + int ret; + + ret = phy_pipe_clk_register(qmp, usb_np); + if (ret) + return ret; + + ret = phy_dp_clks_register(qmp, dp_np); if (ret) return ret; - ret = of_clk_add_hw_provider(np, qcom_qmp_dp_clks_hw_get, dp_clks); + /* + * Register a single provider for bindings without child nodes. + */ + if (usb_np == qmp->dev->of_node) + return devm_of_clk_add_hw_provider(qmp->dev, qmp_combo_clk_hw_get, qmp); + + /* + * Register multiple providers for legacy bindings with child nodes. + */ + ret = of_clk_add_hw_provider(usb_np, of_clk_hw_simple_get, + &qmp->pipe_clk_fixed.hw); if (ret) return ret; @@ -2632,162 +2499,184 @@ static int phy_dp_clks_register(struct qcom_qmp *qmp, struct qmp_phy *qphy, * Roll a devm action because the clock provider is the child node, but * the child node is not actually a device. 
*/ - return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np); -} + ret = devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, usb_np); + if (ret) + return ret; -static const struct phy_ops qmp_combo_usb_ops = { - .init = qmp_combo_enable, - .exit = qmp_combo_disable, - .set_mode = qmp_combo_set_mode, - .owner = THIS_MODULE, -}; + ret = of_clk_add_hw_provider(dp_np, qmp_dp_clks_hw_get, qmp); + if (ret) + return ret; -static const struct phy_ops qmp_combo_dp_ops = { - .init = qmp_combo_init, - .configure = qcom_qmp_dp_phy_configure, - .power_on = qmp_combo_power_on, - .calibrate = qcom_qmp_dp_phy_calibrate, - .power_off = qmp_combo_power_off, - .exit = qmp_combo_exit, - .set_mode = qmp_combo_set_mode, - .owner = THIS_MODULE, -}; + return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, dp_np); +} -static int qmp_combo_create(struct device *dev, struct device_node *np, int id, - void __iomem *serdes, const struct qmp_phy_cfg *cfg) +static int qmp_combo_parse_dt_legacy_dp(struct qmp_combo *qmp, struct device_node *np) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); - struct phy *generic_phy; - struct qmp_phy *qphy; - const struct phy_ops *ops; - int ret; + struct device *dev = qmp->dev; - qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL); - if (!qphy) - return -ENOMEM; + /* + * Get memory resources from the DP child node: + * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2; + * tx2 -> 3; rx2 -> 4 + * + * Note that only tx/tx2 and pcs (dp_phy) are used by the DP + * implementation. + */ + qmp->dp_tx = devm_of_iomap(dev, np, 0, NULL); + if (IS_ERR(qmp->dp_tx)) + return PTR_ERR(qmp->dp_tx); + + qmp->dp_dp_phy = devm_of_iomap(dev, np, 2, NULL); + if (IS_ERR(qmp->dp_dp_phy)) + return PTR_ERR(qmp->dp_dp_phy); + + qmp->dp_tx2 = devm_of_iomap(dev, np, 3, NULL); + if (IS_ERR(qmp->dp_tx2)) + return PTR_ERR(qmp->dp_tx2); + + return 0; +} + +static int qmp_combo_parse_dt_legacy_usb(struct qmp_combo *qmp, struct device_node *np) +{ + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; - qphy->cfg = cfg; - qphy->serdes = serdes; /* - * Get memory resources for each phy lane: - * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2. - * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5 - * For single lane PHYs: pcs_misc (optional) -> 3.
+ * Get memory resources from the USB child node: + * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2; + * tx2 -> 3; rx2 -> 4; pcs_misc (optional) -> 5 */ - qphy->tx = devm_of_iomap(dev, np, 0, NULL); - if (IS_ERR(qphy->tx)) - return PTR_ERR(qphy->tx); + qmp->tx = devm_of_iomap(dev, np, 0, NULL); + if (IS_ERR(qmp->tx)) + return PTR_ERR(qmp->tx); - qphy->rx = devm_of_iomap(dev, np, 1, NULL); - if (IS_ERR(qphy->rx)) - return PTR_ERR(qphy->rx); + qmp->rx = devm_of_iomap(dev, np, 1, NULL); + if (IS_ERR(qmp->rx)) + return PTR_ERR(qmp->rx); - qphy->pcs = devm_of_iomap(dev, np, 2, NULL); - if (IS_ERR(qphy->pcs)) - return PTR_ERR(qphy->pcs); + qmp->pcs = devm_of_iomap(dev, np, 2, NULL); + if (IS_ERR(qmp->pcs)) + return PTR_ERR(qmp->pcs); if (cfg->pcs_usb_offset) - qphy->pcs_usb = qphy->pcs + cfg->pcs_usb_offset; - - if (cfg->lanes >= 2) { - qphy->tx2 = devm_of_iomap(dev, np, 3, NULL); - if (IS_ERR(qphy->tx2)) - return PTR_ERR(qphy->tx2); + qmp->pcs_usb = qmp->pcs + cfg->pcs_usb_offset; - qphy->rx2 = devm_of_iomap(dev, np, 4, NULL); - if (IS_ERR(qphy->rx2)) - return PTR_ERR(qphy->rx2); + qmp->tx2 = devm_of_iomap(dev, np, 3, NULL); + if (IS_ERR(qmp->tx2)) + return PTR_ERR(qmp->tx2); - qphy->pcs_misc = devm_of_iomap(dev, np, 5, NULL); - } else { - qphy->pcs_misc = devm_of_iomap(dev, np, 3, NULL); - } + qmp->rx2 = devm_of_iomap(dev, np, 4, NULL); + if (IS_ERR(qmp->rx2)) + return PTR_ERR(qmp->rx2); - if (IS_ERR(qphy->pcs_misc)) { + qmp->pcs_misc = devm_of_iomap(dev, np, 5, NULL); + if (IS_ERR(qmp->pcs_misc)) { dev_vdbg(dev, "PHY pcs_misc-reg not used\n"); - qphy->pcs_misc = NULL; + qmp->pcs_misc = NULL; } - /* - * Get PHY's Pipe clock, if any. USB3 and PCIe are PIPE3 - * based phys, so they essentially have pipe clock. So, - * we return error in case phy is USB3 or PIPE type. - * Otherwise, we initialize pipe clock to NULL for - * all phys that don't need this. 
- */ - qphy->pipe_clk = devm_get_clk_from_child(dev, np, NULL); - if (IS_ERR(qphy->pipe_clk)) { - if (cfg->type == PHY_TYPE_USB3) - return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk), - "failed to get lane%d pipe_clk\n", - id); - qphy->pipe_clk = NULL; + qmp->pipe_clk = devm_get_clk_from_child(dev, np, NULL); + if (IS_ERR(qmp->pipe_clk)) { + return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk), + "failed to get pipe clock\n"); } - if (cfg->type == PHY_TYPE_DP) - ops = &qmp_combo_dp_ops; - else - ops = &qmp_combo_usb_ops; + return 0; +} - generic_phy = devm_phy_create(dev, np, ops); - if (IS_ERR(generic_phy)) { - ret = PTR_ERR(generic_phy); - dev_err(dev, "failed to create qphy %d\n", ret); +static int qmp_combo_parse_dt_legacy(struct qmp_combo *qmp, struct device_node *usb_np, + struct device_node *dp_np) +{ + struct platform_device *pdev = to_platform_device(qmp->dev); + int ret; + + qmp->serdes = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(qmp->serdes)) + return PTR_ERR(qmp->serdes); + + qmp->com = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(qmp->com)) + return PTR_ERR(qmp->com); + + qmp->dp_serdes = devm_platform_ioremap_resource(pdev, 2); + if (IS_ERR(qmp->dp_serdes)) + return PTR_ERR(qmp->dp_serdes); + + ret = qmp_combo_parse_dt_legacy_usb(qmp, usb_np); + if (ret) return ret; - } - qphy->phy = generic_phy; - qphy->qmp = qmp; - qmp->phys[id] = qphy; - phy_set_drvdata(generic_phy, qphy); + ret = qmp_combo_parse_dt_legacy_dp(qmp, dp_np); + if (ret) + return ret; return 0; } -static const struct of_device_id qmp_combo_of_match_table[] = { - { - .compatible = "qcom,sc7180-qmp-usb3-dp-phy", - .data = &sc7180_usb3dpphy_cfg, - }, - { - .compatible = "qcom,sdm845-qmp-usb3-dp-phy", - .data = &sdm845_usb3dpphy_cfg, - }, - { - .compatible = "qcom,sm8250-qmp-usb3-dp-phy", - .data = &sm8250_usb3dpphy_cfg, - }, - { - .compatible = "qcom,sc8180x-qmp-usb3-dp-phy", - .data = &sc8180x_usb3dpphy_cfg, - }, - { - .compatible = "qcom,sc8280xp-qmp-usb43dp-phy", - .data = &sc8280xp_usb43dpphy_combo_cfg, - }, - { } -}; -MODULE_DEVICE_TABLE(of, qmp_combo_of_match_table); +static int qmp_combo_parse_dt(struct qmp_combo *qmp) +{ + struct platform_device *pdev = to_platform_device(qmp->dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + const struct qmp_combo_offsets *offs = cfg->offsets; + struct device *dev = qmp->dev; + void __iomem *base; -static const struct dev_pm_ops qmp_combo_pm_ops = { - SET_RUNTIME_PM_OPS(qmp_combo_runtime_suspend, - qmp_combo_runtime_resume, NULL) -}; + if (!offs) + return -EINVAL; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + qmp->com = base + offs->com; + qmp->tx = base + offs->txa; + qmp->rx = base + offs->rxa; + qmp->tx2 = base + offs->txb; + qmp->rx2 = base + offs->rxb; + + qmp->serdes = base + offs->usb3_serdes; + qmp->pcs_misc = base + offs->usb3_pcs_misc; + qmp->pcs = base + offs->usb3_pcs; + qmp->pcs_usb = base + offs->usb3_pcs_usb; + + qmp->dp_serdes = base + offs->dp_serdes; + qmp->dp_tx = base + offs->txa; + qmp->dp_tx2 = base + offs->txb; + qmp->dp_dp_phy = base + offs->dp_dp_phy; + + qmp->pipe_clk = devm_clk_get(dev, "usb3_pipe"); + if (IS_ERR(qmp->pipe_clk)) { + return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk), + "failed to get usb3_pipe clock\n"); + } + + return 0; +} + +static struct phy *qmp_combo_phy_xlate(struct device *dev, struct of_phandle_args *args) +{ + struct qmp_combo *qmp = dev_get_drvdata(dev); + + if (args->args_count == 0) + return ERR_PTR(-EINVAL); + + switch (args->args[0]) { + case
QMP_USB43DP_USB3_PHY: + return qmp->usb_phy; + case QMP_USB43DP_DP_PHY: + return qmp->dp_phy; + } + + return ERR_PTR(-EINVAL); +} static int qmp_combo_probe(struct platform_device *pdev) { - struct qcom_qmp *qmp; + struct qmp_combo *qmp; struct device *dev = &pdev->dev; - struct device_node *child; + struct device_node *dp_np, *usb_np; struct phy_provider *phy_provider; - void __iomem *serdes; - void __iomem *usb_serdes; - void __iomem *dp_serdes = NULL; - const struct qmp_phy_combo_cfg *combo_cfg = NULL; - const struct qmp_phy_cfg *cfg = NULL; - const struct qmp_phy_cfg *usb_cfg = NULL; - const struct qmp_phy_cfg *dp_cfg = NULL; - int num, id, expected_phys; int ret; qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL); @@ -2795,123 +2684,119 @@ static int qmp_combo_probe(struct platform_device *pdev) return -ENOMEM; qmp->dev = dev; - dev_set_drvdata(dev, qmp); - /* Get the specific init parameters of QMP phy */ - combo_cfg = of_device_get_match_data(dev); - if (!combo_cfg) + qmp->cfg = of_device_get_match_data(dev); + if (!qmp->cfg) return -EINVAL; - usb_cfg = combo_cfg->usb_cfg; - cfg = usb_cfg; /* Setup clks and regulators */ - - /* per PHY serdes; usually located at base address */ - usb_serdes = serdes = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(serdes)) - return PTR_ERR(serdes); - - qmp->dp_com = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(qmp->dp_com)) - return PTR_ERR(qmp->dp_com); - - /* Only two serdes for combo PHY */ - dp_serdes = devm_platform_ioremap_resource(pdev, 2); - if (IS_ERR(dp_serdes)) - return PTR_ERR(dp_serdes); - - dp_cfg = combo_cfg->dp_cfg; - expected_phys = 2; - mutex_init(&qmp->phy_mutex); - ret = qmp_combo_clk_init(dev, cfg); + ret = qmp_combo_clk_init(qmp); if (ret) return ret; - ret = qmp_combo_reset_init(dev, cfg); + ret = qmp_combo_reset_init(qmp); if (ret) return ret; - ret = qmp_combo_vreg_init(dev, cfg); + ret = qmp_combo_vreg_init(qmp); if (ret) - return dev_err_probe(dev, ret, - "failed to get regulator supplies\n"); + return ret; - num = of_get_available_child_count(dev->of_node); - /* do we have a rogue child node ? */ - if (num > expected_phys) - return -EINVAL; + /* Check for legacy binding with child nodes. */ + usb_np = of_get_child_by_name(dev->of_node, "usb3-phy"); + if (usb_np) { + dp_np = of_get_child_by_name(dev->of_node, "dp-phy"); + if (!dp_np) { + of_node_put(usb_np); + return -EINVAL; + } - qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL); - if (!qmp->phys) - return -ENOMEM; + ret = qmp_combo_parse_dt_legacy(qmp, usb_np, dp_np); + } else { + usb_np = of_node_get(dev->of_node); + dp_np = of_node_get(dev->of_node); + + ret = qmp_combo_parse_dt(qmp); + } + if (ret) + goto err_node_put; pm_runtime_set_active(dev); ret = devm_pm_runtime_enable(dev); if (ret) - return ret; + goto err_node_put; /* * Prevent runtime pm from being ON by default. Users can enable * it using power/control in sysfs. 
*/ pm_runtime_forbid(dev); - id = 0; - for_each_available_child_of_node(dev->of_node, child) { - if (of_node_name_eq(child, "dp-phy")) { - cfg = dp_cfg; - serdes = dp_serdes; - - /* Create per-lane phy */ - ret = qmp_combo_create(dev, child, id, serdes, cfg); - if (ret) { - dev_err(dev, "failed to create lane%d phy, %d\n", - id, ret); - goto err_node_put; - } - - ret = phy_dp_clks_register(qmp, qmp->phys[id], child); - if (ret) { - dev_err(qmp->dev, - "failed to register DP clock source\n"); - goto err_node_put; - } - } else if (of_node_name_eq(child, "usb3-phy")) { - cfg = usb_cfg; - serdes = usb_serdes; - - /* Create per-lane phy */ - ret = qmp_combo_create(dev, child, id, serdes, cfg); - if (ret) { - dev_err(dev, "failed to create lane%d phy, %d\n", - id, ret); - goto err_node_put; - } - - /* - * Register the pipe clock provided by phy. - * See function description to see details of this pipe clock. - */ - ret = phy_pipe_clk_register(qmp, child); - if (ret) { - dev_err(qmp->dev, - "failed to register pipe clock source\n"); - goto err_node_put; - } - } + ret = qmp_combo_register_clocks(qmp, usb_np, dp_np); + if (ret) + goto err_node_put; + + qmp->usb_phy = devm_phy_create(dev, usb_np, &qmp_combo_usb_phy_ops); + if (IS_ERR(qmp->usb_phy)) { + ret = PTR_ERR(qmp->usb_phy); + dev_err(dev, "failed to create USB PHY: %d\n", ret); + goto err_node_put; + } + + phy_set_drvdata(qmp->usb_phy, qmp); - id++; + qmp->dp_phy = devm_phy_create(dev, dp_np, &qmp_combo_dp_phy_ops); + if (IS_ERR(qmp->dp_phy)) { + ret = PTR_ERR(qmp->dp_phy); + dev_err(dev, "failed to create DP PHY: %d\n", ret); + goto err_node_put; } - phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + phy_set_drvdata(qmp->dp_phy, qmp); + + dev_set_drvdata(dev, qmp); + + if (usb_np == dev->of_node) + phy_provider = devm_of_phy_provider_register(dev, qmp_combo_phy_xlate); + else + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + of_node_put(usb_np); + of_node_put(dp_np); return PTR_ERR_OR_ZERO(phy_provider); err_node_put: - of_node_put(child); + of_node_put(usb_np); + of_node_put(dp_np); return ret; } +static const struct of_device_id qmp_combo_of_match_table[] = { + { + .compatible = "qcom,sc7180-qmp-usb3-dp-phy", + .data = &sc7180_usb3dpphy_cfg, + }, + { + .compatible = "qcom,sc8180x-qmp-usb3-dp-phy", + .data = &sc8180x_usb3dpphy_cfg, + }, + { + .compatible = "qcom,sc8280xp-qmp-usb43dp-phy", + .data = &sc8280xp_usb43dpphy_cfg, + }, + { + .compatible = "qcom,sdm845-qmp-usb3-dp-phy", + .data = &sdm845_usb3dpphy_cfg, + }, + { + .compatible = "qcom,sm8250-qmp-usb3-dp-phy", + .data = &sm8250_usb3dpphy_cfg, + }, + { } +}; +MODULE_DEVICE_TABLE(of, qmp_combo_of_match_table); + static struct platform_driver qmp_combo_driver = { .probe = qmp_combo_probe, .driver = { diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c index 461f0b5d464a8b788449111dda43bab95938a419..a088477e274f74db9b127e813b21bfc4135a12d0 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c @@ -20,8 +20,6 @@ #include <linux/reset.h> #include <linux/slab.h> -#include <dt-bindings/phy/phy.h> - #include "phy-qcom-qmp.h" /* QPHY_SW_RESET bit */ @@ -35,22 +33,16 @@ #define PLL_READY_GATE_EN BIT(3) /* QPHY_PCS_STATUS bit */ #define PHYSTATUS BIT(6) -#define PHYSTATUS_4_20 BIT(7) /* QPHY_COM_PCS_READY_STATUS bit */ #define PCS_READY BIT(0) #define PHY_INIT_COMPLETE_TIMEOUT 10000 #define POWER_DOWN_DELAY_US_MIN 10 -#define POWER_DOWN_DELAY_US_MAX 11 +#define 
POWER_DOWN_DELAY_US_MAX 20 struct qmp_phy_init_tbl { unsigned int offset; unsigned int val; - /* - * register part of layout ? - * if yes, then offset gives index in the reg-layout - */ - bool in_layout; /* * mask of lanes for which this register is written * for cases when second lane needs different values @@ -65,14 +57,6 @@ struct qmp_phy_init_tbl { .lane_mask = 0xff, \ } -#define QMP_PHY_INIT_CFG_L(o, v) \ - { \ - .offset = o, \ - .val = v, \ - .in_layout = true, \ - .lane_mask = 0xff, \ - } - #define QMP_PHY_INIT_CFG_LANE(o, v, l) \ { \ .offset = o, \ @@ -91,7 +75,6 @@ enum qphy_reg_layout { QPHY_SW_RESET, QPHY_START_CTRL, QPHY_PCS_STATUS, - QPHY_PCS_POWER_DOWN_CONTROL, /* Keep last to ensure regs_layout arrays are properly initialized */ QPHY_LAYOUT_SIZE }; @@ -211,18 +194,6 @@ struct qmp_phy_cfg { /* array of registers with different offsets */ const unsigned int *regs; - - unsigned int start_ctrl; - unsigned int pwrdn_ctrl; - unsigned int mask_com_pcs_ready; - /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */ - unsigned int phy_status; - - /* true, if PHY needs delay after POWER_DOWN */ - bool has_pwrdn_delay; - /* power_down delay in usec */ - int pwrdn_delay_min; - int pwrdn_delay_max; }; /** @@ -335,19 +306,9 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = { .vreg_list = qmp_phy_vreg_l, .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = pciephy_regs_layout, - - .start_ctrl = PCS_START | PLL_READY_GATE_EN, - .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, - .mask_com_pcs_ready = PCS_READY, - .phy_status = PHYSTATUS, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, }; static void qmp_pcie_msm8996_configure_lane(void __iomem *base, - const unsigned int *regs, const struct qmp_phy_init_tbl tbl[], int num, u8 lane_mask) @@ -362,19 +323,15 @@ static void qmp_pcie_msm8996_configure_lane(void __iomem *base, if (!(t->lane_mask & lane_mask)) continue; - if (t->in_layout) - writel(t->val, base + regs[t->offset]); - else - writel(t->val, base + t->offset); + writel(t->val, base + t->offset); } } static void qmp_pcie_msm8996_configure(void __iomem *base, - const unsigned int *regs, const struct qmp_phy_init_tbl tbl[], int num) { - qmp_pcie_msm8996_configure_lane(base, regs, tbl, num, 0xff); + qmp_pcie_msm8996_configure_lane(base, tbl, num, 0xff); } static int qmp_pcie_msm8996_serdes_init(struct qmp_phy *qphy) @@ -385,19 +342,17 @@ static int qmp_pcie_msm8996_serdes_init(struct qmp_phy *qphy) const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl; int serdes_tbl_num = cfg->serdes_tbl_num; void __iomem *status; - unsigned int mask, val; + unsigned int val; int ret; - qmp_pcie_msm8996_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num); + qmp_pcie_msm8996_configure(serdes, serdes_tbl, serdes_tbl_num); qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET); qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL], SERDES_START | PCS_START); status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS]; - mask = cfg->mask_com_pcs_ready; - - ret = readl_poll_timeout(status, val, (val & mask), 10, + ret = readl_poll_timeout(status, val, (val & PCS_READY), 200, PHY_INIT_COMPLETE_TIMEOUT); if (ret) { dev_err(qmp->dev, @@ -421,7 +376,6 @@ static int qmp_pcie_msm8996_com_init(struct qmp_phy *qphy) return 0; } - /* turn on regulator supplies */ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); if (ret) { dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); @@ -514,7 +468,7 @@ static int 
qmp_pcie_msm8996_power_on(struct phy *phy) void __iomem *rx = qphy->rx; void __iomem *pcs = qphy->pcs; void __iomem *status; - unsigned int mask, val, ready; + unsigned int val; int ret; qmp_pcie_msm8996_serdes_init(qphy); @@ -533,34 +487,28 @@ static int qmp_pcie_msm8996_power_on(struct phy *phy) } /* Tx, Rx, and PCS configurations */ - qmp_pcie_msm8996_configure_lane(tx, cfg->regs, cfg->tx_tbl, - cfg->tx_tbl_num, 1); - - qmp_pcie_msm8996_configure_lane(rx, cfg->regs, cfg->rx_tbl, - cfg->rx_tbl_num, 1); - - qmp_pcie_msm8996_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num); + qmp_pcie_msm8996_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); + qmp_pcie_msm8996_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); + qmp_pcie_msm8996_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); /* * Pull out PHY from POWER DOWN state. * This is active low enable signal to power-down PHY. */ - qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl); + qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, + SW_PWRDN | REFCLK_DRV_DSBL); - if (cfg->has_pwrdn_delay) - usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max); + usleep_range(POWER_DOWN_DELAY_US_MIN, POWER_DOWN_DELAY_US_MAX); /* Pull PHY out of reset state */ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); /* start SerDes and Phy-Coding-Sublayer */ - qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], + PCS_START | PLL_READY_GATE_EN); status = pcs + cfg->regs[QPHY_PCS_STATUS]; - mask = cfg->phy_status; - ready = 0; - - ret = readl_poll_timeout(status, val, (val & mask) == ready, 10, + ret = readl_poll_timeout(status, val, !(val & PHYSTATUS), 200, PHY_INIT_COMPLETE_TIMEOUT); if (ret) { dev_err(qmp->dev, "phy initialization timed-out\n"); @@ -588,16 +536,12 @@ static int qmp_pcie_msm8996_power_off(struct phy *phy) qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); /* stop SerDes and Phy-Coding-Sublayer */ - qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], + SERDES_START | PCS_START); /* Put PHY into POWER DOWN state: active low */ - if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) { - qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], - cfg->pwrdn_ctrl); - } else { - qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, - cfg->pwrdn_ctrl); - } + qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, + SW_PWRDN | REFCLK_DRV_DSBL); return 0; } @@ -777,7 +721,7 @@ static int qmp_pcie_msm8996_create(struct device *dev, struct device_node *np, i qphy->cfg = cfg; qphy->serdes = serdes; /* - * Get memory resources for each phy lane: + * Get memory resources for each PHY: * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2. */ qphy->tx = devm_of_iomap(dev, np, 0, NULL); @@ -851,12 +795,10 @@ static int qmp_pcie_msm8996_probe(struct platform_device *pdev) qmp->dev = dev; dev_set_drvdata(dev, qmp); - /* Get the specific init parameters of QMP phy */ cfg = of_device_get_match_data(dev); if (!cfg) return -EINVAL; - /* per PHY serdes; usually located at base address */ serdes = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(serdes)) return PTR_ERR(serdes); @@ -875,8 +817,7 @@ static int qmp_pcie_msm8996_probe(struct platform_device *pdev) ret = qmp_pcie_msm8996_vreg_init(dev, cfg); if (ret) - return dev_err_probe(dev, ret, - "failed to get regulator supplies\n"); + return ret; num = of_get_available_child_count(dev->of_node); /* do we have a rogue child node ? 
*/ diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c index 5be5348fbb26b023039e254451045c6ce0af6b8e..1b136a87053f881482cb4966da1edfbfd1472920 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c @@ -10,18 +10,19 @@ #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_address.h> +#include <linux/phy/pcie.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/slab.h> -#include <dt-bindings/phy/phy.h> - #include "phy-qcom-qmp.h" /* QPHY_SW_RESET bit */ @@ -41,11 +42,6 @@ struct qmp_phy_init_tbl { unsigned int offset; unsigned int val; - /* - * register part of layout ? - * if yes, then offset gives index in the reg-layout - */ - bool in_layout; /* * mask of lanes for which this register is written * for cases when second lane needs different values @@ -60,14 +56,6 @@ struct qmp_phy_init_tbl { .lane_mask = 0xff, \ } -#define QMP_PHY_INIT_CFG_L(o, v) \ - { \ - .offset = o, \ - .val = v, \ - .in_layout = true, \ - .lane_mask = 0xff, \ - } - #define QMP_PHY_INIT_CFG_LANE(o, v, l) \ { \ .offset = o, \ @@ -77,11 +65,6 @@ struct qmp_phy_init_tbl { /* set of registers with offsets different per-PHY */ enum qphy_reg_layout { - /* Common block control registers */ - QPHY_COM_SW_RESET, - QPHY_COM_POWER_DOWN_CONTROL, - QPHY_COM_START_CONTROL, - QPHY_COM_PCS_READY_STATUS, /* PCS registers */ QPHY_SW_RESET, QPHY_START_CTRL, @@ -99,25 +82,24 @@ static const unsigned int ipq_pciephy_gen3_regs_layout[QPHY_LAYOUT_SIZE] = { }; static const unsigned int pciephy_regs_layout[QPHY_LAYOUT_SIZE] = { - [QPHY_COM_SW_RESET] = 0x400, - [QPHY_COM_POWER_DOWN_CONTROL] = 0x404, - [QPHY_COM_START_CONTROL] = 0x408, - [QPHY_COM_PCS_READY_STATUS] = 0x448, [QPHY_SW_RESET] = 0x00, [QPHY_START_CTRL] = 0x08, [QPHY_PCS_STATUS] = 0x174, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04, }; static const unsigned int sdm845_qmp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_SW_RESET] = 0x00, [QPHY_START_CTRL] = 0x08, [QPHY_PCS_STATUS] = 0x174, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04, }; static const unsigned int sdm845_qhp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_SW_RESET] = 0x00, [QPHY_START_CTRL] = 0x08, [QPHY_PCS_STATUS] = 0x2ac, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04, }; static const unsigned int sm8250_pcie_regs_layout[QPHY_LAYOUT_SIZE] = { @@ -393,8 +375,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_SIGDET_LVL, 0x99), QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M6DB_V0, 0x15), QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0, 0xe), - QMP_PHY_INIT_CFG_L(QPHY_SW_RESET, 0x0), - QMP_PHY_INIT_CFG_L(QPHY_START_CTRL, 0x3), }; static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_serdes_tbl[] = { @@ -505,6 +485,13 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_pcs_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_CNTRL1, 0x01), QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x0), QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x1), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d), +}; + +static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_pcs_misc_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x0), QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), @@ -517,11 +504,7 @@ static const struct qmp_phy_init_tbl 
ipq8074_pcie_gen3_pcs_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x50), QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x1a), QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x6), - QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10), QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), - QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01), - QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa), - QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d), }; static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = { @@ -854,6 +837,147 @@ static const struct qmp_phy_init_tbl sc8180x_qmp_pcie_pcs_misc_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), }; +static const struct qmp_phy_init_tbl sc8280xp_qmp_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x4c), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x42), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x68), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE0, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE1, 0xb4), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE2_MODE1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xb9), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0x94), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11), +}; + +static const struct qmp_phy_init_tbl sc8280xp_qmp_gen3x1_pcie_rc_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_BUF_ENABLE, 0x07), +}; + +static const struct qmp_phy_init_tbl sc8280xp_qmp_gen3x2_pcie_rc_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 
0x14), +}; + +static const struct qmp_phy_init_tbl sc8280xp_qmp_gen3x4_pcie_serdes_4ln_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x1c), +}; + +static const struct qmp_phy_init_tbl sc8280xp_qmp_gen3x1_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x75), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x1d), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0c), +}; + +static const struct qmp_phy_init_tbl sc8280xp_qmp_gen3x1_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbf), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xd8), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0x5c), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa6), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_TX_ADAPT_POST_THRESH, 0xf0), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf0), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), +}; + +static const struct qmp_phy_init_tbl sc8280xp_qmp_gen3x1_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x05), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x77), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_RATE_SLEW_CNTRL1, 0x0b), +}; + +static const struct qmp_phy_init_tbl sc8280xp_qmp_gen3x1_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG2, 0x0f), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), +}; + +static const struct qmp_phy_init_tbl sc8280xp_qmp_gen3x2_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG_LANE(QSERDES_V5_TX_PI_QEC_CTRL, 0x02, 1), + QMP_PHY_INIT_CFG_LANE(QSERDES_V5_TX_PI_QEC_CTRL, 0x04, 2), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xd5), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0c), +}; + +static const struct qmp_phy_init_tbl sc8280xp_qmp_gen3x2_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xd8), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0xdc), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0x5c), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa6), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08), + 
QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf0), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), +}; + +static const struct qmp_phy_init_tbl sc8280xp_qmp_gen3x2_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x05), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x88), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_RATE_SLEW_CNTRL1, 0x0b), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG3, 0x0f), +}; + +static const struct qmp_phy_init_tbl sc8280xp_qmp_gen3x2_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG2, 0x1d), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG4, 0x07), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00), +}; + static const struct qmp_phy_init_tbl sm8250_qmp_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08), QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34), @@ -1184,15 +1308,29 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_misc_tbl[] = { }; static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f), +}; + +static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rc_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31), QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01), QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde), QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07), QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97), QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c), - QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14), QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90), - QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f), QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06), QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06), QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16), @@ -1200,8 +1338,6 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36), QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36), QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08), - QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46), - QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04), QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a), QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a), QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14), @@ -1214,17 +1350,8 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55), QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0x55), QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05), - QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02), 
QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34), - QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12), - QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00), - QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a), - QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04), - QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88), QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x20), - QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06), - QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14), - QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f), }; static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_tx_tbl[] = { @@ -1285,46 +1412,95 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rx_tbl[] = { }; static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_tbl[] = { - QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG2, 0x16), - QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG3, 0x22), - QMP_PHY_INIT_CFG(QPHY_V5_PCS_G3S2_PRE_GAIN, 0x2e), - QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x99), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG4, 0x16), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG5, 0x22), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_G3S2_PRE_GAIN, 0x2e), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_RX_SIGDET_LVL, 0x99), }; static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_misc_tbl[] = { - QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), - QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00), QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02), QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_EQ_CONFIG1, 0x16), QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3, 0x28), QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e), }; +static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rc_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_PRESET_P10_POST, 0x00), +}; + +static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_ep_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_COM_BG_TIMER, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYS_CLK_CTRL, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x27), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x17), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x19), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x19), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x28), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE0, 0xfb), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN1_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE1, 0xfb), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN1_MODE1, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x60), +}; + +static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x08), +}; + +struct qmp_pcie_offsets { + u16 serdes; + u16 pcs; + u16 pcs_misc; + u16 tx; + u16 rx; + u16 tx2; + u16 rx2; +}; + +struct qmp_phy_cfg_tbls { + const struct qmp_phy_init_tbl 
*serdes; + int serdes_num; + const struct qmp_phy_init_tbl *tx; + int tx_num; + const struct qmp_phy_init_tbl *rx; + int rx_num; + const struct qmp_phy_init_tbl *pcs; + int pcs_num; + const struct qmp_phy_init_tbl *pcs_misc; + int pcs_misc_num; +}; + /* struct qmp_phy_cfg - per-PHY initialization config */ struct qmp_phy_cfg { int lanes; - /* Init sequence for PHY blocks - serdes, tx, rx, pcs */ - const struct qmp_phy_init_tbl *serdes_tbl; - int serdes_tbl_num; - const struct qmp_phy_init_tbl *serdes_tbl_sec; - int serdes_tbl_num_sec; - const struct qmp_phy_init_tbl *tx_tbl; - int tx_tbl_num; - const struct qmp_phy_init_tbl *tx_tbl_sec; - int tx_tbl_num_sec; - const struct qmp_phy_init_tbl *rx_tbl; - int rx_tbl_num; - const struct qmp_phy_init_tbl *rx_tbl_sec; - int rx_tbl_num_sec; - const struct qmp_phy_init_tbl *pcs_tbl; - int pcs_tbl_num; - const struct qmp_phy_init_tbl *pcs_tbl_sec; - int pcs_tbl_num_sec; - const struct qmp_phy_init_tbl *pcs_misc_tbl; - int pcs_misc_tbl_num; - const struct qmp_phy_init_tbl *pcs_misc_tbl_sec; - int pcs_misc_tbl_num_sec; + const struct qmp_pcie_offsets *offsets; + + /* Main init sequence for PHY blocks - serdes, tx, rx, pcs */ + const struct qmp_phy_cfg_tbls tbls; + /* + * Additional init sequences for PHY blocks, providing additional + * register programming. They are used for providing separate sequences + * for the Root Complex and End Point use cases. + * + * If EP mode is not supported, both tables can be left unset. + */ + const struct qmp_phy_cfg_tbls *tbls_rc; + const struct qmp_phy_cfg_tbls *tbls_ep; + + const struct qmp_phy_init_tbl *serdes_4ln_tbl; + int serdes_4ln_num; /* clock ids to be requested */ const char * const *clk_list; @@ -1339,69 +1515,43 @@ struct qmp_phy_cfg { /* array of registers with different offsets */ const unsigned int *regs; - unsigned int start_ctrl; unsigned int pwrdn_ctrl; /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */ unsigned int phy_status; - /* true, if PHY needs delay after POWER_DOWN */ - bool has_pwrdn_delay; - /* power_down delay in usec */ - int pwrdn_delay_min; - int pwrdn_delay_max; + bool skip_start_delay; /* QMP PHY pipe clock interface rate */ unsigned long pipe_clock_rate; }; -/** - * struct qmp_phy - per-lane phy descriptor - * - * @phy: generic phy - * @cfg: phy specific configuration - * @serdes: iomapped memory space for phy's serdes (i.e. 
PLL) - * @tx: iomapped memory space for lane's tx - * @rx: iomapped memory space for lane's rx - * @pcs: iomapped memory space for lane's pcs - * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs) - * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs) - * @pcs_misc: iomapped memory space for lane's pcs_misc - * @pipe_clk: pipe clock - * @qmp: QMP phy to which this lane belongs - */ -struct qmp_phy { - struct phy *phy; +struct qmp_pcie { + struct device *dev; + const struct qmp_phy_cfg *cfg; + bool tcsr_4ln_config; + void __iomem *serdes; + void __iomem *pcs; + void __iomem *pcs_misc; void __iomem *tx; void __iomem *rx; - void __iomem *pcs; void __iomem *tx2; void __iomem *rx2; - void __iomem *pcs_misc; - struct clk *pipe_clk; - struct qcom_qmp *qmp; -}; -/** - * struct qcom_qmp - structure holding QMP phy block attributes - * - * @dev: device - * - * @clks: array of clocks required by phy - * @resets: array of resets required by phy - * @vregs: regulator supplies bulk data - * - * @phys: array of per-lane phy descriptors - */ -struct qcom_qmp { - struct device *dev; + void __iomem *port_b; struct clk_bulk_data *clks; + struct clk_bulk_data pipe_clks[2]; + int num_pipe_clks; + struct reset_control_bulk_data *resets; struct regulator_bulk_data *vregs; - struct qmp_phy **phys; + struct phy *phy; + int mode; + + struct clk_fixed_rate pipe_clk_fixed; }; static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val) @@ -1429,10 +1579,17 @@ static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val) } /* list of clocks required by phy */ +static const char * const ipq8074_pciephy_clk_l[] = { + "aux", "cfg_ahb", +}; + static const char * const msm8996_phy_clk_l[] = { "aux", "cfg_ahb", "ref", }; +static const char * const sc8280xp_pciephy_clk_l[] = { + "aux", "cfg_ahb", "ref", "rchng", +}; static const char * const sdm845_pciephy_clk_l[] = { "aux", "cfg_ahb", "ref", "refgen", @@ -1443,10 +1600,6 @@ static const char * const qmp_phy_vreg_l[] = { "vdda-phy", "vdda-pll", }; -static const char * const ipq8074_pciephy_clk_l[] = { - "aux", "cfg_ahb", -}; - /* list of resets */ static const char * const ipq8074_pciephy_reset_l[] = { "phy", "common", @@ -1456,17 +1609,29 @@ static const char * const sdm845_pciephy_reset_l[] = { "phy", }; +static const struct qmp_pcie_offsets qmp_pcie_offsets_v5 = { + .serdes = 0, + .pcs = 0x0200, + .pcs_misc = 0x0600, + .tx = 0x0e00, + .rx = 0x1000, + .tx2 = 0x1600, + .rx2 = 0x1800, +}; + static const struct qmp_phy_cfg ipq8074_pciephy_cfg = { .lanes = 1, - .serdes_tbl = ipq8074_pcie_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl), - .tx_tbl = ipq8074_pcie_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(ipq8074_pcie_tx_tbl), - .rx_tbl = ipq8074_pcie_rx_tbl, - .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl), - .pcs_tbl = ipq8074_pcie_pcs_tbl, - .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl), + .tbls = { + .serdes = ipq8074_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl), + .tx = ipq8074_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(ipq8074_pcie_tx_tbl), + .rx = ipq8074_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl), + .pcs = ipq8074_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl), + }, .clk_list = ipq8074_pciephy_clk_l, .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l), .reset_list = ipq8074_pciephy_reset_l, @@ -1475,26 +1640,25 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = { .num_vregs = 0, .regs = pciephy_regs_layout, - .start_ctrl = SERDES_START | PCS_START, 
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, .phy_status = PHYSTATUS, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = 995, /* us */ - .pwrdn_delay_max = 1005, /* us */ }; static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = { .lanes = 1, - .serdes_tbl = ipq8074_pcie_gen3_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_serdes_tbl), - .tx_tbl = ipq8074_pcie_gen3_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl), - .rx_tbl = ipq8074_pcie_gen3_rx_tbl, - .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_rx_tbl), - .pcs_tbl = ipq8074_pcie_gen3_pcs_tbl, - .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_pcs_tbl), + .tbls = { + .serdes = ipq8074_pcie_gen3_serdes_tbl, + .serdes_num = ARRAY_SIZE(ipq8074_pcie_gen3_serdes_tbl), + .tx = ipq8074_pcie_gen3_tx_tbl, + .tx_num = ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl), + .rx = ipq8074_pcie_gen3_rx_tbl, + .rx_num = ARRAY_SIZE(ipq8074_pcie_gen3_rx_tbl), + .pcs = ipq8074_pcie_gen3_pcs_tbl, + .pcs_num = ARRAY_SIZE(ipq8074_pcie_gen3_pcs_tbl), + .pcs_misc = ipq8074_pcie_gen3_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(ipq8074_pcie_gen3_pcs_misc_tbl), + }, .clk_list = ipq8074_pciephy_clk_l, .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l), .reset_list = ipq8074_pciephy_reset_l, @@ -1503,12 +1667,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = { .num_vregs = 0, .regs = ipq_pciephy_gen3_regs_layout, - .start_ctrl = SERDES_START | PCS_START, .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = 995, /* us */ - .pwrdn_delay_max = 1005, /* us */ + .phy_status = PHYSTATUS, .pipe_clock_rate = 250000000, }; @@ -1516,16 +1676,18 @@ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = { static const struct qmp_phy_cfg ipq6018_pciephy_cfg = { .lanes = 1, - .serdes_tbl = ipq6018_pcie_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(ipq6018_pcie_serdes_tbl), - .tx_tbl = ipq6018_pcie_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(ipq6018_pcie_tx_tbl), - .rx_tbl = ipq6018_pcie_rx_tbl, - .rx_tbl_num = ARRAY_SIZE(ipq6018_pcie_rx_tbl), - .pcs_tbl = ipq6018_pcie_pcs_tbl, - .pcs_tbl_num = ARRAY_SIZE(ipq6018_pcie_pcs_tbl), - .pcs_misc_tbl = ipq6018_pcie_pcs_misc_tbl, - .pcs_misc_tbl_num = ARRAY_SIZE(ipq6018_pcie_pcs_misc_tbl), + .tbls = { + .serdes = ipq6018_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(ipq6018_pcie_serdes_tbl), + .tx = ipq6018_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(ipq6018_pcie_tx_tbl), + .rx = ipq6018_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(ipq6018_pcie_rx_tbl), + .pcs = ipq6018_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(ipq6018_pcie_pcs_tbl), + .pcs_misc = ipq6018_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(ipq6018_pcie_pcs_misc_tbl), + }, .clk_list = ipq8074_pciephy_clk_l, .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l), .reset_list = ipq8074_pciephy_reset_l, @@ -1534,27 +1696,25 @@ static const struct qmp_phy_cfg ipq6018_pciephy_cfg = { .num_vregs = 0, .regs = ipq_pciephy_gen3_regs_layout, - .start_ctrl = SERDES_START | PCS_START, .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = 995, /* us */ - .pwrdn_delay_max = 1005, /* us */ + .phy_status = PHYSTATUS, }; static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = { .lanes = 1, - .serdes_tbl = sdm845_qmp_pcie_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl), - .tx_tbl = sdm845_qmp_pcie_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_tx_tbl), - .rx_tbl = sdm845_qmp_pcie_rx_tbl, - .rx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_rx_tbl), - .pcs_tbl = sdm845_qmp_pcie_pcs_tbl, - .pcs_tbl_num = 
ARRAY_SIZE(sdm845_qmp_pcie_pcs_tbl), - .pcs_misc_tbl = sdm845_qmp_pcie_pcs_misc_tbl, - .pcs_misc_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_misc_tbl), + .tbls = { + .serdes = sdm845_qmp_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl), + .tx = sdm845_qmp_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(sdm845_qmp_pcie_tx_tbl), + .rx = sdm845_qmp_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(sdm845_qmp_pcie_rx_tbl), + .pcs = sdm845_qmp_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_tbl), + .pcs_misc = sdm845_qmp_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_misc_tbl), + }, .clk_list = sdm845_pciephy_clk_l, .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), .reset_list = sdm845_pciephy_reset_l, @@ -1563,26 +1723,23 @@ static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = sdm845_qmp_pciephy_regs_layout, - .start_ctrl = PCS_START | SERDES_START, .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, .phy_status = PHYSTATUS, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = 995, /* us */ - .pwrdn_delay_max = 1005, /* us */ }; static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = { .lanes = 1, - .serdes_tbl = sdm845_qhp_pcie_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl), - .tx_tbl = sdm845_qhp_pcie_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_tx_tbl), - .rx_tbl = sdm845_qhp_pcie_rx_tbl, - .rx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_rx_tbl), - .pcs_tbl = sdm845_qhp_pcie_pcs_tbl, - .pcs_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_pcs_tbl), + .tbls = { + .serdes = sdm845_qhp_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl), + .tx = sdm845_qhp_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(sdm845_qhp_pcie_tx_tbl), + .rx = sdm845_qhp_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(sdm845_qhp_pcie_rx_tbl), + .pcs = sdm845_qhp_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(sdm845_qhp_pcie_pcs_tbl), + }, .clk_list = sdm845_pciephy_clk_l, .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), .reset_list = sdm845_pciephy_reset_l, @@ -1591,36 +1748,35 @@ static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = sdm845_qhp_pciephy_regs_layout, - .start_ctrl = PCS_START | SERDES_START, .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, .phy_status = PHYSTATUS, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = 995, /* us */ - .pwrdn_delay_max = 1005, /* us */ }; static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = { .lanes = 1, - .serdes_tbl = sm8250_qmp_pcie_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl), - .serdes_tbl_sec = sm8250_qmp_gen3x1_pcie_serdes_tbl, - .serdes_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_serdes_tbl), - .tx_tbl = sm8250_qmp_pcie_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl), - .rx_tbl = sm8250_qmp_pcie_rx_tbl, - .rx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl), - .rx_tbl_sec = sm8250_qmp_gen3x1_pcie_rx_tbl, - .rx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_rx_tbl), - .pcs_tbl = sm8250_qmp_pcie_pcs_tbl, - .pcs_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl), - .pcs_tbl_sec = sm8250_qmp_gen3x1_pcie_pcs_tbl, - .pcs_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_tbl), - .pcs_misc_tbl = sm8250_qmp_pcie_pcs_misc_tbl, - .pcs_misc_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl), - .pcs_misc_tbl_sec = sm8250_qmp_gen3x1_pcie_pcs_misc_tbl, - .pcs_misc_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_misc_tbl), + .tbls = { + .serdes = sm8250_qmp_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl), + 
.tx = sm8250_qmp_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl), + .rx = sm8250_qmp_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl), + .pcs = sm8250_qmp_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl), + .pcs_misc = sm8250_qmp_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl), + }, + .tbls_rc = &(const struct qmp_phy_cfg_tbls) { + .serdes = sm8250_qmp_gen3x1_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_serdes_tbl), + .rx = sm8250_qmp_gen3x1_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_rx_tbl), + .pcs = sm8250_qmp_gen3x1_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_tbl), + .pcs_misc = sm8250_qmp_gen3x1_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_misc_tbl), + }, .clk_list = sdm845_pciephy_clk_l, .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), .reset_list = sdm845_pciephy_reset_l, @@ -1629,36 +1785,35 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = sm8250_pcie_regs_layout, - .start_ctrl = PCS_START | SERDES_START, .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, .phy_status = PHYSTATUS, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = 995, /* us */ - .pwrdn_delay_max = 1005, /* us */ }; static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = { .lanes = 2, - .serdes_tbl = sm8250_qmp_pcie_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl), - .tx_tbl = sm8250_qmp_pcie_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl), - .tx_tbl_sec = sm8250_qmp_gen3x2_pcie_tx_tbl, - .tx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_tx_tbl), - .rx_tbl = sm8250_qmp_pcie_rx_tbl, - .rx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl), - .rx_tbl_sec = sm8250_qmp_gen3x2_pcie_rx_tbl, - .rx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_rx_tbl), - .pcs_tbl = sm8250_qmp_pcie_pcs_tbl, - .pcs_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl), - .pcs_tbl_sec = sm8250_qmp_gen3x2_pcie_pcs_tbl, - .pcs_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_tbl), - .pcs_misc_tbl = sm8250_qmp_pcie_pcs_misc_tbl, - .pcs_misc_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl), - .pcs_misc_tbl_sec = sm8250_qmp_gen3x2_pcie_pcs_misc_tbl, - .pcs_misc_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_misc_tbl), + .tbls = { + .serdes = sm8250_qmp_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl), + .tx = sm8250_qmp_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl), + .rx = sm8250_qmp_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl), + .pcs = sm8250_qmp_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl), + .pcs_misc = sm8250_qmp_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl), + }, + .tbls_rc = &(const struct qmp_phy_cfg_tbls) { + .tx = sm8250_qmp_gen3x2_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_tx_tbl), + .rx = sm8250_qmp_gen3x2_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_rx_tbl), + .pcs = sm8250_qmp_gen3x2_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_tbl), + .pcs_misc = sm8250_qmp_gen3x2_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_misc_tbl), + }, .clk_list = sdm845_pciephy_clk_l, .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), .reset_list = sdm845_pciephy_reset_l, @@ -1667,26 +1822,23 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = sm8250_pcie_regs_layout, - 
.start_ctrl = PCS_START | SERDES_START, .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, .phy_status = PHYSTATUS, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = 995, /* us */ - .pwrdn_delay_max = 1005, /* us */ }; static const struct qmp_phy_cfg msm8998_pciephy_cfg = { .lanes = 1, - .serdes_tbl = msm8998_pcie_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(msm8998_pcie_serdes_tbl), - .tx_tbl = msm8998_pcie_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(msm8998_pcie_tx_tbl), - .rx_tbl = msm8998_pcie_rx_tbl, - .rx_tbl_num = ARRAY_SIZE(msm8998_pcie_rx_tbl), - .pcs_tbl = msm8998_pcie_pcs_tbl, - .pcs_tbl_num = ARRAY_SIZE(msm8998_pcie_pcs_tbl), + .tbls = { + .serdes = msm8998_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(msm8998_pcie_serdes_tbl), + .tx = msm8998_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(msm8998_pcie_tx_tbl), + .rx = msm8998_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(msm8998_pcie_rx_tbl), + .pcs = msm8998_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(msm8998_pcie_pcs_tbl), + }, .clk_list = msm8996_phy_clk_l, .num_clks = ARRAY_SIZE(msm8996_phy_clk_l), .reset_list = ipq8074_pciephy_reset_l, @@ -1695,24 +1847,27 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = pciephy_regs_layout, - .start_ctrl = SERDES_START | PCS_START, .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, .phy_status = PHYSTATUS, + + .skip_start_delay = true, }; static const struct qmp_phy_cfg sc8180x_pciephy_cfg = { .lanes = 1, - .serdes_tbl = sc8180x_qmp_pcie_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_serdes_tbl), - .tx_tbl = sc8180x_qmp_pcie_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_tx_tbl), - .rx_tbl = sc8180x_qmp_pcie_rx_tbl, - .rx_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_rx_tbl), - .pcs_tbl = sc8180x_qmp_pcie_pcs_tbl, - .pcs_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_tbl), - .pcs_misc_tbl = sc8180x_qmp_pcie_pcs_misc_tbl, - .pcs_misc_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_misc_tbl), + .tbls = { + .serdes = sc8180x_qmp_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(sc8180x_qmp_pcie_serdes_tbl), + .tx = sc8180x_qmp_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(sc8180x_qmp_pcie_tx_tbl), + .rx = sc8180x_qmp_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(sc8180x_qmp_pcie_rx_tbl), + .pcs = sc8180x_qmp_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_tbl), + .pcs_misc = sc8180x_qmp_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_misc_tbl), + }, .clk_list = sdm845_pciephy_clk_l, .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), .reset_list = sdm845_pciephy_reset_l, @@ -1721,27 +1876,133 @@ static const struct qmp_phy_cfg sc8180x_pciephy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = sm8250_pcie_regs_layout, - .start_ctrl = PCS_START | SERDES_START, .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + .phy_status = PHYSTATUS, +}; + +static const struct qmp_phy_cfg sc8280xp_qmp_gen3x1_pciephy_cfg = { + .lanes = 1, + + .offsets = &qmp_pcie_offsets_v5, + + .tbls = { + .serdes = sc8280xp_qmp_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(sc8280xp_qmp_pcie_serdes_tbl), + .tx = sc8280xp_qmp_gen3x1_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(sc8280xp_qmp_gen3x1_pcie_tx_tbl), + .rx = sc8280xp_qmp_gen3x1_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(sc8280xp_qmp_gen3x1_pcie_rx_tbl), + .pcs = sc8280xp_qmp_gen3x1_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(sc8280xp_qmp_gen3x1_pcie_pcs_tbl), + .pcs_misc = sc8280xp_qmp_gen3x1_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(sc8280xp_qmp_gen3x1_pcie_pcs_misc_tbl), + }, + + .tbls_rc = &(const struct qmp_phy_cfg_tbls) { + .serdes = sc8280xp_qmp_gen3x1_pcie_rc_serdes_tbl, + 
.serdes_num = ARRAY_SIZE(sc8280xp_qmp_gen3x1_pcie_rc_serdes_tbl), + }, + + .clk_list = sc8280xp_pciephy_clk_l, + .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l), + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm8250_pcie_regs_layout, + + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + .phy_status = PHYSTATUS, +}; + +static const struct qmp_phy_cfg sc8280xp_qmp_gen3x2_pciephy_cfg = { + .lanes = 2, + + .offsets = &qmp_pcie_offsets_v5, + + .tbls = { + .serdes = sc8280xp_qmp_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(sc8280xp_qmp_pcie_serdes_tbl), + .tx = sc8280xp_qmp_gen3x2_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(sc8280xp_qmp_gen3x2_pcie_tx_tbl), + .rx = sc8280xp_qmp_gen3x2_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(sc8280xp_qmp_gen3x2_pcie_rx_tbl), + .pcs = sc8280xp_qmp_gen3x2_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(sc8280xp_qmp_gen3x2_pcie_pcs_tbl), + .pcs_misc = sc8280xp_qmp_gen3x2_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(sc8280xp_qmp_gen3x2_pcie_pcs_misc_tbl), + }, + + .tbls_rc = &(const struct qmp_phy_cfg_tbls) { + .serdes = sc8280xp_qmp_gen3x2_pcie_rc_serdes_tbl, + .serdes_num = ARRAY_SIZE(sc8280xp_qmp_gen3x2_pcie_rc_serdes_tbl), + }, + + .clk_list = sc8280xp_pciephy_clk_l, + .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l), + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm8250_pcie_regs_layout, + + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + .phy_status = PHYSTATUS, +}; + +static const struct qmp_phy_cfg sc8280xp_qmp_gen3x4_pciephy_cfg = { + .lanes = 4, - .has_pwrdn_delay = true, - .pwrdn_delay_min = 995, /* us */ - .pwrdn_delay_max = 1005, /* us */ + .offsets = &qmp_pcie_offsets_v5, + + .tbls = { + .serdes = sc8280xp_qmp_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(sc8280xp_qmp_pcie_serdes_tbl), + .tx = sc8280xp_qmp_gen3x2_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(sc8280xp_qmp_gen3x2_pcie_tx_tbl), + .rx = sc8280xp_qmp_gen3x2_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(sc8280xp_qmp_gen3x2_pcie_rx_tbl), + .pcs = sc8280xp_qmp_gen3x2_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(sc8280xp_qmp_gen3x2_pcie_pcs_tbl), + .pcs_misc = sc8280xp_qmp_gen3x2_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(sc8280xp_qmp_gen3x2_pcie_pcs_misc_tbl), + }, + + .tbls_rc = &(const struct qmp_phy_cfg_tbls) { + .serdes = sc8280xp_qmp_gen3x2_pcie_rc_serdes_tbl, + .serdes_num = ARRAY_SIZE(sc8280xp_qmp_gen3x2_pcie_rc_serdes_tbl), + }, + + .serdes_4ln_tbl = sc8280xp_qmp_gen3x4_pcie_serdes_4ln_tbl, + .serdes_4ln_num = ARRAY_SIZE(sc8280xp_qmp_gen3x4_pcie_serdes_4ln_tbl), + + .clk_list = sc8280xp_pciephy_clk_l, + .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l), + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm8250_pcie_regs_layout, + + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + .phy_status = PHYSTATUS, }; static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = { .lanes = 2, - .serdes_tbl = sdx55_qmp_pcie_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl), - .tx_tbl = sdx55_qmp_pcie_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_tx_tbl), - .rx_tbl = sdx55_qmp_pcie_rx_tbl, - .rx_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_rx_tbl), - .pcs_tbl = sdx55_qmp_pcie_pcs_tbl, - .pcs_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_tbl), - .pcs_misc_tbl = 
sdx55_qmp_pcie_pcs_misc_tbl, - .pcs_misc_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_misc_tbl), + .tbls = { + .serdes = sdx55_qmp_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl), + .tx = sdx55_qmp_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(sdx55_qmp_pcie_tx_tbl), + .rx = sdx55_qmp_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(sdx55_qmp_pcie_rx_tbl), + .pcs = sdx55_qmp_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_tbl), + .pcs_misc = sdx55_qmp_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_misc_tbl), + }, .clk_list = sdm845_pciephy_clk_l, .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), .reset_list = sdm845_pciephy_reset_l, @@ -1750,28 +2011,25 @@ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = sm8250_pcie_regs_layout, - .start_ctrl = PCS_START | SERDES_START, .pwrdn_ctrl = SW_PWRDN, .phy_status = PHYSTATUS_4_20, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = 995, /* us */ - .pwrdn_delay_max = 1005, /* us */ }; static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = { .lanes = 1, - .serdes_tbl = sm8450_qmp_gen3x1_pcie_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl), - .tx_tbl = sm8450_qmp_gen3x1_pcie_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_tx_tbl), - .rx_tbl = sm8450_qmp_gen3x1_pcie_rx_tbl, - .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rx_tbl), - .pcs_tbl = sm8450_qmp_gen3x1_pcie_pcs_tbl, - .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_tbl), - .pcs_misc_tbl = sm8450_qmp_gen3x1_pcie_pcs_misc_tbl, - .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl), + .tbls = { + .serdes = sm8450_qmp_gen3x1_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl), + .tx = sm8450_qmp_gen3x1_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_tx_tbl), + .rx = sm8450_qmp_gen3x1_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rx_tbl), + .pcs = sm8450_qmp_gen3x1_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_tbl), + .pcs_misc = sm8450_qmp_gen3x1_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl), + }, .clk_list = sdm845_pciephy_clk_l, .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), .reset_list = sdm845_pciephy_reset_l, @@ -1780,28 +2038,40 @@ static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = sm8250_pcie_regs_layout, - .start_ctrl = SERDES_START | PCS_START, .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, .phy_status = PHYSTATUS, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = 995, /* us */ - .pwrdn_delay_max = 1005, /* us */ }; static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = { .lanes = 2, - .serdes_tbl = sm8450_qmp_gen4x2_pcie_serdes_tbl, - .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl), - .tx_tbl = sm8450_qmp_gen4x2_pcie_tx_tbl, - .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_tx_tbl), - .rx_tbl = sm8450_qmp_gen4x2_pcie_rx_tbl, - .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rx_tbl), - .pcs_tbl = sm8450_qmp_gen4x2_pcie_pcs_tbl, - .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_tbl), - .pcs_misc_tbl = sm8450_qmp_gen4x2_pcie_pcs_misc_tbl, - .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_misc_tbl), + .tbls = { + .serdes = sm8450_qmp_gen4x2_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl), + .tx = sm8450_qmp_gen4x2_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_tx_tbl), + .rx = 
sm8450_qmp_gen4x2_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rx_tbl), + .pcs = sm8450_qmp_gen4x2_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_tbl), + .pcs_misc = sm8450_qmp_gen4x2_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_misc_tbl), + }, + + .tbls_rc = &(const struct qmp_phy_cfg_tbls) { + .serdes = sm8450_qmp_gen4x2_pcie_rc_serdes_tbl, + .serdes_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rc_serdes_tbl), + .pcs_misc = sm8450_qmp_gen4x2_pcie_rc_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rc_pcs_misc_tbl), + }, + + .tbls_ep = &(const struct qmp_phy_cfg_tbls) { + .serdes = sm8450_qmp_gen4x2_pcie_ep_serdes_tbl, + .serdes_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_ep_serdes_tbl), + .pcs_misc = sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl), + }, + .clk_list = sdm845_pciephy_clk_l, .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), .reset_list = sdm845_pciephy_reset_l, @@ -1810,17 +2080,11 @@ static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = sm8250_pcie_regs_layout, - .start_ctrl = SERDES_START | PCS_START, .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, .phy_status = PHYSTATUS_4_20, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = 995, /* us */ - .pwrdn_delay_max = 1005, /* us */ }; static void qmp_pcie_configure_lane(void __iomem *base, - const unsigned int *regs, const struct qmp_phy_init_tbl tbl[], int num, u8 lane_mask) @@ -1835,43 +2099,74 @@ static void qmp_pcie_configure_lane(void __iomem *base, if (!(t->lane_mask & lane_mask)) continue; - if (t->in_layout) - writel(t->val, base + regs[t->offset]); - else - writel(t->val, base + t->offset); + writel(t->val, base + t->offset); } } static void qmp_pcie_configure(void __iomem *base, - const unsigned int *regs, const struct qmp_phy_init_tbl tbl[], int num) { - qmp_pcie_configure_lane(base, regs, tbl, num, 0xff); + qmp_pcie_configure_lane(base, tbl, num, 0xff); } -static int qmp_pcie_serdes_init(struct qmp_phy *qphy) +static void qmp_pcie_init_port_b(struct qmp_pcie *qmp, const struct qmp_phy_cfg_tbls *tbls) { - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *serdes = qphy->serdes; - const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl; - int serdes_tbl_num = cfg->serdes_tbl_num; + const struct qmp_phy_cfg *cfg = qmp->cfg; + const struct qmp_pcie_offsets *offs = cfg->offsets; + void __iomem *tx3, *rx3, *tx4, *rx4; - qmp_pcie_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num); - qmp_pcie_configure(serdes, cfg->regs, cfg->serdes_tbl_sec, cfg->serdes_tbl_num_sec); + tx3 = qmp->port_b + offs->tx; + rx3 = qmp->port_b + offs->rx; + tx4 = qmp->port_b + offs->tx2; + rx4 = qmp->port_b + offs->rx2; - return 0; + qmp_pcie_configure_lane(tx3, tbls->tx, tbls->tx_num, 1); + qmp_pcie_configure_lane(rx3, tbls->rx, tbls->rx_num, 1); + + qmp_pcie_configure_lane(tx4, tbls->tx, tbls->tx_num, 2); + qmp_pcie_configure_lane(rx4, tbls->rx, tbls->rx_num, 2); +} + +static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_cfg_tbls *tbls) +{ + const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *serdes = qmp->serdes; + void __iomem *tx = qmp->tx; + void __iomem *rx = qmp->rx; + void __iomem *tx2 = qmp->tx2; + void __iomem *rx2 = qmp->rx2; + void __iomem *pcs = qmp->pcs; + void __iomem *pcs_misc = qmp->pcs_misc; + + if (!tbls) + return; + + qmp_pcie_configure(serdes, tbls->serdes, tbls->serdes_num); + + 
qmp_pcie_configure_lane(tx, tbls->tx, tbls->tx_num, 1); + qmp_pcie_configure_lane(rx, tbls->rx, tbls->rx_num, 1); + + if (cfg->lanes >= 2) { + qmp_pcie_configure_lane(tx2, tbls->tx, tbls->tx_num, 2); + qmp_pcie_configure_lane(rx2, tbls->rx, tbls->rx_num, 2); + } + + qmp_pcie_configure(pcs, tbls->pcs, tbls->pcs_num); + qmp_pcie_configure(pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num); + + if (cfg->lanes >= 4 && qmp->tcsr_4ln_config) { + qmp_pcie_configure(serdes, cfg->serdes_4ln_tbl, cfg->serdes_4ln_num); + qmp_pcie_init_port_b(qmp, tbls); + } } static int qmp_pcie_init(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *pcs = qphy->pcs; + struct qmp_pcie *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; int ret; - /* turn on regulator supplies */ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); if (ret) { dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); @@ -1884,6 +2179,8 @@ static int qmp_pcie_init(struct phy *phy) goto err_disable_regulators; } + usleep_range(200, 300); + ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets); if (ret) { dev_err(qmp->dev, "reset deassert failed\n"); @@ -1894,14 +2191,6 @@ static int qmp_pcie_init(struct phy *phy) if (ret) goto err_assert_reset; - if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) - qphy_setbits(pcs, - cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], - cfg->pwrdn_ctrl); - else - qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, - cfg->pwrdn_ctrl); - return 0; err_assert_reset: @@ -1914,9 +2203,8 @@ err_disable_regulators: static int qmp_pcie_exit(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qphy->cfg; + struct qmp_pcie *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; reset_control_bulk_assert(cfg->num_resets, qmp->resets); @@ -1929,72 +2217,41 @@ static int qmp_pcie_exit(struct phy *phy) static int qmp_pcie_power_on(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *tx = qphy->tx; - void __iomem *rx = qphy->rx; - void __iomem *pcs = qphy->pcs; - void __iomem *pcs_misc = qphy->pcs_misc; + struct qmp_pcie *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; + const struct qmp_phy_cfg_tbls *mode_tbls; + void __iomem *pcs = qmp->pcs; void __iomem *status; - unsigned int mask, val, ready; + unsigned int mask, val; int ret; - qmp_pcie_serdes_init(qphy); - - ret = clk_prepare_enable(qphy->pipe_clk); - if (ret) { - dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret); - return ret; - } - - /* Tx, Rx, and PCS configurations */ - qmp_pcie_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1); - qmp_pcie_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec, cfg->tx_tbl_num_sec, 1); - - if (cfg->lanes >= 2) { - qmp_pcie_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl, - cfg->tx_tbl_num, 2); - qmp_pcie_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl_sec, - cfg->tx_tbl_num_sec, 2); - } - - qmp_pcie_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1); - qmp_pcie_configure_lane(rx, cfg->regs, cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1); + qphy_setbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], + cfg->pwrdn_ctrl); - if (cfg->lanes >= 2) { - qmp_pcie_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl, - cfg->rx_tbl_num, 2); - 
qmp_pcie_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl_sec, - cfg->rx_tbl_num_sec, 2); - } - - qmp_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num); - qmp_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl_sec, cfg->pcs_tbl_num_sec); + if (qmp->mode == PHY_MODE_PCIE_RC) + mode_tbls = cfg->tbls_rc; + else + mode_tbls = cfg->tbls_ep; - qmp_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl, cfg->pcs_misc_tbl_num); - qmp_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec, cfg->pcs_misc_tbl_num_sec); + qmp_pcie_init_registers(qmp, &cfg->tbls); + qmp_pcie_init_registers(qmp, mode_tbls); - /* - * Pull out PHY from POWER DOWN state. - * This is active low enable signal to power-down PHY. - */ - qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl); - - if (cfg->has_pwrdn_delay) - usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max); + ret = clk_bulk_prepare_enable(qmp->num_pipe_clks, qmp->pipe_clks); + if (ret) + return ret; /* Pull PHY out of reset state */ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); /* start SerDes and Phy-Coding-Sublayer */ - qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], SERDES_START | PCS_START); + + if (!cfg->skip_start_delay) + usleep_range(1000, 1200); status = pcs + cfg->regs[QPHY_PCS_STATUS]; mask = cfg->phy_status; - ready = 0; - - ret = readl_poll_timeout(status, val, (val & mask) == ready, 10, + ret = readl_poll_timeout(status, val, !(val & mask), 200, PHY_INIT_COMPLETE_TIMEOUT); if (ret) { dev_err(qmp->dev, "phy initialization timed-out\n"); @@ -2004,32 +2261,28 @@ static int qmp_pcie_power_on(struct phy *phy) return 0; err_disable_pipe_clk: - clk_disable_unprepare(qphy->pipe_clk); + clk_bulk_disable_unprepare(qmp->num_pipe_clks, qmp->pipe_clks); return ret; } static int qmp_pcie_power_off(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - const struct qmp_phy_cfg *cfg = qphy->cfg; + struct qmp_pcie *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; - clk_disable_unprepare(qphy->pipe_clk); + clk_bulk_disable_unprepare(qmp->num_pipe_clks, qmp->pipe_clks); /* PHY reset */ - qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); + qphy_setbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); /* stop SerDes and Phy-Coding-Sublayer */ - qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + qphy_clrbits(qmp->pcs, cfg->regs[QPHY_START_CTRL], + SERDES_START | PCS_START); /* Put PHY into POWER DOWN state: active low */ - if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) { - qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], - cfg->pwrdn_ctrl); - } else { - qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, - cfg->pwrdn_ctrl); - } + qphy_clrbits(qmp->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], + cfg->pwrdn_ctrl); return 0; } @@ -2060,9 +2313,34 @@ static int qmp_pcie_disable(struct phy *phy) return qmp_pcie_exit(phy); } -static int qmp_pcie_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg) +static int qmp_pcie_set_mode(struct phy *phy, enum phy_mode mode, int submode) +{ + struct qmp_pcie *qmp = phy_get_drvdata(phy); + + switch (submode) { + case PHY_MODE_PCIE_RC: + case PHY_MODE_PCIE_EP: + qmp->mode = submode; + break; + default: + dev_err(&phy->dev, "Unsupported submode %d\n", submode); + return -EINVAL; + } + + return 0; +} + +static const struct phy_ops qmp_pcie_phy_ops = { + .power_on = qmp_pcie_enable, + .power_off = qmp_pcie_disable, + .set_mode = qmp_pcie_set_mode, + .owner 
= THIS_MODULE, +}; + +static int qmp_pcie_vreg_init(struct qmp_pcie *qmp) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; int num = cfg->num_vregs; int i; @@ -2076,9 +2354,10 @@ static int qmp_pcie_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg) return devm_regulator_bulk_get(dev, num, qmp->vregs); } -static int qmp_pcie_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg) +static int qmp_pcie_reset_init(struct qmp_pcie *qmp) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; int i; int ret; @@ -2097,9 +2376,10 @@ static int qmp_pcie_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg return 0; } -static int qmp_pcie_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg) +static int qmp_pcie_clk_init(struct qmp_pcie *qmp) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; int num = cfg->num_clks; int i; @@ -2136,9 +2416,9 @@ static void phy_clk_release_provider(void *res) * clk | +-------+ | +-----+ * +---------------+ */ -static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np) +static int phy_pipe_clk_register(struct qmp_pcie *qmp, struct device_node *np) { - struct clk_fixed_rate *fixed; + struct clk_fixed_rate *fixed = &qmp->pipe_clk_fixed; struct clk_init_data init = { }; int ret; @@ -2148,18 +2428,14 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np) return ret; } - fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL); - if (!fixed) - return -ENOMEM; - init.ops = &clk_fixed_rate_ops; /* * Controllers using QMP PHY-s use 125MHz pipe clock interface * unless other frequency is specified in the PHY config. */ - if (qmp->phys[0]->cfg->pipe_clock_rate) - fixed->fixed_rate = qmp->phys[0]->cfg->pipe_clock_rate; + if (qmp->cfg->pipe_clock_rate) + fixed->fixed_rate = qmp->cfg->pipe_clock_rate; else fixed->fixed_rate = 125000000; @@ -2180,145 +2456,162 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np) return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np); } -static const struct phy_ops qmp_pcie_ops = { - .power_on = qmp_pcie_enable, - .power_off = qmp_pcie_disable, - .owner = THIS_MODULE, -}; - -static int qmp_pcie_create(struct device *dev, struct device_node *np, int id, - void __iomem *serdes, const struct qmp_phy_cfg *cfg) +static int qmp_pcie_parse_dt_legacy(struct qmp_pcie *qmp, struct device_node *np) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); - struct phy *generic_phy; - struct qmp_phy *qphy; - int ret; + struct platform_device *pdev = to_platform_device(qmp->dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; + struct clk *clk; - qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL); - if (!qphy) - return -ENOMEM; + qmp->serdes = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(qmp->serdes)) + return PTR_ERR(qmp->serdes); - qphy->cfg = cfg; - qphy->serdes = serdes; /* - * Get memory resources for each phy lane: + * Get memory resources for the PHY: * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2. * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5 * For single lane PHYs: pcs_misc (optional) -> 3. 
*/ - qphy->tx = devm_of_iomap(dev, np, 0, NULL); - if (IS_ERR(qphy->tx)) - return PTR_ERR(qphy->tx); + qmp->tx = devm_of_iomap(dev, np, 0, NULL); + if (IS_ERR(qmp->tx)) + return PTR_ERR(qmp->tx); if (of_device_is_compatible(dev->of_node, "qcom,sdm845-qhp-pcie-phy")) - qphy->rx = qphy->tx; + qmp->rx = qmp->tx; else - qphy->rx = devm_of_iomap(dev, np, 1, NULL); - if (IS_ERR(qphy->rx)) - return PTR_ERR(qphy->rx); + qmp->rx = devm_of_iomap(dev, np, 1, NULL); + if (IS_ERR(qmp->rx)) + return PTR_ERR(qmp->rx); - qphy->pcs = devm_of_iomap(dev, np, 2, NULL); - if (IS_ERR(qphy->pcs)) - return PTR_ERR(qphy->pcs); + qmp->pcs = devm_of_iomap(dev, np, 2, NULL); + if (IS_ERR(qmp->pcs)) + return PTR_ERR(qmp->pcs); if (cfg->lanes >= 2) { - qphy->tx2 = devm_of_iomap(dev, np, 3, NULL); - if (IS_ERR(qphy->tx2)) - return PTR_ERR(qphy->tx2); + qmp->tx2 = devm_of_iomap(dev, np, 3, NULL); + if (IS_ERR(qmp->tx2)) + return PTR_ERR(qmp->tx2); - qphy->rx2 = devm_of_iomap(dev, np, 4, NULL); - if (IS_ERR(qphy->rx2)) - return PTR_ERR(qphy->rx2); + qmp->rx2 = devm_of_iomap(dev, np, 4, NULL); + if (IS_ERR(qmp->rx2)) + return PTR_ERR(qmp->rx2); - qphy->pcs_misc = devm_of_iomap(dev, np, 5, NULL); + qmp->pcs_misc = devm_of_iomap(dev, np, 5, NULL); } else { - qphy->pcs_misc = devm_of_iomap(dev, np, 3, NULL); + qmp->pcs_misc = devm_of_iomap(dev, np, 3, NULL); } - if (IS_ERR(qphy->pcs_misc) && + if (IS_ERR(qmp->pcs_misc) && of_device_is_compatible(dev->of_node, "qcom,ipq6018-qmp-pcie-phy")) - qphy->pcs_misc = qphy->pcs + 0x400; + qmp->pcs_misc = qmp->pcs + 0x400; - if (IS_ERR(qphy->pcs_misc)) { - if (cfg->pcs_misc_tbl || cfg->pcs_misc_tbl_sec) - return PTR_ERR(qphy->pcs_misc); + if (IS_ERR(qmp->pcs_misc)) { + if (cfg->tbls.pcs_misc || + (cfg->tbls_rc && cfg->tbls_rc->pcs_misc) || + (cfg->tbls_ep && cfg->tbls_ep->pcs_misc)) { + return PTR_ERR(qmp->pcs_misc); + } } - qphy->pipe_clk = devm_get_clk_from_child(dev, np, NULL); - if (IS_ERR(qphy->pipe_clk)) { - return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk), - "failed to get lane%d pipe clock\n", id); + clk = devm_get_clk_from_child(dev, np, NULL); + if (IS_ERR(clk)) { + return dev_err_probe(dev, PTR_ERR(clk), + "failed to get pipe clock\n"); + } + + qmp->num_pipe_clks = 1; + qmp->pipe_clks[0].id = "pipe"; + qmp->pipe_clks[0].clk = clk; + + return 0; +} + +static int qmp_pcie_get_4ln_config(struct qmp_pcie *qmp) +{ + struct regmap *tcsr; + unsigned int args[2]; + int ret; + + tcsr = syscon_regmap_lookup_by_phandle_args(qmp->dev->of_node, + "qcom,4ln-config-sel", + ARRAY_SIZE(args), args); + if (IS_ERR(tcsr)) { + ret = PTR_ERR(tcsr); + if (ret == -ENOENT) + return 0; + + dev_err(qmp->dev, "failed to lookup syscon: %d\n", ret); + return ret; } - generic_phy = devm_phy_create(dev, np, &qmp_pcie_ops); - if (IS_ERR(generic_phy)) { - ret = PTR_ERR(generic_phy); - dev_err(dev, "failed to create qphy %d\n", ret); + ret = regmap_test_bits(tcsr, args[0], BIT(args[1])); + if (ret < 0) { + dev_err(qmp->dev, "failed to read tcsr: %d\n", ret); return ret; } - qphy->phy = generic_phy; - qphy->qmp = qmp; - qmp->phys[id] = qphy; - phy_set_drvdata(generic_phy, qphy); + qmp->tcsr_4ln_config = ret; + + dev_dbg(qmp->dev, "4ln_config_sel = %d\n", qmp->tcsr_4ln_config); return 0; } -static const struct of_device_id qmp_pcie_of_match_table[] = { - { - .compatible = "qcom,msm8998-qmp-pcie-phy", - .data = &msm8998_pciephy_cfg, - }, { - .compatible = "qcom,ipq8074-qmp-pcie-phy", - .data = &ipq8074_pciephy_cfg, - }, { - .compatible = "qcom,ipq8074-qmp-gen3-pcie-phy", - .data = &ipq8074_pciephy_gen3_cfg, - }, 
{ - .compatible = "qcom,ipq6018-qmp-pcie-phy", - .data = &ipq6018_pciephy_cfg, - }, { - .compatible = "qcom,sc8180x-qmp-pcie-phy", - .data = &sc8180x_pciephy_cfg, - }, { - .compatible = "qcom,sdm845-qhp-pcie-phy", - .data = &sdm845_qhp_pciephy_cfg, - }, { - .compatible = "qcom,sdm845-qmp-pcie-phy", - .data = &sdm845_qmp_pciephy_cfg, - }, { - .compatible = "qcom,sm8250-qmp-gen3x1-pcie-phy", - .data = &sm8250_qmp_gen3x1_pciephy_cfg, - }, { - .compatible = "qcom,sm8250-qmp-gen3x2-pcie-phy", - .data = &sm8250_qmp_gen3x2_pciephy_cfg, - }, { - .compatible = "qcom,sm8250-qmp-modem-pcie-phy", - .data = &sm8250_qmp_gen3x2_pciephy_cfg, - }, { - .compatible = "qcom,sdx55-qmp-pcie-phy", - .data = &sdx55_qmp_pciephy_cfg, - }, { - .compatible = "qcom,sm8450-qmp-gen3x1-pcie-phy", - .data = &sm8450_qmp_gen3x1_pciephy_cfg, - }, { - .compatible = "qcom,sm8450-qmp-gen4x2-pcie-phy", - .data = &sm8450_qmp_gen4x2_pciephy_cfg, - }, - { }, -}; -MODULE_DEVICE_TABLE(of, qmp_pcie_of_match_table); +static int qmp_pcie_parse_dt(struct qmp_pcie *qmp) +{ + struct platform_device *pdev = to_platform_device(qmp->dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + const struct qmp_pcie_offsets *offs = cfg->offsets; + struct device *dev = qmp->dev; + void __iomem *base; + int ret; + + if (!offs) + return -EINVAL; + + ret = qmp_pcie_get_4ln_config(qmp); + if (ret) + return ret; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + qmp->serdes = base + offs->serdes; + qmp->pcs = base + offs->pcs; + qmp->pcs_misc = base + offs->pcs_misc; + qmp->tx = base + offs->tx; + qmp->rx = base + offs->rx; + + if (cfg->lanes >= 2) { + qmp->tx2 = base + offs->tx2; + qmp->rx2 = base + offs->rx2; + } + + if (qmp->cfg->lanes >= 4 && qmp->tcsr_4ln_config) { + qmp->port_b = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(qmp->port_b)) + return PTR_ERR(qmp->port_b); + } + + qmp->num_pipe_clks = 2; + qmp->pipe_clks[0].id = "pipe"; + qmp->pipe_clks[1].id = "pipediv2"; + + ret = devm_clk_bulk_get(dev, qmp->num_pipe_clks, qmp->pipe_clks); + if (ret) + return ret; + + return 0; +} static int qmp_pcie_probe(struct platform_device *pdev) { - struct qcom_qmp *qmp; struct device *dev = &pdev->dev; - struct device_node *child; struct phy_provider *phy_provider; - void __iomem *serdes; - const struct qmp_phy_cfg *cfg = NULL; - int num, id; + struct device_node *np; + struct qmp_pcie *qmp; int ret; qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL); @@ -2326,73 +2619,117 @@ static int qmp_pcie_probe(struct platform_device *pdev) return -ENOMEM; qmp->dev = dev; - dev_set_drvdata(dev, qmp); - /* Get the specific init parameters of QMP phy */ - cfg = of_device_get_match_data(dev); - if (!cfg) + qmp->cfg = of_device_get_match_data(dev); + if (!qmp->cfg) return -EINVAL; - /* per PHY serdes; usually located at base address */ - serdes = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(serdes)) - return PTR_ERR(serdes); + WARN_ON_ONCE(!qmp->cfg->pwrdn_ctrl); + WARN_ON_ONCE(!qmp->cfg->phy_status); - ret = qmp_pcie_clk_init(dev, cfg); + ret = qmp_pcie_clk_init(qmp); if (ret) return ret; - ret = qmp_pcie_reset_init(dev, cfg); + ret = qmp_pcie_reset_init(qmp); if (ret) return ret; - ret = qmp_pcie_vreg_init(dev, cfg); + ret = qmp_pcie_vreg_init(qmp); if (ret) - return dev_err_probe(dev, ret, - "failed to get regulator supplies\n"); - - num = of_get_available_child_count(dev->of_node); - /* do we have a rogue child node ? 
*/ - if (num > 1) - return -EINVAL; + return ret; - qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL); - if (!qmp->phys) - return -ENOMEM; + /* Check for legacy binding with child node. */ + np = of_get_next_available_child(dev->of_node, NULL); + if (np) { + ret = qmp_pcie_parse_dt_legacy(qmp, np); + } else { + np = of_node_get(dev->of_node); + ret = qmp_pcie_parse_dt(qmp); + } + if (ret) + goto err_node_put; - id = 0; - for_each_available_child_of_node(dev->of_node, child) { - /* Create per-lane phy */ - ret = qmp_pcie_create(dev, child, id, serdes, cfg); - if (ret) { - dev_err(dev, "failed to create lane%d phy, %d\n", - id, ret); - goto err_node_put; - } + ret = phy_pipe_clk_register(qmp, np); + if (ret) + goto err_node_put; - /* - * Register the pipe clock provided by phy. - * See function description to see details of this pipe clock. - */ - ret = phy_pipe_clk_register(qmp, child); - if (ret) { - dev_err(qmp->dev, - "failed to register pipe clock source\n"); - goto err_node_put; - } + qmp->mode = PHY_MODE_PCIE_RC; - id++; + qmp->phy = devm_phy_create(dev, np, &qmp_pcie_phy_ops); + if (IS_ERR(qmp->phy)) { + ret = PTR_ERR(qmp->phy); + dev_err(dev, "failed to create PHY: %d\n", ret); + goto err_node_put; } + phy_set_drvdata(qmp->phy, qmp); + + of_node_put(np); + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); return PTR_ERR_OR_ZERO(phy_provider); err_node_put: - of_node_put(child); + of_node_put(np); return ret; } +static const struct of_device_id qmp_pcie_of_match_table[] = { + { + .compatible = "qcom,ipq6018-qmp-pcie-phy", + .data = &ipq6018_pciephy_cfg, + }, { + .compatible = "qcom,ipq8074-qmp-gen3-pcie-phy", + .data = &ipq8074_pciephy_gen3_cfg, + }, { + .compatible = "qcom,ipq8074-qmp-pcie-phy", + .data = &ipq8074_pciephy_cfg, + }, { + .compatible = "qcom,msm8998-qmp-pcie-phy", + .data = &msm8998_pciephy_cfg, + }, { + .compatible = "qcom,sc8180x-qmp-pcie-phy", + .data = &sc8180x_pciephy_cfg, + }, { + .compatible = "qcom,sc8280xp-qmp-gen3x1-pcie-phy", + .data = &sc8280xp_qmp_gen3x1_pciephy_cfg, + }, { + .compatible = "qcom,sc8280xp-qmp-gen3x2-pcie-phy", + .data = &sc8280xp_qmp_gen3x2_pciephy_cfg, + }, { + .compatible = "qcom,sc8280xp-qmp-gen3x4-pcie-phy", + .data = &sc8280xp_qmp_gen3x4_pciephy_cfg, + }, { + .compatible = "qcom,sdm845-qhp-pcie-phy", + .data = &sdm845_qhp_pciephy_cfg, + }, { + .compatible = "qcom,sdm845-qmp-pcie-phy", + .data = &sdm845_qmp_pciephy_cfg, + }, { + .compatible = "qcom,sdx55-qmp-pcie-phy", + .data = &sdx55_qmp_pciephy_cfg, + }, { + .compatible = "qcom,sm8250-qmp-gen3x1-pcie-phy", + .data = &sm8250_qmp_gen3x1_pciephy_cfg, + }, { + .compatible = "qcom,sm8250-qmp-gen3x2-pcie-phy", + .data = &sm8250_qmp_gen3x2_pciephy_cfg, + }, { + .compatible = "qcom,sm8250-qmp-modem-pcie-phy", + .data = &sm8250_qmp_gen3x2_pciephy_cfg, + }, { + .compatible = "qcom,sm8450-qmp-gen3x1-pcie-phy", + .data = &sm8450_qmp_gen3x1_pciephy_cfg, + }, { + .compatible = "qcom,sm8450-qmp-gen4x2-pcie-phy", + .data = &sm8450_qmp_gen4x2_pciephy_cfg, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, qmp_pcie_of_match_table); + static struct platform_driver qmp_pcie_driver = { .probe = qmp_pcie_probe, .driver = { diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h index 2e19fb3f051e028355d819acd64b4980b1e5d7c9..a469ae2a10a135f98162311138423155959ec02f 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h @@ -8,6 +8,8 @@ #define 
QCOM_PHY_QMP_PCS_PCIE_V5_H_ /* Only for QMP V5 PHY - PCS_PCIE registers */ +#define QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG2 0x0c +#define QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG4 0x14 #define QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20 #define QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x54 #define QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS 0x94 diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h index 1eedf50cf9cbccc38b47d4c8b278f59fb9356f85..3d9713d348fe6b827a991b7f8e8c8f0703b54167 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h @@ -8,8 +8,10 @@ /* Only for QMP V5_20 PHY - PCIe PCS registers */ #define QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x01c +#define QPHY_V5_20_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5 0x084 #define QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS 0x090 #define QPHY_V5_20_PCS_PCIE_EQ_CONFIG1 0x0a0 +#define QPHY_V5_20_PCS_PCIE_PRESET_P10_POST 0x0e0 #define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5 0x108 #define QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN 0x15c #define QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3 0x184 diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h new file mode 100644 index 0000000000000000000000000000000000000000..9a5a20daf62cdb81fd637655aec485177e18ef64 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Linaro Ltd. + */ + +#ifndef QCOM_PHY_QMP_PCS_V5_20_H_ +#define QCOM_PHY_QMP_PCS_V5_20_H_ + +#define QPHY_V5_20_PCS_G3S2_PRE_GAIN 0x170 +#define QPHY_V5_20_PCS_RX_SIGDET_LVL 0x188 +#define QPHY_V5_20_PCS_EQ_CONFIG4 0x1e0 +#define QPHY_V5_20_PCS_EQ_CONFIG5 0x1e4 + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c index c08d34ad1313e77db98773debe32d97f1875404d..318eea35b9721969dbaa996b1b7c58c9234c1c15 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c @@ -20,8 +20,6 @@ #include #include -#include - #include "phy-qcom-qmp.h" /* QPHY_SW_RESET bit */ @@ -31,8 +29,6 @@ /* QPHY_START_CONTROL bits */ #define SERDES_START BIT(0) #define PCS_START BIT(1) -/* QPHY_PCS_STATUS bit */ -#define PHYSTATUS BIT(6) /* QPHY_PCS_READY_STATUS bit */ #define PCS_READY BIT(0) @@ -41,11 +37,6 @@ struct qmp_phy_init_tbl { unsigned int offset; unsigned int val; - /* - * register part of layout ? 
- * if yes, then offset gives index in the reg-layout - */ - bool in_layout; /* * mask of lanes for which this register is written * for cases when second lane needs different values @@ -60,14 +51,6 @@ struct qmp_phy_init_tbl { .lane_mask = 0xff, \ } -#define QMP_PHY_INIT_CFG_L(o, v) \ - { \ - .offset = o, \ - .val = v, \ - .in_layout = true, \ - .lane_mask = 0xff, \ - } - #define QMP_PHY_INIT_CFG_LANE(o, v, l) \ { \ .offset = o, \ @@ -89,22 +72,26 @@ enum qphy_reg_layout { static const unsigned int msm8996_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_START_CTRL] = 0x00, [QPHY_PCS_READY_STATUS] = 0x168, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04, }; static const unsigned int sdm845_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_START_CTRL] = 0x00, [QPHY_PCS_READY_STATUS] = 0x160, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04, }; static const unsigned int sm6115_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_START_CTRL] = 0x00, [QPHY_PCS_READY_STATUS] = 0x168, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04, }; static const unsigned int sm8150_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_START_CTRL] = QPHY_V4_PCS_UFS_PHY_START, [QPHY_PCS_READY_STATUS] = QPHY_V4_PCS_UFS_READY_STATUS, [QPHY_SW_RESET] = QPHY_V4_PCS_UFS_SW_RESET, + [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V4_PCS_UFS_POWER_DOWN_CONTROL, }; static const struct qmp_phy_init_tbl msm8996_ufs_serdes_tbl[] = { @@ -531,10 +518,21 @@ static const struct qmp_phy_init_tbl sm8350_ufsphy_pcs_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_MULTI_LANE_CTRL1, 0x02), }; +struct qmp_ufs_offsets { + u16 serdes; + u16 pcs; + u16 tx; + u16 rx; + u16 tx2; + u16 rx2; +}; + /* struct qmp_phy_cfg - per-PHY initialization config */ struct qmp_phy_cfg { int lanes; + const struct qmp_ufs_offsets *offsets; + /* Init sequence for PHY blocks - serdes, tx, rx, pcs */ const struct qmp_phy_init_tbl *serdes_tbl; int serdes_tbl_num; @@ -555,63 +553,28 @@ struct qmp_phy_cfg { /* array of registers with different offsets */ const unsigned int *regs; - unsigned int start_ctrl; - unsigned int pwrdn_ctrl; - /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */ - unsigned int phy_status; - /* true, if PCS block has no separate SW_RESET register */ bool no_pcs_sw_reset; }; -/** - * struct qmp_phy - per-lane phy descriptor - * - * @phy: generic phy - * @cfg: phy specific configuration - * @serdes: iomapped memory space for phy's serdes (i.e. 
PLL) - * @tx: iomapped memory space for lane's tx - * @rx: iomapped memory space for lane's rx - * @pcs: iomapped memory space for lane's pcs - * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs) - * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs) - * @pcs_misc: iomapped memory space for lane's pcs_misc - * @qmp: QMP phy to which this lane belongs - */ -struct qmp_phy { - struct phy *phy; +struct qmp_ufs { + struct device *dev; + const struct qmp_phy_cfg *cfg; + void __iomem *serdes; + void __iomem *pcs; + void __iomem *pcs_misc; void __iomem *tx; void __iomem *rx; - void __iomem *pcs; void __iomem *tx2; void __iomem *rx2; - void __iomem *pcs_misc; - struct qcom_qmp *qmp; -}; - -/** - * struct qcom_qmp - structure holding QMP phy block attributes - * - * @dev: device - * - * @clks: array of clocks required by phy - * @resets: array of resets required by phy - * @vregs: regulator supplies bulk data - * - * @phys: array of per-lane phy descriptors - * @ufs_reset: optional UFS PHY reset handle - */ -struct qcom_qmp { - struct device *dev; struct clk_bulk_data *clks; struct regulator_bulk_data *vregs; - - struct qmp_phy **phys; - struct reset_control *ufs_reset; + + struct phy *phy; }; static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val) @@ -657,6 +620,15 @@ static const char * const qmp_phy_vreg_l[] = { "vdda-phy", "vdda-pll", }; +static const struct qmp_ufs_offsets qmp_ufs_offsets_v5 = { + .serdes = 0, + .pcs = 0xc00, + .tx = 0x400, + .rx = 0x600, + .tx2 = 0x800, + .rx2 = 0xa00, +}; + static const struct qmp_phy_cfg msm8996_ufs_cfg = { .lanes = 1, @@ -675,13 +647,29 @@ static const struct qmp_phy_cfg msm8996_ufs_cfg = { .regs = msm8996_ufsphy_regs_layout, - .start_ctrl = SERDES_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - .no_pcs_sw_reset = true, }; +static const struct qmp_phy_cfg sc8280xp_ufsphy_cfg = { + .lanes = 2, + + .offsets = &qmp_ufs_offsets_v5, + + .serdes_tbl = sm8350_ufsphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl), + .tx_tbl = sm8350_ufsphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl), + .rx_tbl = sm8350_ufsphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl), + .pcs_tbl = sm8350_ufsphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl), + .clk_list = sdm845_ufs_phy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm8150_ufsphy_regs_layout, +}; + static const struct qmp_phy_cfg sdm845_ufsphy_cfg = { .lanes = 2, @@ -699,10 +687,6 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = sdm845_ufsphy_regs_layout, - .start_ctrl = SERDES_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - .no_pcs_sw_reset = true, }; @@ -723,9 +707,6 @@ static const struct qmp_phy_cfg sm6115_ufsphy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = sm6115_ufsphy_regs_layout, - .start_ctrl = SERDES_START, - .pwrdn_ctrl = SW_PWRDN, - .no_pcs_sw_reset = true, }; @@ -745,10 +726,6 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = { .vreg_list = qmp_phy_vreg_l, .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = sm8150_ufsphy_regs_layout, - - .start_ctrl = SERDES_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, }; static const struct qmp_phy_cfg sm8350_ufsphy_cfg = { @@ -767,10 +744,6 @@ static const struct qmp_phy_cfg sm8350_ufsphy_cfg = { .vreg_list = qmp_phy_vreg_l, .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), 
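	/* the sm8150 (QMP v4 UFS) register layout is reused here */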
.regs = sm8150_ufsphy_regs_layout, - - .start_ctrl = SERDES_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, }; static const struct qmp_phy_cfg sm8450_ufsphy_cfg = { @@ -789,14 +762,9 @@ static const struct qmp_phy_cfg sm8450_ufsphy_cfg = { .vreg_list = qmp_phy_vreg_l, .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = sm8150_ufsphy_regs_layout, - - .start_ctrl = SERDES_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, }; static void qmp_ufs_configure_lane(void __iomem *base, - const unsigned int *regs, const struct qmp_phy_init_tbl tbl[], int num, u8 lane_mask) @@ -811,41 +779,35 @@ static void qmp_ufs_configure_lane(void __iomem *base, if (!(t->lane_mask & lane_mask)) continue; - if (t->in_layout) - writel(t->val, base + regs[t->offset]); - else - writel(t->val, base + t->offset); + writel(t->val, base + t->offset); } } static void qmp_ufs_configure(void __iomem *base, - const unsigned int *regs, const struct qmp_phy_init_tbl tbl[], int num) { - qmp_ufs_configure_lane(base, regs, tbl, num, 0xff); + qmp_ufs_configure_lane(base, tbl, num, 0xff); } -static int qmp_ufs_serdes_init(struct qmp_phy *qphy) +static int qmp_ufs_serdes_init(struct qmp_ufs *qmp) { - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *serdes = qphy->serdes; + const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *serdes = qmp->serdes; const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl; int serdes_tbl_num = cfg->serdes_tbl_num; - qmp_ufs_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num); + qmp_ufs_configure(serdes, serdes_tbl, serdes_tbl_num); return 0; } -static int qmp_ufs_com_init(struct qmp_phy *qphy) +static int qmp_ufs_com_init(struct qmp_ufs *qmp) { - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *pcs = qphy->pcs; + const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *pcs = qmp->pcs; int ret; - /* turn on regulator supplies */ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); if (ret) { dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); @@ -856,13 +818,7 @@ static int qmp_ufs_com_init(struct qmp_phy *qphy) if (ret) goto err_disable_regulators; - if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) - qphy_setbits(pcs, - cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], - cfg->pwrdn_ctrl); - else - qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, - cfg->pwrdn_ctrl); + qphy_setbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], SW_PWRDN); return 0; @@ -872,10 +828,9 @@ err_disable_regulators: return ret; } -static int qmp_ufs_com_exit(struct qmp_phy *qphy) +static int qmp_ufs_com_exit(struct qmp_ufs *qmp) { - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qphy->cfg; + const struct qmp_phy_cfg *cfg = qmp->cfg; reset_control_assert(qmp->ufs_reset); @@ -888,9 +843,8 @@ static int qmp_ufs_com_exit(struct qmp_phy *qphy) static int qmp_ufs_init(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qphy->cfg; + struct qmp_ufs *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; int ret; dev_vdbg(qmp->dev, "Initializing QMP phy\n"); @@ -921,7 +875,7 @@ static int qmp_ufs_init(struct phy *phy) return ret; } - ret = qmp_ufs_com_init(qphy); + ret = qmp_ufs_com_init(qmp); if (ret) return ret; @@ -930,34 +884,27 @@ static int qmp_ufs_init(struct phy *phy) static int qmp_ufs_power_on(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg 
*cfg = qphy->cfg; - void __iomem *tx = qphy->tx; - void __iomem *rx = qphy->rx; - void __iomem *pcs = qphy->pcs; + struct qmp_ufs *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *tx = qmp->tx; + void __iomem *rx = qmp->rx; + void __iomem *pcs = qmp->pcs; void __iomem *status; - unsigned int mask, val, ready; + unsigned int val; int ret; - qmp_ufs_serdes_init(qphy); + qmp_ufs_serdes_init(qmp); /* Tx, Rx, and PCS configurations */ - qmp_ufs_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1); - - if (cfg->lanes >= 2) { - qmp_ufs_configure_lane(qphy->tx2, cfg->regs, - cfg->tx_tbl, cfg->tx_tbl_num, 2); - } - - qmp_ufs_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1); + qmp_ufs_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); + qmp_ufs_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); if (cfg->lanes >= 2) { - qmp_ufs_configure_lane(qphy->rx2, cfg->regs, - cfg->rx_tbl, cfg->rx_tbl_num, 2); + qmp_ufs_configure_lane(qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2); + qmp_ufs_configure_lane(qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2); } - qmp_ufs_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num); + qmp_ufs_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); ret = reset_control_deassert(qmp->ufs_reset); if (ret) @@ -966,14 +913,12 @@ static int qmp_ufs_power_on(struct phy *phy) /* Pull PHY out of reset state */ if (!cfg->no_pcs_sw_reset) qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); - /* start SerDes and Phy-Coding-Sublayer */ - qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); - status = pcs + cfg->regs[QPHY_PCS_READY_STATUS]; - mask = PCS_READY; - ready = PCS_READY; + /* start SerDes */ + qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], SERDES_START); - ret = readl_poll_timeout(status, val, (val & mask) == ready, 10, + status = pcs + cfg->regs[QPHY_PCS_READY_STATUS]; + ret = readl_poll_timeout(status, val, (val & PCS_READY), 200, PHY_INIT_COMPLETE_TIMEOUT); if (ret) { dev_err(qmp->dev, "phy initialization timed-out\n"); @@ -985,33 +930,28 @@ static int qmp_ufs_power_on(struct phy *phy) static int qmp_ufs_power_off(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - const struct qmp_phy_cfg *cfg = qphy->cfg; + struct qmp_ufs *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; /* PHY reset */ if (!cfg->no_pcs_sw_reset) - qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); + qphy_setbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); - /* stop SerDes and Phy-Coding-Sublayer */ - qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + /* stop SerDes */ + qphy_clrbits(qmp->pcs, cfg->regs[QPHY_START_CTRL], SERDES_START); /* Put PHY into POWER DOWN state: active low */ - if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) { - qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], - cfg->pwrdn_ctrl); - } else { - qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, - cfg->pwrdn_ctrl); - } + qphy_clrbits(qmp->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], + SW_PWRDN); return 0; } static int qmp_ufs_exit(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); + struct qmp_ufs *qmp = phy_get_drvdata(phy); - qmp_ufs_com_exit(qphy); + qmp_ufs_com_exit(qmp); return 0; } @@ -1041,9 +981,16 @@ static int qmp_ufs_disable(struct phy *phy) return qmp_ufs_exit(phy); } -static int qmp_ufs_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg) +static const struct phy_ops qcom_qmp_ufs_phy_ops = { + .power_on = qmp_ufs_enable, + .power_off = 
qmp_ufs_disable, + .owner = THIS_MODULE, +}; + +static int qmp_ufs_vreg_init(struct qmp_ufs *qmp) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; int num = cfg->num_vregs; int i; @@ -1057,9 +1004,10 @@ static int qmp_ufs_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg) return devm_regulator_bulk_get(dev, num, qmp->vregs); } -static int qmp_ufs_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg) +static int qmp_ufs_clk_init(struct qmp_ufs *qmp) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; int num = cfg->num_clks; int i; @@ -1073,74 +1021,136 @@ static int qmp_ufs_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg) return devm_clk_bulk_get(dev, num, qmp->clks); } -static const struct phy_ops qcom_qmp_ufs_ops = { - .power_on = qmp_ufs_enable, - .power_off = qmp_ufs_disable, - .owner = THIS_MODULE, -}; - -static int qmp_ufs_create(struct device *dev, struct device_node *np, int id, - void __iomem *serdes, const struct qmp_phy_cfg *cfg) +static int qmp_ufs_parse_dt_legacy(struct qmp_ufs *qmp, struct device_node *np) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); - struct phy *generic_phy; - struct qmp_phy *qphy; - int ret; + struct platform_device *pdev = to_platform_device(qmp->dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; - qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL); - if (!qphy) - return -ENOMEM; + qmp->serdes = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(qmp->serdes)) + return PTR_ERR(qmp->serdes); - qphy->cfg = cfg; - qphy->serdes = serdes; /* - * Get memory resources for each phy lane: + * Get memory resources for the PHY: * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2. * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5 * For single lane PHYs: pcs_misc (optional) -> 3. 
*/ - qphy->tx = devm_of_iomap(dev, np, 0, NULL); - if (IS_ERR(qphy->tx)) - return PTR_ERR(qphy->tx); + qmp->tx = devm_of_iomap(dev, np, 0, NULL); + if (IS_ERR(qmp->tx)) + return PTR_ERR(qmp->tx); - qphy->rx = devm_of_iomap(dev, np, 1, NULL); - if (IS_ERR(qphy->rx)) - return PTR_ERR(qphy->rx); + qmp->rx = devm_of_iomap(dev, np, 1, NULL); + if (IS_ERR(qmp->rx)) + return PTR_ERR(qmp->rx); - qphy->pcs = devm_of_iomap(dev, np, 2, NULL); - if (IS_ERR(qphy->pcs)) - return PTR_ERR(qphy->pcs); + qmp->pcs = devm_of_iomap(dev, np, 2, NULL); + if (IS_ERR(qmp->pcs)) + return PTR_ERR(qmp->pcs); if (cfg->lanes >= 2) { - qphy->tx2 = devm_of_iomap(dev, np, 3, NULL); - if (IS_ERR(qphy->tx2)) - return PTR_ERR(qphy->tx2); + qmp->tx2 = devm_of_iomap(dev, np, 3, NULL); + if (IS_ERR(qmp->tx2)) + return PTR_ERR(qmp->tx2); - qphy->rx2 = devm_of_iomap(dev, np, 4, NULL); - if (IS_ERR(qphy->rx2)) - return PTR_ERR(qphy->rx2); + qmp->rx2 = devm_of_iomap(dev, np, 4, NULL); + if (IS_ERR(qmp->rx2)) + return PTR_ERR(qmp->rx2); - qphy->pcs_misc = devm_of_iomap(dev, np, 5, NULL); + qmp->pcs_misc = devm_of_iomap(dev, np, 5, NULL); } else { - qphy->pcs_misc = devm_of_iomap(dev, np, 3, NULL); + qmp->pcs_misc = devm_of_iomap(dev, np, 3, NULL); } - if (IS_ERR(qphy->pcs_misc)) + if (IS_ERR(qmp->pcs_misc)) dev_vdbg(dev, "PHY pcs_misc-reg not used\n"); - generic_phy = devm_phy_create(dev, np, &qcom_qmp_ufs_ops); - if (IS_ERR(generic_phy)) { - ret = PTR_ERR(generic_phy); - dev_err(dev, "failed to create qphy %d\n", ret); + return 0; +} + +static int qmp_ufs_parse_dt(struct qmp_ufs *qmp) +{ + struct platform_device *pdev = to_platform_device(qmp->dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + const struct qmp_ufs_offsets *offs = cfg->offsets; + void __iomem *base; + + if (!offs) + return -EINVAL; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + qmp->serdes = base + offs->serdes; + qmp->pcs = base + offs->pcs; + qmp->tx = base + offs->tx; + qmp->rx = base + offs->rx; + + if (cfg->lanes >= 2) { + qmp->tx2 = base + offs->tx2; + qmp->rx2 = base + offs->rx2; + } + + return 0; +} + +static int qmp_ufs_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phy_provider *phy_provider; + struct device_node *np; + struct qmp_ufs *qmp; + int ret; + + qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL); + if (!qmp) + return -ENOMEM; + + qmp->dev = dev; + + qmp->cfg = of_device_get_match_data(dev); + if (!qmp->cfg) + return -EINVAL; + + ret = qmp_ufs_clk_init(qmp); + if (ret) return ret; + + ret = qmp_ufs_vreg_init(qmp); + if (ret) + return ret; + + /* Check for legacy binding with child node. 
*/ + np = of_get_next_available_child(dev->of_node, NULL); + if (np) { + ret = qmp_ufs_parse_dt_legacy(qmp, np); + } else { + np = of_node_get(dev->of_node); + ret = qmp_ufs_parse_dt(qmp); } + if (ret) + goto err_node_put; - qphy->phy = generic_phy; - qphy->qmp = qmp; - qmp->phys[id] = qphy; - phy_set_drvdata(generic_phy, qphy); + qmp->phy = devm_phy_create(dev, np, &qcom_qmp_ufs_phy_ops); + if (IS_ERR(qmp->phy)) { + ret = PTR_ERR(qmp->phy); + dev_err(dev, "failed to create PHY: %d\n", ret); + goto err_node_put; + } - return 0; + phy_set_drvdata(qmp->phy, qmp); + + of_node_put(np); + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(phy_provider); + +err_node_put: + of_node_put(np); + return ret; } static const struct of_device_id qmp_ufs_of_match_table[] = { @@ -1155,7 +1165,7 @@ static const struct of_device_id qmp_ufs_of_match_table[] = { .data = &sm8150_ufsphy_cfg, }, { .compatible = "qcom,sc8280xp-qmp-ufs-phy", - .data = &sm8350_ufsphy_cfg, + .data = &sc8280xp_ufsphy_cfg, }, { .compatible = "qcom,sdm845-qmp-ufs-phy", .data = &sdm845_ufsphy_cfg, @@ -1182,74 +1192,6 @@ static const struct of_device_id qmp_ufs_of_match_table[] = { }; MODULE_DEVICE_TABLE(of, qmp_ufs_of_match_table); -static int qmp_ufs_probe(struct platform_device *pdev) -{ - struct qcom_qmp *qmp; - struct device *dev = &pdev->dev; - struct device_node *child; - struct phy_provider *phy_provider; - void __iomem *serdes; - const struct qmp_phy_cfg *cfg = NULL; - int num, id; - int ret; - - qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL); - if (!qmp) - return -ENOMEM; - - qmp->dev = dev; - dev_set_drvdata(dev, qmp); - - /* Get the specific init parameters of QMP phy */ - cfg = of_device_get_match_data(dev); - if (!cfg) - return -EINVAL; - - /* per PHY serdes; usually located at base address */ - serdes = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(serdes)) - return PTR_ERR(serdes); - - ret = qmp_ufs_clk_init(dev, cfg); - if (ret) - return ret; - - ret = qmp_ufs_vreg_init(dev, cfg); - if (ret) - return dev_err_probe(dev, ret, - "failed to get regulator supplies\n"); - - num = of_get_available_child_count(dev->of_node); - /* do we have a rogue child node ? 
*/ - if (num > 1) - return -EINVAL; - - qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL); - if (!qmp->phys) - return -ENOMEM; - - id = 0; - for_each_available_child_of_node(dev->of_node, child) { - /* Create per-lane phy */ - ret = qmp_ufs_create(dev, child, id, serdes, cfg); - if (ret) { - dev_err(dev, "failed to create lane%d phy, %d\n", - id, ret); - goto err_node_put; - } - - id++; - } - - phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - - return PTR_ERR_OR_ZERO(phy_provider); - -err_node_put: - of_node_put(child); - return ret; -} - static struct platform_driver qmp_ufs_driver = { .probe = qmp_ufs_probe, .driver = { diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c index b84c0d4b57541644e96dc0b567f724a1775ff725..4aa338fc4643c3e39561d95ad349f8eb109e5cc0 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c @@ -20,8 +20,6 @@ #include #include -#include - #include "phy-qcom-qmp.h" /* QPHY_SW_RESET bit */ @@ -63,17 +61,10 @@ #define CLAMP_EN BIT(0) /* enables i/o clamp_n */ #define PHY_INIT_COMPLETE_TIMEOUT 10000 -#define POWER_DOWN_DELAY_US_MIN 10 -#define POWER_DOWN_DELAY_US_MAX 11 struct qmp_phy_init_tbl { unsigned int offset; unsigned int val; - /* - * register part of layout ? - * if yes, then offset gives index in the reg-layout - */ - bool in_layout; /* * mask of lanes for which this register is written * for cases when second lane needs different values @@ -88,14 +79,6 @@ struct qmp_phy_init_tbl { .lane_mask = 0xff, \ } -#define QMP_PHY_INIT_CFG_L(o, v) \ - { \ - .offset = o, \ - .val = v, \ - .in_layout = true, \ - .lane_mask = 0xff, \ - } - #define QMP_PHY_INIT_CFG_LANE(o, v, l) \ { \ .offset = o, \ @@ -126,6 +109,7 @@ static const unsigned int usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d4, [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8, [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04, }; static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { @@ -135,6 +119,7 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8, [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc, [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04, }; static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { @@ -1427,10 +1412,20 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21), }; +struct qmp_usb_offsets { + u16 serdes; + u16 pcs; + u16 pcs_usb; + u16 tx; + u16 rx; +}; + /* struct qmp_phy_cfg - per-PHY initialization config */ struct qmp_phy_cfg { int lanes; + const struct qmp_usb_offsets *offsets; + /* Init sequence for PHY blocks - serdes, tx, rx, pcs */ const struct qmp_phy_init_tbl *serdes_tbl; int serdes_tbl_num; @@ -1456,16 +1451,8 @@ struct qmp_phy_cfg { /* array of registers with different offsets */ const unsigned int *regs; - unsigned int start_ctrl; - unsigned int pwrdn_ctrl; - /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */ - unsigned int phy_status; - /* true, if PHY needs delay after POWER_DOWN */ bool has_pwrdn_delay; - /* power_down delay in usec */ - int pwrdn_delay_min; - int pwrdn_delay_max; /* true, if PHY has a separate DP_COM control block */ bool has_phy_dp_com_ctrl; @@ -1474,60 +1461,32 @@ struct qmp_phy_cfg { unsigned int pcs_usb_offset; }; -/** - * struct qmp_phy - per-lane phy 
descriptor - * - * @phy: generic phy - * @cfg: phy specific configuration - * @serdes: iomapped memory space for phy's serdes (i.e. PLL) - * @tx: iomapped memory space for lane's tx - * @rx: iomapped memory space for lane's rx - * @pcs: iomapped memory space for lane's pcs - * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs) - * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs) - * @pcs_misc: iomapped memory space for lane's pcs_misc - * @pcs_usb: iomapped memory space for lane's pcs_usb - * @pipe_clk: pipe clock - * @qmp: QMP phy to which this lane belongs - * @mode: current PHY mode - */ -struct qmp_phy { - struct phy *phy; +struct qmp_usb { + struct device *dev; + const struct qmp_phy_cfg *cfg; + void __iomem *serdes; + void __iomem *pcs; + void __iomem *pcs_misc; + void __iomem *pcs_usb; void __iomem *tx; void __iomem *rx; - void __iomem *pcs; void __iomem *tx2; void __iomem *rx2; - void __iomem *pcs_misc; - void __iomem *pcs_usb; - struct clk *pipe_clk; - struct qcom_qmp *qmp; - enum phy_mode mode; -}; -/** - * struct qcom_qmp - structure holding QMP phy block attributes - * - * @dev: device - * @dp_com: iomapped memory space for phy's dp_com control block - * - * @clks: array of clocks required by phy - * @resets: array of resets required by phy - * @vregs: regulator supplies bulk data - * - * @phys: array of per-lane phy descriptors - */ -struct qcom_qmp { - struct device *dev; void __iomem *dp_com; + struct clk *pipe_clk; struct clk_bulk_data *clks; struct reset_control_bulk_data *resets; struct regulator_bulk_data *vregs; - struct qmp_phy **phys; + enum phy_mode mode; + + struct phy *phy; + + struct clk_fixed_rate pipe_clk_fixed; }; static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val) @@ -1564,6 +1523,10 @@ static const char * const qmp_v3_phy_clk_l[] = { }; static const char * const qmp_v4_phy_clk_l[] = { + "aux", "ref", "com_aux", +}; + +static const char * const qmp_v4_ref_phy_clk_l[] = { "aux", "ref_clk_src", "ref", "com_aux", }; @@ -1599,6 +1562,14 @@ static const char * const qmp_phy_vreg_l[] = { "vdda-phy", "vdda-pll", }; +static const struct qmp_usb_offsets qmp_usb_offsets_v5 = { + .serdes = 0, + .pcs = 0x0200, + .pcs_usb = 0x1200, + .tx = 0x0e00, + .rx = 0x1000, +}; + static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = { .lanes = 1, @@ -1616,11 +1587,7 @@ static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = { .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), .vreg_list = qmp_phy_vreg_l, .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), - .regs = usb3phy_regs_layout, - - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, + .regs = qmp_v3_usb3phy_regs_layout, }; static const struct qmp_phy_cfg msm8996_usb3phy_cfg = { @@ -1641,10 +1608,6 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = { .vreg_list = qmp_phy_vreg_l, .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = usb3phy_regs_layout, - - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, }; static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = { @@ -1666,14 +1629,7 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = qmp_v3_usb3phy_regs_layout, - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, - .has_phy_dp_com_ctrl = true, }; @@ -1696,20 +1652,15 @@ static const 
struct qmp_phy_cfg sc7180_usb3phy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = qmp_v3_usb3phy_regs_layout, - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, - .has_phy_dp_com_ctrl = true, }; static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = { .lanes = 1, + .offsets = &qmp_usb_offsets_v5, + .serdes_tbl = sc8280xp_usb3_uniphy_serdes_tbl, .serdes_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_serdes_tbl), .tx_tbl = sc8280xp_usb3_uniphy_tx_tbl, @@ -1720,19 +1671,11 @@ static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = { .pcs_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_pcs_tbl), .clk_list = qmp_v4_phy_clk_l, .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), - .reset_list = msm8996_usb3phy_reset_l, - .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .reset_list = qcm2290_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(qcm2290_usb3phy_reset_l), .vreg_list = qmp_phy_vreg_l, .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = qmp_v4_usb3phy_regs_layout, - - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, }; static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = { @@ -1754,13 +1697,7 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = qmp_v3_usb3phy_regs_layout, - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, }; static const struct qmp_phy_cfg msm8998_usb3phy_cfg = { @@ -1781,10 +1718,6 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = { .vreg_list = qmp_phy_vreg_l, .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = qmp_v3_usb3phy_regs_layout, - - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, }; static const struct qmp_phy_cfg sm8150_usb3phy_cfg = { @@ -1800,8 +1733,8 @@ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = { .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_tbl), .pcs_usb_tbl = sm8150_usb3_pcs_usb_tbl, .pcs_usb_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_usb_tbl), - .clk_list = qmp_v4_phy_clk_l, - .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), + .clk_list = qmp_v4_ref_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_ref_phy_clk_l), .reset_list = msm8996_usb3phy_reset_l, .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), .vreg_list = qmp_phy_vreg_l, @@ -1809,15 +1742,7 @@ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = { .regs = qmp_v4_usb3phy_regs_layout, .pcs_usb_offset = 0x300, - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, - .has_phy_dp_com_ctrl = true, }; @@ -1834,8 +1759,8 @@ static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = { .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_pcs_tbl), .pcs_usb_tbl = sm8150_usb3_uniphy_pcs_usb_tbl, .pcs_usb_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_pcs_usb_tbl), - .clk_list = qmp_v4_phy_clk_l, - .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), + .clk_list = qmp_v4_ref_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_ref_phy_clk_l), .reset_list = msm8996_usb3phy_reset_l, .num_resets = 
ARRAY_SIZE(msm8996_usb3phy_reset_l), .vreg_list = qmp_phy_vreg_l, @@ -1843,13 +1768,7 @@ static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = { .regs = qmp_v4_usb3phy_regs_layout, .pcs_usb_offset = 0x600, - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, }; static const struct qmp_phy_cfg sm8250_usb3phy_cfg = { @@ -1874,14 +1793,7 @@ static const struct qmp_phy_cfg sm8250_usb3phy_cfg = { .regs = qmp_v4_usb3phy_regs_layout, .pcs_usb_offset = 0x300, - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, - .has_phy_dp_com_ctrl = true, }; @@ -1898,8 +1810,8 @@ static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = { .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_tbl), .pcs_usb_tbl = sm8250_usb3_uniphy_pcs_usb_tbl, .pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_usb_tbl), - .clk_list = qmp_v4_phy_clk_l, - .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), + .clk_list = qmp_v4_ref_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_ref_phy_clk_l), .reset_list = msm8996_usb3phy_reset_l, .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), .vreg_list = qmp_phy_vreg_l, @@ -1907,13 +1819,7 @@ static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = { .regs = qmp_v4_usb3phy_regs_layout, .pcs_usb_offset = 0x600, - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, }; static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = { @@ -1938,13 +1844,7 @@ static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = { .regs = qmp_v4_usb3phy_regs_layout, .pcs_usb_offset = 0x600, - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, }; static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = { @@ -1969,13 +1869,7 @@ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = { .regs = qmp_v4_usb3phy_regs_layout, .pcs_usb_offset = 0x1000, - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, }; static const struct qmp_phy_cfg sm8350_usb3phy_cfg = { @@ -2000,14 +1894,7 @@ static const struct qmp_phy_cfg sm8350_usb3phy_cfg = { .regs = qmp_v4_usb3phy_regs_layout, .pcs_usb_offset = 0x300, - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, - .has_phy_dp_com_ctrl = true, }; @@ -2024,8 +1911,8 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = { .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_tbl), .pcs_usb_tbl = sm8350_usb3_uniphy_pcs_usb_tbl, .pcs_usb_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_usb_tbl), - .clk_list = qmp_v4_phy_clk_l, - .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), + .clk_list = qmp_v4_ref_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_ref_phy_clk_l), .reset_list = msm8996_usb3phy_reset_l, .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), .vreg_list = qmp_phy_vreg_l, 
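The qmp_v4_phy_clk_l / qmp_v4_ref_phy_clk_l split above only changes which clock names a given PHY requests; the way a cfg->clk_list is consumed is unchanged. A minimal sketch of that pattern (editor's illustration: example_clk_init is a hypothetical name, but devm_kcalloc and devm_clk_bulk_get are the kernel APIs the driver actually uses):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/slab.h>

static int example_clk_init(struct device *dev, const char * const *names,
			    int num, struct clk_bulk_data **out)
{
	struct clk_bulk_data *clks;
	int i;

	clks = devm_kcalloc(dev, num, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	/* hand the name list ("aux", "ref", "com_aux", ...) to the bulk API */
	for (i = 0; i < num; i++)
		clks[i].id = names[i];

	*out = clks;
	return devm_clk_bulk_get(dev, num, clks);	/* fetch all or fail */
}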
@@ -2033,13 +1920,7 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = { .regs = qmp_v4_usb3phy_regs_layout, .pcs_usb_offset = 0x1000, - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, - .has_pwrdn_delay = true, - .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, - .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, }; static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = { @@ -2060,14 +1941,9 @@ static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = { .vreg_list = qmp_phy_vreg_l, .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = qcm2290_usb3phy_regs_layout, - - .start_ctrl = SERDES_START | PCS_START, - .pwrdn_ctrl = SW_PWRDN, - .phy_status = PHYSTATUS, }; static void qmp_usb_configure_lane(void __iomem *base, - const unsigned int *regs, const struct qmp_phy_init_tbl tbl[], int num, u8 lane_mask) @@ -2082,43 +1958,37 @@ static void qmp_usb_configure_lane(void __iomem *base, if (!(t->lane_mask & lane_mask)) continue; - if (t->in_layout) - writel(t->val, base + regs[t->offset]); - else - writel(t->val, base + t->offset); + writel(t->val, base + t->offset); } } static void qmp_usb_configure(void __iomem *base, - const unsigned int *regs, const struct qmp_phy_init_tbl tbl[], int num) { - qmp_usb_configure_lane(base, regs, tbl, num, 0xff); + qmp_usb_configure_lane(base, tbl, num, 0xff); } -static int qmp_usb_serdes_init(struct qmp_phy *qphy) +static int qmp_usb_serdes_init(struct qmp_usb *qmp) { - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *serdes = qphy->serdes; + const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *serdes = qmp->serdes; const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl; int serdes_tbl_num = cfg->serdes_tbl_num; - qmp_usb_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num); + qmp_usb_configure(serdes, serdes_tbl, serdes_tbl_num); return 0; } static int qmp_usb_init(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *pcs = qphy->pcs; + struct qmp_usb *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *pcs = qmp->pcs; void __iomem *dp_com = qmp->dp_com; int ret; - /* turn on regulator supplies */ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); if (ret) { dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); @@ -2164,13 +2034,7 @@ static int qmp_usb_init(struct phy *phy) qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET); } - if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) - qphy_setbits(pcs, - cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], - cfg->pwrdn_ctrl); - else - qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, - cfg->pwrdn_ctrl); + qphy_setbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], SW_PWRDN); return 0; @@ -2184,9 +2048,8 @@ err_disable_regulators: static int qmp_usb_exit(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qphy->cfg; + struct qmp_usb *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; reset_control_bulk_assert(cfg->num_resets, qmp->resets); @@ -2199,56 +2062,45 @@ static int qmp_usb_exit(struct phy *phy) static int qmp_usb_power_on(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *tx = qphy->tx; - void __iomem *rx = qphy->rx; - void __iomem *pcs = qphy->pcs; + struct qmp_usb *qmp = phy_get_drvdata(phy); + 
const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *tx = qmp->tx; + void __iomem *rx = qmp->rx; + void __iomem *pcs = qmp->pcs; void __iomem *status; - unsigned int mask, val, ready; + unsigned int val; int ret; - qmp_usb_serdes_init(qphy); + qmp_usb_serdes_init(qmp); - ret = clk_prepare_enable(qphy->pipe_clk); + ret = clk_prepare_enable(qmp->pipe_clk); if (ret) { dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret); return ret; } /* Tx, Rx, and PCS configurations */ - qmp_usb_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1); - - if (cfg->lanes >= 2) { - qmp_usb_configure_lane(qphy->tx2, cfg->regs, - cfg->tx_tbl, cfg->tx_tbl_num, 2); - } - - qmp_usb_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1); + qmp_usb_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); + qmp_usb_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); if (cfg->lanes >= 2) { - qmp_usb_configure_lane(qphy->rx2, cfg->regs, - cfg->rx_tbl, cfg->rx_tbl_num, 2); + qmp_usb_configure_lane(qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2); + qmp_usb_configure_lane(qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2); } - /* Configure link rate, swing, etc. */ - qmp_usb_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num); + qmp_usb_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); if (cfg->has_pwrdn_delay) - usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max); + usleep_range(10, 20); /* Pull PHY out of reset state */ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); /* start SerDes and Phy-Coding-Sublayer */ - qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], SERDES_START | PCS_START); status = pcs + cfg->regs[QPHY_PCS_STATUS]; - mask = cfg->phy_status; - ready = 0; - - ret = readl_poll_timeout(status, val, (val & mask) == ready, 10, + ret = readl_poll_timeout(status, val, !(val & PHYSTATUS), 200, PHY_INIT_COMPLETE_TIMEOUT); if (ret) { dev_err(qmp->dev, "phy initialization timed-out\n"); @@ -2258,32 +2110,28 @@ static int qmp_usb_power_on(struct phy *phy) return 0; err_disable_pipe_clk: - clk_disable_unprepare(qphy->pipe_clk); + clk_disable_unprepare(qmp->pipe_clk); return ret; } static int qmp_usb_power_off(struct phy *phy) { - struct qmp_phy *qphy = phy_get_drvdata(phy); - const struct qmp_phy_cfg *cfg = qphy->cfg; + struct qmp_usb *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; - clk_disable_unprepare(qphy->pipe_clk); + clk_disable_unprepare(qmp->pipe_clk); /* PHY reset */ - qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); + qphy_setbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); /* stop SerDes and Phy-Coding-Sublayer */ - qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + qphy_clrbits(qmp->pcs, cfg->regs[QPHY_START_CTRL], + SERDES_START | PCS_START); /* Put PHY into POWER DOWN state: active low */ - if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) { - qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], - cfg->pwrdn_ctrl); - } else { - qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, - cfg->pwrdn_ctrl); - } + qphy_clrbits(qmp->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], + SW_PWRDN); return 0; } @@ -2315,22 +2163,29 @@ static int qmp_usb_disable(struct phy *phy) static int qmp_usb_set_mode(struct phy *phy, enum phy_mode mode, int submode) { - struct qmp_phy *qphy = phy_get_drvdata(phy); + struct qmp_usb *qmp = phy_get_drvdata(phy); - qphy->mode = mode; + qmp->mode = mode; return 0; } -static void qmp_usb_enable_autonomous_mode(struct qmp_phy *qphy) +static 
const struct phy_ops qmp_usb_phy_ops = { + .init = qmp_usb_enable, + .exit = qmp_usb_disable, + .set_mode = qmp_usb_set_mode, + .owner = THIS_MODULE, +}; + +static void qmp_usb_enable_autonomous_mode(struct qmp_usb *qmp) { - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs; - void __iomem *pcs_misc = qphy->pcs_misc; + const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *pcs_usb = qmp->pcs_usb ?: qmp->pcs; + void __iomem *pcs_misc = qmp->pcs_misc; u32 intr_mask; - if (qphy->mode == PHY_MODE_USB_HOST_SS || - qphy->mode == PHY_MODE_USB_DEVICE_SS) + if (qmp->mode == PHY_MODE_USB_HOST_SS || + qmp->mode == PHY_MODE_USB_DEVICE_SS) intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN; else intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL; @@ -2351,11 +2206,11 @@ static void qmp_usb_enable_autonomous_mode(struct qmp_phy *qphy) qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN); } -static void qmp_usb_disable_autonomous_mode(struct qmp_phy *qphy) +static void qmp_usb_disable_autonomous_mode(struct qmp_usb *qmp) { - const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs; - void __iomem *pcs_misc = qphy->pcs_misc; + const struct qmp_phy_cfg *cfg = qmp->cfg; + void __iomem *pcs_usb = qmp->pcs_usb ?: qmp->pcs; + void __iomem *pcs_misc = qmp->pcs_misc; /* Disable i/o clamp_n on resume for normal mode */ if (pcs_misc) @@ -2371,20 +2226,19 @@ static void qmp_usb_disable_autonomous_mode(struct qmp_phy *qphy) static int __maybe_unused qmp_usb_runtime_suspend(struct device *dev) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); - struct qmp_phy *qphy = qmp->phys[0]; - const struct qmp_phy_cfg *cfg = qphy->cfg; + struct qmp_usb *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; - dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qphy->mode); + dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode); - if (!qphy->phy->init_count) { + if (!qmp->phy->init_count) { dev_vdbg(dev, "PHY not initialized, bailing out\n"); return 0; } - qmp_usb_enable_autonomous_mode(qphy); + qmp_usb_enable_autonomous_mode(qmp); - clk_disable_unprepare(qphy->pipe_clk); + clk_disable_unprepare(qmp->pipe_clk); clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks); return 0; @@ -2392,14 +2246,13 @@ static int __maybe_unused qmp_usb_runtime_suspend(struct device *dev) static int __maybe_unused qmp_usb_runtime_resume(struct device *dev) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); - struct qmp_phy *qphy = qmp->phys[0]; - const struct qmp_phy_cfg *cfg = qphy->cfg; + struct qmp_usb *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; int ret = 0; - dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qphy->mode); + dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode); - if (!qphy->phy->init_count) { + if (!qmp->phy->init_count) { dev_vdbg(dev, "PHY not initialized, bailing out\n"); return 0; } @@ -2408,21 +2261,27 @@ static int __maybe_unused qmp_usb_runtime_resume(struct device *dev) if (ret) return ret; - ret = clk_prepare_enable(qphy->pipe_clk); + ret = clk_prepare_enable(qmp->pipe_clk); if (ret) { dev_err(dev, "pipe_clk enable failed, err=%d\n", ret); clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks); return ret; } - qmp_usb_disable_autonomous_mode(qphy); + qmp_usb_disable_autonomous_mode(qmp); return 0; } -static int qmp_usb_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg) +static const struct dev_pm_ops qmp_usb_pm_ops = { + SET_RUNTIME_PM_OPS(qmp_usb_runtime_suspend, + qmp_usb_runtime_resume, 
NULL) +}; + +static int qmp_usb_vreg_init(struct qmp_usb *qmp) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; int num = cfg->num_vregs; int i; @@ -2436,9 +2295,10 @@ static int qmp_usb_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg) return devm_regulator_bulk_get(dev, num, qmp->vregs); } -static int qmp_usb_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg) +static int qmp_usb_reset_init(struct qmp_usb *qmp) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; int i; int ret; @@ -2457,9 +2317,10 @@ static int qmp_usb_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg) return 0; } -static int qmp_usb_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg) +static int qmp_usb_clk_init(struct qmp_usb *qmp) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; int num = cfg->num_clks; int i; @@ -2496,9 +2357,9 @@ static void phy_clk_release_provider(void *res) * clk | +-------+ | +-----+ * +---------------+ */ -static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np) +static int phy_pipe_clk_register(struct qmp_usb *qmp, struct device_node *np) { - struct clk_fixed_rate *fixed; + struct clk_fixed_rate *fixed = &qmp->pipe_clk_fixed; struct clk_init_data init = { }; int ret; @@ -2508,10 +2369,6 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np) return ret; } - fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL); - if (!fixed) - return -ENOMEM; - init.ops = &clk_fixed_rate_ops; /* controllers using QMP phys use 125MHz pipe clock interface */ @@ -2533,13 +2390,6 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np) return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np); } -static const struct phy_ops qmp_usb_ops = { - .init = qmp_usb_enable, - .exit = qmp_usb_disable, - .set_mode = qmp_usb_set_mode, - .owner = THIS_MODULE, -}; - static void __iomem *qmp_usb_iomap(struct device *dev, struct device_node *np, int index, bool exclusive) { @@ -2555,15 +2405,22 @@ static void __iomem *qmp_usb_iomap(struct device *dev, struct device_node *np, return devm_of_iomap(dev, np, index, NULL); } -static -int qmp_usb_create(struct device *dev, struct device_node *np, int id, - void __iomem *serdes, const struct qmp_phy_cfg *cfg) +static int qmp_usb_parse_dt_legacy(struct qmp_usb *qmp, struct device_node *np) { - struct qcom_qmp *qmp = dev_get_drvdata(dev); - struct phy *generic_phy; - struct qmp_phy *qphy; + struct platform_device *pdev = to_platform_device(qmp->dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + struct device *dev = qmp->dev; bool exclusive = true; - int ret; + + qmp->serdes = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(qmp->serdes)) + return PTR_ERR(qmp->serdes); + + if (cfg->has_phy_dp_com_ctrl) { + qmp->dp_com = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(qmp->dp_com)) + return PTR_ERR(qmp->dp_com); + } /* * FIXME: These bindings should be fixed to not rely on overlapping @@ -2574,83 +2431,176 @@ int qmp_usb_create(struct device *dev, struct device_node *np, int id, if (of_device_is_compatible(dev->of_node, "qcom,sm8350-qmp-usb3-uni-phy")) exclusive = false; - qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL); - if (!qphy) - return -ENOMEM; - - qphy->cfg = cfg; - qphy->serdes = serdes; /* - * Get memory 
resources for each phy lane: + * Get memory resources for the PHY: * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2. * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5 * For single lane PHYs: pcs_misc (optional) -> 3. */ - qphy->tx = devm_of_iomap(dev, np, 0, NULL); - if (IS_ERR(qphy->tx)) - return PTR_ERR(qphy->tx); + qmp->tx = devm_of_iomap(dev, np, 0, NULL); + if (IS_ERR(qmp->tx)) + return PTR_ERR(qmp->tx); - qphy->rx = devm_of_iomap(dev, np, 1, NULL); - if (IS_ERR(qphy->rx)) - return PTR_ERR(qphy->rx); + qmp->rx = devm_of_iomap(dev, np, 1, NULL); + if (IS_ERR(qmp->rx)) + return PTR_ERR(qmp->rx); - qphy->pcs = qmp_usb_iomap(dev, np, 2, exclusive); - if (IS_ERR(qphy->pcs)) - return PTR_ERR(qphy->pcs); + qmp->pcs = qmp_usb_iomap(dev, np, 2, exclusive); + if (IS_ERR(qmp->pcs)) + return PTR_ERR(qmp->pcs); if (cfg->pcs_usb_offset) - qphy->pcs_usb = qphy->pcs + cfg->pcs_usb_offset; + qmp->pcs_usb = qmp->pcs + cfg->pcs_usb_offset; if (cfg->lanes >= 2) { - qphy->tx2 = devm_of_iomap(dev, np, 3, NULL); - if (IS_ERR(qphy->tx2)) - return PTR_ERR(qphy->tx2); + qmp->tx2 = devm_of_iomap(dev, np, 3, NULL); + if (IS_ERR(qmp->tx2)) + return PTR_ERR(qmp->tx2); - qphy->rx2 = devm_of_iomap(dev, np, 4, NULL); - if (IS_ERR(qphy->rx2)) - return PTR_ERR(qphy->rx2); + qmp->rx2 = devm_of_iomap(dev, np, 4, NULL); + if (IS_ERR(qmp->rx2)) + return PTR_ERR(qmp->rx2); - qphy->pcs_misc = devm_of_iomap(dev, np, 5, NULL); + qmp->pcs_misc = devm_of_iomap(dev, np, 5, NULL); } else { - qphy->pcs_misc = devm_of_iomap(dev, np, 3, NULL); + qmp->pcs_misc = devm_of_iomap(dev, np, 3, NULL); } - if (IS_ERR(qphy->pcs_misc)) { + if (IS_ERR(qmp->pcs_misc)) { dev_vdbg(dev, "PHY pcs_misc-reg not used\n"); - qphy->pcs_misc = NULL; + qmp->pcs_misc = NULL; } - qphy->pipe_clk = devm_get_clk_from_child(dev, np, NULL); - if (IS_ERR(qphy->pipe_clk)) { - return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk), - "failed to get lane%d pipe clock\n", id); + qmp->pipe_clk = devm_get_clk_from_child(dev, np, NULL); + if (IS_ERR(qmp->pipe_clk)) { + return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk), + "failed to get pipe clock\n"); + } + + return 0; +} + +static int qmp_usb_parse_dt(struct qmp_usb *qmp) +{ + struct platform_device *pdev = to_platform_device(qmp->dev); + const struct qmp_phy_cfg *cfg = qmp->cfg; + const struct qmp_usb_offsets *offs = cfg->offsets; + struct device *dev = qmp->dev; + void __iomem *base; + + if (!offs) + return -EINVAL; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + qmp->serdes = base + offs->serdes; + qmp->pcs = base + offs->pcs; + qmp->pcs_usb = base + offs->pcs_usb; + qmp->tx = base + offs->tx; + qmp->rx = base + offs->rx; + + qmp->pipe_clk = devm_clk_get(dev, "pipe"); + if (IS_ERR(qmp->pipe_clk)) { + return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk), + "failed to get pipe clock\n"); } - generic_phy = devm_phy_create(dev, np, &qmp_usb_ops); - if (IS_ERR(generic_phy)) { - ret = PTR_ERR(generic_phy); - dev_err(dev, "failed to create qphy %d\n", ret); + return 0; +} + +static int qmp_usb_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phy_provider *phy_provider; + struct device_node *np; + struct qmp_usb *qmp; + int ret; + + qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL); + if (!qmp) + return -ENOMEM; + + qmp->dev = dev; + + qmp->cfg = of_device_get_match_data(dev); + if (!qmp->cfg) + return -EINVAL; + + ret = qmp_usb_clk_init(qmp); + if (ret) return ret; + + ret = qmp_usb_reset_init(qmp); + if (ret) + return ret; + + 
ret = qmp_usb_vreg_init(qmp); + if (ret) + return ret; + + /* Check for legacy binding with child node. */ + np = of_get_next_available_child(dev->of_node, NULL); + if (np) { + ret = qmp_usb_parse_dt_legacy(qmp, np); + } else { + np = of_node_get(dev->of_node); + ret = qmp_usb_parse_dt(qmp); + } + if (ret) + goto err_node_put; + + pm_runtime_set_active(dev); + ret = devm_pm_runtime_enable(dev); + if (ret) + goto err_node_put; + /* + * Prevent runtime pm from being ON by default. Users can enable + * it using power/control in sysfs. + */ + pm_runtime_forbid(dev); + + ret = phy_pipe_clk_register(qmp, np); + if (ret) + goto err_node_put; + + qmp->phy = devm_phy_create(dev, np, &qmp_usb_phy_ops); + if (IS_ERR(qmp->phy)) { + ret = PTR_ERR(qmp->phy); + dev_err(dev, "failed to create PHY: %d\n", ret); + goto err_node_put; } - qphy->phy = generic_phy; - qphy->qmp = qmp; - qmp->phys[id] = qphy; - phy_set_drvdata(generic_phy, qphy); + phy_set_drvdata(qmp->phy, qmp); - return 0; + of_node_put(np); + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(phy_provider); + +err_node_put: + of_node_put(np); + return ret; } static const struct of_device_id qmp_usb_of_match_table[] = { { + .compatible = "qcom,ipq6018-qmp-usb3-phy", + .data = &ipq8074_usb3phy_cfg, + }, { .compatible = "qcom,ipq8074-qmp-usb3-phy", .data = &ipq8074_usb3phy_cfg, }, { .compatible = "qcom,msm8996-qmp-usb3-phy", .data = &msm8996_usb3phy_cfg, }, { - .compatible = "qcom,ipq6018-qmp-usb3-phy", - .data = &ipq8074_usb3phy_cfg, + .compatible = "qcom,msm8998-qmp-usb3-phy", + .data = &msm8998_usb3phy_cfg, + }, { + .compatible = "qcom,qcm2290-qmp-usb3-phy", + .data = &qcm2290_usb3phy_cfg, }, { .compatible = "qcom,sc7180-qmp-usb3-phy", .data = &sc7180_usb3phy_cfg, @@ -2667,8 +2617,11 @@ static const struct of_device_id qmp_usb_of_match_table[] = { .compatible = "qcom,sdm845-qmp-usb3-uni-phy", .data = &qmp_v3_usb3_uniphy_cfg, }, { - .compatible = "qcom,msm8998-qmp-usb3-phy", - .data = &msm8998_usb3phy_cfg, + .compatible = "qcom,sdx55-qmp-usb3-uni-phy", + .data = &sdx55_usb3_uniphy_cfg, + }, { + .compatible = "qcom,sdx65-qmp-usb3-uni-phy", + .data = &sdx65_usb3_uniphy_cfg, }, { .compatible = "qcom,sm8150-qmp-usb3-phy", .data = &sm8150_usb3phy_cfg, @@ -2681,12 +2634,6 @@ static const struct of_device_id qmp_usb_of_match_table[] = { }, { .compatible = "qcom,sm8250-qmp-usb3-uni-phy", .data = &sm8250_usb3_uniphy_cfg, - }, { - .compatible = "qcom,sdx55-qmp-usb3-uni-phy", - .data = &sdx55_usb3_uniphy_cfg, - }, { - .compatible = "qcom,sdx65-qmp-usb3-uni-phy", - .data = &sdx65_usb3_uniphy_cfg, }, { .compatible = "qcom,sm8350-qmp-usb3-phy", .data = &sm8350_usb3phy_cfg, @@ -2696,119 +2643,11 @@ static const struct of_device_id qmp_usb_of_match_table[] = { }, { .compatible = "qcom,sm8450-qmp-usb3-phy", .data = &sm8350_usb3phy_cfg, - }, { - .compatible = "qcom,qcm2290-qmp-usb3-phy", - .data = &qcm2290_usb3phy_cfg, }, { }, }; MODULE_DEVICE_TABLE(of, qmp_usb_of_match_table); -static const struct dev_pm_ops qmp_usb_pm_ops = { - SET_RUNTIME_PM_OPS(qmp_usb_runtime_suspend, - qmp_usb_runtime_resume, NULL) -}; - -static int qmp_usb_probe(struct platform_device *pdev) -{ - struct qcom_qmp *qmp; - struct device *dev = &pdev->dev; - struct device_node *child; - struct phy_provider *phy_provider; - void __iomem *serdes; - const struct qmp_phy_cfg *cfg = NULL; - int num, id; - int ret; - - qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL); - if (!qmp) - return -ENOMEM; - - qmp->dev = dev; - dev_set_drvdata(dev, qmp); - 
- /* Get the specific init parameters of QMP phy */ - cfg = of_device_get_match_data(dev); - if (!cfg) - return -EINVAL; - - /* per PHY serdes; usually located at base address */ - serdes = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(serdes)) - return PTR_ERR(serdes); - - /* per PHY dp_com; if PHY has dp_com control block */ - if (cfg->has_phy_dp_com_ctrl) { - qmp->dp_com = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(qmp->dp_com)) - return PTR_ERR(qmp->dp_com); - } - - ret = qmp_usb_clk_init(dev, cfg); - if (ret) - return ret; - - ret = qmp_usb_reset_init(dev, cfg); - if (ret) - return ret; - - ret = qmp_usb_vreg_init(dev, cfg); - if (ret) - return dev_err_probe(dev, ret, - "failed to get regulator supplies\n"); - - num = of_get_available_child_count(dev->of_node); - /* do we have a rogue child node ? */ - if (num > 1) - return -EINVAL; - - qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL); - if (!qmp->phys) - return -ENOMEM; - - pm_runtime_set_active(dev); - ret = devm_pm_runtime_enable(dev); - if (ret) - return ret; - /* - * Prevent runtime pm from being ON by default. Users can enable - * it using power/control in sysfs. - */ - pm_runtime_forbid(dev); - - id = 0; - for_each_available_child_of_node(dev->of_node, child) { - /* Create per-lane phy */ - ret = qmp_usb_create(dev, child, id, serdes, cfg); - if (ret) { - dev_err(dev, "failed to create lane%d phy, %d\n", - id, ret); - goto err_node_put; - } - - /* - * Register the pipe clock provided by phy. - * See function description to see details of this pipe clock. - */ - ret = phy_pipe_clk_register(qmp, child); - if (ret) { - dev_err(qmp->dev, - "failed to register pipe clock source\n"); - goto err_node_put; - } - - id++; - } - - phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - - return PTR_ERR_OR_ZERO(phy_provider); - -err_node_put: - of_node_put(child); - return ret; -} - static struct platform_driver qmp_usb_driver = { .probe = qmp_usb_probe, .driver = { diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index 26274e3c0cf9512615f71b217ecce49147602342..29a48f0436d2aa9fa4f2eade4fb729d2775f8b4d 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -38,6 +38,7 @@ #include "phy-qcom-qmp-pcs-pcie-v4_20.h" #include "phy-qcom-qmp-pcs-v5.h" +#include "phy-qcom-qmp-pcs-v5_20.h" #include "phy-qcom-qmp-pcs-pcie-v5.h" #include "phy-qcom-qmp-pcs-usb-v5.h" #include "phy-qcom-qmp-pcs-ufs-v5.h" diff --git a/drivers/phy/renesas/Kconfig b/drivers/phy/renesas/Kconfig index 111bdcae775c8ddf4e6185479fa694d56fdfbf55..36505fc5f386e2ca17b3efa8b30c1eb554b3e9a7 100644 --- a/drivers/phy/renesas/Kconfig +++ b/drivers/phy/renesas/Kconfig @@ -2,6 +2,14 @@ # # Phy drivers for Renesas platforms # +# NOTE: Please keep config names sorted alphabetically. +config PHY_R8A779F0_ETHERNET_SERDES + tristate "Renesas R-Car S4-8 Ethernet SERDES driver" + depends on ARCH_RENESAS || COMPILE_TEST + select GENERIC_PHY + help + Support for Ethernet SERDES found on Renesas R-Car S4-8 SoCs.
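The Renesas driver added below reaches every SERDES register through a banked window: a bank number is first written to the bank-select register at offset 0x03fc, then the target register is accessed inside that bank. A minimal sketch of the access and poll helpers, mirroring the patch's r8a779f0_eth_serdes_write32() and r8a779f0_eth_serdes_reg_wait() (timeout constant inlined here for brevity):

	#include <linux/io.h>
	#include <linux/iopoll.h>

	#define BANK_SELECT	0x03fc	/* R8A779F0_ETH_SERDES_BANK_SELECT */

	static void serdes_write32(void __iomem *addr, u32 offs, u32 bank, u32 data)
	{
		iowrite32(bank, addr + BANK_SELECT);	/* select register bank */
		iowrite32(data, addr + offs);		/* write within the bank */
	}

	/* Poll a banked register until (val & mask) == expected, or time out. */
	static int serdes_reg_wait(void __iomem *addr, u32 offs, u32 bank,
				   u32 mask, u32 expected)
	{
		u32 val;

		iowrite32(bank, addr + BANK_SELECT);
		return readl_poll_timeout_atomic(addr + offs, val,
						 (val & mask) == expected,
						 1, 100000);
	}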
+ config PHY_RCAR_GEN2 tristate "Renesas R-Car generation 2 USB PHY driver" depends on ARCH_RENESAS diff --git a/drivers/phy/renesas/Makefile b/drivers/phy/renesas/Makefile index b599ff8a4349cff092fa37ae15968ec72ab33be7..8896d1919faa6ec11ae6c1e6bf65960cb948914f 100644 --- a/drivers/phy/renesas/Makefile +++ b/drivers/phy/renesas/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PHY_R8A779F0_ETHERNET_SERDES) += r8a779f0-ether-serdes.o obj-$(CONFIG_PHY_RCAR_GEN2) += phy-rcar-gen2.o obj-$(CONFIG_PHY_RCAR_GEN3_PCIE) += phy-rcar-gen3-pcie.o obj-$(CONFIG_PHY_RCAR_GEN3_USB2) += phy-rcar-gen3-usb2.o diff --git a/drivers/phy/renesas/r8a779f0-ether-serdes.c b/drivers/phy/renesas/r8a779f0-ether-serdes.c new file mode 100644 index 0000000000000000000000000000000000000000..ec6594e6dc275bf1c7fac5888736ee5263b0be91 --- /dev/null +++ b/drivers/phy/renesas/r8a779f0-ether-serdes.c @@ -0,0 +1,417 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Renesas Ethernet SERDES device driver + * + * Copyright (C) 2022 Renesas Electronics Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define R8A779F0_ETH_SERDES_NUM 3 +#define R8A779F0_ETH_SERDES_OFFSET 0x0400 +#define R8A779F0_ETH_SERDES_BANK_SELECT 0x03fc +#define R8A779F0_ETH_SERDES_TIMEOUT_US 100000 +#define R8A779F0_ETH_SERDES_NUM_RETRY_LINKUP 3 +#define R8A779F0_ETH_SERDES_NUM_RETRY_INIT 3 + +struct r8a779f0_eth_serdes_drv_data; +struct r8a779f0_eth_serdes_channel { + struct r8a779f0_eth_serdes_drv_data *dd; + struct phy *phy; + void __iomem *addr; + phy_interface_t phy_interface; + int speed; + int index; +}; + +struct r8a779f0_eth_serdes_drv_data { + void __iomem *addr; + struct platform_device *pdev; + struct reset_control *reset; + struct r8a779f0_eth_serdes_channel channel[R8A779F0_ETH_SERDES_NUM]; + bool initialized; +}; + +/* + * The datasheet describes initialization procedure without any information + * about registers' name/bits. So, this is all black magic to initialize + * the hardware. 
+ */ +static void r8a779f0_eth_serdes_write32(void __iomem *addr, u32 offs, u32 bank, u32 data) +{ + iowrite32(bank, addr + R8A779F0_ETH_SERDES_BANK_SELECT); + iowrite32(data, addr + offs); +} + +static int +r8a779f0_eth_serdes_reg_wait(struct r8a779f0_eth_serdes_channel *channel, + u32 offs, u32 bank, u32 mask, u32 expected) +{ + int ret; + u32 val; + + iowrite32(bank, channel->addr + R8A779F0_ETH_SERDES_BANK_SELECT); + + ret = readl_poll_timeout_atomic(channel->addr + offs, val, + (val & mask) == expected, + 1, R8A779F0_ETH_SERDES_TIMEOUT_US); + if (ret) + dev_dbg(&channel->phy->dev, + "%s: index %d, offs %x, bank %x, mask %x, expected %x\n", + __func__, channel->index, offs, bank, mask, expected); + + return ret; +} + +static int +r8a779f0_eth_serdes_common_init_ram(struct r8a779f0_eth_serdes_drv_data *dd) +{ + struct r8a779f0_eth_serdes_channel *channel; + int i, ret; + + for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) { + channel = &dd->channel[i]; + ret = r8a779f0_eth_serdes_reg_wait(channel, 0x026c, 0x180, BIT(0), 0x01); + if (ret) + return ret; + } + + r8a779f0_eth_serdes_write32(dd->addr, 0x026c, 0x180, 0x03); + + return ret; +} + +static int +r8a779f0_eth_serdes_common_setting(struct r8a779f0_eth_serdes_channel *channel) +{ + struct r8a779f0_eth_serdes_drv_data *dd = channel->dd; + + switch (channel->phy_interface) { + case PHY_INTERFACE_MODE_SGMII: + r8a779f0_eth_serdes_write32(dd->addr, 0x0244, 0x180, 0x0097); + r8a779f0_eth_serdes_write32(dd->addr, 0x01d0, 0x180, 0x0060); + r8a779f0_eth_serdes_write32(dd->addr, 0x01d8, 0x180, 0x2200); + r8a779f0_eth_serdes_write32(dd->addr, 0x01d4, 0x180, 0x0000); + r8a779f0_eth_serdes_write32(dd->addr, 0x01e0, 0x180, 0x003d); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int +r8a779f0_eth_serdes_chan_setting(struct r8a779f0_eth_serdes_channel *channel) +{ + int ret; + + switch (channel->phy_interface) { + case PHY_INTERFACE_MODE_SGMII: + r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x380, 0x2000); + r8a779f0_eth_serdes_write32(channel->addr, 0x01c0, 0x180, 0x0011); + r8a779f0_eth_serdes_write32(channel->addr, 0x0248, 0x180, 0x0540); + r8a779f0_eth_serdes_write32(channel->addr, 0x0258, 0x180, 0x0015); + r8a779f0_eth_serdes_write32(channel->addr, 0x0144, 0x180, 0x0100); + r8a779f0_eth_serdes_write32(channel->addr, 0x01a0, 0x180, 0x0000); + r8a779f0_eth_serdes_write32(channel->addr, 0x00d0, 0x180, 0x0002); + r8a779f0_eth_serdes_write32(channel->addr, 0x0150, 0x180, 0x0003); + r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x180, 0x0100); + r8a779f0_eth_serdes_write32(channel->addr, 0x0148, 0x180, 0x0100); + r8a779f0_eth_serdes_write32(channel->addr, 0x0174, 0x180, 0x0000); + r8a779f0_eth_serdes_write32(channel->addr, 0x0160, 0x180, 0x0007); + r8a779f0_eth_serdes_write32(channel->addr, 0x01ac, 0x180, 0x0000); + r8a779f0_eth_serdes_write32(channel->addr, 0x00c4, 0x180, 0x0310); + r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x380, 0x0101); + ret = r8a779f0_eth_serdes_reg_wait(channel, 0x00c8, 0x0180, BIT(0), 0); + if (ret) + return ret; + + r8a779f0_eth_serdes_write32(channel->addr, 0x0148, 0x180, 0x0101); + ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0148, 0x0180, BIT(0), 0); + if (ret) + return ret; + + r8a779f0_eth_serdes_write32(channel->addr, 0x00c4, 0x180, 0x1310); + r8a779f0_eth_serdes_write32(channel->addr, 0x00d8, 0x180, 0x1800); + r8a779f0_eth_serdes_write32(channel->addr, 0x00dc, 0x180, 0x0000); + r8a779f0_eth_serdes_write32(channel->addr, 0x001c, 0x300, 0x0001); + 
r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x380, 0x2100); + ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0000, 0x0380, BIT(8), 0); + if (ret) + return ret; + + if (channel->speed == 1000) + r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x1f00, 0x0140); + else if (channel->speed == 100) + r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x1f00, 0x2100); + + /* For AN_ON */ + r8a779f0_eth_serdes_write32(channel->addr, 0x0004, 0x1f80, 0x0005); + r8a779f0_eth_serdes_write32(channel->addr, 0x0028, 0x1f80, 0x07a1); + r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x1f80, 0x0208); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int +r8a779f0_eth_serdes_chan_speed(struct r8a779f0_eth_serdes_channel *channel) +{ + int ret; + + switch (channel->phy_interface) { + case PHY_INTERFACE_MODE_SGMII: + /* For AN_ON */ + if (channel->speed == 1000) + r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x1f00, 0x1140); + else if (channel->speed == 100) + r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x1f00, 0x3100); + ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0008, 0x1f80, BIT(0), 1); + if (ret) + return ret; + r8a779f0_eth_serdes_write32(channel->addr, 0x0008, 0x1f80, 0x0000); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + + +static int r8a779f0_eth_serdes_monitor_linkup(struct r8a779f0_eth_serdes_channel *channel) +{ + int i, ret; + + for (i = 0; i < R8A779F0_ETH_SERDES_NUM_RETRY_LINKUP; i++) { + ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0004, 0x300, + BIT(2), BIT(2)); + if (!ret) + break; + + /* restart */ + r8a779f0_eth_serdes_write32(channel->addr, 0x0144, 0x180, 0x0100); + udelay(1); + r8a779f0_eth_serdes_write32(channel->addr, 0x0144, 0x180, 0x0000); + } + + return ret; +} + +static int r8a779f0_eth_serdes_hw_init(struct r8a779f0_eth_serdes_channel *channel) +{ + struct r8a779f0_eth_serdes_drv_data *dd = channel->dd; + int i, ret; + + if (dd->initialized) + return 0; + + ret = r8a779f0_eth_serdes_common_init_ram(dd); + if (ret) + return ret; + + for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) { + ret = r8a779f0_eth_serdes_reg_wait(&dd->channel[i], 0x0000, + 0x300, BIT(15), 0); + if (ret) + return ret; + } + + for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) + r8a779f0_eth_serdes_write32(dd->channel[i].addr, 0x03d4, 0x380, 0x0443); + + ret = r8a779f0_eth_serdes_common_setting(channel); + if (ret) + return ret; + + for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) + r8a779f0_eth_serdes_write32(dd->channel[i].addr, 0x03d0, 0x380, 0x0001); + + + r8a779f0_eth_serdes_write32(dd->addr, 0x0000, 0x380, 0x8000); + + ret = r8a779f0_eth_serdes_common_init_ram(dd); + if (ret) + return ret; + + ret = r8a779f0_eth_serdes_reg_wait(&dd->channel[0], 0x0000, 0x380, BIT(15), 0); + if (ret) + return ret; + + for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) { + ret = r8a779f0_eth_serdes_chan_setting(&dd->channel[i]); + if (ret) + return ret; + } + + for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) { + ret = r8a779f0_eth_serdes_chan_speed(&dd->channel[i]); + if (ret) + return ret; + } + + for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) + r8a779f0_eth_serdes_write32(dd->channel[i].addr, 0x03c0, 0x380, 0x0000); + for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) + r8a779f0_eth_serdes_write32(dd->channel[i].addr, 0x03d0, 0x380, 0x0000); + + for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) { + ret = r8a779f0_eth_serdes_monitor_linkup(&dd->channel[i]); + if (ret) + return ret; + } + + return 0; +} + +static int r8a779f0_eth_serdes_init(struct phy *p) +{ + struct 
r8a779f0_eth_serdes_channel *channel = phy_get_drvdata(p); + int i, ret; + + for (i = 0; i < R8A779F0_ETH_SERDES_NUM_RETRY_INIT; i++) { + ret = r8a779f0_eth_serdes_hw_init(channel); + if (!ret) { + channel->dd->initialized = true; + break; + } + usleep_range(1000, 2000); + } + + return ret; +} + +static int r8a779f0_eth_serdes_set_mode(struct phy *p, enum phy_mode mode, + int submode) +{ + struct r8a779f0_eth_serdes_channel *channel = phy_get_drvdata(p); + + if (mode != PHY_MODE_ETHERNET) + return -EOPNOTSUPP; + + switch (submode) { + case PHY_INTERFACE_MODE_GMII: + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_USXGMII: + channel->phy_interface = submode; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int r8a779f0_eth_serdes_set_speed(struct phy *p, int speed) +{ + struct r8a779f0_eth_serdes_channel *channel = phy_get_drvdata(p); + + channel->speed = speed; + + return 0; +} + +static const struct phy_ops r8a779f0_eth_serdes_ops = { + .init = r8a779f0_eth_serdes_init, + .set_mode = r8a779f0_eth_serdes_set_mode, + .set_speed = r8a779f0_eth_serdes_set_speed, +}; + +static struct phy *r8a779f0_eth_serdes_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct r8a779f0_eth_serdes_drv_data *dd = dev_get_drvdata(dev); + + if (args->args[0] >= R8A779F0_ETH_SERDES_NUM) + return ERR_PTR(-ENODEV); + + return dd->channel[args->args[0]].phy; +} + +static const struct of_device_id r8a779f0_eth_serdes_of_table[] = { + { .compatible = "renesas,r8a779f0-ether-serdes", }, + { } +}; +MODULE_DEVICE_TABLE(of, r8a779f0_eth_serdes_of_table); + +static int r8a779f0_eth_serdes_probe(struct platform_device *pdev) +{ + struct r8a779f0_eth_serdes_drv_data *dd; + struct phy_provider *provider; + struct resource *res; + int i; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "invalid resource\n"); + return -EINVAL; + } + + dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL); + if (!dd) + return -ENOMEM; + + platform_set_drvdata(pdev, dd); + dd->pdev = pdev; + dd->addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dd->addr)) + return PTR_ERR(dd->addr); + + dd->reset = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(dd->reset)) + return PTR_ERR(dd->reset); + + reset_control_reset(dd->reset); + + for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) { + struct r8a779f0_eth_serdes_channel *channel = &dd->channel[i]; + + channel->phy = devm_phy_create(&pdev->dev, NULL, + &r8a779f0_eth_serdes_ops); + if (IS_ERR(channel->phy)) + return PTR_ERR(channel->phy); + channel->addr = dd->addr + R8A779F0_ETH_SERDES_OFFSET * i; + channel->dd = dd; + channel->index = i; + phy_set_drvdata(channel->phy, channel); + } + + provider = devm_of_phy_provider_register(&pdev->dev, + r8a779f0_eth_serdes_xlate); + if (IS_ERR(provider)) + return PTR_ERR(provider); + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + return 0; +} + +static int r8a779f0_eth_serdes_remove(struct platform_device *pdev) +{ + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver r8a779f0_eth_serdes_driver_platform = { + .probe = r8a779f0_eth_serdes_probe, + .remove = r8a779f0_eth_serdes_remove, + .driver = { + .name = "r8a779f0_eth_serdes", + .of_match_table = r8a779f0_eth_serdes_of_table, + } +}; +module_platform_driver(r8a779f0_eth_serdes_driver_platform); +MODULE_AUTHOR("Yoshihiro Shimoda"); +MODULE_DESCRIPTION("Renesas Ethernet SERDES device driver"); 
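For context, a hedged sketch of how a consumer (for example the R-Car S4-8 Ethernet MAC driver) would drive one of the three channels through the generic PHY API; the "serdes" phy-name and the probe-path placement are assumptions, not taken from this patch:

	#include <linux/phy.h>
	#include <linux/phy/phy.h>

	static int example_serdes_attach(struct device *dev)
	{
		struct phy *serdes;
		int ret;

		serdes = devm_phy_get(dev, "serdes");	/* assumed DT phy-name */
		if (IS_ERR(serdes))
			return PTR_ERR(serdes);

		/* SGMII is the only submode the init tables above accept */
		ret = phy_set_mode_ext(serdes, PHY_MODE_ETHERNET,
				       PHY_INTERFACE_MODE_SGMII);
		if (ret)
			return ret;

		ret = phy_set_speed(serdes, 1000);	/* 1000 or 100 Mbit/s */
		if (ret)
			return ret;

		return phy_init(serdes);	/* runs the retried hw init */
	}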
+MODULE_LICENSE("GPL"); diff --git a/drivers/phy/tegra/phy-tegra194-p2u.c b/drivers/phy/tegra/phy-tegra194-p2u.c index 1415ca71de382fb0dc92b5de276b64ad1ef420fd..633e6b7472759a6b819320dfa9d29adce2171ecc 100644 --- a/drivers/phy/tegra/phy-tegra194-p2u.c +++ b/drivers/phy/tegra/phy-tegra194-p2u.c @@ -15,6 +15,7 @@ #include #define P2U_CONTROL_CMN 0x74 +#define P2U_CONTROL_CMN_ENABLE_L2_EXIT_RATE_CHANGE BIT(13) #define P2U_CONTROL_CMN_SKP_SIZE_PROTECTION_EN BIT(20) #define P2U_PERIODIC_EQ_CTRL_GEN3 0xc0 @@ -85,8 +86,21 @@ static int tegra_p2u_power_on(struct phy *x) return 0; } +static int tegra_p2u_calibrate(struct phy *x) +{ + struct tegra_p2u *phy = phy_get_drvdata(x); + u32 val; + + val = p2u_readl(phy, P2U_CONTROL_CMN); + val |= P2U_CONTROL_CMN_ENABLE_L2_EXIT_RATE_CHANGE; + p2u_writel(phy, val, P2U_CONTROL_CMN); + + return 0; +} + static const struct phy_ops ops = { .power_on = tegra_p2u_power_on, + .calibrate = tegra_p2u_calibrate, .owner = THIS_MODULE, }; diff --git a/drivers/phy/tegra/xusb-tegra124.c b/drivers/phy/tegra/xusb-tegra124.c index db56c7fbe60be01d23f1a4f89f30acef6c0db8a7..f4f75ea033b81a445da1675cc4dc03fc48cd4ae3 100644 --- a/drivers/phy/tegra/xusb-tegra124.c +++ b/drivers/phy/tegra/xusb-tegra124.c @@ -1652,7 +1652,6 @@ tegra124_usb3_port_map(struct tegra_xusb_port *port) static const struct tegra_xusb_port_ops tegra124_usb3_port_ops = { .release = tegra_xusb_usb3_port_release, - .remove = tegra_xusb_usb3_port_remove, .enable = tegra124_usb3_port_enable, .disable = tegra124_usb3_port_disable, .map = tegra124_usb3_port_map, diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c index 0996ede63387a25a4760b82f70c3ec7c7e3873c1..6a8bd87cfdbda7af843b93849876bece2c528385 100644 --- a/drivers/phy/tegra/xusb-tegra186.c +++ b/drivers/phy/tegra/xusb-tegra186.c @@ -1185,7 +1185,6 @@ tegra186_usb3_port_map(struct tegra_xusb_port *port) static const struct tegra_xusb_port_ops tegra186_usb3_port_ops = { .release = tegra_xusb_usb3_port_release, - .remove = tegra_xusb_usb3_port_remove, .enable = tegra186_usb3_port_enable, .disable = tegra186_usb3_port_disable, .map = tegra186_usb3_port_map, diff --git a/drivers/phy/tegra/xusb-tegra210.c b/drivers/phy/tegra/xusb-tegra210.c index eedfc7c2cc05270f0be4598fe758af168bc0fc62..ebc8a7e21a318160b162113eea8a6c97b7ed7966 100644 --- a/drivers/phy/tegra/xusb-tegra210.c +++ b/drivers/phy/tegra/xusb-tegra210.c @@ -3078,7 +3078,6 @@ tegra210_usb3_port_map(struct tegra_xusb_port *port) static const struct tegra_xusb_port_ops tegra210_usb3_port_ops = { .release = tegra_xusb_usb3_port_release, - .remove = tegra_xusb_usb3_port_remove, .enable = tegra210_usb3_port_enable, .disable = tegra210_usb3_port_disable, .map = tegra210_usb3_port_map, diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index dce45fbbd699c12c668e17d4cd62d301c96096b2..ff4b930879f3c32b921c95ca0a01ead96804c5a1 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -954,8 +954,7 @@ static int tegra_xusb_usb3_port_parse_dt(struct tegra_xusb_usb3_port *usb3) return -EINVAL; } - usb3->supply = regulator_get(&port->dev, "vbus"); - return PTR_ERR_OR_ZERO(usb3->supply); + return 0; } static int tegra_xusb_add_usb3_port(struct tegra_xusb_padctl *padctl, @@ -1012,13 +1011,6 @@ void tegra_xusb_usb3_port_release(struct tegra_xusb_port *port) kfree(usb3); } -void tegra_xusb_usb3_port_remove(struct tegra_xusb_port *port) -{ - struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port); - - regulator_put(usb3->supply); -} - static void 
__tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl) { struct tegra_xusb_port *port, *tmp; diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h index 8cfbbdbd6e0c0103b9086128e7889ab9afc0027f..c384734a61c2ab7f324f5f9bd4baa6313d248348 100644 --- a/drivers/phy/tegra/xusb.h +++ b/drivers/phy/tegra/xusb.h @@ -359,7 +359,6 @@ void tegra_xusb_hsic_port_release(struct tegra_xusb_port *port); struct tegra_xusb_usb3_port { struct tegra_xusb_port base; - struct regulator *supply; bool context_saved; unsigned int port; bool internal; @@ -381,7 +380,6 @@ struct tegra_xusb_usb3_port * tegra_xusb_find_usb3_port(struct tegra_xusb_padctl *padctl, unsigned int index); void tegra_xusb_usb3_port_release(struct tegra_xusb_port *port); -void tegra_xusb_usb3_port_remove(struct tegra_xusb_port *port); struct tegra_xusb_port_ops { void (*release)(struct tegra_xusb_port *port); diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c index 0bcfd6d96b4d0a67d0d0cc5a7dc579610844c8e4..8c667819c39a10b0fe3186f7f65197ffb4b21f2f 100644 --- a/drivers/phy/ti/phy-gmii-sel.c +++ b/drivers/phy/ti/phy-gmii-sel.c @@ -50,6 +50,7 @@ struct phy_gmii_sel_soc_data { const struct reg_field (*regfields)[PHY_GMII_SEL_LAST]; bool use_of_data; u64 extra_modes; + u32 num_qsgmii_main_ports; }; struct phy_gmii_sel_priv { @@ -213,6 +214,17 @@ struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw5g_soc_j7200 = { .use_of_data = true, .regfields = phy_gmii_sel_fields_am654, .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII), + .num_ports = 4, + .num_qsgmii_main_ports = 1, +}; + +static const +struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw9g_soc_j721e = { + .use_of_data = true, + .regfields = phy_gmii_sel_fields_am654, + .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII), + .num_ports = 8, + .num_qsgmii_main_ports = 2, }; static const struct of_device_id phy_gmii_sel_id_table[] = { @@ -240,6 +252,10 @@ static const struct of_device_id phy_gmii_sel_id_table[] = { .compatible = "ti,j7200-cpsw5g-phy-gmii-sel", .data = &phy_gmii_sel_cpsw5g_soc_j7200, }, + { + .compatible = "ti,j721e-cpsw9g-phy-gmii-sel", + .data = &phy_gmii_sel_cpsw9g_soc_j721e, + }, {} }; MODULE_DEVICE_TABLE(of, phy_gmii_sel_id_table); @@ -378,11 +394,13 @@ static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv) static int phy_gmii_sel_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + const struct phy_gmii_sel_soc_data *soc_data; struct device_node *node = dev->of_node; const struct of_device_id *of_id; struct phy_gmii_sel_priv *priv; u32 main_ports = 1; int ret; + u32 i; of_id = of_match_node(phy_gmii_sel_id_table, pdev->dev.of_node); if (!of_id) @@ -394,16 +412,26 @@ static int phy_gmii_sel_probe(struct platform_device *pdev) priv->dev = &pdev->dev; priv->soc_data = of_id->data; + soc_data = priv->soc_data; priv->num_ports = priv->soc_data->num_ports; - of_property_read_u32(node, "ti,qsgmii-main-ports", &main_ports); + priv->qsgmii_main_ports = 0; + /* - * Ensure that main_ports is within bounds. If the property - * ti,qsgmii-main-ports is not mentioned, or the value mentioned - * is out of bounds, default to 1. + * Based on the compatible, try to read the appropriate number of + * QSGMII main ports from the "ti,qsgmii-main-ports" property from + * the device-tree node. 
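(Editorial illustration, values hypothetical: a J721E CPSW9G node would carry one cell per QSGMII main port, e.g. "ti,qsgmii-main-ports = <1 5>;" to mark ports 1 and 5; each value read by of_property_read_u32_index() below must lie within 1..num_ports, otherwise probe fails with -EINVAL.)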
*/ - if (main_ports < 1 || main_ports > 4) - main_ports = 1; - priv->qsgmii_main_ports = PHY_GMII_PORT(main_ports); + for (i = 0; i < soc_data->num_qsgmii_main_ports; i++) { + of_property_read_u32_index(node, "ti,qsgmii-main-ports", i, &main_ports); + /* + * Ensure that main_ports is within bounds. + */ + if (main_ports < 1 || main_ports > soc_data->num_ports) { + dev_err(dev, "Invalid qsgmii main port provided\n"); + return -EINVAL; + } + priv->qsgmii_main_ports |= PHY_GMII_PORT(main_ports); + } priv->regmap = syscon_node_to_regmap(node->parent); if (IS_ERR(priv->regmap)) { diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index 41725c6bcdf6f30656127165c96838b0eed502c9..ddce5ef7711c605541951e38698a18c79ec2f7e6 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -81,14 +81,20 @@ static const struct reg_field phy_reset_n = REG_FIELD(WIZ_SERDES_RST, 31, 31); static const struct reg_field phy_en_refclk = REG_FIELD(WIZ_SERDES_RST, 30, 30); static const struct reg_field pll1_refclk_mux_sel = REG_FIELD(WIZ_SERDES_RST, 29, 29); +static const struct reg_field pll1_refclk_mux_sel_2 = + REG_FIELD(WIZ_SERDES_RST, 22, 23); static const struct reg_field pll0_refclk_mux_sel = REG_FIELD(WIZ_SERDES_RST, 28, 28); +static const struct reg_field pll0_refclk_mux_sel_2 = + REG_FIELD(WIZ_SERDES_RST, 28, 29); static const struct reg_field refclk_dig_sel_16g = REG_FIELD(WIZ_SERDES_RST, 24, 25); static const struct reg_field refclk_dig_sel_10g = REG_FIELD(WIZ_SERDES_RST, 24, 24); static const struct reg_field pma_cmn_refclk_int_mode = REG_FIELD(WIZ_SERDES_TOP_CTRL, 28, 29); +static const struct reg_field pma_cmn_refclk1_int_mode = + REG_FIELD(WIZ_SERDES_TOP_CTRL, 20, 21); static const struct reg_field pma_cmn_refclk_mode = REG_FIELD(WIZ_SERDES_TOP_CTRL, 30, 31); static const struct reg_field pma_cmn_refclk_dig_div = @@ -315,6 +321,8 @@ enum wiz_type { J721E_WIZ_10G, /* Also for J7200 SR1.0 */ AM64_WIZ_10G, J7200_WIZ_10G, /* J7200 SR2.0 */ + J784S4_WIZ_10G, + J721S2_WIZ_10G, }; struct wiz_data { @@ -992,6 +1000,8 @@ static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node) switch (wiz->type) { case AM64_WIZ_10G: case J7200_WIZ_10G: + case J784S4_WIZ_10G: + case J721S2_WIZ_10G: of_clk_del_provider(dev->of_node); return; default: @@ -1123,6 +1133,8 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node) switch (wiz->type) { case AM64_WIZ_10G: case J7200_WIZ_10G: + case J784S4_WIZ_10G: + case J721S2_WIZ_10G: ret = wiz_clock_register(wiz); if (ret) dev_err(dev, "Failed to register wiz clocks\n"); @@ -1205,6 +1217,7 @@ static int wiz_phy_fullrt_div(struct wiz *wiz, int lane) break; case J721E_WIZ_10G: case J7200_WIZ_10G: + case J721S2_WIZ_10G: if (wiz->lane_phy_type[lane] == PHY_TYPE_SGMII) return regmap_field_write(wiz->p0_fullrt_div[lane], 0x2); break; @@ -1299,6 +1312,25 @@ static struct wiz_data j7200_pg2_10g_data = { .clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G, }; +static struct wiz_data j784s4_10g_data = { + .type = J784S4_WIZ_10G, + .pll0_refclk_mux_sel = &pll0_refclk_mux_sel_2, + .pll1_refclk_mux_sel = &pll1_refclk_mux_sel_2, + .refclk_dig_sel = &refclk_dig_sel_16g, + .pma_cmn_refclk1_int_mode = &pma_cmn_refclk1_int_mode, + .clk_mux_sel = clk_mux_sel_10g_2_refclk, + .clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G, +}; + +static struct wiz_data j721s2_10g_data = { + .type = J721S2_WIZ_10G, + .pll0_refclk_mux_sel = &pll0_refclk_mux_sel, + .pll1_refclk_mux_sel = &pll1_refclk_mux_sel, + .refclk_dig_sel = &refclk_dig_sel_10g, + 
.clk_mux_sel = clk_mux_sel_10g, + .clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G, +}; + static const struct of_device_id wiz_id_table[] = { { .compatible = "ti,j721e-wiz-16g", .data = &j721e_16g_data, @@ -1312,6 +1344,12 @@ static const struct of_device_id wiz_id_table[] = { { .compatible = "ti,j7200-wiz-10g", .data = &j7200_pg2_10g_data, }, + { + .compatible = "ti,j784s4-wiz-10g", .data = &j784s4_10g_data, + }, + { + .compatible = "ti,j721s2-wiz-10g", .data = &j721s2_10g_data, + }, {} }; MODULE_DEVICE_TABLE(of, wiz_id_table); diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c index 59de4ce01faba4e4e28c597ba0ff1ed01d069305..001b0de95a46ef855a9698e2a8d80809e449c2b3 100644 --- a/drivers/platform/chrome/cros_ec_typec.c +++ b/drivers/platform/chrome/cros_ec_typec.c @@ -971,7 +971,7 @@ static void cros_typec_register_partner_pdos(struct cros_typec_data *typec, if (!resp->source_cap_count && !resp->sink_cap_count) return; - port->partner_pd = usb_power_delivery_register(NULL, &desc); + port->partner_pd = typec_partner_usb_power_delivery_register(port->partner, &desc); if (IS_ERR(port->partner_pd)) { dev_warn(typec->dev, "Failed to register partner PD device, port: %d\n", port_num); return; diff --git a/drivers/power/supply/88pm860x_charger.c b/drivers/power/supply/88pm860x_charger.c index f21ce52fbc0405f963f290a429f3c790607bd22b..2b9fcb7e71d79e0ce5f062d1c53db7948e4d7067 100644 --- a/drivers/power/supply/88pm860x_charger.c +++ b/drivers/power/supply/88pm860x_charger.c @@ -690,8 +690,7 @@ static int pm860x_charger_probe(struct platform_device *pdev) (chip->id == CHIP_PM8607) ? chip->companion : chip->client; if (!info->i2c_8606) { dev_err(&pdev->dev, "Missed I2C address of 88PM8606!\n"); - ret = -EINVAL; - goto out; + return -EINVAL; } info->dev = &pdev->dev; @@ -704,44 +703,26 @@ static int pm860x_charger_probe(struct platform_device *pdev) psy_cfg.drv_data = info; psy_cfg.supplied_to = pm860x_supplied_to; psy_cfg.num_supplicants = ARRAY_SIZE(pm860x_supplied_to); - info->usb = power_supply_register(&pdev->dev, &pm860x_charger_desc, - &psy_cfg); + info->usb = devm_power_supply_register(&pdev->dev, &pm860x_charger_desc, + &psy_cfg); if (IS_ERR(info->usb)) { - ret = PTR_ERR(info->usb); - goto out; + return PTR_ERR(info->usb); } pm860x_init_charger(info); for (i = 0; i < ARRAY_SIZE(info->irq); i++) { - ret = request_threaded_irq(info->irq[i], NULL, - pm860x_irq_descs[i].handler, - IRQF_ONESHOT, pm860x_irq_descs[i].name, info); + ret = devm_request_threaded_irq(&pdev->dev, info->irq[i], NULL, + pm860x_irq_descs[i].handler, + IRQF_ONESHOT, + pm860x_irq_descs[i].name, info); if (ret < 0) { dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n", info->irq[i], ret); - goto out_irq; + return ret; } } return 0; - -out_irq: - power_supply_unregister(info->usb); - while (--i >= 0) - free_irq(info->irq[i], info); -out: - return ret; -} - -static int pm860x_charger_remove(struct platform_device *pdev) -{ - struct pm860x_charger_info *info = platform_get_drvdata(pdev); - int i; - - power_supply_unregister(info->usb); - for (i = 0; i < info->irq_nums; i++) - free_irq(info->irq[i], info); - return 0; } static struct platform_driver pm860x_charger_driver = { @@ -749,7 +730,6 @@ static struct platform_driver pm860x_charger_driver = { .name = "88pm860x-charger", }, .probe = pm860x_charger_probe, - .remove = pm860x_charger_remove, }; module_platform_driver(pm860x_charger_driver); diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c index 
c19c50442761dc92cc7ed145eea355df0690b9fa..308e68545d44d4bc71a82c074ff0f1471b878770 100644 --- a/drivers/power/supply/ab8500_charger.c +++ b/drivers/power/supply/ab8500_charger.c @@ -1940,7 +1940,7 @@ static int ab8500_charger_get_ext_psy_data(struct device *dev, void *data) * * Due to a asic bug it is necessary to lower the input current to the vbus * charger when charging with at some specific levels. This issue is only valid - * for below a certain battery voltage. This function makes sure that the + * for below a certain battery voltage. This function makes sure that * the allowed current limit isn't exceeded. */ static void ab8500_charger_check_vbat_work(struct work_struct *work) @@ -3719,7 +3719,14 @@ static int __init ab8500_charger_init(void) if (ret) return ret; - return platform_driver_register(&ab8500_charger_driver); + ret = platform_driver_register(&ab8500_charger_driver); + if (ret) { + platform_unregister_drivers(ab8500_charger_component_drivers, + ARRAY_SIZE(ab8500_charger_component_drivers)); + return ret; + } + + return 0; } static void __exit ab8500_charger_exit(void) diff --git a/drivers/power/supply/adp5061.c b/drivers/power/supply/adp5061.c index fcf8ff0bc974b86c289580ed17a6d3230d042a98..840db629a46c34cdec1b8940065f1db34699aa3f 100644 --- a/drivers/power/supply/adp5061.c +++ b/drivers/power/supply/adp5061.c @@ -694,8 +694,7 @@ static const struct power_supply_desc adp5061_desc = { .num_properties = ARRAY_SIZE(adp5061_props), }; -static int adp5061_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int adp5061_probe(struct i2c_client *client) { struct power_supply_config psy_cfg = {}; struct adp5061_state *st; @@ -737,7 +736,7 @@ static struct i2c_driver adp5061_driver = { .driver = { .name = KBUILD_MODNAME, }, - .probe = adp5061_probe, + .probe_new = adp5061_probe, .id_table = adp5061_id, }; module_i2c_driver(adp5061_driver); diff --git a/drivers/power/supply/bd99954-charger.c b/drivers/power/supply/bd99954-charger.c index 96e93e1b80949cb344f99c18a578cfa57563d4cc..250362e15c98ba2c18e23f5dccf4f3e23a8fcc56 100644 --- a/drivers/power/supply/bd99954-charger.c +++ b/drivers/power/supply/bd99954-charger.c @@ -768,27 +768,13 @@ static const struct power_supply_desc bd9995x_power_supply_desc = { * Describe the setting in linear_range table. 
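(Editorial note, inferred from the replaced initializers: LINEAR_RANGE(min, min_sel, max_sel, step) packs the four struct fields positionally, and a selector s in [min_sel, max_sel] decodes to min + (s - min_sel) * step. The first table below, LINEAR_RANGE(0, 0x0, 0x1ff, 32000), is therefore identical to the removed initializer { .min = 0, .min_sel = 0x0, .max_sel = 0x1ff, .step = 32000 }; selector 0x10 decodes to 16 * 32000 = 512000 uA.)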
*/ static const struct linear_range input_current_limit_ranges[] = { - { - .min = 0, - .step = 32000, - .min_sel = 0x0, - .max_sel = 0x1ff, - }, + LINEAR_RANGE(0, 0x0, 0x1ff, 32000), }; /* Possible trickle, pre-charging and termination current values */ static const struct linear_range charging_current_ranges[] = { - { - .min = 0, - .step = 64000, - .min_sel = 0x0, - .max_sel = 0x10, - }, { - .min = 1024000, - .step = 0, - .min_sel = 0x11, - .max_sel = 0x1f, - }, + LINEAR_RANGE(0, 0x0, 0x10, 64000), + LINEAR_RANGE(1024000, 0x11, 0x1f, 0), }; /* @@ -796,72 +782,28 @@ static const struct linear_range charging_current_ranges[] = { * and battery over voltage protection have same possible values */ static const struct linear_range charge_voltage_regulation_ranges[] = { - { - .min = 2560000, - .step = 0, - .min_sel = 0, - .max_sel = 0xA0, - }, { - .min = 2560000, - .step = 16000, - .min_sel = 0xA0, - .max_sel = 0x4B0, - }, { - .min = 19200000, - .step = 0, - .min_sel = 0x4B0, - .max_sel = 0x7FF, - }, + LINEAR_RANGE(2560000, 0, 0xA0, 0), + LINEAR_RANGE(2560000, 0xA0, 0x4B0, 16000), + LINEAR_RANGE(19200000, 0x4B0, 0x7FF, 0), }; /* Possible VSYS voltage regulation values */ static const struct linear_range vsys_voltage_regulation_ranges[] = { - { - .min = 2560000, - .step = 0, - .min_sel = 0, - .max_sel = 0x28, - }, { - .min = 2560000, - .step = 64000, - .min_sel = 0x28, - .max_sel = 0x12C, - }, { - .min = 19200000, - .step = 0, - .min_sel = 0x12C, - .max_sel = 0x1FF, - }, + LINEAR_RANGE(2560000, 0, 0x28, 0), + LINEAR_RANGE(2560000, 0x28, 0x12C, 64000), + LINEAR_RANGE(19200000, 0x12C, 0x1FF, 0), }; /* Possible settings for switching from trickle to pre-charging limits */ static const struct linear_range trickle_to_pre_threshold_ranges[] = { - { - .min = 2048000, - .step = 0, - .min_sel = 0, - .max_sel = 0x20, - }, { - .min = 2048000, - .step = 64000, - .min_sel = 0x20, - .max_sel = 0x12C, - }, { - .min = 19200000, - .step = 0, - .min_sel = 0x12C, - .max_sel = 0x1FF - } + LINEAR_RANGE(2048000, 0, 0x20, 0), + LINEAR_RANGE(2048000, 0x20, 0x12C, 64000), + LINEAR_RANGE(19200000, 0x12C, 0x1FF, 0), }; /* Possible current values for fast-charging constant current phase */ static const struct linear_range fast_charge_current_ranges[] = { - { - .min = 0, - .step = 64000, - .min_sel = 0, - .max_sel = 0xFF, - } + LINEAR_RANGE(0, 0, 0xFF, 64000), }; struct battery_init { diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c index 6b99e1c675b87cacdcff64104c83eec077872076..d2cb7431dced00ae48fd7a3b540a149cb33c34d8 100644 --- a/drivers/power/supply/bq2415x_charger.c +++ b/drivers/power/supply/bq2415x_charger.c @@ -1520,9 +1520,9 @@ static int bq2415x_power_supply_init(struct bq2415x_device *bq) } /* main bq2415x probe function */ -static int bq2415x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bq2415x_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); int ret; int num; char *name = NULL; @@ -1780,7 +1780,7 @@ static struct i2c_driver bq2415x_driver = { .of_match_table = of_match_ptr(bq2415x_of_match_table), .acpi_match_table = ACPI_PTR(bq2415x_i2c_acpi_match), }, - .probe = bq2415x_probe, + .probe_new = bq2415x_probe, .remove = bq2415x_remove, .id_table = bq2415x_i2c_id_table, }; diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index 2274679c5ddd215eac319a13cc982c6a7c531c76..2b2c3a4391c19bce97d4c4153d111576d5f50ace 100644 --- 
a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -1767,9 +1767,9 @@ static int bq24190_get_config(struct bq24190_dev_info *bdi) return 0; } -static int bq24190_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bq24190_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct i2c_adapter *adapter = client->adapter; struct device *dev = &client->dev; struct power_supply_config charger_cfg = {}, battery_cfg = {}; @@ -2032,7 +2032,7 @@ static const struct of_device_id bq24190_of_match[] = { MODULE_DEVICE_TABLE(of, bq24190_of_match); static struct i2c_driver bq24190_driver = { - .probe = bq24190_probe, + .probe_new = bq24190_probe, .remove = bq24190_remove, .shutdown = bq24190_shutdown, .id_table = bq24190_i2c_ids, diff --git a/drivers/power/supply/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c index a309bbedfe521956d63d33b3f9dc174b04d9547a..ab4c49788c586224fa58708fad2d19bdf2752db9 100644 --- a/drivers/power/supply/bq24257_charger.c +++ b/drivers/power/supply/bq24257_charger.c @@ -947,9 +947,9 @@ static int bq24257_fw_probe(struct bq24257_device *bq) return 0; } -static int bq24257_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bq24257_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct i2c_adapter *adapter = client->adapter; struct device *dev = &client->dev; const struct acpi_device_id *acpi_id; @@ -1167,7 +1167,7 @@ static struct i2c_driver bq24257_driver = { .acpi_match_table = ACPI_PTR(bq24257_acpi_match), .pm = &bq24257_pm, }, - .probe = bq24257_probe, + .probe_new = bq24257_probe, .remove = bq24257_remove, .id_table = bq24257_i2c_ids, }; diff --git a/drivers/power/supply/bq24735-charger.c b/drivers/power/supply/bq24735-charger.c index 3ce36d09c017cf1fb6ff118f36d46723431d0cae..cfca3a82d5a8d0d0bddf4a277f8b81ab0eb9a494 100644 --- a/drivers/power/supply/bq24735-charger.c +++ b/drivers/power/supply/bq24735-charger.c @@ -352,8 +352,7 @@ static struct bq24735_platform *bq24735_parse_dt_data(struct i2c_client *client) return pdata; } -static int bq24735_charger_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bq24735_charger_probe(struct i2c_client *client) { int ret; struct bq24735 *charger; @@ -506,7 +505,7 @@ static struct i2c_driver bq24735_charger_driver = { .name = "bq24735-charger", .of_match_table = bq24735_match_ids, }, - .probe = bq24735_charger_probe, + .probe_new = bq24735_charger_probe, .id_table = bq24735_charger_id, }; diff --git a/drivers/power/supply/bq2515x_charger.c b/drivers/power/supply/bq2515x_charger.c index 4f76ad9c2f18e032bf6cb84844fc9fb3b2f50be2..da224ae8dc6155e6e1b97bbd5e1cd5f1e6864efa 100644 --- a/drivers/power/supply/bq2515x_charger.c +++ b/drivers/power/supply/bq2515x_charger.c @@ -1078,9 +1078,9 @@ static const struct regmap_config bq25155_regmap_config = { .volatile_reg = bq2515x_volatile_register, }; -static int bq2515x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bq2515x_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; struct bq2515x_device *bq2515x; struct power_supply_config charger_cfg = {}; @@ -1158,7 +1158,7 @@ static struct i2c_driver bq2515x_driver = { .name = "bq2515x-charger", .of_match_table = bq2515x_of_match, }, - .probe = bq2515x_probe, + .probe_new = bq2515x_probe, .id_table = 
bq2515x_i2c_ids, }; module_i2c_driver(bq2515x_driver); diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c index 01ad84fd147c8ab131a2f36a130f81c90bcdca40..db13e288e439a8aa421a47b5800db5e876938b24 100644 --- a/drivers/power/supply/bq256xx_charger.c +++ b/drivers/power/supply/bq256xx_charger.c @@ -1619,9 +1619,9 @@ static int bq256xx_parse_dt(struct bq256xx_device *bq, return 0; } -static int bq256xx_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bq256xx_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; struct bq256xx_device *bq; struct power_supply_config psy_cfg = { }; @@ -1744,7 +1744,7 @@ static struct i2c_driver bq256xx_driver = { .of_match_table = bq256xx_of_match, .acpi_match_table = bq256xx_acpi_match, }, - .probe = bq256xx_probe, + .probe_new = bq256xx_probe, .id_table = bq256xx_i2c_ids, }; module_i2c_driver(bq256xx_driver); diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 6020b58c641d237de3cbc4da18d61bd64a353efd..2d731ea58323b54cfcbe6b239fe642375bb585f1 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -529,8 +529,53 @@ static int bq25890_power_supply_get_property(struct power_supply *psy, val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE; break; - case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: - val->intval = bq25890_find_val(bq->init_data.ichg, TBL_ICHG); + case POWER_SUPPLY_PROP_PRECHARGE_CURRENT: + val->intval = bq25890_find_val(bq->init_data.iprechg, TBL_ITERM); + break; + + case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: + val->intval = bq25890_find_val(bq->init_data.iterm, TBL_ITERM); + break; + + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + ret = bq25890_field_read(bq, F_IINLIM); + if (ret < 0) + return ret; + + val->intval = bq25890_find_val(ret, TBL_IINLIM); + break; + + case POWER_SUPPLY_PROP_CURRENT_NOW: /* I_BAT now */ + /* + * This is ADC-sampled immediate charge current supplied + * from charger to battery. The property name is confusing, + * for clarification refer to: + * Documentation/ABI/testing/sysfs-class-power + * /sys/class/power_supply//current_now + */ + ret = bq25890_field_read(bq, F_ICHGR); /* read measured value */ + if (ret < 0) + return ret; + + /* converted_val = ADC_val * 50mA (table 10.3.19) */ + val->intval = ret * -50000; + break; + + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: /* I_BAT user limit */ + /* + * This is user-configured constant charge current supplied + * from charger to battery in first phase of charging, when + * battery voltage is below constant charge voltage. + * + * This value reflects the current hardware setting. + * + * The POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX is the + * maximum value of this property. 
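All of the power-supply conversions above follow the same shape: the two-argument i2c probe becomes a single-argument callback wired up through .probe_new, and drivers that still need the matched ID recover it with i2c_client_get_device_id(). A minimal sketch of the pattern, using a hypothetical "foo" driver rather than any of the drivers in this series (foo_init() and foo_i2c_ids are illustrative names):

/* old: two arguments, registered through .probe */
static int foo_probe_legacy(struct i2c_client *client,
			    const struct i2c_device_id *id)
{
	unsigned long variant = id->driver_data;

	return foo_init(client, variant);	/* foo_init(): illustrative */
}

/* new: one argument, registered through .probe_new; the matched ID,
 * where still needed, is looked up from the client. It can be NULL
 * when the device matched through an OF or ACPI table instead.
 */
static int foo_probe(struct i2c_client *client)
{
	const struct i2c_device_id *id = i2c_client_get_device_id(client);
	unsigned long variant = id ? id->driver_data : 0;

	return foo_init(client, variant);
}

static struct i2c_driver foo_driver = {
	.driver		= { .name = "foo" },
	.probe_new	= foo_probe,
	.id_table	= foo_i2c_ids,		/* illustrative table */
};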
+ */ + ret = bq25890_field_read(bq, F_ICHG); + if (ret < 0) + return ret; + val->intval = bq25890_find_val(ret, TBL_ICHG); /* When temperature is too low, charge current is decreased */ if (bq->state.ntc_fault == NTC_FAULT_COOL) { @@ -545,12 +590,25 @@ static int bq25890_power_supply_get_property(struct power_supply *psy, } break; - case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: - if (!state.online) { - val->intval = 0; - break; - } + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: /* I_BAT max */ + /* + * This is maximum allowed constant charge current supplied + * from charger to battery in first phase of charging, when + * battery voltage is below constant charge voltage. + * + * This value is constant for each battery and set from DT. + */ + val->intval = bq25890_find_val(bq->init_data.ichg, TBL_ICHG); + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: /* V_BAT now */ + /* + * This is ADC-sampled immediate charge voltage supplied + * from charger to battery. The property name is confusing, + * for clarification refer to: + * Documentation/ABI/testing/sysfs-class-power + * /sys/class/power_supply//voltage_now + */ ret = bq25890_field_read(bq, F_BATV); /* read measured value */ if (ret < 0) return ret; @@ -559,42 +617,33 @@ static int bq25890_power_supply_get_property(struct power_supply *psy, val->intval = 2304000 + ret * 20000; break; - case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX: - val->intval = bq25890_find_val(bq->init_data.vreg, TBL_VREG); - break; - - case POWER_SUPPLY_PROP_PRECHARGE_CURRENT: - val->intval = bq25890_find_val(bq->init_data.iprechg, TBL_ITERM); - break; - - case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: - val->intval = bq25890_find_val(bq->init_data.iterm, TBL_ITERM); - break; - - case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: - ret = bq25890_field_read(bq, F_IINLIM); - if (ret < 0) - return ret; - - val->intval = bq25890_find_val(ret, TBL_IINLIM); - break; - - case POWER_SUPPLY_PROP_VOLTAGE_NOW: - ret = bq25890_field_read(bq, F_SYSV); /* read measured value */ + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: /* V_BAT user limit */ + /* + * This is user-configured constant charge voltage supplied + * from charger to battery in second phase of charging, when + * battery voltage reached constant charge voltage. + * + * This value reflects the current hardware setting. + * + * The POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX is the + * maximum value of this property. + */ + ret = bq25890_field_read(bq, F_VREG); if (ret < 0) return ret; - /* converted_val = 2.304V + ADC_val * 20mV (table 10.3.15) */ - val->intval = 2304000 + ret * 20000; + val->intval = bq25890_find_val(ret, TBL_VREG); break; - case POWER_SUPPLY_PROP_CURRENT_NOW: - ret = bq25890_field_read(bq, F_ICHGR); /* read measured value */ - if (ret < 0) - return ret; - - /* converted_val = ADC_val * 50mA (table 10.3.19) */ - val->intval = ret * -50000; + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX: /* V_BAT max */ + /* + * This is maximum allowed constant charge voltage supplied + * from charger to battery in second phase of charging, when + * battery voltage reached constant charge voltage. + * + * This value is constant for each battery and set from DT. 
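The writeable-property hunk that follows clamps a requested value to the DT-provided maximum with min(val->intval, maxval) before converting it to a register index via bq25890_find_idx(). A simplified stand-in for that clamp-then-index step over a linear table; the offset, step and ceiling below are made up for illustration, not the bq25890's real range:

#define EX_ICHG_MIN_uA		0	/* hypothetical range start */
#define EX_ICHG_STEP_uA		64000	/* hypothetical step size */
#define EX_ICHG_MAX_IDX		0x4F	/* hypothetical top index */

static u8 ex_ichg_ua_to_idx(int req_ua, int max_ua)
{
	unsigned int idx;

	if (req_ua > max_ua)	/* never exceed the battery's DT limit */
		req_ua = max_ua;
	if (req_ua < EX_ICHG_MIN_uA)
		req_ua = EX_ICHG_MIN_uA;

	idx = (req_ua - EX_ICHG_MIN_uA) / EX_ICHG_STEP_uA;

	return idx > EX_ICHG_MAX_IDX ? EX_ICHG_MAX_IDX : idx;
}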
+ */ + val->intval = bq25890_find_val(bq->init_data.vreg, TBL_VREG); break; case POWER_SUPPLY_PROP_TEMP: @@ -618,9 +667,18 @@ static int bq25890_power_supply_set_property(struct power_supply *psy, const union power_supply_propval *val) { struct bq25890_device *bq = power_supply_get_drvdata(psy); + int maxval; u8 lval; switch (psp) { + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: + maxval = bq25890_find_val(bq->init_data.ichg, TBL_ICHG); + lval = bq25890_find_idx(min(val->intval, maxval), TBL_ICHG); + return bq25890_field_write(bq, F_ICHG, lval); + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: + maxval = bq25890_find_val(bq->init_data.vreg, TBL_VREG); + lval = bq25890_find_idx(min(val->intval, maxval), TBL_VREG); + return bq25890_field_write(bq, F_VREG, lval); case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: lval = bq25890_find_idx(val->intval, TBL_IINLIM); return bq25890_field_write(bq, F_IINLIM, lval); @@ -633,6 +691,8 @@ static int bq25890_power_supply_property_is_writeable(struct power_supply *psy, enum power_supply_property psp) { switch (psp) { + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: return true; default: @@ -880,6 +940,7 @@ static const enum power_supply_property bq25890_power_supply_props[] = { POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, @@ -1034,10 +1095,32 @@ static int bq25890_vbus_is_enabled(struct regulator_dev *rdev) return bq25890_field_read(bq, F_OTG_CFG); } +static int bq25890_vbus_get_voltage(struct regulator_dev *rdev) +{ + struct bq25890_device *bq = rdev_get_drvdata(rdev); + + return bq25890_get_vbus_voltage(bq); +} + +static int bq25890_vsys_get_voltage(struct regulator_dev *rdev) +{ + struct bq25890_device *bq = rdev_get_drvdata(rdev); + int ret; + + /* Should be some output voltage ? 
*/ + ret = bq25890_field_read(bq, F_SYSV); /* read measured value */ + if (ret < 0) + return ret; + + /* converted_val = 2.304V + ADC_val * 20mV (table 10.3.15) */ + return 2304000 + ret * 20000; +} + static const struct regulator_ops bq25890_vbus_ops = { .enable = bq25890_vbus_enable, .disable = bq25890_vbus_disable, .is_enabled = bq25890_vbus_is_enabled, + .get_voltage = bq25890_vbus_get_voltage, }; static const struct regulator_desc bq25890_vbus_desc = { @@ -1046,9 +1129,54 @@ static const struct regulator_desc bq25890_vbus_desc = { .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .ops = &bq25890_vbus_ops, - .fixed_uV = 5000000, - .n_voltages = 1, }; + +static const struct regulator_ops bq25890_vsys_ops = { + .get_voltage = bq25890_vsys_get_voltage, +}; + +static const struct regulator_desc bq25890_vsys_desc = { + .name = "vsys", + .of_match = "vsys", + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .ops = &bq25890_vsys_ops, +}; + +static int bq25890_register_regulator(struct bq25890_device *bq) +{ + struct bq25890_platform_data *pdata = dev_get_platdata(bq->dev); + struct regulator_config cfg = { + .dev = bq->dev, + .driver_data = bq, + }; + struct regulator_dev *reg; + + if (pdata) + cfg.init_data = pdata->regulator_init_data; + + reg = devm_regulator_register(bq->dev, &bq25890_vbus_desc, &cfg); + if (IS_ERR(reg)) { + return dev_err_probe(bq->dev, PTR_ERR(reg), + "registering vbus regulator"); + } + + /* pdata->regulator_init_data is for vbus only */ + cfg.init_data = NULL; + reg = devm_regulator_register(bq->dev, &bq25890_vsys_desc, &cfg); + if (IS_ERR(reg)) { + return dev_err_probe(bq->dev, PTR_ERR(reg), + "registering vsys regulator"); + } + + return 0; +} +#else +static inline int +bq25890_register_regulator(struct bq25890_device *bq) +{ + return 0; +} #endif static int bq25890_get_chip_version(struct bq25890_device *bq) @@ -1189,8 +1317,14 @@ static int bq25890_fw_probe(struct bq25890_device *bq) return 0; } -static int bq25890_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static void bq25890_non_devm_cleanup(void *data) +{ + struct bq25890_device *bq = data; + + cancel_delayed_work_sync(&bq->pump_express_work); +} + +static int bq25890_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct bq25890_device *bq; @@ -1244,56 +1378,47 @@ static int bq25890_probe(struct i2c_client *client, /* OTG reporting */ bq->usb_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); - if (!IS_ERR_OR_NULL(bq->usb_phy)) { - INIT_WORK(&bq->usb_work, bq25890_usb_work); - bq->usb_nb.notifier_call = bq25890_usb_notifier; - usb_register_notifier(bq->usb_phy, &bq->usb_nb); - } -#ifdef CONFIG_REGULATOR - else { - struct bq25890_platform_data *pdata = dev_get_platdata(dev); - struct regulator_config cfg = { }; - struct regulator_dev *reg; - - cfg.dev = dev; - cfg.driver_data = bq; - if (pdata) - cfg.init_data = pdata->regulator_init_data; - - reg = devm_regulator_register(dev, &bq25890_vbus_desc, &cfg); - if (IS_ERR(reg)) - return dev_err_probe(dev, PTR_ERR(reg), "registering regulator"); - } -#endif + + /* + * This must be before bq25890_power_supply_init(), so that it runs + * after devm unregisters the power_supply. 
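The ordering constraint in the comment above relies on devm actions running in reverse order of registration on unbind: registering the cancel action first guarantees the delayed work is cancelled only after devm has unregistered the power supply, so nothing can requeue it afterwards. A condensed sketch of the probe tail, with the wrapper names invented for illustration while the callees are the driver's own:

static void foo_cancel_work(void *data)
{
	struct bq25890_device *bq = data;

	/* Registered first, so devm runs it last: by the time this
	 * executes, the power supply registered below is already gone
	 * and nothing can requeue the delayed work.
	 */
	cancel_delayed_work_sync(&bq->pump_express_work);
}

static int foo_probe_tail(struct device *dev, struct bq25890_device *bq)
{
	int ret;

	ret = devm_add_action_or_reset(dev, foo_cancel_work, bq);
	if (ret)
		return ret;

	return bq25890_power_supply_init(bq);	/* devm-unwound first */
}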
+ */ + ret = devm_add_action_or_reset(dev, bq25890_non_devm_cleanup, bq); + if (ret) + return ret; + + ret = bq25890_register_regulator(bq); + if (ret) + return ret; ret = bq25890_power_supply_init(bq); - if (ret < 0) { - dev_err(dev, "Failed to register power supply\n"); - goto err_unregister_usb_notifier; - } + if (ret < 0) + return dev_err_probe(dev, ret, "registering power supply\n"); ret = devm_request_threaded_irq(dev, client->irq, NULL, bq25890_irq_handler_thread, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, BQ25890_IRQ_PIN, bq); if (ret) - goto err_unregister_usb_notifier; - - return 0; + return ret; -err_unregister_usb_notifier: - if (!IS_ERR_OR_NULL(bq->usb_phy)) - usb_unregister_notifier(bq->usb_phy, &bq->usb_nb); + if (!IS_ERR_OR_NULL(bq->usb_phy)) { + INIT_WORK(&bq->usb_work, bq25890_usb_work); + bq->usb_nb.notifier_call = bq25890_usb_notifier; + usb_register_notifier(bq->usb_phy, &bq->usb_nb); + } - return ret; + return 0; } static void bq25890_remove(struct i2c_client *client) { struct bq25890_device *bq = i2c_get_clientdata(client); - if (!IS_ERR_OR_NULL(bq->usb_phy)) + if (!IS_ERR_OR_NULL(bq->usb_phy)) { usb_unregister_notifier(bq->usb_phy, &bq->usb_nb); + cancel_work_sync(&bq->usb_work); + } if (!bq->skip_reset) { /* reset all registers to default values */ @@ -1400,7 +1525,7 @@ static struct i2c_driver bq25890_driver = { .acpi_match_table = ACPI_PTR(bq25890_acpi_match), .pm = &bq25890_pm, }, - .probe = bq25890_probe, + .probe_new = bq25890_probe, .remove = bq25890_remove, .shutdown = bq25890_shutdown, .id_table = bq25890_i2c_ids, diff --git a/drivers/power/supply/bq25980_charger.c b/drivers/power/supply/bq25980_charger.c index 9339f5649282786fbf3628b7fbb16ab6ebbbc5f6..a59d9762bc91684aa73fa3bd49d40bb5c7fa0eb1 100644 --- a/drivers/power/supply/bq25980_charger.c +++ b/drivers/power/supply/bq25980_charger.c @@ -1207,9 +1207,9 @@ static int bq25980_parse_dt(struct bq25980_device *bq) return 0; } -static int bq25980_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bq25980_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; struct bq25980_device *bq; int ret; @@ -1287,7 +1287,7 @@ static struct i2c_driver bq25980_driver = { .name = "bq25980-charger", .of_match_table = bq25980_of_match, }, - .probe = bq25980_probe, + .probe_new = bq25980_probe, .id_table = bq25980_i2c_ids, }; module_i2c_driver(bq25980_driver); diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c index 94b00bb89c177fe80df9c7a8465a10419192e744..f8768997333bc59412d040cee7427efad086a7f2 100644 --- a/drivers/power/supply/bq27xxx_battery_i2c.c +++ b/drivers/power/supply/bq27xxx_battery_i2c.c @@ -136,9 +136,9 @@ static int bq27xxx_battery_i2c_bulk_write(struct bq27xxx_device_info *di, return 0; } -static int bq27xxx_battery_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int bq27xxx_battery_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct bq27xxx_device_info *di; int ret; char *name; @@ -295,7 +295,7 @@ static struct i2c_driver bq27xxx_battery_i2c_driver = { .name = "bq27xxx-battery", .of_match_table = of_match_ptr(bq27xxx_battery_i2c_of_match_table), }, - .probe = bq27xxx_battery_i2c_probe, + .probe_new = bq27xxx_battery_i2c_probe, .remove = bq27xxx_battery_i2c_remove, .id_table = bq27xxx_i2c_id_table, }; diff --git a/drivers/power/supply/cw2015_battery.c 
b/drivers/power/supply/cw2015_battery.c index 6d52641151d9ac4a1eb9dcade7ce1f1e32eb0d08..473522b4326adc5f84aebfbf8962e2b7e2ef1089 100644 --- a/drivers/power/supply/cw2015_battery.c +++ b/drivers/power/supply/cw2015_battery.c @@ -699,6 +699,9 @@ static int cw_bat_probe(struct i2c_client *client) } cw_bat->battery_workqueue = create_singlethread_workqueue("rk_battery"); + if (!cw_bat->battery_workqueue) + return -ENOMEM; + devm_delayed_work_autocancel(&client->dev, &cw_bat->battery_delay_work, cw_bat_work); queue_delayed_work(cw_bat->battery_workqueue, diff --git a/drivers/power/supply/ds2782_battery.c b/drivers/power/supply/ds2782_battery.c index d78cd05402f66cafe86fd6b5b6cd9e05ac614385..9b9619246902c66e5541302b19e7693a558155c6 100644 --- a/drivers/power/supply/ds2782_battery.c +++ b/drivers/power/supply/ds2782_battery.c @@ -368,9 +368,9 @@ static const struct ds278x_battery_ops ds278x_ops[] = { } }; -static int ds278x_battery_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ds278x_battery_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct ds278x_platform_data *pdata = client->dev.platform_data; struct power_supply_config psy_cfg = {}; struct ds278x_info *info; @@ -458,7 +458,7 @@ static struct i2c_driver ds278x_battery_driver = { .name = "ds2782-battery", .pm = &ds278x_battery_pm_ops, }, - .probe = ds278x_battery_probe, + .probe_new = ds278x_battery_probe, .remove = ds278x_battery_remove, .id_table = ds278x_id, }; diff --git a/drivers/power/supply/lp8727_charger.c b/drivers/power/supply/lp8727_charger.c index 384a374b52c1955d35a0e24d6206e93e66408ff0..e6c21377d53c2e576af1404b36a5a0d7e074e3d3 100644 --- a/drivers/power/supply/lp8727_charger.c +++ b/drivers/power/supply/lp8727_charger.c @@ -540,7 +540,7 @@ static struct lp8727_platform_data *lp8727_parse_dt(struct device *dev) } #endif -static int lp8727_probe(struct i2c_client *cl, const struct i2c_device_id *id) +static int lp8727_probe(struct i2c_client *cl) { struct lp8727_chg *pchg; struct lp8727_platform_data *pdata; @@ -615,7 +615,7 @@ static struct i2c_driver lp8727_driver = { .name = "lp8727", .of_match_table = of_match_ptr(lp8727_dt_ids), }, - .probe = lp8727_probe, + .probe_new = lp8727_probe, .remove = lp8727_remove, .id_table = lp8727_ids, }; diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c index 56c57529c2287ffc09d58a5c320fdb642e9fe29a..f5f47a0aa1e3d5c7a22f83e5c8b7439b7b9c6185 100644 --- a/drivers/power/supply/lp8788-charger.c +++ b/drivers/power/supply/lp8788-charger.c @@ -520,7 +520,7 @@ err_free_irq: static int lp8788_irq_register(struct platform_device *pdev, struct lp8788_charger *pchg) { - const char *name[] = { + static const char * const name[] = { LP8788_CHG_IRQ, LP8788_PRSW_IRQ, LP8788_BATT_IRQ }; int i; diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c index 657305214d68dcc150b1eec03c1a47ecd298c13d..d3fb42825983939fdd9335a6f0fc0174f43662a4 100644 --- a/drivers/power/supply/ltc2941-battery-gauge.c +++ b/drivers/power/supply/ltc2941-battery-gauge.c @@ -439,8 +439,7 @@ static enum power_supply_property ltc294x_properties[] = { POWER_SUPPLY_PROP_CURRENT_NOW, }; -static int ltc294x_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ltc294x_i2c_probe(struct i2c_client *client) { struct power_supply_config psy_cfg = {}; struct ltc294x_info *info; @@ -636,7 +635,7 @@ static struct i2c_driver ltc294x_driver = { 
.of_match_table = ltc294x_i2c_of_match, .pm = LTC294X_PM_OPS, }, - .probe = ltc294x_i2c_probe, + .probe_new = ltc294x_i2c_probe, .shutdown = ltc294x_i2c_shutdown, .id_table = ltc294x_i2c_id, }; diff --git a/drivers/power/supply/ltc4162-l-charger.c b/drivers/power/supply/ltc4162-l-charger.c index 1a5cb4405ee3ca76e29a4e316473309cf2200071..db2bb523357052e0f367d5b64615fba578165e7b 100644 --- a/drivers/power/supply/ltc4162-l-charger.c +++ b/drivers/power/supply/ltc4162-l-charger.c @@ -819,8 +819,7 @@ static void ltc4162l_clear_interrupts(struct ltc4162l_info *info) regmap_write(info->regmap, LTC4162L_CHARGE_STATUS_ALERTS_REG, 0); } -static int ltc4162l_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ltc4162l_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; struct device *dev = &client->dev; @@ -916,7 +915,7 @@ static const struct of_device_id ltc4162l_of_match[] = { MODULE_DEVICE_TABLE(of, ltc4162l_of_match); static struct i2c_driver ltc4162l_driver = { - .probe = ltc4162l_probe, + .probe_new = ltc4162l_probe, .alert = ltc4162l_alert, .id_table = ltc4162l_i2c_id_table, .driver = { diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c index fc36828895bf66996eb76a9b39fd2734c111e5f2..0d0180fcfa63bb8918432bd10df331df2e8d1070 100644 --- a/drivers/power/supply/max14656_charger_detector.c +++ b/drivers/power/supply/max14656_charger_detector.c @@ -234,8 +234,7 @@ static enum power_supply_property max14656_battery_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, }; -static int max14656_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max14656_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; struct device *dev = &client->dev; @@ -317,7 +316,7 @@ static struct i2c_driver max14656_i2c_driver = { .name = "max14656", .of_match_table = max14656_match_table, }, - .probe = max14656_probe, + .probe_new = max14656_probe, .id_table = max14656_id, }; module_i2c_driver(max14656_i2c_driver); diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c index a9aef1e8b186e956cdd17e36c81172f44213d7c1..d1075959dd466b402730e0e16e04d9662fb65631 100644 --- a/drivers/power/supply/max17040_battery.c +++ b/drivers/power/supply/max17040_battery.c @@ -430,9 +430,9 @@ static const struct power_supply_desc max17040_battery_desc = { .num_properties = ARRAY_SIZE(max17040_battery_props), }; -static int max17040_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max17040_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct i2c_adapter *adapter = client->adapter; struct power_supply_config psy_cfg = {}; struct max17040_chip *chip; @@ -599,7 +599,7 @@ static struct i2c_driver max17040_i2c_driver = { .of_match_table = max17040_of_match, .pm = MAX17040_PM_OPS, }, - .probe = max17040_probe, + .probe_new = max17040_probe, .id_table = max17040_id, }; module_i2c_driver(max17040_i2c_driver); diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c index ab031bbfbe785b8ebec1ea48a264c8a6e569012e..89cabe8ed3b06edc9b3c755ec03f43ea12a5ee8c 100644 --- a/drivers/power/supply/max17042_battery.c +++ b/drivers/power/supply/max17042_battery.c @@ -1031,9 +1031,9 @@ static const struct power_supply_desc max17042_no_current_sense_psy_desc = { .num_properties = ARRAY_SIZE(max17042_battery_props) - 2, }; -static int 
max17042_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max17042_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct i2c_adapter *adapter = client->adapter; const struct power_supply_desc *max17042_desc = &max17042_psy_desc; struct power_supply_config psy_cfg = {}; @@ -1220,7 +1220,7 @@ static struct i2c_driver max17042_i2c_driver = { .of_match_table = of_match_ptr(max17042_dt_match), .pm = &max17042_pm_ops, }, - .probe = max17042_probe, + .probe_new = max17042_probe, .id_table = max17042_id, }; module_i2c_driver(max17042_i2c_driver); diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c index 3abaa72e06683c2a2eba447695f2d026aea6b04a..92e48e3a485364dd154e8489398488494215874c 100644 --- a/drivers/power/supply/mt6360_charger.c +++ b/drivers/power/supply/mt6360_charger.c @@ -113,16 +113,13 @@ enum { MT6360_RANGE_MAX, }; -#define MT6360_LINEAR_RANGE(idx, _min, _min_sel, _max_sel, _step) \ - [idx] = REGULATOR_LINEAR_RANGE(_min, _min_sel, _max_sel, _step) - static const struct linear_range mt6360_chg_range[MT6360_RANGE_MAX] = { - MT6360_LINEAR_RANGE(MT6360_RANGE_VMIVR, 3900000, 0, 0x5F, 100000), - MT6360_LINEAR_RANGE(MT6360_RANGE_ICHG, 100000, 0, 0x31, 100000), - MT6360_LINEAR_RANGE(MT6360_RANGE_VOREG, 3900000, 0, 0x51, 10000), - MT6360_LINEAR_RANGE(MT6360_RANGE_AICR, 100000, 0, 0x3F, 50000), - MT6360_LINEAR_RANGE(MT6360_RANGE_IPREC, 100000, 0, 0x0F, 50000), - MT6360_LINEAR_RANGE(MT6360_RANGE_IEOC, 100000, 0, 0x0F, 50000), + LINEAR_RANGE_IDX(MT6360_RANGE_VMIVR, 3900000, 0, 0x5F, 100000), + LINEAR_RANGE_IDX(MT6360_RANGE_ICHG, 100000, 0, 0x31, 100000), + LINEAR_RANGE_IDX(MT6360_RANGE_VOREG, 3900000, 0, 0x51, 10000), + LINEAR_RANGE_IDX(MT6360_RANGE_AICR, 100000, 0, 0x3F, 50000), + LINEAR_RANGE_IDX(MT6360_RANGE_IPREC, 100000, 0, 0x0F, 50000), + LINEAR_RANGE_IDX(MT6360_RANGE_IEOC, 100000, 0, 0x0F, 50000), }; struct mt6360_chg_info { diff --git a/drivers/power/supply/power_supply.h b/drivers/power/supply/power_supply.h index c310d4f36c105713c3ccb9e5c57a0f074de74973..645eee4d6b6aebb709ce327bda6d5b6541bff26d 100644 --- a/drivers/power/supply/power_supply.h +++ b/drivers/power/supply/power_supply.h @@ -16,7 +16,7 @@ struct power_supply; #ifdef CONFIG_SYSFS extern void power_supply_init_attrs(struct device_type *dev_type); -extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env); +extern int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env); #else diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 4b5fb172fa994baa499d63a34e6f7734ca6f0484..7c790c41e2fe31a7b2ff9807c5b0dec5a685b5e4 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -750,6 +750,11 @@ int power_supply_get_battery_info(struct power_supply *psy, int i, tab_len, size; propname = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d", index); + if (!propname) { + power_supply_put_battery_info(psy, info); + err = -ENOMEM; + goto out_put_node; + } list = of_get_property(battery_np, propname, &size); if (!list || !size) { dev_err(&psy->dev, "failed to get %s\n", propname); @@ -870,7 +875,6 @@ EXPORT_SYMBOL_GPL(power_supply_temp2resist_simple); * power_supply_vbat2ri() - find the battery internal resistance * from the battery voltage * @info: The battery information container - * @table: Pointer to battery resistance temperature table * @vbat_uv: The battery voltage in microvolt * 
@charging: If we are charging (true) or not (false) * @@ -1387,8 +1391,8 @@ create_triggers_failed: register_cooler_failed: psy_unregister_thermal(psy); register_thermal_failed: - device_del(dev); wakeup_init_failed: + device_del(dev); device_add_failed: check_supplies_failed: dev_set_name_failed: diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 5369abaceb5ccb46072502fbce7a12d8ed5aa250..6ca7d3985a40685133987b9c12fcdd9ffc12e705 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -427,7 +427,7 @@ void power_supply_init_attrs(struct device_type *dev_type) } } -static int add_prop_uevent(struct device *dev, struct kobj_uevent_env *env, +static int add_prop_uevent(const struct device *dev, struct kobj_uevent_env *env, enum power_supply_property prop, char *prop_buf) { int ret = 0; @@ -438,7 +438,7 @@ static int add_prop_uevent(struct device *dev, struct kobj_uevent_env *env, pwr_attr = &power_supply_attrs[prop]; dev_attr = &pwr_attr->dev_attr; - ret = power_supply_show_property(dev, dev_attr, prop_buf); + ret = power_supply_show_property((struct device *)dev, dev_attr, prop_buf); if (ret == -ENODEV || ret == -ENODATA) { /* * When a battery is absent, we expect -ENODEV. Don't abort; @@ -458,9 +458,9 @@ static int add_prop_uevent(struct device *dev, struct kobj_uevent_env *env, pwr_attr->prop_name, prop_buf); } -int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) +int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct power_supply *psy = dev_get_drvdata(dev); + const struct power_supply *psy = dev_get_drvdata(dev); int ret = 0, j; char *prop_buf; diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c index f20a6ac584ccdb531be616d36b2588d15c4abe10..4f9c1c417916550d76706253347acc7e64d18f3d 100644 --- a/drivers/power/supply/rk817_charger.c +++ b/drivers/power/supply/rk817_charger.c @@ -1060,8 +1060,10 @@ static int rk817_charger_probe(struct platform_device *pdev) return -ENODEV; charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL); - if (!charger) + if (!charger) { + of_node_put(node); return -ENOMEM; + } charger->rk808 = rk808; diff --git a/drivers/power/supply/rt5033_battery.c b/drivers/power/supply/rt5033_battery.c index 736dec608ff6d9e66c2cbd4b776a26faed46d5f4..5c04cf30521931c74d52a39cabd502553011faf0 100644 --- a/drivers/power/supply/rt5033_battery.c +++ b/drivers/power/supply/rt5033_battery.c @@ -112,8 +112,7 @@ static const struct power_supply_desc rt5033_battery_desc = { .num_properties = ARRAY_SIZE(rt5033_battery_props), }; -static int rt5033_battery_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int rt5033_battery_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; struct power_supply_config psy_cfg = {}; @@ -173,7 +172,7 @@ static struct i2c_driver rt5033_battery_driver = { .name = "rt5033-battery", .of_match_table = rt5033_battery_of_match, }, - .probe = rt5033_battery_probe, + .probe_new = rt5033_battery_probe, .remove = rt5033_battery_remove, .id_table = rt5033_battery_id, }; diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c index 72962286d70459eee3c4f3e35ceda5e823c45796..31fb6526a1fdf129b2ce01b8104f3bc512034771 100644 --- a/drivers/power/supply/rt9455_charger.c +++ b/drivers/power/supply/rt9455_charger.c @@ -1581,8 +1581,7 @@ static const struct regmap_config 
rt9455_regmap_config = { .cache_type = REGCACHE_RBTREE, }; -static int rt9455_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int rt9455_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; struct device *dev = &client->dev; @@ -1738,7 +1737,7 @@ MODULE_DEVICE_TABLE(acpi, rt9455_i2c_acpi_match); #endif static struct i2c_driver rt9455_driver = { - .probe = rt9455_probe, + .probe_new = rt9455_probe, .remove = rt9455_remove, .id_table = rt9455_i2c_id_table, .driver = { diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c index b08f7d0c4181599d3e25e84ef920d633b6bc339f..75ebcbf0a7883750b009ef21a6bd10692f138b1a 100644 --- a/drivers/power/supply/sbs-charger.c +++ b/drivers/power/supply/sbs-charger.c @@ -162,8 +162,7 @@ static const struct power_supply_desc sbs_desc = { .get_property = sbs_get_property, }; -static int sbs_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int sbs_probe(struct i2c_client *client) { struct power_supply_config psy_cfg = {}; struct sbs_info *chip; @@ -241,7 +240,7 @@ static const struct i2c_device_id sbs_id[] = { MODULE_DEVICE_TABLE(i2c, sbs_id); static struct i2c_driver sbs_driver = { - .probe = sbs_probe, + .probe_new = sbs_probe, .id_table = sbs_id, .driver = { .name = "sbs-charger", diff --git a/drivers/power/supply/sbs-manager.c b/drivers/power/supply/sbs-manager.c index 71ec8f74f83596391a86cbd5e43af3879d479c19..bde977391fd4c172304e08c9f2430b3d9c97042d 100644 --- a/drivers/power/supply/sbs-manager.c +++ b/drivers/power/supply/sbs-manager.c @@ -315,9 +315,9 @@ static void sbsm_del_mux_adapter(void *data) i2c_mux_del_adapters(sbsm->muxc); } -static int sbsm_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int sbsm_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct i2c_adapter *adapter = client->adapter; struct sbsm_data *data; struct device *dev = &client->dev; @@ -409,7 +409,7 @@ static struct i2c_driver sbsm_driver = { .name = "sbsm", .of_match_table = of_match_ptr(sbsm_dt_ids), }, - .probe = sbsm_probe, + .probe_new = sbsm_probe, .alert = sbsm_alert, .id_table = sbsm_ids }; diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c index 996a82f8a2a1dd11a9c6e2dd809355f12bc6f981..b5f0383102824ebfface63580aae6e9f722801f5 100644 --- a/drivers/power/supply/smb347-charger.c +++ b/drivers/power/supply/smb347-charger.c @@ -1528,9 +1528,9 @@ static const struct regulator_desc smb347_usb_vbus_regulator_desc = { .n_voltages = 1, }; -static int smb347_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int smb347_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct power_supply_config mains_usb_cfg = {}; struct regulator_config usb_rdev_cfg = {}; struct device *dev = &client->dev; @@ -1629,7 +1629,7 @@ static struct i2c_driver smb347_driver = { .name = "smb347", .of_match_table = smb3xx_of_match, }, - .probe = smb347_probe, + .probe_new = smb347_probe, .remove = smb347_remove, .shutdown = smb347_shutdown, .id_table = smb347_id, diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c index ef673ec3db568a16c4ff99278cde344fe209e45b..836d44c9fb74f75cddafc5472d256ef7025cfb69 100644 --- a/drivers/power/supply/ucs1002_power.c +++ b/drivers/power/supply/ucs1002_power.c @@ -532,8 +532,7 @@ static const struct regulator_desc 
ucs1002_regulator_descriptor = { .n_voltages = 1, }; -static int ucs1002_probe(struct i2c_client *client, - const struct i2c_device_id *dev_id) +static int ucs1002_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct power_supply_config charger_config = {}; @@ -681,7 +680,7 @@ static struct i2c_driver ucs1002_driver = { .name = "ucs1002", .of_match_table = ucs1002_of_match, }, - .probe = ucs1002_probe, + .probe_new = ucs1002_probe, }; module_i2c_driver(ucs1002_driver); diff --git a/drivers/power/supply/z2_battery.c b/drivers/power/supply/z2_battery.c index 1897c29848600b6c214732f927295ac191961f21..0ba4a590a0a5fc529edf2dee7847a45f7f1e807a 100644 --- a/drivers/power/supply/z2_battery.c +++ b/drivers/power/supply/z2_battery.c @@ -176,8 +176,7 @@ static int z2_batt_ps_init(struct z2_charger *charger, int props) return 0; } -static int z2_batt_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int z2_batt_probe(struct i2c_client *client) { int ret = 0; int props = 1; /* POWER_SUPPLY_PROP_PRESENT */ @@ -206,10 +205,12 @@ static int z2_batt_probe(struct i2c_client *client, charger->charge_gpiod = devm_gpiod_get_optional(&client->dev, NULL, GPIOD_IN); - if (IS_ERR(charger->charge_gpiod)) - return dev_err_probe(&client->dev, + if (IS_ERR(charger->charge_gpiod)) { + ret = dev_err_probe(&client->dev, PTR_ERR(charger->charge_gpiod), "failed to get charge GPIO\n"); + goto err; + } if (charger->charge_gpiod) { gpiod_set_consumer_name(charger->charge_gpiod, "BATT CHRG"); @@ -306,7 +307,7 @@ static struct i2c_driver z2_batt_driver = { .name = "z2-battery", .pm = Z2_BATTERY_PM_OPS }, - .probe = z2_batt_probe, + .probe_new = z2_batt_probe, .remove = z2_batt_remove, .id_table = z2_batt_id, }; diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c index 65512b6cc6fdcd2eaeabb0fdac41e676815f4e0e..200ad8751860a6f4f32ae937cf3161440a579d1b 100644 --- a/drivers/ps3/ps3-lpm.c +++ b/drivers/ps3/ps3-lpm.c @@ -1066,7 +1066,7 @@ EXPORT_SYMBOL_GPL(ps3_disable_pm_interrupts); * instance, specified by one of enum ps3_lpm_tb_type. * @tb_cache: Optional user supplied buffer to use as the trace buffer cache. * If NULL, the driver will allocate and manage an internal buffer. - * Unused when when @tb_type is PS3_LPM_TB_TYPE_NONE. + * Unused when @tb_type is PS3_LPM_TB_TYPE_NONE. * @tb_cache_size: The size in bytes of the user supplied @tb_cache buffer. * Unused when @tb_cache is NULL or @tb_type is PS3_LPM_TB_TYPE_NONE. */ diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index bb63edb507da4b7b255b33744444cf219f10054c..2bb640d1521d8f4c1cb4b715d66e0b75692ca099 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -433,7 +433,7 @@ config RTC_DRV_ISL12022 config RTC_DRV_ISL12026 tristate "Intersil ISL12026" - depends on OF || COMPILE_TEST + depends on OF help If you say yes here you get support for the Intersil ISL12026 RTC chip. @@ -1351,16 +1351,6 @@ config RTC_DRV_ASM9260 This driver can also be built as a module. If so, the module will be called rtc-asm9260. -config RTC_DRV_DAVINCI - tristate "TI DaVinci RTC" - depends on ARCH_DAVINCI_DM365 || COMPILE_TEST - help - If you say yes here you get support for the RTC on the - DaVinci platforms (DM365). - - This driver can also be built as a module. If so, the module - will be called rtc-davinci. 
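Retiring a driver such as rtc-davinci consistently touches three places: the Kconfig entry above, the Makefile rule in the hunk below, and the source file itself; leaving any one behind yields dead config symbols or missing-object build errors. The kbuild pairing being removed looks like this, with FOO as a placeholder:

# drivers/rtc/Kconfig
config RTC_DRV_FOO
	tristate "Example RTC"
	help
	  If unsure, say N.

# drivers/rtc/Makefile
obj-$(CONFIG_RTC_DRV_FOO)	+= rtc-foo.o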
- config RTC_DRV_DIGICOLOR tristate "Conexant Digicolor RTC" depends on ARCH_DIGICOLOR || COMPILE_TEST diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index aab22bc634321019dc6b214387f795938f866226..791994eb913d99eb88901fd9c24555785b5e799b 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -44,7 +44,6 @@ obj-$(CONFIG_RTC_DRV_CROS_EC) += rtc-cros-ec.o obj-$(CONFIG_RTC_DRV_DA9052) += rtc-da9052.o obj-$(CONFIG_RTC_DRV_DA9055) += rtc-da9055.o obj-$(CONFIG_RTC_DRV_DA9063) += rtc-da9063.o -obj-$(CONFIG_RTC_DRV_DAVINCI) += rtc-davinci.o obj-$(CONFIG_RTC_DRV_DIGICOLOR) += rtc-digicolor.o obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index e48223c00c672eb37aa0132f319e52765c7ded02..e5b7b48cffac06f5643ebd5b2b06a41ff46e61b9 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -374,11 +374,11 @@ struct rtc_device *devm_rtc_allocate_device(struct device *dev) rtc->id = id; rtc->dev.parent = dev; - err = dev_set_name(&rtc->dev, "rtc%d", id); + err = devm_add_action_or_reset(dev, devm_rtc_release_device, rtc); if (err) return ERR_PTR(err); - err = devm_add_action_or_reset(dev, devm_rtc_release_device, rtc); + err = dev_set_name(&rtc->dev, "rtc%d", id); if (err) return ERR_PTR(err); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 9edd662c69ace4da09ef528e6b8216114987be9e..7c30cb3c764d83c9c85c16182874182709a81635 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -256,7 +256,7 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) * * This could all instead be done in the lower level driver, * but since more than one lower level RTC implementation needs it, - * then it's probably best best to do it here instead of there.. + * then it's probably best to do it here instead of there.. 
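The comment being tidied here describes __rtc_read_alarm()'s snapshot technique: when hardware alarms omit date fields, the core reads the current time before and after reading the alarm, so a rollover between the two reads can be detected and the read retried. A condensed sketch of that idea only; the real core code additionally fills the missing alarm fields from the "before" snapshot and holds the ops lock:

static int ex_read_alarm_stable(struct rtc_device *rtc,
				struct rtc_wkalrm *alarm)
{
	struct rtc_time before, now;
	int ret;

	do {
		ret = rtc_read_time(rtc, &before);
		if (ret < 0)
			return ret;

		/* driver callback; date fields may be unset */
		ret = rtc->ops->read_alarm(rtc->dev.parent, alarm);
		if (ret < 0)
			return ret;

		ret = rtc_read_time(rtc, &now);
		if (ret < 0)
			return ret;

	/* if the clock ticked past a field boundary between the two
	 * snapshots, inferred date fields could be off by one: retry */
	} while (before.tm_min != now.tm_min ||
		 before.tm_hour != now.tm_hour ||
		 before.tm_mday != now.tm_mday);

	return 0;
}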
*/ /* Get the "before" timestamp */ diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c index 9b0138d07232d95018f68ed5e2b89bfbd7dc1027..2e0e6432901b844aeb301375d4252c7d8c5e4f66 100644 --- a/drivers/rtc/rtc-abx80x.c +++ b/drivers/rtc/rtc-abx80x.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -673,13 +674,28 @@ static int abx80x_setup_watchdog(struct abx80x_priv *priv) } #endif -static int abx80x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static const struct i2c_device_id abx80x_id[] = { + { "abx80x", ABX80X }, + { "ab0801", AB0801 }, + { "ab0803", AB0803 }, + { "ab0804", AB0804 }, + { "ab0805", AB0805 }, + { "ab1801", AB1801 }, + { "ab1803", AB1803 }, + { "ab1804", AB1804 }, + { "ab1805", AB1805 }, + { "rv1805", RV1805 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, abx80x_id); + +static int abx80x_probe(struct i2c_client *client) { struct device_node *np = client->dev.of_node; struct abx80x_priv *priv; int i, data, err, trickle_cfg = -EINVAL; char buf[7]; + const struct i2c_device_id *id = i2c_match_id(abx80x_id, client); unsigned int part = id->driver_data; unsigned int partnumber; unsigned int majrev, minrev; @@ -847,21 +863,6 @@ static int abx80x_probe(struct i2c_client *client, return devm_rtc_register_device(priv->rtc); } -static const struct i2c_device_id abx80x_id[] = { - { "abx80x", ABX80X }, - { "ab0801", AB0801 }, - { "ab0803", AB0803 }, - { "ab0804", AB0804 }, - { "ab0805", AB0805 }, - { "ab1801", AB1801 }, - { "ab1803", AB1803 }, - { "ab1804", AB1804 }, - { "ab1805", AB1805 }, - { "rv1805", RV1805 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, abx80x_id); - #ifdef CONFIG_OF static const struct of_device_id abx80x_of_match[] = { { @@ -914,7 +915,7 @@ static struct i2c_driver abx80x_driver = { .name = "rtc-abx80x", .of_match_table = of_match_ptr(abx80x_of_match), }, - .probe = abx80x_probe, + .probe_new = abx80x_probe, .id_table = abx80x_id, }; diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index fe396d27ebb7108cbbac9eb2053e6a4281ddc056..e9d17232d0a8163e37e3ee5b5d51fd1a71fcc55a 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -130,7 +130,7 @@ static void at91_rtc_write_idr(u32 mask) * * Note that there is still a possibility that the mask is updated * before interrupts have actually been disabled in hardware. The only - * way to be certain would be to poll the IMR-register, which is is + * way to be certain would be to poll the IMR-register, which is * the very register we are trying to emulate. The register read back * is a reasonable heuristic. */ diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c index 6d6a55efb9cc7ae6e14d872564f6c36d2ff49049..967ddc6bf76d68697d304c3b511d0f224d8056da 100644 --- a/drivers/rtc/rtc-bq32k.c +++ b/drivers/rtc/rtc-bq32k.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 58cc2bae2f8a04848c6666031ef76d219fb2af96..00e2ca7374ecfb2579ed14445434d9cd6c626fe5 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -744,6 +744,168 @@ static irqreturn_t cmos_interrupt(int irq, void *p) return IRQ_NONE; } +#ifdef CONFIG_ACPI + +#include + +static u32 rtc_handler(void *context) +{ + struct device *dev = context; + struct cmos_rtc *cmos = dev_get_drvdata(dev); + unsigned char rtc_control = 0; + unsigned char rtc_intr; + unsigned long flags; + + + /* + * Always update rtc irq when ACPI is used as RTC Alarm. 
+ * Or else, ACPI SCI is enabled during suspend/resume only, + * update rtc irq in that case. + */ + if (cmos_use_acpi_alarm()) + cmos_interrupt(0, (void *)cmos->rtc); + else { + /* Fix me: can we use cmos_interrupt() here as well? */ + spin_lock_irqsave(&rtc_lock, flags); + if (cmos_rtc.suspend_ctrl) + rtc_control = CMOS_READ(RTC_CONTROL); + if (rtc_control & RTC_AIE) { + cmos_rtc.suspend_ctrl &= ~RTC_AIE; + CMOS_WRITE(rtc_control, RTC_CONTROL); + rtc_intr = CMOS_READ(RTC_INTR_FLAGS); + rtc_update_irq(cmos->rtc, 1, rtc_intr); + } + spin_unlock_irqrestore(&rtc_lock, flags); + } + + pm_wakeup_hard_event(dev); + acpi_clear_event(ACPI_EVENT_RTC); + acpi_disable_event(ACPI_EVENT_RTC, 0); + return ACPI_INTERRUPT_HANDLED; +} + +static void acpi_rtc_event_setup(struct device *dev) +{ + if (acpi_disabled) + return; + + acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev); + /* + * After the RTC handler is installed, the Fixed_RTC event should + * be disabled. Only when the RTC alarm is set will it be enabled. + */ + acpi_clear_event(ACPI_EVENT_RTC); + acpi_disable_event(ACPI_EVENT_RTC, 0); +} + +static void acpi_rtc_event_cleanup(void) +{ + if (acpi_disabled) + return; + + acpi_remove_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler); +} + +static void rtc_wake_on(struct device *dev) +{ + acpi_clear_event(ACPI_EVENT_RTC); + acpi_enable_event(ACPI_EVENT_RTC, 0); +} + +static void rtc_wake_off(struct device *dev) +{ + acpi_disable_event(ACPI_EVENT_RTC, 0); +} + +#ifdef CONFIG_X86 +/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */ +static void use_acpi_alarm_quirks(void) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return; + + if (!is_hpet_enabled()) + return; + + if (dmi_get_bios_year() < 2015) + return; + + use_acpi_alarm = true; +} +#else +static inline void use_acpi_alarm_quirks(void) { } +#endif + +static void acpi_cmos_wake_setup(struct device *dev) +{ + if (acpi_disabled) + return; + + use_acpi_alarm_quirks(); + + cmos_rtc.wake_on = rtc_wake_on; + cmos_rtc.wake_off = rtc_wake_off; + + /* ACPI tables bug workaround. 
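acpi_rtc_event_setup() earlier in this hunk installs the fixed RTC event handler and immediately masks the event until an alarm is armed; the remove path has to mirror the same no-platform-data condition, or the handler would outlive the driver. A condensed sketch of the pairing as used by the probe and remove changes in this file (wrapper names invented):

static int ex_probe(struct device *dev)
{
	/* ... cmos_do_probe() work succeeds ... */

	if (!dev_get_platdata(dev))
		acpi_rtc_event_setup(dev);

	return 0;
}

static void ex_remove(struct device *dev)
{
	if (!dev_get_platdata(dev))
		acpi_rtc_event_cleanup();

	/* ... rest of cmos_do_remove() ... */
}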
*/ + if (acpi_gbl_FADT.month_alarm && !acpi_gbl_FADT.day_alarm) { + dev_dbg(dev, "bogus FADT month_alarm (%d)\n", + acpi_gbl_FADT.month_alarm); + acpi_gbl_FADT.month_alarm = 0; + } + + cmos_rtc.day_alrm = acpi_gbl_FADT.day_alarm; + cmos_rtc.mon_alrm = acpi_gbl_FADT.month_alarm; + cmos_rtc.century = acpi_gbl_FADT.century; + + if (acpi_gbl_FADT.flags & ACPI_FADT_S4_RTC_WAKE) + dev_info(dev, "RTC can wake from S4\n"); + + /* RTC always wakes from S1/S2/S3, and often S4/STD */ + device_init_wakeup(dev, 1); +} + +static void cmos_check_acpi_rtc_status(struct device *dev, + unsigned char *rtc_control) +{ + struct cmos_rtc *cmos = dev_get_drvdata(dev); + acpi_event_status rtc_status; + acpi_status status; + + if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC) + return; + + status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status); + if (ACPI_FAILURE(status)) { + dev_err(dev, "Could not get RTC status\n"); + } else if (rtc_status & ACPI_EVENT_FLAG_SET) { + unsigned char mask; + *rtc_control &= ~RTC_AIE; + CMOS_WRITE(*rtc_control, RTC_CONTROL); + mask = CMOS_READ(RTC_INTR_FLAGS); + rtc_update_irq(cmos->rtc, 1, mask); + } +} + +#else /* !CONFIG_ACPI */ + +static inline void acpi_rtc_event_setup(struct device *dev) +{ +} + +static inline void acpi_rtc_event_cleanup(void) +{ +} + +static inline void acpi_cmos_wake_setup(struct device *dev) +{ +} + +static inline void cmos_check_acpi_rtc_status(struct device *dev, + unsigned char *rtc_control) +{ +} +#endif /* CONFIG_ACPI */ + #ifdef CONFIG_PNP #define INITSECTION @@ -827,19 +989,27 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) if (info->address_space) address_space = info->address_space; - if (info->rtc_day_alarm && info->rtc_day_alarm < 128) - cmos_rtc.day_alrm = info->rtc_day_alarm; - if (info->rtc_mon_alarm && info->rtc_mon_alarm < 128) - cmos_rtc.mon_alrm = info->rtc_mon_alarm; - if (info->rtc_century && info->rtc_century < 128) - cmos_rtc.century = info->rtc_century; + cmos_rtc.day_alrm = info->rtc_day_alarm; + cmos_rtc.mon_alrm = info->rtc_mon_alarm; + cmos_rtc.century = info->rtc_century; if (info->wake_on && info->wake_off) { cmos_rtc.wake_on = info->wake_on; cmos_rtc.wake_off = info->wake_off; } + } else { + acpi_cmos_wake_setup(dev); } + if (cmos_rtc.day_alrm >= 128) + cmos_rtc.day_alrm = 0; + + if (cmos_rtc.mon_alrm >= 128) + cmos_rtc.mon_alrm = 0; + + if (cmos_rtc.century >= 128) + cmos_rtc.century = 0; + cmos_rtc.dev = dev; dev_set_drvdata(dev, &cmos_rtc); @@ -928,6 +1098,13 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) nvmem_cfg.size = address_space - NVRAM_OFFSET; devm_rtc_nvmem_register(cmos_rtc.rtc, &nvmem_cfg); + /* + * Everything has gone well so far, so by default register a handler for + * the ACPI RTC fixed event. + */ + if (!info) + acpi_rtc_event_setup(dev); + dev_info(dev, "%s%s, %d bytes nvram%s\n", !is_valid_irq(rtc_irq) ? "no alarms" : cmos_rtc.mon_alrm ? 
"alarms up to one year" : @@ -973,6 +1150,9 @@ static void cmos_do_remove(struct device *dev) hpet_unregister_irq_handler(cmos_interrupt); } + if (!dev_get_platdata(dev)) + acpi_rtc_event_cleanup(); + cmos->rtc = NULL; ports = cmos->iomem; @@ -1122,9 +1302,6 @@ static void cmos_check_wkalrm(struct device *dev) } } -static void cmos_check_acpi_rtc_status(struct device *dev, - unsigned char *rtc_control); - static int __maybe_unused cmos_resume(struct device *dev) { struct cmos_rtc *cmos = dev_get_drvdata(dev); @@ -1191,175 +1368,13 @@ static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume); * predate even PNPBIOS should set up platform_bus devices. */ -#ifdef CONFIG_ACPI - -#include - -static u32 rtc_handler(void *context) -{ - struct device *dev = context; - struct cmos_rtc *cmos = dev_get_drvdata(dev); - unsigned char rtc_control = 0; - unsigned char rtc_intr; - unsigned long flags; - - - /* - * Always update rtc irq when ACPI is used as RTC Alarm. - * Or else, ACPI SCI is enabled during suspend/resume only, - * update rtc irq in that case. - */ - if (cmos_use_acpi_alarm()) - cmos_interrupt(0, (void *)cmos->rtc); - else { - /* Fix me: can we use cmos_interrupt() here as well? */ - spin_lock_irqsave(&rtc_lock, flags); - if (cmos_rtc.suspend_ctrl) - rtc_control = CMOS_READ(RTC_CONTROL); - if (rtc_control & RTC_AIE) { - cmos_rtc.suspend_ctrl &= ~RTC_AIE; - CMOS_WRITE(rtc_control, RTC_CONTROL); - rtc_intr = CMOS_READ(RTC_INTR_FLAGS); - rtc_update_irq(cmos->rtc, 1, rtc_intr); - } - spin_unlock_irqrestore(&rtc_lock, flags); - } - - pm_wakeup_hard_event(dev); - acpi_clear_event(ACPI_EVENT_RTC); - acpi_disable_event(ACPI_EVENT_RTC, 0); - return ACPI_INTERRUPT_HANDLED; -} - -static inline void rtc_wake_setup(struct device *dev) -{ - if (acpi_disabled) - return; - - acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev); - /* - * After the RTC handler is installed, the Fixed_RTC event should - * be disabled. Only when the RTC alarm is set will it be enabled. - */ - acpi_clear_event(ACPI_EVENT_RTC); - acpi_disable_event(ACPI_EVENT_RTC, 0); -} - -static void rtc_wake_on(struct device *dev) -{ - acpi_clear_event(ACPI_EVENT_RTC); - acpi_enable_event(ACPI_EVENT_RTC, 0); -} - -static void rtc_wake_off(struct device *dev) -{ - acpi_disable_event(ACPI_EVENT_RTC, 0); -} - -#ifdef CONFIG_X86 -/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */ -static void use_acpi_alarm_quirks(void) -{ - if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) - return; - - if (!is_hpet_enabled()) - return; - - if (dmi_get_bios_year() < 2015) - return; - - use_acpi_alarm = true; -} -#else -static inline void use_acpi_alarm_quirks(void) { } -#endif - -/* Every ACPI platform has a mc146818 compatible "cmos rtc". Here we find - * its device node and pass extra config data. This helps its driver use - * capabilities that the now-obsolete mc146818 didn't have, and informs it - * that this board's RTC is wakeup-capable (per ACPI spec). 
- */ -static struct cmos_rtc_board_info acpi_rtc_info; - -static void cmos_wake_setup(struct device *dev) -{ - if (acpi_disabled) - return; - - use_acpi_alarm_quirks(); - - acpi_rtc_info.wake_on = rtc_wake_on; - acpi_rtc_info.wake_off = rtc_wake_off; - - /* workaround bug in some ACPI tables */ - if (acpi_gbl_FADT.month_alarm && !acpi_gbl_FADT.day_alarm) { - dev_dbg(dev, "bogus FADT month_alarm (%d)\n", - acpi_gbl_FADT.month_alarm); - acpi_gbl_FADT.month_alarm = 0; - } - - acpi_rtc_info.rtc_day_alarm = acpi_gbl_FADT.day_alarm; - acpi_rtc_info.rtc_mon_alarm = acpi_gbl_FADT.month_alarm; - acpi_rtc_info.rtc_century = acpi_gbl_FADT.century; - - /* NOTE: S4_RTC_WAKE is NOT currently useful to Linux */ - if (acpi_gbl_FADT.flags & ACPI_FADT_S4_RTC_WAKE) - dev_info(dev, "RTC can wake from S4\n"); - - dev->platform_data = &acpi_rtc_info; - - /* RTC always wakes from S1/S2/S3, and often S4/STD */ - device_init_wakeup(dev, 1); -} - -static void cmos_check_acpi_rtc_status(struct device *dev, - unsigned char *rtc_control) -{ - struct cmos_rtc *cmos = dev_get_drvdata(dev); - acpi_event_status rtc_status; - acpi_status status; - - if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC) - return; - - status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status); - if (ACPI_FAILURE(status)) { - dev_err(dev, "Could not get RTC status\n"); - } else if (rtc_status & ACPI_EVENT_FLAG_SET) { - unsigned char mask; - *rtc_control &= ~RTC_AIE; - CMOS_WRITE(*rtc_control, RTC_CONTROL); - mask = CMOS_READ(RTC_INTR_FLAGS); - rtc_update_irq(cmos->rtc, 1, mask); - } -} - -#else - -static void cmos_wake_setup(struct device *dev) -{ -} - -static void cmos_check_acpi_rtc_status(struct device *dev, - unsigned char *rtc_control) -{ -} - -static void rtc_wake_setup(struct device *dev) -{ -} -#endif - #ifdef CONFIG_PNP #include static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) { - int irq, ret; - - cmos_wake_setup(&pnp->dev); + int irq; if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) { irq = 0; @@ -1375,13 +1390,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) irq = pnp_irq(pnp, 0); } - ret = cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), irq); - if (ret) - return ret; - - rtc_wake_setup(&pnp->dev); - - return 0; + return cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), irq); } static void cmos_pnp_remove(struct pnp_dev *pnp) @@ -1465,10 +1474,9 @@ static inline void cmos_of_init(struct platform_device *pdev) {} static int __init cmos_platform_probe(struct platform_device *pdev) { struct resource *resource; - int irq, ret; + int irq; cmos_of_init(pdev); - cmos_wake_setup(&pdev->dev); if (RTC_IOMAPPED) resource = platform_get_resource(pdev, IORESOURCE_IO, 0); @@ -1478,13 +1486,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev) if (irq < 0) irq = -1; - ret = cmos_do_probe(&pdev->dev, resource, irq); - if (ret) - return ret; - - rtc_wake_setup(&pdev->dev); - - return 0; + return cmos_do_probe(&pdev->dev, resource, irq); } static int cmos_platform_remove(struct platform_device *pdev) diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c index 887f5193e253da592a0340fd8c2b80af10aefc00..a3ec066d8066775730871ac144218f05010e15d9 100644 --- a/drivers/rtc/rtc-cros-ec.c +++ b/drivers/rtc/rtc-cros-ec.c @@ -14,6 +14,8 @@ #define DRV_NAME "cros-ec-rtc" +#define SECS_PER_DAY (24 * 60 * 60) + /** * struct cros_ec_rtc - Driver data for EC RTC * @@ -43,13 +45,8 @@ static int cros_ec_rtc_get(struct 
cros_ec_device *cros_ec, u32 command, msg.msg.insize = sizeof(msg.data); ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg); - if (ret < 0) { - dev_err(cros_ec->dev, - "error getting %s from EC: %d\n", - command == EC_CMD_RTC_GET_VALUE ? "time" : "alarm", - ret); + if (ret < 0) return ret; - } *response = msg.data.time; @@ -59,7 +56,7 @@ static int cros_ec_rtc_get(struct cros_ec_device *cros_ec, u32 command, static int cros_ec_rtc_set(struct cros_ec_device *cros_ec, u32 command, u32 param) { - int ret = 0; + int ret; struct { struct cros_ec_command msg; struct ec_response_rtc data; @@ -71,13 +68,8 @@ static int cros_ec_rtc_set(struct cros_ec_device *cros_ec, u32 command, msg.data.time = param; ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg); - if (ret < 0) { - dev_err(cros_ec->dev, "error setting %s on EC: %d\n", - command == EC_CMD_RTC_SET_VALUE ? "time" : "alarm", - ret); + if (ret < 0) return ret; - } - return 0; } @@ -190,8 +182,21 @@ static int cros_ec_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) ret = cros_ec_rtc_set(cros_ec, EC_CMD_RTC_SET_ALARM, alarm_offset); if (ret < 0) { - dev_err(dev, "error setting alarm: %d\n", ret); - return ret; + if (ret == -EINVAL && alarm_offset >= SECS_PER_DAY) { + /* + * RTC chips on some older Chromebooks can only handle + * alarms up to 24h in the future. Try to set an alarm + * below that limit to avoid suspend failures. + */ + ret = cros_ec_rtc_set(cros_ec, EC_CMD_RTC_SET_ALARM, + SECS_PER_DAY - 1); + } + + if (ret < 0) { + dev_err(dev, "error setting alarm in %u seconds: %d\n", + alarm_offset, ret); + return ret; + } } return 0; diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c deleted file mode 100644 index 6bef0f2353da4b048d6da8a04ee4fc8a82f6786a..0000000000000000000000000000000000000000 --- a/drivers/rtc/rtc-davinci.c +++ /dev/null @@ -1,512 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * DaVinci Power Management and Real Time Clock Driver for TI platforms - * - * Copyright (C) 2009 Texas Instruments, Inc - * - * Author: Miguel Aguilar - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * The DaVinci RTC is a simple RTC with the following - * Sec: 0 - 59 : BCD count - * Min: 0 - 59 : BCD count - * Hour: 0 - 23 : BCD count - * Day: 0 - 0x7FFF(32767) : Binary count ( Over 89 years ) - */ - -/* PRTC interface registers */ -#define DAVINCI_PRTCIF_PID 0x00 -#define PRTCIF_CTLR 0x04 -#define PRTCIF_LDATA 0x08 -#define PRTCIF_UDATA 0x0C -#define PRTCIF_INTEN 0x10 -#define PRTCIF_INTFLG 0x14 - -/* PRTCIF_CTLR bit fields */ -#define PRTCIF_CTLR_BUSY BIT(31) -#define PRTCIF_CTLR_SIZE BIT(25) -#define PRTCIF_CTLR_DIR BIT(24) -#define PRTCIF_CTLR_BENU_MSB BIT(23) -#define PRTCIF_CTLR_BENU_3RD_BYTE BIT(22) -#define PRTCIF_CTLR_BENU_2ND_BYTE BIT(21) -#define PRTCIF_CTLR_BENU_LSB BIT(20) -#define PRTCIF_CTLR_BENU_MASK (0x00F00000) -#define PRTCIF_CTLR_BENL_MSB BIT(19) -#define PRTCIF_CTLR_BENL_3RD_BYTE BIT(18) -#define PRTCIF_CTLR_BENL_2ND_BYTE BIT(17) -#define PRTCIF_CTLR_BENL_LSB BIT(16) -#define PRTCIF_CTLR_BENL_MASK (0x000F0000) - -/* PRTCIF_INTEN bit fields */ -#define PRTCIF_INTEN_RTCSS BIT(1) -#define PRTCIF_INTEN_RTCIF BIT(0) -#define PRTCIF_INTEN_MASK (PRTCIF_INTEN_RTCSS \ - | PRTCIF_INTEN_RTCIF) - -/* PRTCIF_INTFLG bit fields */ -#define PRTCIF_INTFLG_RTCSS BIT(1) -#define PRTCIF_INTFLG_RTCIF BIT(0) -#define PRTCIF_INTFLG_MASK (PRTCIF_INTFLG_RTCSS \ - | PRTCIF_INTFLG_RTCIF) - -/* PRTC subsystem registers */ -#define 
PRTCSS_RTC_INTC_EXTENA1 (0x0C) -#define PRTCSS_RTC_CTRL (0x10) -#define PRTCSS_RTC_WDT (0x11) -#define PRTCSS_RTC_TMR0 (0x12) -#define PRTCSS_RTC_TMR1 (0x13) -#define PRTCSS_RTC_CCTRL (0x14) -#define PRTCSS_RTC_SEC (0x15) -#define PRTCSS_RTC_MIN (0x16) -#define PRTCSS_RTC_HOUR (0x17) -#define PRTCSS_RTC_DAY0 (0x18) -#define PRTCSS_RTC_DAY1 (0x19) -#define PRTCSS_RTC_AMIN (0x1A) -#define PRTCSS_RTC_AHOUR (0x1B) -#define PRTCSS_RTC_ADAY0 (0x1C) -#define PRTCSS_RTC_ADAY1 (0x1D) -#define PRTCSS_RTC_CLKC_CNT (0x20) - -/* PRTCSS_RTC_INTC_EXTENA1 */ -#define PRTCSS_RTC_INTC_EXTENA1_MASK (0x07) - -/* PRTCSS_RTC_CTRL bit fields */ -#define PRTCSS_RTC_CTRL_WDTBUS BIT(7) -#define PRTCSS_RTC_CTRL_WEN BIT(6) -#define PRTCSS_RTC_CTRL_WDRT BIT(5) -#define PRTCSS_RTC_CTRL_WDTFLG BIT(4) -#define PRTCSS_RTC_CTRL_TE BIT(3) -#define PRTCSS_RTC_CTRL_TIEN BIT(2) -#define PRTCSS_RTC_CTRL_TMRFLG BIT(1) -#define PRTCSS_RTC_CTRL_TMMD BIT(0) - -/* PRTCSS_RTC_CCTRL bit fields */ -#define PRTCSS_RTC_CCTRL_CALBUSY BIT(7) -#define PRTCSS_RTC_CCTRL_DAEN BIT(5) -#define PRTCSS_RTC_CCTRL_HAEN BIT(4) -#define PRTCSS_RTC_CCTRL_MAEN BIT(3) -#define PRTCSS_RTC_CCTRL_ALMFLG BIT(2) -#define PRTCSS_RTC_CCTRL_AIEN BIT(1) -#define PRTCSS_RTC_CCTRL_CAEN BIT(0) - -static DEFINE_SPINLOCK(davinci_rtc_lock); - -struct davinci_rtc { - struct rtc_device *rtc; - void __iomem *base; - int irq; -}; - -static inline void rtcif_write(struct davinci_rtc *davinci_rtc, - u32 val, u32 addr) -{ - writel(val, davinci_rtc->base + addr); -} - -static inline u32 rtcif_read(struct davinci_rtc *davinci_rtc, u32 addr) -{ - return readl(davinci_rtc->base + addr); -} - -static inline void rtcif_wait(struct davinci_rtc *davinci_rtc) -{ - while (rtcif_read(davinci_rtc, PRTCIF_CTLR) & PRTCIF_CTLR_BUSY) - cpu_relax(); -} - -static inline void rtcss_write(struct davinci_rtc *davinci_rtc, - unsigned long val, u8 addr) -{ - rtcif_wait(davinci_rtc); - - rtcif_write(davinci_rtc, PRTCIF_CTLR_BENL_LSB | addr, PRTCIF_CTLR); - rtcif_write(davinci_rtc, val, PRTCIF_LDATA); - - rtcif_wait(davinci_rtc); -} - -static inline u8 rtcss_read(struct davinci_rtc *davinci_rtc, u8 addr) -{ - rtcif_wait(davinci_rtc); - - rtcif_write(davinci_rtc, PRTCIF_CTLR_DIR | PRTCIF_CTLR_BENL_LSB | addr, - PRTCIF_CTLR); - - rtcif_wait(davinci_rtc); - - return rtcif_read(davinci_rtc, PRTCIF_LDATA); -} - -static inline void davinci_rtcss_calendar_wait(struct davinci_rtc *davinci_rtc) -{ - while (rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL) & - PRTCSS_RTC_CCTRL_CALBUSY) - cpu_relax(); -} - -static irqreturn_t davinci_rtc_interrupt(int irq, void *class_dev) -{ - struct davinci_rtc *davinci_rtc = class_dev; - unsigned long events = 0; - u32 irq_flg; - u8 alm_irq, tmr_irq; - u8 rtc_ctrl, rtc_cctrl; - int ret = IRQ_NONE; - - irq_flg = rtcif_read(davinci_rtc, PRTCIF_INTFLG) & - PRTCIF_INTFLG_RTCSS; - - alm_irq = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL) & - PRTCSS_RTC_CCTRL_ALMFLG; - - tmr_irq = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL) & - PRTCSS_RTC_CTRL_TMRFLG; - - if (irq_flg) { - if (alm_irq) { - events |= RTC_IRQF | RTC_AF; - rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL); - rtc_cctrl |= PRTCSS_RTC_CCTRL_ALMFLG; - rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL); - } else if (tmr_irq) { - events |= RTC_IRQF | RTC_PF; - rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL); - rtc_ctrl |= PRTCSS_RTC_CTRL_TMRFLG; - rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL); - } - - rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS, - PRTCIF_INTFLG); - rtc_update_irq(davinci_rtc->rtc, 1, events); - - ret = 
IRQ_HANDLED; - } - - return ret; -} - -static int -davinci_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) -{ - struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev); - u8 rtc_ctrl; - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&davinci_rtc_lock, flags); - - rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL); - - switch (cmd) { - case RTC_WIE_ON: - rtc_ctrl |= PRTCSS_RTC_CTRL_WEN | PRTCSS_RTC_CTRL_WDTFLG; - break; - case RTC_WIE_OFF: - rtc_ctrl &= ~PRTCSS_RTC_CTRL_WEN; - break; - default: - ret = -ENOIOCTLCMD; - } - - rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL); - - spin_unlock_irqrestore(&davinci_rtc_lock, flags); - - return ret; -} - -static void convertfromdays(u16 days, struct rtc_time *tm) -{ - int tmp_days, year, mon; - - for (year = 2000;; year++) { - tmp_days = rtc_year_days(1, 12, year); - if (days >= tmp_days) - days -= tmp_days; - else { - for (mon = 0;; mon++) { - tmp_days = rtc_month_days(mon, year); - if (days >= tmp_days) { - days -= tmp_days; - } else { - tm->tm_year = year - 1900; - tm->tm_mon = mon; - tm->tm_mday = days + 1; - break; - } - } - break; - } - } -} - -static void convert2days(u16 *days, struct rtc_time *tm) -{ - int i; - *days = 0; - - for (i = 2000; i < 1900 + tm->tm_year; i++) - *days += rtc_year_days(1, 12, i); - - *days += rtc_year_days(tm->tm_mday, tm->tm_mon, 1900 + tm->tm_year); -} - -static int davinci_rtc_read_time(struct device *dev, struct rtc_time *tm) -{ - struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev); - u16 days = 0; - u8 day0, day1; - unsigned long flags; - - spin_lock_irqsave(&davinci_rtc_lock, flags); - - davinci_rtcss_calendar_wait(davinci_rtc); - tm->tm_sec = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_SEC)); - - davinci_rtcss_calendar_wait(davinci_rtc); - tm->tm_min = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_MIN)); - - davinci_rtcss_calendar_wait(davinci_rtc); - tm->tm_hour = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_HOUR)); - - davinci_rtcss_calendar_wait(davinci_rtc); - day0 = rtcss_read(davinci_rtc, PRTCSS_RTC_DAY0); - - davinci_rtcss_calendar_wait(davinci_rtc); - day1 = rtcss_read(davinci_rtc, PRTCSS_RTC_DAY1); - - spin_unlock_irqrestore(&davinci_rtc_lock, flags); - - days |= day1; - days <<= 8; - days |= day0; - - convertfromdays(days, tm); - - return 0; -} - -static int davinci_rtc_set_time(struct device *dev, struct rtc_time *tm) -{ - struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev); - u16 days; - u8 rtc_cctrl; - unsigned long flags; - - convert2days(&days, tm); - - spin_lock_irqsave(&davinci_rtc_lock, flags); - - davinci_rtcss_calendar_wait(davinci_rtc); - rtcss_write(davinci_rtc, bin2bcd(tm->tm_sec), PRTCSS_RTC_SEC); - - davinci_rtcss_calendar_wait(davinci_rtc); - rtcss_write(davinci_rtc, bin2bcd(tm->tm_min), PRTCSS_RTC_MIN); - - davinci_rtcss_calendar_wait(davinci_rtc); - rtcss_write(davinci_rtc, bin2bcd(tm->tm_hour), PRTCSS_RTC_HOUR); - - davinci_rtcss_calendar_wait(davinci_rtc); - rtcss_write(davinci_rtc, days & 0xFF, PRTCSS_RTC_DAY0); - - davinci_rtcss_calendar_wait(davinci_rtc); - rtcss_write(davinci_rtc, (days & 0xFF00) >> 8, PRTCSS_RTC_DAY1); - - rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL); - rtc_cctrl |= PRTCSS_RTC_CCTRL_CAEN; - rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL); - - spin_unlock_irqrestore(&davinci_rtc_lock, flags); - - return 0; -} - -static int davinci_rtc_alarm_irq_enable(struct device *dev, - unsigned int enabled) -{ - struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev); - unsigned long flags; - u8 rtc_cctrl = 
rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL); - - spin_lock_irqsave(&davinci_rtc_lock, flags); - - if (enabled) - rtc_cctrl |= PRTCSS_RTC_CCTRL_DAEN | - PRTCSS_RTC_CCTRL_HAEN | - PRTCSS_RTC_CCTRL_MAEN | - PRTCSS_RTC_CCTRL_ALMFLG | - PRTCSS_RTC_CCTRL_AIEN; - else - rtc_cctrl &= ~PRTCSS_RTC_CCTRL_AIEN; - - davinci_rtcss_calendar_wait(davinci_rtc); - rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL); - - spin_unlock_irqrestore(&davinci_rtc_lock, flags); - - return 0; -} - -static int davinci_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) -{ - struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev); - u16 days = 0; - u8 day0, day1; - unsigned long flags; - - alm->time.tm_sec = 0; - - spin_lock_irqsave(&davinci_rtc_lock, flags); - - davinci_rtcss_calendar_wait(davinci_rtc); - alm->time.tm_min = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_AMIN)); - - davinci_rtcss_calendar_wait(davinci_rtc); - alm->time.tm_hour = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_AHOUR)); - - davinci_rtcss_calendar_wait(davinci_rtc); - day0 = rtcss_read(davinci_rtc, PRTCSS_RTC_ADAY0); - - davinci_rtcss_calendar_wait(davinci_rtc); - day1 = rtcss_read(davinci_rtc, PRTCSS_RTC_ADAY1); - - spin_unlock_irqrestore(&davinci_rtc_lock, flags); - days |= day1; - days <<= 8; - days |= day0; - - convertfromdays(days, &alm->time); - - alm->pending = !!(rtcss_read(davinci_rtc, - PRTCSS_RTC_CCTRL) & - PRTCSS_RTC_CCTRL_AIEN); - alm->enabled = alm->pending && device_may_wakeup(dev); - - return 0; -} - -static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) -{ - struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev); - unsigned long flags; - u16 days; - - convert2days(&days, &alm->time); - - spin_lock_irqsave(&davinci_rtc_lock, flags); - - davinci_rtcss_calendar_wait(davinci_rtc); - rtcss_write(davinci_rtc, bin2bcd(alm->time.tm_min), PRTCSS_RTC_AMIN); - - davinci_rtcss_calendar_wait(davinci_rtc); - rtcss_write(davinci_rtc, bin2bcd(alm->time.tm_hour), PRTCSS_RTC_AHOUR); - - davinci_rtcss_calendar_wait(davinci_rtc); - rtcss_write(davinci_rtc, days & 0xFF, PRTCSS_RTC_ADAY0); - - davinci_rtcss_calendar_wait(davinci_rtc); - rtcss_write(davinci_rtc, (days & 0xFF00) >> 8, PRTCSS_RTC_ADAY1); - - spin_unlock_irqrestore(&davinci_rtc_lock, flags); - - return 0; -} - -static const struct rtc_class_ops davinci_rtc_ops = { - .ioctl = davinci_rtc_ioctl, - .read_time = davinci_rtc_read_time, - .set_time = davinci_rtc_set_time, - .alarm_irq_enable = davinci_rtc_alarm_irq_enable, - .read_alarm = davinci_rtc_read_alarm, - .set_alarm = davinci_rtc_set_alarm, -}; - -static int __init davinci_rtc_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct davinci_rtc *davinci_rtc; - int ret = 0; - - davinci_rtc = devm_kzalloc(&pdev->dev, sizeof(struct davinci_rtc), GFP_KERNEL); - if (!davinci_rtc) - return -ENOMEM; - - davinci_rtc->irq = platform_get_irq(pdev, 0); - if (davinci_rtc->irq < 0) - return davinci_rtc->irq; - - davinci_rtc->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(davinci_rtc->base)) - return PTR_ERR(davinci_rtc->base); - - platform_set_drvdata(pdev, davinci_rtc); - - davinci_rtc->rtc = devm_rtc_allocate_device(&pdev->dev); - if (IS_ERR(davinci_rtc->rtc)) - return PTR_ERR(davinci_rtc->rtc); - - davinci_rtc->rtc->ops = &davinci_rtc_ops; - davinci_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; - davinci_rtc->rtc->range_max = RTC_TIMESTAMP_BEGIN_2000 + (1 << 16) * 86400ULL - 1; - - rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS, PRTCIF_INTFLG); - rtcif_write(davinci_rtc, 0, 
PRTCIF_INTEN); - rtcss_write(davinci_rtc, 0, PRTCSS_RTC_INTC_EXTENA1); - - rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CTRL); - rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CCTRL); - - ret = devm_request_irq(dev, davinci_rtc->irq, davinci_rtc_interrupt, - 0, "davinci_rtc", davinci_rtc); - if (ret < 0) { - dev_err(dev, "unable to register davinci RTC interrupt\n"); - return ret; - } - - /* Enable interrupts */ - rtcif_write(davinci_rtc, PRTCIF_INTEN_RTCSS, PRTCIF_INTEN); - rtcss_write(davinci_rtc, PRTCSS_RTC_INTC_EXTENA1_MASK, - PRTCSS_RTC_INTC_EXTENA1); - - rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL); - - device_init_wakeup(&pdev->dev, 0); - - return devm_rtc_register_device(davinci_rtc->rtc); -} - -static int __exit davinci_rtc_remove(struct platform_device *pdev) -{ - struct davinci_rtc *davinci_rtc = platform_get_drvdata(pdev); - - device_init_wakeup(&pdev->dev, 0); - - rtcif_write(davinci_rtc, 0, PRTCIF_INTEN); - - return 0; -} - -static struct platform_driver davinci_rtc_driver = { - .remove = __exit_p(davinci_rtc_remove), - .driver = { - .name = "rtc_davinci", - }, -}; - -module_platform_driver_probe(davinci_rtc_driver, davinci_rtc_probe); - -MODULE_AUTHOR("Miguel Aguilar "); -MODULE_DESCRIPTION("Texas Instruments DaVinci PRTC Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c index 6d66ab5a8b1768ade12da5f9be137cce47bf219a..ecc7d03079320f0bef0e3281e37bcbf17a0cc987 100644 --- a/drivers/rtc/rtc-ds1302.c +++ b/drivers/rtc/rtc-ds1302.c @@ -185,11 +185,6 @@ static int ds1302_probe(struct spi_device *spi) return 0; } -static void ds1302_remove(struct spi_device *spi) -{ - spi_set_drvdata(spi, NULL); -} - #ifdef CONFIG_OF static const struct of_device_id ds1302_dt_ids[] = { { .compatible = "maxim,ds1302", }, @@ -208,7 +203,6 @@ static struct spi_driver ds1302_driver = { .driver.name = "rtc-ds1302", .driver.of_match_table = of_match_ptr(ds1302_dt_ids), .probe = ds1302_probe, - .remove = ds1302_remove, .id_table = ds1302_spi_ids, }; diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index d51565bcc189690a754ba2879cc0b62c058e8bf2..def9b7f9d9577e9dad4052b781f5bfaddbfc5813 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -1218,8 +1219,7 @@ static ssize_t frequency_test_show(struct device *dev, regmap_read(ds1307->regmap, M41TXX_REG_CONTROL, &ctrl_reg); - return scnprintf(buf, PAGE_SIZE, (ctrl_reg & M41TXX_BIT_FT) ? "on\n" : - "off\n"); + return sysfs_emit(buf, (ctrl_reg & M41TXX_BIT_FT) ? 
"on\n" : "off\n"); } static DEVICE_ATTR_RW(frequency_test); diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c index 157bf5209ac40bc468717285182f857ade1f0a4c..a40c1a52df6595e51d4a952f0abff458dd665e0c 100644 --- a/drivers/rtc/rtc-ds1347.c +++ b/drivers/rtc/rtc-ds1347.c @@ -112,7 +112,7 @@ static int ds1347_set_time(struct device *dev, struct rtc_time *dt) return err; century = (dt->tm_year / 100) + 19; - err = regmap_write(map, DS1347_CENTURY_REG, century); + err = regmap_write(map, DS1347_CENTURY_REG, bin2bcd(century)); if (err) return err; diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c index 13d45c697da68ee7b814d43537547c6e568ab1b4..a5026b0514e78d27739ef664b1f250c7e921c839 100644 --- a/drivers/rtc/rtc-ds1742.c +++ b/drivers/rtc/rtc-ds1742.c @@ -158,8 +158,7 @@ static int ds1742_rtc_probe(struct platform_device *pdev) if (!pdata) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ioaddr = devm_ioremap_resource(&pdev->dev, res); + ioaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(ioaddr)) return PTR_ERR(ioaddr); diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c index 11850c2880ad487d3e8b3306fe6057e766e40c1e..e991cccdb6e9cee46a104b03926d4f2945f34f59 100644 --- a/drivers/rtc/rtc-efi.c +++ b/drivers/rtc/rtc-efi.c @@ -271,6 +271,8 @@ static int __init efi_rtc_probe(struct platform_device *dev) clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features); set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features); + device_init_wakeup(&dev->dev, true); + return devm_rtc_register_device(rtc); } diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c index c0df49fb978ce0ba5ae138c4fc0da04248fc5d78..3d7c4077fe1c69267aa46bc1037a6f7c8bd881d0 100644 --- a/drivers/rtc/rtc-fsl-ftm-alarm.c +++ b/drivers/rtc/rtc-fsl-ftm-alarm.c @@ -327,12 +327,7 @@ static struct platform_driver ftm_rtc_driver = { }, }; -static int __init ftm_alarm_init(void) -{ - return platform_driver_register(&ftm_rtc_driver); -} - -device_initcall(ftm_alarm_init); +module_platform_driver(ftm_rtc_driver); MODULE_DESCRIPTION("NXP/Freescale FlexTimer alarm driver"); MODULE_AUTHOR("Biwen Li "); diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c index ca677c4265e6c56da41b08b31caf8931f604d81b..a3b0de3393f5714fa8a68628539316a12c9ac95e 100644 --- a/drivers/rtc/rtc-isl12022.c +++ b/drivers/rtc/rtc-isl12022.c @@ -17,6 +17,7 @@ #include #include #include +#include /* ISL register offsets */ #define ISL12022_REG_SC 0x00 @@ -30,6 +31,9 @@ #define ISL12022_REG_SR 0x07 #define ISL12022_REG_INT 0x08 +#define ISL12022_REG_BETA 0x0d +#define ISL12022_REG_TEMP_L 0x28 + /* ISL register bits */ #define ISL12022_HR_MIL (1 << 7) /* military or 24 hour time */ @@ -38,6 +42,7 @@ #define ISL12022_INT_WRTC (1 << 6) +#define ISL12022_BETA_TSE (1 << 7) static struct i2c_driver isl12022_driver; @@ -46,6 +51,93 @@ struct isl12022 { struct regmap *regmap; }; +static umode_t isl12022_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type == hwmon_temp && attr == hwmon_temp_input) + return 0444; + + return 0; +} + +/* + * A user-initiated temperature conversion is not started by this function, + * so the temperature is updated once every ~60 seconds. 
+ */ +static int isl12022_hwmon_read_temp(struct device *dev, long *mC) +{ + struct isl12022 *isl12022 = dev_get_drvdata(dev); + struct regmap *regmap = isl12022->regmap; + u8 temp_buf[2]; + int temp, ret; + + ret = regmap_bulk_read(regmap, ISL12022_REG_TEMP_L, + temp_buf, sizeof(temp_buf)); + if (ret) + return ret; + /* + * Temperature is represented as a 10-bit number, unit half-Kelvins. + */ + temp = (temp_buf[1] << 8) | temp_buf[0]; + temp *= 500; + temp -= 273000; + + *mC = temp; + + return 0; +} + +static int isl12022_hwmon_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + if (type == hwmon_temp && attr == hwmon_temp_input) + return isl12022_hwmon_read_temp(dev, val); + + return -EOPNOTSUPP; +} + +static const struct hwmon_channel_info *isl12022_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT), + NULL +}; + +static const struct hwmon_ops isl12022_hwmon_ops = { + .is_visible = isl12022_hwmon_is_visible, + .read = isl12022_hwmon_read, +}; + +static const struct hwmon_chip_info isl12022_hwmon_chip_info = { + .ops = &isl12022_hwmon_ops, + .info = isl12022_hwmon_info, +}; + +static void isl12022_hwmon_register(struct device *dev) +{ + struct isl12022 *isl12022; + struct device *hwmon; + int ret; + + if (!IS_REACHABLE(CONFIG_HWMON)) + return; + + isl12022 = dev_get_drvdata(dev); + + ret = regmap_update_bits(isl12022->regmap, ISL12022_REG_BETA, + ISL12022_BETA_TSE, ISL12022_BETA_TSE); + if (ret) { + dev_warn(dev, "unable to enable temperature sensor\n"); + return; + } + + hwmon = devm_hwmon_device_register_with_info(dev, "isl12022", isl12022, + &isl12022_hwmon_chip_info, + NULL); + if (IS_ERR(hwmon)) + dev_warn(dev, "unable to register hwmon device: %pe\n", hwmon); +} + /* * In the routines that deal directly with the isl12022 hardware, we use * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch. @@ -160,6 +252,8 @@ static int isl12022_probe(struct i2c_client *client) return PTR_ERR(isl12022->regmap); } + isl12022_hwmon_register(&client->dev); + isl12022->rtc = devm_rtc_allocate_device(&client->dev); if (IS_ERR(isl12022->rtc)) return PTR_ERR(isl12022->rtc); diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index f448a525333e1b1a70d4773d69b78cb717ebec0b..73cc6aaf9b8b72f84d51a54d42bcf1d0528dbdf2 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c @@ -797,7 +797,7 @@ static int isl1208_setup_irq(struct i2c_client *client, int irq) } static int -isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) +isl1208_probe(struct i2c_client *client) { int rc = 0; struct isl1208_state *isl1208; @@ -821,6 +821,8 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) if (!isl1208->config) return -ENODEV; } else { + const struct i2c_device_id *id = i2c_match_id(isl1208_id, client); + if (id->driver_data >= ISL_LAST_ID) return -ENODEV; isl1208->config = &isl1208_configs[id->driver_data]; @@ -906,7 +908,7 @@ static struct i2c_driver isl1208_driver = { .name = "rtc-isl1208", .of_match_table = of_match_ptr(isl1208_of_match), }, - .probe = isl1208_probe, + .probe_new = isl1208_probe, .id_table = isl1208_id, }; diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index e0b4d3794320bf5b26113f2e2d02180d75843dd2..494052dbd39ff3b8c657b500693e35868aa55ec6 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -692,7 +692,7 @@ static void wdt_disable(void) * @ppos: pointer to the position to write. 
No seeks allowed * * A write to a watchdog device is defined as a keepalive signal. Any - * write of data will do, as we we don't define content meaning. + * write of data will do, as we don't define content meaning. */ static ssize_t wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) @@ -876,8 +876,7 @@ static struct notifier_block wdt_notifier = { ***************************************************************************** */ -static int m41t80_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int m41t80_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; int rc = 0; @@ -897,11 +896,13 @@ static int m41t80_probe(struct i2c_client *client, return -ENOMEM; m41t80_data->client = client; - if (client->dev.of_node) + if (client->dev.of_node) { m41t80_data->features = (unsigned long) of_device_get_match_data(&client->dev); - else + } else { + const struct i2c_device_id *id = i2c_match_id(m41t80_id, client); m41t80_data->features = id->driver_data; + } i2c_set_clientdata(client, m41t80_data); m41t80_data->rtc = devm_rtc_allocate_device(&client->dev); @@ -1007,7 +1008,7 @@ static struct i2c_driver m41t80_driver = { .of_match_table = of_match_ptr(m41t80_of_match), .pm = &m41t80_pm, }, - .probe = m41t80_probe, + .probe_new = m41t80_probe, .remove = m41t80_remove, .id_table = m41t80_id, }; diff --git a/drivers/rtc/rtc-msc313.c b/drivers/rtc/rtc-msc313.c index f3fde013c4b8b169cc3f3844c07cbc04769d9c95..8d7737e0e2e02cfd303187ade872922cc3993458 100644 --- a/drivers/rtc/rtc-msc313.c +++ b/drivers/rtc/rtc-msc313.c @@ -212,22 +212,12 @@ static int msc313_rtc_probe(struct platform_device *pdev) return ret; } - clk = devm_clk_get(dev, NULL); + clk = devm_clk_get_enabled(dev, NULL); if (IS_ERR(clk)) { dev_err(dev, "No input reference clock\n"); return PTR_ERR(clk); } - ret = clk_prepare_enable(clk); - if (ret) { - dev_err(dev, "Failed to enable the reference clock, %d\n", ret); - return ret; - } - - ret = devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare, clk); - if (ret) - return ret; - rate = clk_get_rate(clk); writew(rate & 0xFFFF, priv->rtc_base + REG_RTC_FREQ_CW_L); writew((rate >> 16) & 0xFFFF, priv->rtc_base + REG_RTC_FREQ_CW_H); diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c index 5e038340162941f84796da1e25f55bcd758c559e..f6d2ad91ff7a9a9980d65786bb6125e538467600 100644 --- a/drivers/rtc/rtc-mxc_v2.c +++ b/drivers/rtc/rtc-mxc_v2.c @@ -336,8 +336,10 @@ static int mxc_rtc_probe(struct platform_device *pdev) } pdata->rtc = devm_rtc_allocate_device(&pdev->dev); - if (IS_ERR(pdata->rtc)) + if (IS_ERR(pdata->rtc)) { + clk_disable_unprepare(pdata->clk); return PTR_ERR(pdata->rtc); + } pdata->rtc->ops = &mxc_rtc_ops; pdata->rtc->range_max = U32_MAX; diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c index d43acd3920ed38bf700b7746c35e57b2be38270b..0a3b14c95d9027ce770e5a30c2f467655519215a 100644 --- a/drivers/rtc/rtc-nct3018y.c +++ b/drivers/rtc/rtc-nct3018y.c @@ -452,8 +452,7 @@ static const struct rtc_class_ops nct3018y_rtc_ops = { .ioctl = nct3018y_ioctl, }; -static int nct3018y_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int nct3018y_probe(struct i2c_client *client) { struct nct3018y *nct3018y; int err, flags; @@ -541,7 +540,7 @@ static struct i2c_driver nct3018y_driver = { .name = "rtc-nct3018y", .of_match_table = of_match_ptr(nct3018y_of_match), }, - .probe = nct3018y_probe, + .probe_new = nct3018y_probe, .id_table = nct3018y_id, }; 
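The conversions of isl1208, m41t80 and nct3018y above (and of pcf2127,
rs5c372, rv8803 and rx8025 below) all follow the same probe_new pattern:
the i2c_device_id argument is dropped from probe, and drivers that still
need per-variant driver_data look it up themselves with i2c_match_id() on
the non-OF path. The following is a minimal sketch of that pattern; the
"foo" driver, its id table and the variant value are hypothetical
placeholders, not code from this series.

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_device.h>

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static int foo_probe(struct i2c_client *client)
{
	unsigned long variant;

	if (client->dev.of_node) {
		/* DT systems: take the variant from the OF match data. */
		variant = (unsigned long)of_device_get_match_data(&client->dev);
	} else {
		/* Legacy instantiation: look the client up in our own id table. */
		const struct i2c_device_id *id = i2c_match_id(foo_id, client);

		if (!id)
			return -ENODEV;
		variant = id->driver_data;
	}

	dev_dbg(&client->dev, "probing variant %lu\n", variant);
	/* ... allocate and register the RTC device here ... */
	return 0;
}

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.probe_new = foo_probe,
	.id_table = foo_id,
};
module_i2c_driver(foo_driver);

Note that once probe references the id table, the table must be defined
before it; that is why the pcf2127 and rv8803 hunks below move their id
tables above the probe functions.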
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c index 63b275b014bd613a95b42dea4940f4d3c3035dfb..87f4fc9df68b4a66de70c8768611511828780bb4 100644 --- a/drivers/rtc/rtc-pcf2127.c +++ b/drivers/rtc/rtc-pcf2127.c @@ -885,9 +885,17 @@ static const struct regmap_bus pcf2127_i2c_regmap = { static struct i2c_driver pcf2127_i2c_driver; -static int pcf2127_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static const struct i2c_device_id pcf2127_i2c_id[] = { + { "pcf2127", 1 }, + { "pcf2129", 0 }, + { "pca2129", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, pcf2127_i2c_id); + +static int pcf2127_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_match_id(pcf2127_i2c_id, client); struct regmap *regmap; static const struct regmap_config config = { .reg_bits = 8, @@ -910,20 +918,12 @@ static int pcf2127_i2c_probe(struct i2c_client *client, pcf2127_i2c_driver.driver.name, id->driver_data); } -static const struct i2c_device_id pcf2127_i2c_id[] = { - { "pcf2127", 1 }, - { "pcf2129", 0 }, - { "pca2129", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, pcf2127_i2c_id); - static struct i2c_driver pcf2127_i2c_driver = { .driver = { .name = "rtc-pcf2127-i2c", .of_match_table = of_match_ptr(pcf2127_of_match), }, - .probe = pcf2127_i2c_probe, + .probe_new = pcf2127_i2c_probe, .id_table = pcf2127_i2c_id, }; diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c index 095891999da11287985c562829f90d9b4f2999ca..754e03984f986dc2f5d09cfa59bff4087a15558b 100644 --- a/drivers/rtc/rtc-pcf85063.c +++ b/drivers/rtc/rtc-pcf85063.c @@ -169,10 +169,10 @@ static int pcf85063_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) if (ret) return ret; - alrm->time.tm_sec = bcd2bin(buf[0]); - alrm->time.tm_min = bcd2bin(buf[1]); - alrm->time.tm_hour = bcd2bin(buf[2]); - alrm->time.tm_mday = bcd2bin(buf[3]); + alrm->time.tm_sec = bcd2bin(buf[0] & 0x7f); + alrm->time.tm_min = bcd2bin(buf[1] & 0x7f); + alrm->time.tm_hour = bcd2bin(buf[2] & 0x3f); + alrm->time.tm_mday = bcd2bin(buf[3] & 0x3f); ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &val); if (ret) @@ -424,7 +424,7 @@ static int pcf85063_clkout_control(struct clk_hw *hw, bool enable) unsigned int buf; int ret; - ret = regmap_read(pcf85063->regmap, PCF85063_REG_OFFSET, &buf); + ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &buf); if (ret < 0) return ret; buf &= PCF85063_REG_CLKO_F_MASK; diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c index 6174b3fd4b98560dea3e65cc5dfd0dc1dd4fd1f3..92de99f11a7a5a64dcbe8aa97e8c8cde6c7ffc06 100644 --- a/drivers/rtc/rtc-pcf8523.c +++ b/drivers/rtc/rtc-pcf8523.c @@ -99,24 +99,24 @@ static irqreturn_t pcf8523_irq(int irq, void *dev_id) static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct pcf8523 *pcf8523 = dev_get_drvdata(dev); - u8 regs[7]; + u8 regs[10]; int err; - err = regmap_bulk_read(pcf8523->regmap, PCF8523_REG_SECONDS, regs, + err = regmap_bulk_read(pcf8523->regmap, PCF8523_REG_CONTROL1, regs, sizeof(regs)); if (err < 0) return err; - if (regs[0] & PCF8523_SECONDS_OS) + if ((regs[0] & PCF8523_CONTROL1_STOP) || (regs[3] & PCF8523_SECONDS_OS)) return -EINVAL; - tm->tm_sec = bcd2bin(regs[0] & 0x7f); - tm->tm_min = bcd2bin(regs[1] & 0x7f); - tm->tm_hour = bcd2bin(regs[2] & 0x3f); - tm->tm_mday = bcd2bin(regs[3] & 0x3f); - tm->tm_wday = regs[4] & 0x7; - tm->tm_mon = bcd2bin(regs[5] & 0x1f) - 1; - tm->tm_year = bcd2bin(regs[6]) + 100; + tm->tm_sec = bcd2bin(regs[3] & 0x7f); + tm->tm_min = bcd2bin(regs[4] & 
0x7f); + tm->tm_hour = bcd2bin(regs[5] & 0x3f); + tm->tm_mday = bcd2bin(regs[6] & 0x3f); + tm->tm_wday = regs[7] & 0x7; + tm->tm_mon = bcd2bin(regs[8] & 0x1f) - 1; + tm->tm_year = bcd2bin(regs[9]) + 100; return 0; } diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c index 11fa9788558beafbb7669b6403187d23dae9f9c5..0a7fd94784651b9e604e8e626b79ee1c182fb9a3 100644 --- a/drivers/rtc/rtc-pcf8563.c +++ b/drivers/rtc/rtc-pcf8563.c @@ -567,6 +567,8 @@ static int pcf8563_probe(struct i2c_client *client) client->irq); return err; } + } else { + clear_bit(RTC_FEATURE_ALARM, pcf8563->rtc->features); } err = devm_rtc_register_device(pcf8563->rtc); diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c index 7fb9145c43bd532f312078d3c8234c3a76b38e96..fa351ac201587e8556ebbf007790294d2f24d07f 100644 --- a/drivers/rtc/rtc-pic32.c +++ b/drivers/rtc/rtc-pic32.c @@ -324,16 +324,16 @@ static int pic32_rtc_probe(struct platform_device *pdev) spin_lock_init(&pdata->alarm_lock); + pdata->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(pdata->rtc)) + return PTR_ERR(pdata->rtc); + clk_prepare_enable(pdata->clk); pic32_rtc_enable(pdata, 1); device_init_wakeup(&pdev->dev, 1); - pdata->rtc = devm_rtc_allocate_device(&pdev->dev); - if (IS_ERR(pdata->rtc)) - return PTR_ERR(pdata->rtc); - pdata->rtc->ops = &pic32_rtcops; pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; pdata->rtc->range_max = RTC_TIMESTAMP_END_2099; diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c index dc6d1476baa597374b396a81cba382d027dbea20..716e5d9ad74d14a27fbf827619bcec1043375f22 100644 --- a/drivers/rtc/rtc-pm8xxx.c +++ b/drivers/rtc/rtc-pm8xxx.c @@ -461,7 +461,6 @@ static const struct pm8xxx_rtc_regs pmk8350_regs = { */ static const struct of_device_id pm8xxx_id_table[] = { { .compatible = "qcom,pm8921-rtc", .data = &pm8921_regs }, - { .compatible = "qcom,pm8018-rtc", .data = &pm8921_regs }, { .compatible = "qcom,pm8058-rtc", .data = &pm8058_regs }, { .compatible = "qcom,pm8941-rtc", .data = &pm8941_regs }, { .compatible = "qcom,pmk8350-rtc", .data = &pmk8350_regs }, diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c index e920da8c08da198260f926dee63febda8f1c21c7..2d9bcb3ce1e3b93b7c67f0c32d0628aba91f8fd9 100644 --- a/drivers/rtc/rtc-rk808.c +++ b/drivers/rtc/rtc-rk808.c @@ -14,7 +14,6 @@ #include #include #include -#include /* RTC_CTRL_REG bitfields */ #define BIT_RTC_CTRL_REG_STOP_RTC_M BIT(0) @@ -51,7 +50,7 @@ struct rk_rtc_compat_reg { }; struct rk808_rtc { - struct rk808 *rk808; + struct regmap *regmap; struct rtc_device *rtc; struct rk_rtc_compat_reg *creg; int irq; @@ -97,12 +96,11 @@ static void gregorian_to_rockchip(struct rtc_time *tm) static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm) { struct rk808_rtc *rk808_rtc = dev_get_drvdata(dev); - struct rk808 *rk808 = rk808_rtc->rk808; u8 rtc_data[NUM_TIME_REGS]; int ret; /* Force an update of the shadowed registers right now */ - ret = regmap_update_bits(rk808->regmap, rk808_rtc->creg->ctrl_reg, + ret = regmap_update_bits(rk808_rtc->regmap, rk808_rtc->creg->ctrl_reg, BIT_RTC_CTRL_REG_RTC_GET_TIME, BIT_RTC_CTRL_REG_RTC_GET_TIME); if (ret) { @@ -116,7 +114,7 @@ static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm) * 32khz. If we clear the GET_TIME bit here, the time of i2c transfer * certainly more than 31.25us: 16 * 2.5us at 400kHz bus frequency. 
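 * (31.25 us is one period of the nominal 32 kHz RTC clock, 1 / 32000 Hz,
 * so a 16-bit transfer at 400 kHz, 16 * 2.5 us = 40 us, already spans
 * more than one RTC tick.)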
*/ - ret = regmap_update_bits(rk808->regmap, rk808_rtc->creg->ctrl_reg, + ret = regmap_update_bits(rk808_rtc->regmap, rk808_rtc->creg->ctrl_reg, BIT_RTC_CTRL_REG_RTC_GET_TIME, 0); if (ret) { @@ -124,7 +122,7 @@ static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm) return ret; } - ret = regmap_bulk_read(rk808->regmap, rk808_rtc->creg->seconds_reg, + ret = regmap_bulk_read(rk808_rtc->regmap, rk808_rtc->creg->seconds_reg, rtc_data, NUM_TIME_REGS); if (ret) { dev_err(dev, "Failed to bulk read rtc_data: %d\n", ret); @@ -148,7 +146,6 @@ static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm) static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct rk808_rtc *rk808_rtc = dev_get_drvdata(dev); - struct rk808 *rk808 = rk808_rtc->rk808; u8 rtc_data[NUM_TIME_REGS]; int ret; @@ -163,7 +160,7 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm) rtc_data[6] = bin2bcd(tm->tm_wday); /* Stop RTC while updating the RTC registers */ - ret = regmap_update_bits(rk808->regmap, rk808_rtc->creg->ctrl_reg, + ret = regmap_update_bits(rk808_rtc->regmap, rk808_rtc->creg->ctrl_reg, BIT_RTC_CTRL_REG_STOP_RTC_M, BIT_RTC_CTRL_REG_STOP_RTC_M); if (ret) { @@ -171,14 +168,14 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm) return ret; } - ret = regmap_bulk_write(rk808->regmap, rk808_rtc->creg->seconds_reg, + ret = regmap_bulk_write(rk808_rtc->regmap, rk808_rtc->creg->seconds_reg, rtc_data, NUM_TIME_REGS); if (ret) { dev_err(dev, "Failed to bull write rtc_data: %d\n", ret); return ret; } /* Start RTC again */ - ret = regmap_update_bits(rk808->regmap, rk808_rtc->creg->ctrl_reg, + ret = regmap_update_bits(rk808_rtc->regmap, rk808_rtc->creg->ctrl_reg, BIT_RTC_CTRL_REG_STOP_RTC_M, 0); if (ret) { dev_err(dev, "Failed to update RTC control: %d\n", ret); @@ -191,12 +188,11 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm) static int rk808_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rk808_rtc *rk808_rtc = dev_get_drvdata(dev); - struct rk808 *rk808 = rk808_rtc->rk808; u8 alrm_data[NUM_ALARM_REGS]; uint32_t int_reg; int ret; - ret = regmap_bulk_read(rk808->regmap, + ret = regmap_bulk_read(rk808_rtc->regmap, rk808_rtc->creg->alarm_seconds_reg, alrm_data, NUM_ALARM_REGS); if (ret) { @@ -212,7 +208,7 @@ static int rk808_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) alrm->time.tm_year = (bcd2bin(alrm_data[5] & YEARS_REG_MSK)) + 100; rockchip_to_gregorian(&alrm->time); - ret = regmap_read(rk808->regmap, rk808_rtc->creg->int_reg, &int_reg); + ret = regmap_read(rk808_rtc->regmap, rk808_rtc->creg->int_reg, &int_reg); if (ret) { dev_err(dev, "Failed to read RTC INT REG: %d\n", ret); return ret; @@ -228,10 +224,9 @@ static int rk808_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) static int rk808_rtc_stop_alarm(struct rk808_rtc *rk808_rtc) { - struct rk808 *rk808 = rk808_rtc->rk808; int ret; - ret = regmap_update_bits(rk808->regmap, rk808_rtc->creg->int_reg, + ret = regmap_update_bits(rk808_rtc->regmap, rk808_rtc->creg->int_reg, BIT_RTC_INTERRUPTS_REG_IT_ALARM_M, 0); return ret; @@ -239,10 +234,9 @@ static int rk808_rtc_stop_alarm(struct rk808_rtc *rk808_rtc) static int rk808_rtc_start_alarm(struct rk808_rtc *rk808_rtc) { - struct rk808 *rk808 = rk808_rtc->rk808; int ret; - ret = regmap_update_bits(rk808->regmap, rk808_rtc->creg->int_reg, + ret = regmap_update_bits(rk808_rtc->regmap, rk808_rtc->creg->int_reg, BIT_RTC_INTERRUPTS_REG_IT_ALARM_M, 
BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); @@ -252,7 +246,6 @@ static int rk808_rtc_start_alarm(struct rk808_rtc *rk808_rtc) static int rk808_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rk808_rtc *rk808_rtc = dev_get_drvdata(dev); - struct rk808 *rk808 = rk808_rtc->rk808; u8 alrm_data[NUM_ALARM_REGS]; int ret; @@ -272,7 +265,7 @@ static int rk808_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) alrm_data[4] = bin2bcd(alrm->time.tm_mon + 1); alrm_data[5] = bin2bcd(alrm->time.tm_year - 100); - ret = regmap_bulk_write(rk808->regmap, + ret = regmap_bulk_write(rk808_rtc->regmap, rk808_rtc->creg->alarm_seconds_reg, alrm_data, NUM_ALARM_REGS); if (ret) { @@ -313,20 +306,18 @@ static int rk808_rtc_alarm_irq_enable(struct device *dev, static irqreturn_t rk808_alarm_irq(int irq, void *data) { struct rk808_rtc *rk808_rtc = data; - struct rk808 *rk808 = rk808_rtc->rk808; - struct i2c_client *client = rk808->i2c; int ret; - ret = regmap_write(rk808->regmap, rk808_rtc->creg->status_reg, + ret = regmap_write(rk808_rtc->regmap, rk808_rtc->creg->status_reg, RTC_STATUS_MASK); if (ret) { - dev_err(&client->dev, + dev_err(&rk808_rtc->rtc->dev, "%s:Failed to update RTC status: %d\n", __func__, ret); return ret; } rtc_update_irq(rk808_rtc->rtc, 1, RTC_IRQF | RTC_AF); - dev_dbg(&client->dev, + dev_dbg(&rk808_rtc->rtc->dev, "%s:irq=%d\n", __func__, irq); return IRQ_HANDLED; } @@ -404,10 +395,12 @@ static int rk808_rtc_probe(struct platform_device *pdev) break; } platform_set_drvdata(pdev, rk808_rtc); - rk808_rtc->rk808 = rk808; + rk808_rtc->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!rk808_rtc->regmap) + return -ENODEV; /* start rtc running by default, and use shadowed timer. */ - ret = regmap_update_bits(rk808->regmap, rk808_rtc->creg->ctrl_reg, + ret = regmap_update_bits(rk808_rtc->regmap, rk808_rtc->creg->ctrl_reg, BIT_RTC_CTRL_REG_STOP_RTC_M | BIT_RTC_CTRL_REG_RTC_READSEL_M, BIT_RTC_CTRL_REG_RTC_READSEL_M); @@ -417,7 +410,7 @@ static int rk808_rtc_probe(struct platform_device *pdev) return ret; } - ret = regmap_write(rk808->regmap, rk808_rtc->creg->status_reg, + ret = regmap_write(rk808_rtc->regmap, rk808_rtc->creg->status_reg, RTC_STATUS_MASK); if (ret) { dev_err(&pdev->dev, diff --git a/drivers/rtc/rtc-rs5c313.c b/drivers/rtc/rtc-rs5c313.c index e98f85f34206f16cb1741c97770df7ee519978b0..712a08e9e52db5acf9315b630cb98f5de964fe34 100644 --- a/drivers/rtc/rtc-rs5c313.c +++ b/drivers/rtc/rtc-rs5c313.c @@ -2,7 +2,7 @@ * Ricoh RS5C313 RTC device/driver * Copyright (C) 2007 Nobuhiro Iwamatsu * - * 2005-09-19 modifed by kogiidena + * 2005-09-19 modified by kogiidena * * Based on the old drivers/char/rs5c313_rtc.c by: * Copyright (C) 2000 Philipp Rumpf @@ -36,7 +36,7 @@ * 1.11a Daniele Bellucci: Audit create_proc_read_entry in rtc_init * 1.12 Venkatesh Pallipadi: Hooks for emulating rtc on HPET base-timer * CONFIG_HPET_EMULATE_RTC - * 1.13 Nobuhiro Iwamatsu: Updata driver. + * 1.13 Nobuhiro Iwamatsu: Update driver. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -280,7 +280,7 @@ static int rs5c313_rtc_set_time(struct device *dev, struct rtc_time *tm) while (1) { RS5C313_CEENABLE; /* CE:H */ - /* Initiatlize control reg. 24 hour */ + /* Initialize control reg. 
24 hour */ rs5c313_write_cntreg(0x04); if (!(rs5c313_read_cntreg() & RS5C313_CNTREG_ADJ_BSY)) diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c index 9562c477e1c96d2362e57178889bfac1b965686e..b4c5d016eca32c10838c352859ff1b73fbcb4556 100644 --- a/drivers/rtc/rtc-rs5c372.c +++ b/drivers/rtc/rtc-rs5c372.c @@ -150,7 +150,7 @@ static int rs5c_get_regs(struct rs5c372 *rs5c) * least 80219 chips; this works around that bug. * * The third method on the other hand doesn't work for the SMBus-only - * configurations, so we use the the first method there, stripping off + * configurations, so we use the first method there, stripping off * the extra register in the process. */ if (rs5c->smbus) { @@ -791,8 +791,7 @@ static int rs5c_oscillator_setup(struct rs5c372 *rs5c372) return 0; } -static int rs5c372_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int rs5c372_probe(struct i2c_client *client) { int err = 0; int smbus_mode = 0; @@ -826,11 +825,13 @@ static int rs5c372_probe(struct i2c_client *client, rs5c372->client = client; i2c_set_clientdata(client, rs5c372); - if (client->dev.of_node) + if (client->dev.of_node) { rs5c372->type = (enum rtc_type) of_device_get_match_data(&client->dev); - else + } else { + const struct i2c_device_id *id = i2c_match_id(rs5c372_id, client); rs5c372->type = id->driver_data; + } /* we read registers 0x0f then 0x00-0x0f; skip the first one */ rs5c372->regs = &rs5c372->buf[1]; @@ -920,7 +921,7 @@ static struct i2c_driver rs5c372_driver = { .name = "rtc-rs5c372", .of_match_table = of_match_ptr(rs5c372_of_match), }, - .probe = rs5c372_probe, + .probe_new = rs5c372_probe, .remove = rs5c372_remove, .id_table = rs5c372_id, }; diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c index dd170e3efd83ed6da15ca91feefc2e39829df923..b0099e26e3b05fbfccb3927b1b96a638ef79dbca 100644 --- a/drivers/rtc/rtc-rv3028.c +++ b/drivers/rtc/rtc-rv3028.c @@ -902,9 +902,20 @@ static int rv3028_probe(struct i2c_client *client) return PTR_ERR(rv3028->rtc); if (client->irq > 0) { + unsigned long flags; + + /* + * If flags = 0, devm_request_threaded_irq() will use IRQ flags + * obtained from device tree. 
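+ *
+ * Without a firmware node there is nothing to inherit, so keep the
+ * historical IRQF_TRIGGER_LOW default for such devices.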
+ */ + if (dev_fwnode(&client->dev)) + flags = 0; + else + flags = IRQF_TRIGGER_LOW; + ret = devm_request_threaded_irq(&client->dev, client->irq, NULL, rv3028_handle_irq, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, + flags | IRQF_ONESHOT, "rv3028", rv3028); if (ret) { dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n"); diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c index eb483a30bd92ff1a84f4ce4c8c4e00a2e079003b..e4fdd47ae066c0d5be83086441a430035587b992 100644 --- a/drivers/rtc/rtc-rv3029c2.c +++ b/drivers/rtc/rtc-rv3029c2.c @@ -17,6 +17,7 @@ #include #include #include +#include #include /* Register map */ diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c index 3527a0521e9b2531122deb16365e09611831820d..b581b6d5ad731730bbe1f4e18601917cf26f541e 100644 --- a/drivers/rtc/rtc-rv8803.c +++ b/drivers/rtc/rtc-rv8803.c @@ -576,8 +576,16 @@ static int rv8803_regs_configure(struct rv8803_data *rv8803) return 0; } -static int rv8803_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static const struct i2c_device_id rv8803_id[] = { + { "rv8803", rv_8803 }, + { "rv8804", rx_8804 }, + { "rx8803", rx_8803 }, + { "rx8900", rx_8900 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, rv8803_id); + +static int rv8803_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; struct rv8803_data *rv8803; @@ -605,11 +613,14 @@ static int rv8803_probe(struct i2c_client *client, mutex_init(&rv8803->flags_lock); rv8803->client = client; - if (client->dev.of_node) + if (client->dev.of_node) { rv8803->type = (enum rv8803_type) of_device_get_match_data(&client->dev); - else + } else { + const struct i2c_device_id *id = i2c_match_id(rv8803_id, client); + rv8803->type = id->driver_data; + } i2c_set_clientdata(client, rv8803); flags = rv8803_read_reg(client, RV8803_FLAG); @@ -666,15 +677,6 @@ static int rv8803_probe(struct i2c_client *client, return 0; } -static const struct i2c_device_id rv8803_id[] = { - { "rv8803", rv_8803 }, - { "rv8804", rx_8804 }, - { "rx8803", rx_8803 }, - { "rx8900", rx_8900 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, rv8803_id); - static const __maybe_unused struct of_device_id rv8803_of_match[] = { { .compatible = "microcrystal,rv8803", @@ -701,7 +703,7 @@ static struct i2c_driver rv8803_driver = { .name = "rtc-rv8803", .of_match_table = of_match_ptr(rv8803_of_match), }, - .probe = rv8803_probe, + .probe_new = rv8803_probe, .id_table = rv8803_id, }; module_i2c_driver(rv8803_driver); diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c index cc634558b9280bd9b11a6a0bdb7b6d6ded4f85df..76a49838014ba31643210571c98b55d4efd970ad 100644 --- a/drivers/rtc/rtc-rx6110.c +++ b/drivers/rtc/rtc-rx6110.c @@ -376,7 +376,7 @@ static const struct spi_device_id rx6110_spi_id[] = { }; MODULE_DEVICE_TABLE(spi, rx6110_spi_id); -static const struct of_device_id rx6110_spi_of_match[] = { +static const __maybe_unused struct of_device_id rx6110_spi_of_match[] = { { .compatible = "epson,rx6110" }, { }, }; diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c index dde86f3e2a4bdf072c1056d3bac73156092b77ae..331c20d4d843c1a3aa234c0083763b1151b8eb59 100644 --- a/drivers/rtc/rtc-rx8025.c +++ b/drivers/rtc/rtc-rx8025.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -519,9 +520,9 @@ static const struct attribute_group rx8025_attr_group = { .attrs = rx8025_attrs, }; -static int rx8025_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int rx8025_probe(struct i2c_client *client) { + const 
struct i2c_device_id *id = i2c_match_id(rx8025_id, client); struct i2c_adapter *adapter = client->adapter; struct rx8025_data *rx8025; int err = 0; @@ -580,7 +581,7 @@ static struct i2c_driver rx8025_driver = { .driver = { .name = "rtc-rx8025", }, - .probe = rx8025_probe, + .probe_new = rx8025_probe, .id_table = rx8025_id, }; diff --git a/drivers/rtc/rtc-rzn1.c b/drivers/rtc/rtc-rzn1.c index ac788799c8e3e4228f342141da491acae2da9356..0d36bc50197c121844ef29cd7e7b81e8dd7fb115 100644 --- a/drivers/rtc/rtc-rzn1.c +++ b/drivers/rtc/rtc-rzn1.c @@ -355,7 +355,9 @@ static int rzn1_rtc_probe(struct platform_device *pdev) set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtcdev->features); clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtcdev->features); - devm_pm_runtime_enable(&pdev->dev); + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret < 0) + return ret; ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) return ret; diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c index 81d97b1d3159159b787ca8dc0289ae07a387d9f7..b18daaf72b17dbd87e982362985ea584c454e2c8 100644 --- a/drivers/rtc/rtc-s35390a.c +++ b/drivers/rtc/rtc-s35390a.c @@ -211,7 +211,7 @@ static int s35390a_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct i2c_client *client = to_i2c_client(dev); struct s35390a *s35390a = i2c_get_clientdata(client); - int i, err; + int i; char buf[7], status; dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d mday=%d, " @@ -234,9 +234,7 @@ static int s35390a_rtc_set_time(struct device *dev, struct rtc_time *tm) for (i = 0; i < 7; ++i) buf[i] = bitrev8(buf[i]); - err = s35390a_set_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); - - return err; + return s35390a_set_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); } static int s35390a_rtc_read_time(struct device *dev, struct rtc_time *tm) diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index db529733c9c46c7e8967a701f27235c09e0bb716..8fc5efde3e0b30c19b8f2e87e8ad05ae35483d05 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -429,14 +429,9 @@ static int s3c_rtc_probe(struct platform_device *pdev) return PTR_ERR(info->base); info->rtc_clk = devm_clk_get(&pdev->dev, "rtc"); - if (IS_ERR(info->rtc_clk)) { - ret = PTR_ERR(info->rtc_clk); - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to find rtc clock\n"); - else - dev_dbg(&pdev->dev, "probe deferred due to missing rtc clk\n"); - return ret; - } + if (IS_ERR(info->rtc_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(info->rtc_clk), + "failed to find rtc clock\n"); ret = clk_prepare_enable(info->rtc_clk); if (ret) return ret; diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c index bd929b0e7d7defdd94335cca248a8526ee24d1e0..d82acf1af1faec540a7d3e90e4b6cde257981e67 100644 --- a/drivers/rtc/rtc-snvs.c +++ b/drivers/rtc/rtc-snvs.c @@ -32,6 +32,14 @@ #define SNVS_LPPGDR_INIT 0x41736166 #define CNTR_TO_SECS_SH 15 +/* The maximum RTC clock cycles that are allowed to pass between two + * consecutive clock counter register reads. If the values are corrupted a + * bigger difference is expected. The RTC frequency is 32kHz. With 320 cycles + * we end at 10ms which should be enough for most cases. If it once takes + * longer than expected we do a retry. 
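+ * At the nominal 32 kHz this works out to 320 / 32000 = 10 ms per
+ * attempt, and the read loops below retry up to 100 times.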
+ */ +#define MAX_RTC_READ_DIFF_CYCLES 320 + struct snvs_rtc_data { struct rtc_device *rtc; struct regmap *regmap; @@ -56,6 +64,7 @@ static u64 rtc_read_lpsrt(struct snvs_rtc_data *data) static u32 rtc_read_lp_counter(struct snvs_rtc_data *data) { u64 read1, read2; + s64 diff; unsigned int timeout = 100; /* As expected, the registers might update between the read of the LSB @@ -66,7 +75,8 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data) do { read2 = read1; read1 = rtc_read_lpsrt(data); - } while (read1 != read2 && --timeout); + diff = read1 - read2; + } while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout); if (!timeout) dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n"); @@ -78,13 +88,15 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data) static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb) { u32 count1, count2; + s32 diff; unsigned int timeout = 100; regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1); do { count2 = count1; regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1); - } while (count1 != count2 && --timeout); + diff = count1 - count2; + } while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout); if (!timeout) { dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n"); return -ETIMEDOUT; diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c index bdb20f63254e26cafdeea10330b4ecf3d944b543..0f8e4231098ef1b856906a484a17585f8be94e2a 100644 --- a/drivers/rtc/rtc-st-lpc.c +++ b/drivers/rtc/rtc-st-lpc.c @@ -238,6 +238,7 @@ static int st_rtc_probe(struct platform_device *pdev) rtc->clkrate = clk_get_rate(rtc->clk); if (!rtc->clkrate) { + clk_disable_unprepare(rtc->clk); dev_err(&pdev->dev, "Unable to fetch clock rate\n"); return -EINVAL; } diff --git a/drivers/rtc/sysfs.c b/drivers/rtc/sysfs.c index 00f1945bcb7e17585f03a4b6d9f1c915e9adaf46..e3062c4d3f2ce1fbf2ce70c9cbaa13853fc60b92 100644 --- a/drivers/rtc/sysfs.c +++ b/drivers/rtc/sysfs.c @@ -6,6 +6,7 @@ * Author: Alessandro Zummo */ +#include #include #include diff --git a/drivers/s390/char/hmcdrv_dev.c b/drivers/s390/char/hmcdrv_dev.c index 20e9cd542e0335b07efceb88e67b9426f7ec2b85..cb8fdf057ecafe1d4d70790ad88952152ffdf8e8 100644 --- a/drivers/s390/char/hmcdrv_dev.c +++ b/drivers/s390/char/hmcdrv_dev.c @@ -90,7 +90,7 @@ static dev_t hmcdrv_dev_no; /* device number (major/minor) */ * * Return: recommended device file name in /dev */ -static char *hmcdrv_dev_name(struct device *dev, umode_t *mode) +static char *hmcdrv_dev_name(const struct device *dev, umode_t *mode) { char *nodename = NULL; const char *devname = dev_name(dev); /* kernel device name */ diff --git a/drivers/s390/cio/vfio_ccw_chp.c b/drivers/s390/cio/vfio_ccw_chp.c index 13b26a1c79886bacd8742d1404588096bd224170..d3f3a611f95b41a11bde8ed2fe7bae18dad5f21b 100644 --- a/drivers/s390/cio/vfio_ccw_chp.c +++ b/drivers/s390/cio/vfio_ccw_chp.c @@ -16,6 +16,7 @@ static ssize_t vfio_ccw_schib_region_read(struct vfio_ccw_private *private, char __user *buf, size_t count, loff_t *ppos) { + struct subchannel *sch = to_subchannel(private->vdev.dev->parent); unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS; loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK; struct ccw_schib_region *region; @@ -27,12 +28,12 @@ static ssize_t vfio_ccw_schib_region_read(struct vfio_ccw_private *private, mutex_lock(&private->io_mutex); region = private->region[i].data; - if (cio_update_schib(private->sch)) { + if (cio_update_schib(sch)) { ret = 
-ENODEV; goto out; } - memcpy(region, &private->sch->schib, sizeof(*region)); + memcpy(region, &sch->schib, sizeof(*region)); if (copy_to_user(buf, (void *)region + pos, count)) { ret = -EFAULT; diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 7f5402fe857a2e6d2fbc5292b1fd88718c3d49d1..54aba7cceb33f98bc80c3f09bb7bceb95f2b8cdf 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -23,10 +23,10 @@ #include "vfio_ccw_private.h" struct workqueue_struct *vfio_ccw_work_q; -static struct kmem_cache *vfio_ccw_io_region; -static struct kmem_cache *vfio_ccw_cmd_region; -static struct kmem_cache *vfio_ccw_schib_region; -static struct kmem_cache *vfio_ccw_crw_region; +struct kmem_cache *vfio_ccw_io_region; +struct kmem_cache *vfio_ccw_cmd_region; +struct kmem_cache *vfio_ccw_schib_region; +struct kmem_cache *vfio_ccw_crw_region; debug_info_t *vfio_ccw_debug_msg_id; debug_info_t *vfio_ccw_debug_trace_id; @@ -36,10 +36,19 @@ debug_info_t *vfio_ccw_debug_trace_id; */ int vfio_ccw_sch_quiesce(struct subchannel *sch) { - struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); + struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev); + struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev); DECLARE_COMPLETION_ONSTACK(completion); int iretry, ret = 0; + /* + * Probably an impossible situation, after being called through + * FSM callbacks. But in the event it did, register a warning + * and return as if things were fine. + */ + if (WARN_ON(!private)) + return 0; + iretry = 255; do { @@ -70,7 +79,7 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch) return ret; } -static void vfio_ccw_sch_io_todo(struct work_struct *work) +void vfio_ccw_sch_io_todo(struct work_struct *work) { struct vfio_ccw_private *private; struct irb *irb; @@ -106,7 +115,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) eventfd_signal(private->io_trigger, 1); } -static void vfio_ccw_crw_todo(struct work_struct *work) +void vfio_ccw_crw_todo(struct work_struct *work) { struct vfio_ccw_private *private; @@ -121,90 +130,39 @@ static void vfio_ccw_crw_todo(struct work_struct *work) */ static void vfio_ccw_sch_irq(struct subchannel *sch) { - struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); + struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev); + struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev); + + /* + * The subchannel should still be disabled at this point, + * so an interrupt would be quite surprising. As with an + * interrupt while the FSM is closed, let's attempt to + * disable the subchannel again. 
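+ *
+ * Disabling it again is best-effort only: with no private state
+ * left to drive the FSM, quiescing the hardware is all that can
+ * be done here.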
+ */ + if (!private) { + VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: unexpected interrupt\n", + sch->schid.cssid, sch->schid.ssid, + sch->schid.sch_no); + + cio_disable_subchannel(sch); + return; + } inc_irq_stat(IRQIO_CIO); vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT); } -static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch) +static void vfio_ccw_free_parent(struct device *dev) { - struct vfio_ccw_private *private; + struct vfio_ccw_parent *parent = container_of(dev, struct vfio_ccw_parent, dev); - private = kzalloc(sizeof(*private), GFP_KERNEL); - if (!private) - return ERR_PTR(-ENOMEM); - - private->sch = sch; - mutex_init(&private->io_mutex); - private->state = VFIO_CCW_STATE_STANDBY; - INIT_LIST_HEAD(&private->crw); - INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo); - INIT_WORK(&private->crw_work, vfio_ccw_crw_todo); - - private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1), - GFP_KERNEL); - if (!private->cp.guest_cp) - goto out_free_private; - - private->io_region = kmem_cache_zalloc(vfio_ccw_io_region, - GFP_KERNEL | GFP_DMA); - if (!private->io_region) - goto out_free_cp; - - private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region, - GFP_KERNEL | GFP_DMA); - if (!private->cmd_region) - goto out_free_io; - - private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region, - GFP_KERNEL | GFP_DMA); - - if (!private->schib_region) - goto out_free_cmd; - - private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region, - GFP_KERNEL | GFP_DMA); - - if (!private->crw_region) - goto out_free_schib; - return private; - -out_free_schib: - kmem_cache_free(vfio_ccw_schib_region, private->schib_region); -out_free_cmd: - kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region); -out_free_io: - kmem_cache_free(vfio_ccw_io_region, private->io_region); -out_free_cp: - kfree(private->cp.guest_cp); -out_free_private: - mutex_destroy(&private->io_mutex); - kfree(private); - return ERR_PTR(-ENOMEM); + kfree(parent); } -static void vfio_ccw_free_private(struct vfio_ccw_private *private) -{ - struct vfio_ccw_crw *crw, *temp; - - list_for_each_entry_safe(crw, temp, &private->crw, next) { - list_del(&crw->next); - kfree(crw); - } - - kmem_cache_free(vfio_ccw_crw_region, private->crw_region); - kmem_cache_free(vfio_ccw_schib_region, private->schib_region); - kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region); - kmem_cache_free(vfio_ccw_io_region, private->io_region); - kfree(private->cp.guest_cp); - mutex_destroy(&private->io_mutex); - kfree(private); -} static int vfio_ccw_sch_probe(struct subchannel *sch) { struct pmcw *pmcw = &sch->schib.pmcw; - struct vfio_ccw_private *private; + struct vfio_ccw_parent *parent; int ret = -ENOMEM; if (pmcw->qf) { @@ -213,42 +171,50 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) return -ENODEV; } - private = vfio_ccw_alloc_private(sch); - if (IS_ERR(private)) - return PTR_ERR(private); + parent = kzalloc(sizeof(*parent), GFP_KERNEL); + if (!parent) + return -ENOMEM; + + dev_set_name(&parent->dev, "parent"); + parent->dev.parent = &sch->dev; + parent->dev.release = &vfio_ccw_free_parent; + ret = device_register(&parent->dev); + if (ret) + goto out_free; - dev_set_drvdata(&sch->dev, private); + dev_set_drvdata(&sch->dev, parent); - private->mdev_type.sysfs_name = "io"; - private->mdev_type.pretty_name = "I/O subchannel (Non-QDIO)"; - private->mdev_types[0] = &private->mdev_type; - ret = mdev_register_parent(&private->parent, &sch->dev, + parent->mdev_type.sysfs_name = "io"; + parent->mdev_type.pretty_name = 
"I/O subchannel (Non-QDIO)"; + parent->mdev_types[0] = &parent->mdev_type; + ret = mdev_register_parent(&parent->parent, &sch->dev, &vfio_ccw_mdev_driver, - private->mdev_types, 1); + parent->mdev_types, 1); if (ret) - goto out_free; + goto out_unreg; VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n", sch->schid.cssid, sch->schid.ssid, sch->schid.sch_no); return 0; +out_unreg: + device_del(&parent->dev); out_free: + put_device(&parent->dev); dev_set_drvdata(&sch->dev, NULL); - vfio_ccw_free_private(private); return ret; } static void vfio_ccw_sch_remove(struct subchannel *sch) { - struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); + struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev); - mdev_unregister_parent(&private->parent); + mdev_unregister_parent(&parent->parent); + device_unregister(&parent->dev); dev_set_drvdata(&sch->dev, NULL); - vfio_ccw_free_private(private); - VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n", sch->schid.cssid, sch->schid.ssid, sch->schid.sch_no); @@ -256,7 +222,11 @@ static void vfio_ccw_sch_remove(struct subchannel *sch) static void vfio_ccw_sch_shutdown(struct subchannel *sch) { - struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); + struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev); + struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev); + + if (WARN_ON(!private)) + return; vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE); vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); @@ -274,7 +244,8 @@ static void vfio_ccw_sch_shutdown(struct subchannel *sch) */ static int vfio_ccw_sch_event(struct subchannel *sch, int process) { - struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); + struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev); + struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev); unsigned long flags; int rc = -EAGAIN; @@ -287,8 +258,10 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) rc = 0; - if (cio_update_schib(sch)) - vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); + if (cio_update_schib(sch)) { + if (private) + vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); + } out_unlock: spin_unlock_irqrestore(sch->lock, flags); @@ -326,14 +299,15 @@ static void vfio_ccw_queue_crw(struct vfio_ccw_private *private, static int vfio_ccw_chp_event(struct subchannel *sch, struct chp_link *link, int event) { - struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); + struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev); + struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev); int mask = chp_ssd_get_mask(&sch->ssd_info, link); int retry = 255; if (!private || !mask) return 0; - trace_vfio_ccw_chp_event(private->sch->schid, mask, event); + trace_vfio_ccw_chp_event(sch->schid, mask, event); VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n", sch->schid.cssid, sch->schid.ssid, sch->schid.sch_no, diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index 0a5e8b4a674342b31d784b0ed73c777d5d40c51b..2784a4e4d2bef3c9383ee10eb66f8ded33cb00f9 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -18,15 +18,13 @@ static int fsm_io_helper(struct vfio_ccw_private *private) { - struct subchannel *sch; + struct subchannel *sch = to_subchannel(private->vdev.dev->parent); union orb *orb; int ccode; __u8 lpm; unsigned long flags; int ret; - sch = private->sch; - spin_lock_irqsave(sch->lock, flags); orb = cp_get_orb(&private->cp, (u32)virt_to_phys(sch), 
sch->lpm); @@ -80,13 +78,11 @@ out: static int fsm_do_halt(struct vfio_ccw_private *private) { - struct subchannel *sch; + struct subchannel *sch = to_subchannel(private->vdev.dev->parent); unsigned long flags; int ccode; int ret; - sch = private->sch; - spin_lock_irqsave(sch->lock, flags); VFIO_CCW_TRACE_EVENT(2, "haltIO"); @@ -121,13 +117,11 @@ static int fsm_do_halt(struct vfio_ccw_private *private) static int fsm_do_clear(struct vfio_ccw_private *private) { - struct subchannel *sch; + struct subchannel *sch = to_subchannel(private->vdev.dev->parent); unsigned long flags; int ccode; int ret; - sch = private->sch; - spin_lock_irqsave(sch->lock, flags); VFIO_CCW_TRACE_EVENT(2, "clearIO"); @@ -160,7 +154,7 @@ static int fsm_do_clear(struct vfio_ccw_private *private) static void fsm_notoper(struct vfio_ccw_private *private, enum vfio_ccw_event event) { - struct subchannel *sch = private->sch; + struct subchannel *sch = to_subchannel(private->vdev.dev->parent); VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: notoper event %x state %x\n", sch->schid.cssid, @@ -228,7 +222,7 @@ static void fsm_async_retry(struct vfio_ccw_private *private, static void fsm_disabled_irq(struct vfio_ccw_private *private, enum vfio_ccw_event event) { - struct subchannel *sch = private->sch; + struct subchannel *sch = to_subchannel(private->vdev.dev->parent); /* * An interrupt in a disabled state means a previous disable was not @@ -238,7 +232,9 @@ static void fsm_disabled_irq(struct vfio_ccw_private *private, } inline struct subchannel_id get_schid(struct vfio_ccw_private *p) { - return p->sch->schid; + struct subchannel *sch = to_subchannel(p->vdev.dev->parent); + + return sch->schid; } /* @@ -360,10 +356,11 @@ static void fsm_async_request(struct vfio_ccw_private *private, static void fsm_irq(struct vfio_ccw_private *private, enum vfio_ccw_event event) { + struct subchannel *sch = to_subchannel(private->vdev.dev->parent); struct irb *irb = this_cpu_ptr(&cio_irb); VFIO_CCW_TRACE_EVENT(6, "IRQ"); - VFIO_CCW_TRACE_EVENT(6, dev_name(&private->sch->dev)); + VFIO_CCW_TRACE_EVENT(6, dev_name(&sch->dev)); memcpy(&private->irb, irb, sizeof(*irb)); @@ -376,7 +373,7 @@ static void fsm_irq(struct vfio_ccw_private *private, static void fsm_open(struct vfio_ccw_private *private, enum vfio_ccw_event event) { - struct subchannel *sch = private->sch; + struct subchannel *sch = to_subchannel(private->vdev.dev->parent); int ret; spin_lock_irq(sch->lock); @@ -397,7 +394,7 @@ err_unlock: static void fsm_close(struct vfio_ccw_private *private, enum vfio_ccw_event event) { - struct subchannel *sch = private->sch; + struct subchannel *sch = to_subchannel(private->vdev.dev->parent); int ret; spin_lock_irq(sch->lock); diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index 6ae4d012d800845bf5cad2b68abefd5bbc4e2164..5b53b94f13c741cc089c98373eaecc9a30e74f34 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -49,26 +49,70 @@ static int vfio_ccw_mdev_init_dev(struct vfio_device *vdev) struct vfio_ccw_private *private = container_of(vdev, struct vfio_ccw_private, vdev); - init_completion(&private->release_comp); + mutex_init(&private->io_mutex); + private->state = VFIO_CCW_STATE_STANDBY; + INIT_LIST_HEAD(&private->crw); + INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo); + INIT_WORK(&private->crw_work, vfio_ccw_crw_todo); + + private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1), + GFP_KERNEL); + if (!private->cp.guest_cp) + goto out_free_private; + + private->io_region = 
kmem_cache_zalloc(vfio_ccw_io_region, + GFP_KERNEL | GFP_DMA); + if (!private->io_region) + goto out_free_cp; + + private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region, + GFP_KERNEL | GFP_DMA); + if (!private->cmd_region) + goto out_free_io; + + private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region, + GFP_KERNEL | GFP_DMA); + if (!private->schib_region) + goto out_free_cmd; + + private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region, + GFP_KERNEL | GFP_DMA); + if (!private->crw_region) + goto out_free_schib; + return 0; + +out_free_schib: + kmem_cache_free(vfio_ccw_schib_region, private->schib_region); +out_free_cmd: + kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region); +out_free_io: + kmem_cache_free(vfio_ccw_io_region, private->io_region); +out_free_cp: + kfree(private->cp.guest_cp); +out_free_private: + mutex_destroy(&private->io_mutex); + return -ENOMEM; } static int vfio_ccw_mdev_probe(struct mdev_device *mdev) { - struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent); + struct subchannel *sch = to_subchannel(mdev->dev.parent); + struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev); + struct vfio_ccw_private *private; int ret; - if (private->state == VFIO_CCW_STATE_NOT_OPER) - return -ENODEV; + private = vfio_alloc_device(vfio_ccw_private, vdev, &mdev->dev, + &vfio_ccw_dev_ops); + if (IS_ERR(private)) + return PTR_ERR(private); - ret = vfio_init_device(&private->vdev, &mdev->dev, &vfio_ccw_dev_ops); - if (ret) - return ret; + dev_set_drvdata(&parent->dev, private); VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: create\n", - private->sch->schid.cssid, - private->sch->schid.ssid, - private->sch->schid.sch_no); + sch->schid.cssid, + sch->schid.ssid, + sch->schid.sch_no); ret = vfio_register_emulated_iommu_dev(&private->vdev); if (ret) @@ -77,6 +121,7 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev) return 0; err_put_vdev: + dev_set_drvdata(&parent->dev, NULL); vfio_put_device(&private->vdev); return ret; } @@ -85,40 +130,36 @@ static void vfio_ccw_mdev_release_dev(struct vfio_device *vdev) { struct vfio_ccw_private *private = container_of(vdev, struct vfio_ccw_private, vdev); + struct vfio_ccw_crw *crw, *temp; - /* - * We cannot free vfio_ccw_private here because it includes - * parent info which must be free'ed by css driver. - * - * Use a workaround by memset'ing the core device part and - * then notifying the remove path that all active references - * to this device have been released. 
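vfio_ccw_mdev_init_dev() above is a textbook reverse-order unwind ladder: each allocation that fails jumps to a label that releases only what was allocated before it, so no path ever frees an uninitialized pointer. A standalone sketch of the shape, with malloc()/free() standing in for the kmem_cache calls:

#include <stdlib.h>

struct regions {
	void *io, *cmd, *schib;
};

static int init_regions(struct regions *r)
{
	r->io = malloc(64);
	if (!r->io)
		return -1;

	r->cmd = malloc(64);
	if (!r->cmd)
		goto out_free_io;

	r->schib = malloc(64);
	if (!r->schib)
		goto out_free_cmd;

	return 0;

out_free_cmd:	/* labels unwind strictly in reverse allocation order */
	free(r->cmd);
out_free_io:
	free(r->io);
	return -1;
}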
- */ - memset(vdev, 0, sizeof(*vdev)); - complete(&private->release_comp); + list_for_each_entry_safe(crw, temp, &private->crw, next) { + list_del(&crw->next); + kfree(crw); + } + + kmem_cache_free(vfio_ccw_crw_region, private->crw_region); + kmem_cache_free(vfio_ccw_schib_region, private->schib_region); + kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region); + kmem_cache_free(vfio_ccw_io_region, private->io_region); + kfree(private->cp.guest_cp); + mutex_destroy(&private->io_mutex); } static void vfio_ccw_mdev_remove(struct mdev_device *mdev) { - struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent); + struct subchannel *sch = to_subchannel(mdev->dev.parent); + struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev); + struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev); VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: remove\n", - private->sch->schid.cssid, - private->sch->schid.ssid, - private->sch->schid.sch_no); + sch->schid.cssid, + sch->schid.ssid, + sch->schid.sch_no); vfio_unregister_group_dev(&private->vdev); + dev_set_drvdata(&parent->dev, NULL); vfio_put_device(&private->vdev); - /* - * Wait for all active references on mdev are released so it - * is safe to defer kfree() to a later point. - * - * TODO: the clean fix is to split parent/mdev info from ccw - * private structure so each can be managed in its own life - * cycle. - */ - wait_for_completion(&private->release_comp); } static int vfio_ccw_mdev_open_device(struct vfio_device *vdev) @@ -588,6 +629,9 @@ static const struct vfio_device_ops vfio_ccw_dev_ops = { .ioctl = vfio_ccw_mdev_ioctl, .request = vfio_ccw_mdev_request, .dma_unmap = vfio_ccw_dma_unmap, + .bind_iommufd = vfio_iommufd_emulated_bind, + .unbind_iommufd = vfio_iommufd_emulated_unbind, + .attach_ioas = vfio_iommufd_emulated_attach_ioas, }; struct mdev_driver vfio_ccw_mdev_driver = { diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h index bd5fb81456af854c0a87a2b27262e72f66a3d04d..b441ae6700fd235e6bb2044b6cda33b217c5dfe6 100644 --- a/drivers/s390/cio/vfio_ccw_private.h +++ b/drivers/s390/cio/vfio_ccw_private.h @@ -67,10 +67,24 @@ struct vfio_ccw_crw { struct crw crw; }; +/** + * struct vfio_ccw_parent + * + * @dev: embedded device struct + * @parent: parent data structures for mdevs created + * @mdev_type(s): identifying information for mdevs created + */ +struct vfio_ccw_parent { + struct device dev; + + struct mdev_parent parent; + struct mdev_type mdev_type; + struct mdev_type *mdev_types[1]; +}; + /** * struct vfio_ccw_private * @vdev: Embedded VFIO device - * @sch: pointer to the subchannel * @state: internal state of the device * @completion: synchronization helper of the I/O completion * @io_region: MMIO region to input/output I/O arguments/results @@ -88,12 +102,9 @@ struct vfio_ccw_crw { * @req_trigger: eventfd ctx for signaling userspace to return device * @io_work: work for deferral process of I/O handling * @crw_work: work for deferral process of CRW handling - * @release_comp: synchronization helper for vfio device release - * @parent: parent data structures for mdevs created */ struct vfio_ccw_private { struct vfio_device vdev; - struct subchannel *sch; int state; struct completion *completion; struct ccw_io_region *io_region; @@ -114,15 +125,11 @@ struct vfio_ccw_private { struct eventfd_ctx *req_trigger; struct work_struct io_work; struct work_struct crw_work; - - struct completion release_comp; - - struct mdev_parent parent; - struct mdev_type mdev_type; - struct mdev_type 
*mdev_types[1]; } __aligned(8); int vfio_ccw_sch_quiesce(struct subchannel *sch); +void vfio_ccw_sch_io_todo(struct work_struct *work); +void vfio_ccw_crw_todo(struct work_struct *work); extern struct mdev_driver vfio_ccw_mdev_driver; @@ -162,12 +169,18 @@ extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS]; static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private, enum vfio_ccw_event event) { - trace_vfio_ccw_fsm_event(private->sch->schid, private->state, event); + struct subchannel *sch = to_subchannel(private->vdev.dev->parent); + + if (sch) + trace_vfio_ccw_fsm_event(sch->schid, private->state, event); vfio_ccw_jumptable[private->state][event](private, event); } extern struct workqueue_struct *vfio_ccw_work_q; - +extern struct kmem_cache *vfio_ccw_io_region; +extern struct kmem_cache *vfio_ccw_cmd_region; +extern struct kmem_cache *vfio_ccw_schib_region; +extern struct kmem_cache *vfio_ccw_crw_region; /* s390 debug feature, similar to base cio */ extern debug_info_t *vfio_ccw_debug_msg_id; diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c index f43cfeabd2cc8b709f6d3e75b4ae44170195cf27..997b524bdd2b5bffbd01782e970e318c6d40c013 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -122,7 +122,7 @@ static int vfio_ap_matrix_dev_create(void) return 0; matrix_drv_err: - device_unregister(&matrix_dev->device); + device_del(&matrix_dev->device); matrix_reg_err: put_device(&matrix_dev->device); matrix_alloc_err: diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 0b4cc8c597ae67845c8d01c434e93ec05c3a5195..9c01957e56b3f17f494dcb8766646a404eb35280 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -429,7 +429,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q, aqic_gisa.isc = nisc; aqic_gisa.ir = 1; - aqic_gisa.gisa = (uint64_t)gisa >> 4; + aqic_gisa.gisa = virt_to_phys(gisa) >> 4; status = ap_aqic(q->apqn, aqic_gisa, h_nib); switch (status.response_code) { @@ -765,11 +765,6 @@ static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev) } } -static void vfio_ap_mdev_release_dev(struct vfio_device *vdev) -{ - vfio_free_device(vdev); -} - static void vfio_ap_mdev_remove(struct mdev_device *mdev) { struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev); @@ -1535,13 +1530,29 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev, return 0; } +static void unmap_iova(struct ap_matrix_mdev *matrix_mdev, u64 iova, u64 length) +{ + struct ap_queue_table *qtable = &matrix_mdev->qtable; + struct vfio_ap_queue *q; + int loop_cursor; + + hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) { + if (q->saved_iova >= iova && q->saved_iova < iova + length) + vfio_ap_irq_disable(q); + } +} + static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length) { struct ap_matrix_mdev *matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev); - vfio_unpin_pages(&matrix_mdev->vdev, iova, 1); + mutex_lock(&matrix_dev->mdevs_lock); + + unmap_iova(matrix_mdev, iova, length); + + mutex_unlock(&matrix_dev->mdevs_lock); } /** @@ -1784,11 +1795,13 @@ static const struct attribute_group vfio_queue_attr_group = { static const struct vfio_device_ops vfio_ap_matrix_dev_ops = { .init = vfio_ap_mdev_init_dev, - .release = vfio_ap_mdev_release_dev, .open_device = vfio_ap_mdev_open_device, .close_device = vfio_ap_mdev_close_device, .ioctl = 
vfio_ap_mdev_ioctl, .dma_unmap = vfio_ap_mdev_dma_unmap, + .bind_iommufd = vfio_iommufd_emulated_bind, + .unbind_iommufd = vfio_iommufd_emulated_unbind, + .attach_ioas = vfio_iommufd_emulated_attach_ioas, }; static struct mdev_driver vfio_ap_matrix_driver = { diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index f94b43ce9a65836690ffe3a57688e5d340d63a58..4bf36e53fe3e4af6cd41e690dd77dcf5613173ee 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -53,10 +53,6 @@ MODULE_LICENSE("GPL"); EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req); EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep); -static int zcrypt_hwrng_seed = 1; -module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, 0440); -MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); - DEFINE_SPINLOCK(zcrypt_list_lock); LIST_HEAD(zcrypt_card_list); @@ -2063,8 +2059,6 @@ int zcrypt_rng_device_add(void) goto out; } zcrypt_rng_buffer_index = 0; - if (!zcrypt_hwrng_seed) - zcrypt_rng_dev.quality = 0; rc = hwrng_register(&zcrypt_rng_dev); if (rc) goto out_free; diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index f2abffce26599cf40f5fb9a39c788f5888e26c64..f7b7ffda1161f5878da10f26f0a0c128c3414449 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -2198,7 +2198,7 @@ static int blogic_slaveconfig(struct scsi_device *dev) static int __init blogic_init(void) { - int adapter_count = 0, drvr_optindex = 0, probeindex; + int drvr_optindex = 0, probeindex; struct blogic_adapter *adapter; int ret = 0; @@ -2368,10 +2368,8 @@ static int __init blogic_init(void) list_del(&myadapter->host_list); scsi_host_put(host); ret = -ENODEV; - } else { + } else scsi_scan_host(host); - adapter_count++; - } } } else { /* diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index dece7d9eb4d367ccb1b34c04c3df7fa0ee96bb7e..ca85bddb582b468a4573b2858d52cf5acd3c80d9 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -858,7 +858,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance) * latency, but a bus reset will reset chip logic. Checking for parity error * is unnecessary because that interrupt is never enabled. A Loss of BSY * condition will clear DMA Mode. We can tell when this occurs because the - * the Busy Monitor interrupt is enabled together with DMA Mode. + * Busy Monitor interrupt is enabled together with DMA Mode. */ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id) diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h index 0314e4b9e1fbb08ee9fd5330706790524d6769d3..a12d693065cef6a3c823cb8f98cc71ab7a4fa76c 100644 --- a/drivers/scsi/bfa/bfa_fc.h +++ b/drivers/scsi/bfa/bfa_fc.h @@ -1548,7 +1548,7 @@ enum fdmi_port_attribute_type { struct fdmi_attr_s { __be16 type; __be16 len; - u8 value[1]; + u8 value[]; }; /* diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index be8dfbe13e904e057e5f007550b47b557251cb7b..79d4f7ee5bcb05c6073544c936f7e945f1f367b4 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -2540,6 +2540,35 @@ out: return 0; } +/* + * Set the SCSI device sdev_bflags - sdev_bflags are used by the + * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan + * + * Internally iterates over all the ITNIMs that are part of the im_port & sets the + * sdev_bflags for the scsi_device associated with LUN #0.
+ */ +static void bfad_reset_sdev_bflags(struct bfad_im_port_s *im_port, + int lunmask_cfg) +{ + const u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN; + struct bfad_itnim_s *itnim; + struct scsi_device *sdev; + unsigned long flags; + + spin_lock_irqsave(im_port->shost->host_lock, flags); + list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) { + sdev = __scsi_device_lookup(im_port->shost, itnim->channel, + itnim->scsi_tgt_id, 0); + if (sdev) { + if (lunmask_cfg == BFA_TRUE) + sdev->sdev_bflags |= scan_flags; + else + sdev->sdev_bflags &= ~scan_flags; + } + } + spin_unlock_irqrestore(im_port->shost->host_lock, flags); +} + /* Function to reset the LUN SCAN mode */ static void bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg) diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index c03b225ea1ba0e2b586268c0c9be550bfa1ba2ba..4353feedf76ad25861ff1c065b05249fccaa532b 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h @@ -198,30 +198,4 @@ irqreturn_t bfad_intx(int irq, void *dev_id); int bfad_im_bsg_request(struct bsg_job *job); int bfad_im_bsg_timeout(struct bsg_job *job); -/* - * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the - * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan - * - * Internally iterate's over all the ITNIM's part of the im_port & set's the - * sdev_bflags for the scsi_device associated with LUN #0. - */ -#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do { \ - struct scsi_device *__sdev = NULL; \ - struct bfad_itnim_s *__itnim = NULL; \ - u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN; \ - list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list), \ - list_entry) { \ - __sdev = scsi_device_lookup((__im_port)->shost, \ - __itnim->channel, \ - __itnim->scsi_tgt_id, 0); \ - if (__sdev) { \ - if ((__lunmask_cfg) == BFA_TRUE) \ - __sdev->sdev_bflags |= scan_flags; \ - else \ - __sdev->sdev_bflags &= ~scan_flags; \ - scsi_device_put(__sdev); \ - } \ - } \ -} while (0) - #endif diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c index fe0355c964bca2ef3d0c8ed60bedc8dc60d3cd10..a516df019c2240fab1f9851701fb658e7a047b07 100644 --- a/drivers/scsi/csiostor/csio_wr.c +++ b/drivers/scsi/csiostor/csio_wr.c @@ -1051,7 +1051,6 @@ csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q, struct csio_fl_dma_buf flb; struct csio_dma_buf *buf, *fbuf; uint32_t bufsz, len, lastlen = 0; - int n; struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx]; CSIO_DB_ASSERT(flq != NULL); @@ -1071,7 +1070,7 @@ csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q, flb.totlen = len; /* Consume all freelist buffers used for len bytes */ - for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) { + for (fbuf = flb.flbufs; ; fbuf++) { buf = &flq->un.fl.bufs[flq->cidx]; bufsz = csio_wr_fl_bufsz(sge, buf); diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index cd1324ec742de4d05ecffdb5d6940b33dd264845..395b00b942f7036c316d3d99ddd43e372035cdb8 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -3857,7 +3857,7 @@ static void cxlflash_pci_resume(struct pci_dev *pdev) * * Return: Allocated string describing the devtmpfs structure. 
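Replacing the old bfad_reset_sdev_bflags() macro with the static function above gains argument type checking, single evaluation, and a real symbol for debuggers; switching from scsi_device_lookup() to __scsi_device_lookup() also drops the per-device get/put, which is safe because the host lock is now held across the whole walk. The body reduces to setting or clearing a group of flag bits; a trivial standalone rendering (the flag values here are placeholders, the real BLIST_* constants live in the SCSI headers):

#include <stdio.h>

#define BLIST_NOREPORTLUN	(1u << 0)	/* placeholder value */
#define BLIST_SPARSELUN		(1u << 1)	/* placeholder value */

static unsigned int update_scan_flags(unsigned int bflags, int lunmask_cfg)
{
	const unsigned int scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;

	if (lunmask_cfg)
		bflags |= scan_flags;
	else
		bflags &= ~scan_flags;
	return bflags;
}

int main(void)
{
	printf("%#x\n", update_scan_flags(0, 1));	/* 0x3 */
	printf("%#x\n", update_scan_flags(0x3, 0));	/* 0 */
	return 0;
}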
*/ -static char *cxlflash_devnode(struct device *dev, umode_t *mode) +static char *cxlflash_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev)); } diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 610a51538f03475196571b7cf66ea71b6ed8cc7f..49cc18a8747313e5729fccb02f55c292d58c52bc 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -354,6 +354,8 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, "%s: port group %x rel port %x\n", ALUA_DH_NAME, group_id, rel_port); + kref_get(&pg->kref); + /* Check for existing port group references */ spin_lock(&h->pg_lock); old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); @@ -373,11 +375,11 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, list_add_rcu(&h->node, &pg->dh_list); spin_unlock_irqrestore(&pg->lock, flags); - alua_rtpg_queue(rcu_dereference_protected(h->pg, - lockdep_is_held(&h->pg_lock)), - sdev, NULL, true); spin_unlock(&h->pg_lock); + alua_rtpg_queue(pg, sdev, NULL, true); + kref_put(&pg->kref, release_port_group); + if (old_pg) kref_put(&old_pg->kref, release_port_group); @@ -811,14 +813,19 @@ static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg) return SCSI_DH_RETRY; } -static bool alua_rtpg_select_sdev(struct alua_port_group *pg) +/* + * The caller must call scsi_device_put() on the returned pointer if it is not + * NULL. + */ +static struct scsi_device * __must_check +alua_rtpg_select_sdev(struct alua_port_group *pg) { struct alua_dh_data *h; - struct scsi_device *sdev = NULL; + struct scsi_device *sdev = NULL, *prev_sdev; lockdep_assert_held(&pg->lock); if (WARN_ON(!pg->rtpg_sdev)) - return false; + return NULL; /* * RCU protection isn't necessary for dh_list here @@ -845,22 +852,22 @@ static bool alua_rtpg_select_sdev(struct alua_port_group *pg) pr_warn("%s: no device found for rtpg\n", (pg->device_id_len ? 
(char *)pg->device_id_str : "(nameless PG)")); - return false; + return NULL; } sdev_printk(KERN_INFO, sdev, "rtpg retry on different device\n"); - scsi_device_put(pg->rtpg_sdev); + prev_sdev = pg->rtpg_sdev; pg->rtpg_sdev = sdev; - return true; + return prev_sdev; } static void alua_rtpg_work(struct work_struct *work) { struct alua_port_group *pg = container_of(work, struct alua_port_group, rtpg_work.work); - struct scsi_device *sdev; + struct scsi_device *sdev, *prev_sdev = NULL; LIST_HEAD(qdata_list); int err = SCSI_DH_OK; struct alua_queue_data *qdata, *tmp; @@ -901,7 +908,7 @@ static void alua_rtpg_work(struct work_struct *work) /* If RTPG failed on the current device, try using another */ if (err == SCSI_DH_RES_TEMP_UNAVAIL && - alua_rtpg_select_sdev(pg)) + (prev_sdev = alua_rtpg_select_sdev(pg))) err = SCSI_DH_IMM_RETRY; if (err == SCSI_DH_RETRY || err == SCSI_DH_IMM_RETRY || @@ -913,9 +920,7 @@ static void alua_rtpg_work(struct work_struct *work) pg->interval = ALUA_RTPG_RETRY_DELAY; pg->flags |= ALUA_PG_RUN_RTPG; spin_unlock_irqrestore(&pg->lock, flags); - queue_delayed_work(kaluad_wq, &pg->rtpg_work, - pg->interval * HZ); - return; + goto queue_rtpg; } if (err != SCSI_DH_OK) pg->flags &= ~ALUA_PG_RUN_STPG; @@ -930,9 +935,7 @@ static void alua_rtpg_work(struct work_struct *work) pg->interval = 0; pg->flags &= ~ALUA_PG_RUNNING; spin_unlock_irqrestore(&pg->lock, flags); - queue_delayed_work(kaluad_wq, &pg->rtpg_work, - pg->interval * HZ); - return; + goto queue_rtpg; } } @@ -946,6 +949,9 @@ static void alua_rtpg_work(struct work_struct *work) pg->rtpg_sdev = NULL; spin_unlock_irqrestore(&pg->lock, flags); + if (prev_sdev) + scsi_device_put(prev_sdev); + list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) { list_del(&qdata->entry); if (qdata->callback_fn) @@ -957,6 +963,12 @@ static void alua_rtpg_work(struct work_struct *work) spin_unlock_irqrestore(&pg->lock, flags); scsi_device_put(sdev); kref_put(&pg->kref, release_port_group); + return; + +queue_rtpg: + if (prev_sdev) + scsi_device_put(prev_sdev); + queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ); } /** @@ -976,6 +988,9 @@ static bool alua_rtpg_queue(struct alua_port_group *pg, { int start_queue = 0; unsigned long flags; + + might_sleep(); + if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev)) return false; @@ -986,11 +1001,17 @@ static bool alua_rtpg_queue(struct alua_port_group *pg, force = true; } if (pg->rtpg_sdev == NULL) { - pg->interval = 0; - pg->flags |= ALUA_PG_RUN_RTPG; - kref_get(&pg->kref); - pg->rtpg_sdev = sdev; - start_queue = 1; + struct alua_dh_data *h = sdev->handler_data; + + rcu_read_lock(); + if (h && rcu_dereference(h->pg) == pg) { + pg->interval = 0; + pg->flags |= ALUA_PG_RUN_RTPG; + kref_get(&pg->kref); + pg->rtpg_sdev = sdev; + start_queue = 1; + } + rcu_read_unlock(); } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) { pg->flags |= ALUA_PG_RUN_RTPG; /* Do not queue if the worker is already running */ diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c index b08fc8839808d2b63f83765e80eb60cc7135406a..49fd2cfed70c70c98aa0fa3ccbe585789c2c742f 100644 --- a/drivers/scsi/elx/efct/efct_driver.c +++ b/drivers/scsi/elx/efct/efct_driver.c @@ -42,6 +42,7 @@ efct_device_init(void) rc = efct_scsi_reg_fc_transport(); if (rc) { + efct_scsi_tgt_driver_exit(); pr_err("failed to register to FC host\n"); return rc; } diff --git a/drivers/scsi/elx/libefc/efclib.h b/drivers/scsi/elx/libefc/efclib.h index 
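The alua changes above rework alua_rtpg_select_sdev() to hand the displaced device back to the caller instead of dropping it under pg->lock, because scsi_device_put() may sleep and must run only after the spinlock is released (hence also the new might_sleep() in alua_rtpg_queue()). The underlying pattern, swap the pointer under the lock and release the old reference outside it, sketched with pthreads standing in for the kernel primitives:

#include <pthread.h>
#include <stdlib.h>

struct ref { int cnt; };	/* refcounting kept trivial for the sketch */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct ref *current_ref;

static void put_ref(struct ref *r)	/* may sleep in the real driver */
{
	if (r && --r->cnt == 0)
		free(r);
}

static void swap_ref(struct ref *new_ref)
{
	struct ref *prev;

	pthread_mutex_lock(&lock);
	prev = current_ref;		/* only pointer swaps under the lock */
	current_ref = new_ref;
	pthread_mutex_unlock(&lock);

	put_ref(prev);			/* the sleeping release runs unlocked */
}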
dde20891c2dd71f54cd47712c33a7713b14c10c0..57e3386128127f59b11db90c2a9a8b87c2df9c1f 100644 --- a/drivers/scsi/elx/libefc/efclib.h +++ b/drivers/scsi/elx/libefc/efclib.h @@ -58,10 +58,12 @@ enum efc_node_send_ls_acc { #define EFC_LINK_STATUS_UP 0 #define EFC_LINK_STATUS_DOWN 1 +enum efc_sm_event; + /* State machine context header */ struct efc_sm_ctx { void (*current_state)(struct efc_sm_ctx *ctx, - u32 evt, void *arg); + enum efc_sm_event evt, void *arg); const char *description; void *app; @@ -365,7 +367,7 @@ struct efc_node { int prev_evt; void (*nodedb_state)(struct efc_sm_ctx *ctx, - u32 evt, void *arg); + enum efc_sm_event evt, void *arg); struct timer_list gidpt_delay_timer; u64 time_last_gidpt_msec; diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index 7a4eadad23d70168440f578f3ee25a10f21023d8..d7a2c49ff5eeb67079d6c97dafe22d4ae5445c5b 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -248,8 +248,6 @@ static struct scsi_host_template driver_template = { .sg_tablesize = SG_CHUNK_SIZE, .cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN, - .present = 0, - .emulated = 0, .proc_name = ESAS2R_DRVR_NAME, .change_queue_depth = scsi_change_queue_depth, .max_sectors = 0xFFFF, @@ -637,10 +635,13 @@ static void __exit esas2r_exit(void) esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__); if (esas2r_proc_major > 0) { + struct proc_dir_entry *proc_dir; + esas2r_log(ESAS2R_LOG_INFO, "unregister proc"); - remove_proc_entry(ATTONODE_NAME, - esas2r_proc_host->hostt->proc_dir); + proc_dir = scsi_template_proc_dir(esas2r_proc_host->hostt); + if (proc_dir) + remove_proc_entry(ATTONODE_NAME, proc_dir); unregister_chrdev(esas2r_proc_major, ESAS2R_DRVR_NAME); esas2r_proc_major = 0; @@ -730,11 +731,13 @@ const char *esas2r_info(struct Scsi_Host *sh) esas2r_proc_major); if (esas2r_proc_major > 0) { - struct proc_dir_entry *pde; + struct proc_dir_entry *proc_dir; + struct proc_dir_entry *pde = NULL; - pde = proc_create(ATTONODE_NAME, 0, - sh->hostt->proc_dir, - &esas2r_proc_ops); + proc_dir = scsi_template_proc_dir(sh->hostt); + if (proc_dir) + pde = proc_create(ATTONODE_NAME, 0, proc_dir, + &esas2r_proc_ops); if (!pde) { esas2r_log_dev(ESAS2R_LOG_WARN, diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 6ec296321ffc1b85b3071d197652ddee833f83de..38774a272e627909a59997e837d56c04292a994e 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -2491,6 +2491,7 @@ static int __init fcoe_init(void) out_free: mutex_unlock(&fcoe_config_mutex); + fcoe_transport_detach(&fcoe_sw_transport); out_destroy: destroy_workqueue(fcoe_wq); return rc; diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index af658aa38fedfd30cb81332cdc2c67af1664c428..6260aa5ea6af80fc139574e025e7a39d2fad65a3 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -830,14 +830,15 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent, dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id); error = device_register(&ctlr->dev); - if (error) - goto out_del_q2; + if (error) { + destroy_workqueue(ctlr->devloss_work_q); + destroy_workqueue(ctlr->work_q); + put_device(&ctlr->dev); + return NULL; + } return ctlr; -out_del_q2: - destroy_workqueue(ctlr->devloss_work_q); - ctlr->devloss_work_q = NULL; out_del_q: destroy_workqueue(ctlr->work_q); ctlr->work_q = NULL; @@ -1036,16 +1037,16 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr, fcf->selected = new_fcf->selected; error = 
device_register(&fcf->dev); - if (error) - goto out_del; + if (error) { + put_device(&fcf->dev); + goto out; + } fcf->state = FCOE_FCF_STATE_CONNECTED; list_add_tail(&fcf->peers, &ctlr->fcfs); return fcf; -out_del: - kfree(fcf); out: return NULL; } diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 9aebf4a26b132df449adc812426f393e370d41dc..6f8a52a1b80873d9aec9ecd12f67cbc7df72e67b 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -104,6 +104,7 @@ enum { enum dev_status { HISI_SAS_DEV_INIT, HISI_SAS_DEV_NORMAL, + HISI_SAS_DEV_NCQ_ERR, }; enum { diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 699b07abb6b0b03e460226959fe5c31a8972e23d..41ba22f6c7f051cbf58abcebf322cda7527a0c70 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -177,22 +177,22 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) } static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, - struct scsi_cmnd *scsi_cmnd) + struct request *rq) { int index; void *bitmap = hisi_hba->slot_index_tags; - if (scsi_cmnd) - return scsi_cmd_to_rq(scsi_cmnd)->tag; + if (rq) + return rq->tag + HISI_SAS_RESERVED_IPTT; spin_lock(&hisi_hba->lock); - index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, + index = find_next_zero_bit(bitmap, HISI_SAS_RESERVED_IPTT, hisi_hba->last_slot_index + 1); - if (index >= hisi_hba->slot_index_count) { + if (index >= HISI_SAS_RESERVED_IPTT) { index = find_next_zero_bit(bitmap, - hisi_hba->slot_index_count, - HISI_SAS_UNRESERVED_IPTT); - if (index >= hisi_hba->slot_index_count) { + HISI_SAS_RESERVED_IPTT, + 0); + if (index >= HISI_SAS_RESERVED_IPTT) { spin_unlock(&hisi_hba->lock); return -SAS_QUEUE_FULL; } @@ -461,11 +461,11 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) struct asd_sas_port *sas_port = device->port; struct hisi_sas_device *sas_dev = device->lldd_dev; bool internal_abort = sas_is_internal_abort(task); - struct scsi_cmnd *scmd = NULL; struct hisi_sas_dq *dq = NULL; struct hisi_sas_port *port; struct hisi_hba *hisi_hba; struct hisi_sas_slot *slot; + struct request *rq = NULL; struct device *dev; int rc; @@ -520,22 +520,12 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) return -ECOMM; } - if (task->uldd_task) { - struct ata_queued_cmd *qc; - - if (dev_is_sata(device)) { - qc = task->uldd_task; - scmd = qc->scsicmd; - } else { - scmd = task->uldd_task; - } - } - - if (scmd) { + rq = sas_task_find_rq(task); + if (rq) { unsigned int dq_index; u32 blk_tag; - blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + blk_tag = blk_mq_unique_tag(rq); dq_index = blk_mq_unique_tag_to_hwq(blk_tag); dq = &hisi_hba->dq[dq_index]; } else { @@ -580,7 +570,7 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) if (!internal_abort && hisi_hba->hw->slot_index_alloc) rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); else - rc = hisi_sas_slot_index_alloc(hisi_hba, scmd); + rc = hisi_sas_slot_index_alloc(hisi_hba, rq); if (rc < 0) goto err_out_dif_dma_unmap; @@ -792,22 +782,14 @@ static int hisi_sas_dev_found(struct domain_device *device) if (parent_dev && dev_is_expander(parent_dev->dev_type)) { int phy_no; - u8 phy_num = parent_dev->ex_dev.num_phys; - struct ex_phy *phy; - - for (phy_no = 0; phy_no < phy_num; phy_no++) { - phy = &parent_dev->ex_dev.ex_phy[phy_no]; - if (SAS_ADDR(phy->attached_sas_addr) == - 
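The hisi_sas allocator rework above moves driver-reserved IPTTs to the low range [0, HISI_SAS_RESERVED_IPTT) and maps block-layer tags above them (rq->tag + HISI_SAS_RESERVED_IPTT), so reserved slots are found with a wrapping next-zero-bit search that resumes after the last grant. A standalone round-robin equivalent of that search:

#include <stdbool.h>

#define NSLOTS 64	/* stands in for HISI_SAS_RESERVED_IPTT */

static bool used[NSLOTS];
static int last_idx = -1;

static int alloc_slot(void)
{
	int i;

	for (i = 0; i < NSLOTS; i++) {
		int idx = (last_idx + 1 + i) % NSLOTS;	/* wrap around once */

		if (!used[idx]) {
			used[idx] = true;
			last_idx = idx;
			return idx;
		}
	}
	return -1;	/* queue full, ~ -SAS_QUEUE_FULL */
}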
SAS_ADDR(device->sas_addr)) - break; - } - if (phy_no == phy_num) { + phy_no = sas_find_attached_phy_id(&parent_dev->ex_dev, device); + if (phy_no < 0) { dev_info(dev, "dev found: no attached " "dev:%016llx at ex:%016llx\n", SAS_ADDR(device->sas_addr), SAS_ADDR(parent_dev->sas_addr)); - rc = -EINVAL; + rc = phy_no; goto err_out; } } @@ -1341,7 +1323,6 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) { - struct sas_ha_struct *sas_ha = &hisi_hba->sha; struct asd_sas_port *_sas_port = NULL; int phy_no; @@ -1370,12 +1351,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); } } - /* - * Ensure any bcast events are processed prior to calling async nexus - * reset calls from hisi_sas_clear_nexus_ha() -> - * hisi_sas_async_I_T_nexus_reset() - */ - sas_drain_work(sas_ha); } static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) @@ -1547,6 +1522,7 @@ static int hisi_sas_abort_task(struct sas_task *task) struct hisi_sas_internal_abort_data internal_abort_data = { false }; struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_slot *slot = task->lldd_task; struct hisi_hba *hisi_hba; struct device *dev; int rc = TMF_RESP_FUNC_FAILED; @@ -1560,7 +1536,6 @@ static int hisi_sas_abort_task(struct sas_task *task) spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_DONE) { - struct hisi_sas_slot *slot = task->lldd_task; struct hisi_sas_cq *cq; if (slot) { @@ -1578,8 +1553,7 @@ static int hisi_sas_abort_task(struct sas_task *task) task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); - if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { - struct hisi_sas_slot *slot = task->lldd_task; + if (slot && task->task_proto & SAS_PROTOCOL_SSP) { u16 tag = slot->idx; int rc2; @@ -1605,17 +1579,29 @@ static int hisi_sas_abort_task(struct sas_task *task) } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { if (task->dev->dev_type == SAS_SATA_DEV) { + struct ata_queued_cmd *qc = task->uldd_task; + rc = hisi_sas_internal_task_abort_dev(sas_dev, false); if (rc < 0) { dev_err(dev, "abort task: internal abort failed\n"); goto out; } hisi_sas_dereg_device(hisi_hba, device); - rc = hisi_sas_softreset_ata_disk(device); + + /* + * If an ATA internal command times out in ATA EH, it + * needs to execute a soft reset, so check the scsicmd + */ + if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) && + qc && qc->scsicmd) { + hisi_sas_do_release_task(hisi_hba, task, slot); + rc = TMF_RESP_FUNC_COMPLETE; + } else { + rc = hisi_sas_softreset_ata_disk(device); + } } - } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { + } else if (slot && task->task_proto & SAS_PROTOCOL_SMP) { /* SMP */ - struct hisi_sas_slot *slot = task->lldd_task; u32 tag = slot->idx; struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; @@ -1708,13 -1694,15 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) return rc; } + /* Remote phy */ if (rc) return rc; - /* Remote phy */ if (dev_is_sata(device)) { - rc = sas_ata_wait_after_reset(device, - HISI_SAS_WAIT_PHYUP_TIMEOUT); + struct ata_link *link = &device->sata_dev.ap->link; + + rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT, + smp_ata_check_ready_type); } else { msleep(2000); } @@
-1729,6 +1717,9 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device) struct device *dev = hisi_hba->dev; int rc; + if (sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) + sas_dev->dev_status = HISI_SAS_DEV_NORMAL; + rc = hisi_sas_internal_task_abort_dev(sas_dev, false); if (rc < 0) { dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); @@ -1823,14 +1814,12 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) struct hisi_hba *hisi_hba = sas_ha->lldd_ha; HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); ASYNC_DOMAIN_EXCLUSIVE(async); - int i, ret; + int i; queue_work(hisi_hba->wq, &r.work); wait_for_completion(r.completion); - if (!r.done) { - ret = TMF_RESP_FUNC_FAILED; - goto out; - } + if (!r.done) + return TMF_RESP_FUNC_FAILED; for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; @@ -1847,9 +1836,7 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) async_synchronize_full_domain(&async); hisi_sas_release_tasks(hisi_hba); - ret = TMF_RESP_FUNC_COMPLETE; -out: - return ret; + return TMF_RESP_FUNC_COMPLETE; } static int hisi_sas_query_task(struct sas_task *task) @@ -1997,14 +1984,10 @@ void hisi_sas_phy_bcast(struct hisi_sas_phy *phy) { struct asd_sas_phy *sas_phy = &phy->sas_phy; struct hisi_hba *hisi_hba = phy->hisi_hba; - struct sas_ha_struct *sha = &hisi_hba->sha; if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) return; - if (test_bit(SAS_HA_FROZEN, &sha->state)) - return; - sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast); @@ -2220,7 +2203,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba) if (!hisi_hba->sata_breakpoint) goto err_out; - hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT; + hisi_hba->last_slot_index = 0; hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); if (!hisi_hba->wq) { diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index d56b4bfd276781770f3f9d55af0694972ab04e8c..0c3fcb8078062b9ad7e44c7a987076042adfabb6 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -404,6 +404,11 @@ #define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF) #define CMPLT_HDR_ERROR_PHASE_OFF 2 #define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF) +/* bit[9:2] Error Phase */ +#define ERR_PHASE_RESPONSE_FRAME_REV_STAGE_OFF \ + 8 +#define ERR_PHASE_RESPONSE_FRAME_REV_STAGE_MSK \ + (0x1 << ERR_PHASE_RESPONSE_FRAME_REV_STAGE_OFF) #define CMPLT_HDR_RSPNS_XFRD_OFF 10 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) #define CMPLT_HDR_RSPNS_GOOD_OFF 11 @@ -423,8 +428,17 @@ #define CMPLT_HDR_DEV_ID_OFF 16 #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF) /* dw3 */ +#define SATA_DISK_IN_ERROR_STATUS_OFF 8 +#define SATA_DISK_IN_ERROR_STATUS_MSK (0x1 << SATA_DISK_IN_ERROR_STATUS_OFF) +#define CMPLT_HDR_SATA_DISK_ERR_OFF 16 +#define CMPLT_HDR_SATA_DISK_ERR_MSK (0x1 << CMPLT_HDR_SATA_DISK_ERR_OFF) #define CMPLT_HDR_IO_IN_TARGET_OFF 17 #define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF) +/* bit[23:18] ERR_FIS_ATA_STATUS */ +#define FIS_ATA_STATUS_ERR_OFF 18 +#define FIS_ATA_STATUS_ERR_MSK (0x1 << FIS_ATA_STATUS_ERR_OFF) +#define FIS_TYPE_SDB_OFF 31 +#define FIS_TYPE_SDB_MSK (0x1 << FIS_TYPE_SDB_OFF) /* ITCT header */ /* qw0 */ @@ -2148,6 +2162,18 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p) return IRQ_HANDLED; } +static bool is_ncq_err_v3_hw(struct hisi_sas_complete_v3_hdr 
*complete_hdr) +{ + u32 dw0, dw3; + + dw0 = le32_to_cpu(complete_hdr->dw0); + dw3 = le32_to_cpu(complete_hdr->dw3); + + return (dw0 & ERR_PHASE_RESPONSE_FRAME_REV_STAGE_MSK) && + (dw3 & FIS_TYPE_SDB_MSK) && + (dw3 & FIS_ATA_STATUS_ERR_MSK); +} + static bool slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot) @@ -2195,7 +2221,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, } else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { ts->residual = trans_tx_fail_type; ts->stat = SAS_DATA_UNDERRUN; - } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { + } else if ((dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) || + (dw3 & SATA_DISK_IN_ERROR_STATUS_MSK)) { ts->stat = SAS_PHY_DOWN; slot->abort = 1; } else { @@ -2381,14 +2408,34 @@ static irqreturn_t cq_thread_v3_hw(int irq_no, void *p) while (rd_point != wr_point) { struct hisi_sas_complete_v3_hdr *complete_hdr; struct device *dev = hisi_hba->dev; - u32 dw1; + u32 dw0, dw1, dw3; int iptt; complete_hdr = &complete_queue[rd_point]; + dw0 = le32_to_cpu(complete_hdr->dw0); dw1 = le32_to_cpu(complete_hdr->dw1); + dw3 = le32_to_cpu(complete_hdr->dw3); iptt = dw1 & CMPLT_HDR_IPTT_MSK; - if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) { + if (unlikely((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) && + (dw3 & CMPLT_HDR_SATA_DISK_ERR_MSK)) { + int device_id = (dw1 & CMPLT_HDR_DEV_ID_MSK) >> + CMPLT_HDR_DEV_ID_OFF; + struct hisi_sas_itct *itct = + &hisi_hba->itct[device_id]; + struct hisi_sas_device *sas_dev = + &hisi_hba->devices[device_id]; + struct domain_device *device = sas_dev->sas_device; + + dev_err(dev, "erroneous completion disk err dev id=%d sas_addr=0x%llx CQ hdr: 0x%x 0x%x 0x%x 0x%x\n", + device_id, itct->sas_addr, dw0, dw1, + complete_hdr->act, dw3); + + if (is_ncq_err_v3_hw(complete_hdr)) + sas_dev->dev_status = HISI_SAS_DEV_NCQ_ERR; + + sas_ata_device_link_abort(device, true); + } else if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) { slot = &hisi_hba->slot_info[iptt]; slot->cmplt_queue_slot = rd_point; slot->cmplt_queue = queue; diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 9857dba09c951a68cfd94965191fb7dd30972d70..12346e2297fdb97903e68df29d9aea9849103cb9 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -519,7 +519,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) "failed to create tmf workq\n"); goto fail; } - scsi_proc_hostdir_add(shost->hostt); + if (scsi_proc_hostdir_add(shost->hostt) < 0) + goto fail; return shost; fail: /* diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index f8e832b1bc46a716b37f6f1fa5a168fcdd926d3c..4dbf51e2623ad543bea3f74bf2940d8f9be95509 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -8925,7 +8925,7 @@ clean1: /* wq/aer/h */ destroy_workqueue(h->monitor_ctlr_wq); h->monitor_ctlr_wq = NULL; } - kfree(h); + hpda_free_ctlr_info(h); return rc; } @@ -9786,7 +9786,8 @@ static int hpsa_add_sas_host(struct ctlr_info *h) return 0; free_sas_phy: - hpsa_free_sas_phy(hpsa_sas_phy); + sas_phy_free(hpsa_sas_phy->phy); + kfree(hpsa_sas_phy); free_sas_port: hpsa_free_sas_port(hpsa_sas_port); free_sas_node: @@ -9822,10 +9823,12 @@ static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy); if (rc) - goto free_sas_port; + goto free_sas_rphy; return 0; +free_sas_rphy: + sas_rphy_free(rphy); free_sas_port: hpsa_free_sas_port(hpsa_sas_port); device->sas_port = NULL; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 
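is_ncq_err_v3_hw() above classifies a completion as an NCQ error when dw0 reports the response frame was received in the error phase and dw3 shows an SDB FIS with the ATA ERR status bit set. The same predicate restated standalone, reusing the mask values defined in the patch (the le32_to_cpu() conversions are assumed already done by the caller):

#include <stdbool.h>
#include <stdint.h>

#define ERR_PHASE_RESPONSE_FRAME_REV_STAGE_MSK	(UINT32_C(1) << 8)
#define FIS_ATA_STATUS_ERR_MSK			(UINT32_C(1) << 18)
#define FIS_TYPE_SDB_MSK			(UINT32_C(1) << 31)

static bool is_ncq_err(uint32_t dw0, uint32_t dw3)
{
	return (dw0 & ERR_PHASE_RESPONSE_FRAME_REV_STAGE_MSK) &&
	       (dw3 & FIS_TYPE_SDB_MSK) &&
	       (dw3 & FIS_ATA_STATUS_ERR_MSK);
}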
9d01a3e3c26aa6cc149414e0536f0f1ce2e7676b..2022ffb450417edcbe54723e4fe9ba4af23f6528 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -10872,11 +10872,19 @@ static struct notifier_block ipr_notifier = { **/ static int __init ipr_init(void) { + int rc; + ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", IPR_DRIVER_VERSION, IPR_DRIVER_DATE); register_reboot_notifier(&ipr_notifier); - return pci_register_driver(&ipr_driver); + rc = pci_register_driver(&ipr_driver); + if (rc) { + unregister_reboot_notifier(&ipr_notifier); + return rc; + } + + return 0; } /** diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 5fb1f364e8155d7a3c857cbff10ad02ca9c89c33..1d1cf641937c14738596a68d6cb546c576bef741 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -738,6 +738,7 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session, sk->sk_reuse = SK_CAN_REUSE; sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */ sk->sk_allocation = GFP_ATOMIC; + sk->sk_use_task_frag = false; sk_set_memalloc(sk); sock_no_linger(sk); diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 942fc60f7c21c770de1c60637f78987c958c6213..0f32ded246d0bfdd889fdae53da0878c3f0d3d8b 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -75,7 +75,6 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp) struct fc_seq_els_data rjt_data; unsigned int len; int redisc = 0; - enum fc_els_rscn_ev_qual ev_qual; enum fc_els_rscn_addr_fmt fmt; LIST_HEAD(disc_ports); struct fc_disc_port *dp, *next; @@ -107,8 +106,6 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp) goto reject; for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) { - ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT; - ev_qual &= ELS_RSCN_EV_QUAL_MASK; fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT; fmt &= ELS_RSCN_ADDR_FMT_MASK; /* diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index d95f4bcdeb2ec4db2531708bc28b07a2f7f8b5ea..ef2fc860257e6e1b1614a50528bd4b244adb4e2a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -2071,9 +2071,9 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn) return 0; } -enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) +enum scsi_timeout_action iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) { - enum blk_eh_timer_return rc = BLK_EH_DONE; + enum scsi_timeout_action rc = SCSI_EH_NOT_HANDLED; struct iscsi_task *task = NULL, *running_task; struct iscsi_cls_session *cls_session; struct iscsi_session *session; @@ -2093,7 +2093,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) * Raced with completion. Blk layer has taken ownership * so let timeout code complete it now. */ - rc = BLK_EH_DONE; + rc = SCSI_EH_NOT_HANDLED; spin_unlock(&session->back_lock); goto done; } @@ -2102,7 +2102,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) * Racing with the completion path right now, so give it more * time so that path can complete it like normal. 
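The ipr_init() change above applies the standard module-init rule: a failing step must unwind every registration made before it, otherwise a failed load leaves a stale reboot notifier behind. The shape, reduced to a runnable toy:

#include <stdio.h>

static int notifier_registered;

static void register_notifier(void)   { notifier_registered = 1; }
static void unregister_notifier(void) { notifier_registered = 0; }
static int register_driver(int fail)  { return fail ? -19 : 0; }	/* -ENODEV */

static int demo_init(int fail_driver)
{
	int rc;

	register_notifier();
	rc = register_driver(fail_driver);
	if (rc) {
		unregister_notifier();	/* undo the earlier step */
		return rc;
	}
	return 0;
}

int main(void)
{
	demo_init(1);
	printf("notifier registered after failed init: %d\n", notifier_registered);	/* 0 */
	return 0;
}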
*/ - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; task = NULL; spin_unlock(&session->back_lock); goto done; @@ -2120,21 +2120,21 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) if (unlikely(system_state != SYSTEM_RUNNING)) { sc->result = DID_NO_CONNECT << 16; ISCSI_DBG_EH(session, "sc on shutdown, handled\n"); - rc = BLK_EH_DONE; + rc = SCSI_EH_NOT_HANDLED; goto done; } /* * We are probably in the middle of iscsi recovery so let * that complete and handle the error. */ - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; goto done; } conn = session->leadconn; if (!conn) { /* In the middle of shuting down */ - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; goto done; } @@ -2151,7 +2151,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) "Last data xfer at %lu. Last timeout was at " "%lu\n.", task->last_xfer, task->last_timeout); task->have_checked_conn = false; - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; goto done; } @@ -2162,7 +2162,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) * and can let the iscsi eh handle it */ if (iscsi_has_ping_timed_out(conn)) { - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; goto done; } @@ -2200,7 +2200,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) task->last_xfer, running_task->last_xfer, task->last_timeout); spin_unlock(&session->back_lock); - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; goto done; } } @@ -2216,14 +2216,14 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) */ if (READ_ONCE(conn->ping_task)) { task->have_checked_conn = true; - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; goto done; } /* Make sure there is a transport check done */ iscsi_send_nopout(conn, NULL); task->have_checked_conn = true; - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; done: spin_unlock_bh(&session->frwd_lock); @@ -2232,7 +2232,7 @@ done: task->last_timeout = jiffies; iscsi_put_task(task); } - ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? + ISCSI_DBG_EH(session, "return %s\n", rc == SCSI_EH_RESET_TIMER ? 
"timer reset" : "shutdown or nh"); return rc; } diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 5692577f82e331d373cb0d12cad78ad969cb05cf..1ccce706167a5b054c987f38575f65ef08494f70 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -139,8 +139,8 @@ static void sas_ata_task_done(struct sas_task *task) qc->flags |= ATA_QCFLAG_FAILED; } - dev->sata_dev.fis[3] = 0x04; /* status err */ - dev->sata_dev.fis[2] = ATA_ERR; + dev->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */ + dev->sata_dev.fis[3] = ATA_ABORTED; /* tf error */ } } @@ -287,6 +287,31 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy) return 1; } +int smp_ata_check_ready_type(struct ata_link *link) +{ + struct domain_device *dev = link->ap->private_data; + struct sas_phy *phy = sas_get_local_phy(dev); + struct domain_device *ex_dev = dev->parent; + enum sas_device_type type = SAS_PHY_UNUSED; + u8 sas_addr[SAS_ADDR_SIZE]; + int res; + + res = sas_get_phy_attached_dev(ex_dev, phy->number, sas_addr, &type); + sas_put_local_phy(phy); + if (res) + return res; + + switch (type) { + case SAS_SATA_PENDING: + return 0; + case SAS_END_DEVICE: + return 1; + default: + return -ENODEV; + } +} +EXPORT_SYMBOL_GPL(smp_ata_check_ready_type); + static int smp_ata_check_ready(struct ata_link *link) { int res; @@ -358,7 +383,7 @@ static int sas_ata_printk(const char *level, const struct domain_device *ddev, return r; } -int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline) +static int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline) { struct sata_device *sata_dev = &dev->sata_dev; int (*check_ready)(struct ata_link *link); @@ -380,7 +405,6 @@ int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline) return ret; } -EXPORT_SYMBOL_GPL(sas_ata_wait_after_reset); static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class, unsigned long deadline) @@ -861,6 +885,21 @@ void sas_ata_wait_eh(struct domain_device *dev) ata_port_wait_eh(ap); } +void sas_ata_device_link_abort(struct domain_device *device, bool force_reset) +{ + struct ata_port *ap = device->sata_dev.ap; + struct ata_link *link = &ap->link; + + device->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */ + device->sata_dev.fis[3] = ATA_ABORTED; /* tf error */ + + link->eh_info.err_mask |= AC_ERR_DEV; + if (force_reset) + link->eh_info.action |= ATA_EH_RESET; + ata_link_abort(link); +} +EXPORT_SYMBOL_GPL(sas_ata_device_link_abort); + int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id) { struct sas_tmf_task tmf_task = {}; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 5ce2518301040a7ce660787a08e717bcd2616680..a04cad620e9392414d41da68120a9cec6a79f8a8 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -738,9 +738,7 @@ static void sas_ex_get_linkrate(struct domain_device *parent, phy->phy_state == PHY_NOT_PRESENT) continue; - if (SAS_ADDR(phy->attached_sas_addr) == - SAS_ADDR(child->sas_addr)) { - + if (sas_phy_match_dev_addr(child, phy)) { child->min_linkrate = min(parent->min_linkrate, phy->linkrate); child->max_linkrate = max(parent->max_linkrate, @@ -1007,13 +1005,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } /* Parent and domain coherency */ - if (!dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == - SAS_ADDR(dev->port->sas_addr))) { + if (!dev->parent && 
sas_phy_match_port_addr(dev->port, ex_phy)) { sas_add_parent_port(dev, phy_id); return 0; } - if (dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == - SAS_ADDR(dev->parent->sas_addr))) { + if (dev->parent && sas_phy_match_dev_addr(dev->parent, ex_phy)) { sas_add_parent_port(dev, phy_id); if (ex_phy->routing_attr == TABLE_ROUTING) sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1); @@ -1312,7 +1308,7 @@ static int sas_check_parent_topology(struct domain_device *child) parent_phy->phy_state == PHY_NOT_PRESENT) continue; - if (SAS_ADDR(parent_phy->attached_sas_addr) != SAS_ADDR(child->sas_addr)) + if (!sas_phy_match_dev_addr(child, parent_phy)) continue; child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; @@ -1522,8 +1518,7 @@ static int sas_configure_parent(struct domain_device *parent, struct ex_phy *phy = &ex_parent->ex_phy[i]; if ((phy->routing_attr == TABLE_ROUTING) && - (SAS_ADDR(phy->attached_sas_addr) == - SAS_ADDR(child->sas_addr))) { + sas_phy_match_dev_addr(child, phy)) { res = sas_configure_phy(parent, i, sas_addr, include); if (res) return res; @@ -1693,8 +1688,8 @@ static int sas_get_phy_change_count(struct domain_device *dev, return res; } -static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, - u8 *sas_addr, enum sas_device_type *type) +int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, + u8 *sas_addr, enum sas_device_type *type) { int res; struct smp_disc_resp *disc_resp; @@ -1858,8 +1853,7 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, if (last) { list_for_each_entry_safe(child, n, &ex_dev->children, siblings) { - if (SAS_ADDR(child->sas_addr) == - SAS_ADDR(phy->attached_sas_addr)) { + if (sas_phy_match_dev_addr(child, phy)) { set_bit(SAS_DEV_GONE, &child->state); if (dev_is_expander(child->dev_type)) sas_unregister_ex_tree(parent->port, child); @@ -1941,8 +1935,7 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) if (res) return res; list_for_each_entry(child, &dev->ex_dev.children, siblings) { - if (SAS_ADDR(child->sas_addr) == - SAS_ADDR(ex_phy->attached_sas_addr)) { + if (sas_phy_match_dev_addr(child, ex_phy)) { if (dev_is_expander(child->dev_type)) res = sas_discover_bfs_by_root(child); break; @@ -2064,8 +2057,7 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id) if (i == phy_id) continue; - if (SAS_ADDR(phy->attached_sas_addr) == - SAS_ADDR(changed_phy->attached_sas_addr)) { + if (sas_phy_addr_match(phy, changed_phy)) { last = false; break; } @@ -2107,6 +2099,22 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev) return res; } +int sas_find_attached_phy_id(struct expander_device *ex_dev, + struct domain_device *dev) +{ + struct ex_phy *phy; + int phy_id; + + for (phy_id = 0; phy_id < ex_dev->num_phys; phy_id++) { + phy = &ex_dev->ex_phy[phy_id]; + if (sas_phy_match_dev_addr(dev, phy)) + return phy_id; + } + + return -ENODEV; +} +EXPORT_SYMBOL_GPL(sas_find_attached_phy_id); + void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, struct sas_rphy *rphy) { diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index e4f77072a58d27db7e18205067c445707acf0d69..f2c05ebeb72ffbe992455a99f9292ecf7568f2d5 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -35,7 +35,6 @@ struct sas_task *sas_alloc_task(gfp_t flags) return task; } -EXPORT_SYMBOL_GPL(sas_alloc_task); struct sas_task *sas_alloc_slow_task(gfp_t flags) { @@ -56,7 +55,6 @@ struct sas_task *sas_alloc_slow_task(gfp_t flags) 
return task; } -EXPORT_SYMBOL_GPL(sas_alloc_slow_task); void sas_free_task(struct sas_task *task) { @@ -65,7 +63,6 @@ void sas_free_task(struct sas_task *task) kmem_cache_free(sas_task_cache, task); } } -EXPORT_SYMBOL_GPL(sas_free_task); /*------------ SAS addr hash -----------*/ void sas_hash_addr(u8 *hashed, const u8 *sas_addr) diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 8d0ad3abc7b5c7942165ec8f0a1b9069b79a4709..6f593fa69b586335ef3c7ab40e9cb3348fea759c 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -52,6 +52,10 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha); struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy, gfp_t gfp_flags); void sas_free_event(struct asd_sas_event *event); +struct sas_task *sas_alloc_task(gfp_t flags); +struct sas_task *sas_alloc_slow_task(gfp_t flags); +void sas_free_task(struct sas_task *task); + int sas_register_ports(struct sas_ha_struct *sas_ha); void sas_unregister_ports(struct sas_ha_struct *sas_ha); @@ -84,6 +88,8 @@ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id); int sas_ex_phy_discover(struct domain_device *dev, int single); int sas_get_report_phy_sata(struct domain_device *dev, int phy_id, struct smp_rps_resp *rps_resp); +int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, + u8 *sas_addr, enum sas_device_type *type); int sas_try_ata_reset(struct asd_sas_phy *phy); void sas_hae_reset(struct work_struct *work); @@ -111,6 +117,23 @@ static inline void sas_smp_host_handler(struct bsg_job *job, } #endif +static inline bool sas_phy_match_dev_addr(struct domain_device *dev, + struct ex_phy *phy) +{ + return SAS_ADDR(dev->sas_addr) == SAS_ADDR(phy->attached_sas_addr); +} + +static inline bool sas_phy_match_port_addr(struct asd_sas_port *port, + struct ex_phy *phy) +{ + return SAS_ADDR(port->sas_addr) == SAS_ADDR(phy->attached_sas_addr); +} + +static inline bool sas_phy_addr_match(struct ex_phy *p1, struct ex_phy *p2) +{ + return SAS_ADDR(p1->attached_sas_addr) == SAS_ADDR(p2->attached_sas_addr); +} + static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err) { pr_warn("%s: for %s device %016llx returned %d\n", diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index ef1481326fd7d37359f578a88f9c2ad8fc495533..77e1b2911cb41aa14aab08c66d25b8bc7feaa57c 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -134,7 +134,7 @@ lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr, scnprintf(tmp, sizeof(tmp), "Congestion Mgmt Info: E2Eattr %d Ver %d " "CMF %d cnt %d\n", - phba->sli4_hba.pc_sli4_params.mi_ver, + phba->sli4_hba.pc_sli4_params.mi_cap, cp ? 
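The three sas_phy_*match* inlines added to sas_internal.h above give names to the repeated SAS_ADDR(x) == SAS_ADDR(y) comparisons that the sas_expander.c hunks previously open-coded. SAS_ADDR() is essentially a big-endian load of the 8-byte address; a portable standalone equivalent of the comparison:

#include <stdbool.h>
#include <stdint.h>

#define SAS_ADDR_SIZE 8

/* Fold a big-endian 8-byte SAS address into a u64, a portable
 * stand-in for the kernel's be64_to_cpu()-based SAS_ADDR(). */
static uint64_t sas_addr_u64(const uint8_t *addr)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < SAS_ADDR_SIZE; i++)
		v = (v << 8) | addr[i];
	return v;
}

static bool sas_addr_match(const uint8_t *a, const uint8_t *b)
{
	return sas_addr_u64(a) == sas_addr_u64(b);
}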
cp->cgn_info_version : 0, phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt); @@ -1877,6 +1877,122 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out) return 0; } +static ssize_t +lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int rc; + int len = 0; + struct lpfc_rdp_context *rdp_context; + u16 temperature; + u16 rx_power; + u16 tx_bias; + u16 tx_power; + u16 vcc; + char chbuf[128]; + u16 wavelength = 0; + struct sff_trasnceiver_codes_byte7 *trasn_code_byte7; + + /* Get transceiver information */ + rdp_context = kmalloc(sizeof(*rdp_context), GFP_KERNEL); + + rc = lpfc_get_sfp_info_wait(phba, rdp_context); + if (rc) { + len = scnprintf(buf, PAGE_SIZE - len, "SFP info NA:\n"); + goto out_free_rdp; + } + + strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_NAME], 16); + chbuf[16] = 0; + + len = scnprintf(buf, PAGE_SIZE - len, "VendorName:\t%s\n", chbuf); + len += scnprintf(buf + len, PAGE_SIZE - len, + "VendorOUI:\t%02x-%02x-%02x\n", + (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI], + (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 1], + (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 2]); + strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_PN], 16); + chbuf[16] = 0; + len += scnprintf(buf + len, PAGE_SIZE - len, "VendorPN:\t%s\n", chbuf); + strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_SN], 16); + chbuf[16] = 0; + len += scnprintf(buf + len, PAGE_SIZE - len, "VendorSN:\t%s\n", chbuf); + strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_REV], 4); + chbuf[4] = 0; + len += scnprintf(buf + len, PAGE_SIZE - len, "VendorRev:\t%s\n", chbuf); + strncpy(chbuf, &rdp_context->page_a0[SSF_DATE_CODE], 8); + chbuf[8] = 0; + len += scnprintf(buf + len, PAGE_SIZE - len, "DateCode:\t%s\n", chbuf); + len += scnprintf(buf + len, PAGE_SIZE - len, "Identifier:\t%xh\n", + (uint8_t)rdp_context->page_a0[SSF_IDENTIFIER]); + len += scnprintf(buf + len, PAGE_SIZE - len, "ExtIdentifier:\t%xh\n", + (uint8_t)rdp_context->page_a0[SSF_EXT_IDENTIFIER]); + len += scnprintf(buf + len, PAGE_SIZE - len, "Connector:\t%xh\n", + (uint8_t)rdp_context->page_a0[SSF_CONNECTOR]); + wavelength = (rdp_context->page_a0[SSF_WAVELENGTH_B1] << 8) | + rdp_context->page_a0[SSF_WAVELENGTH_B0]; + + len += scnprintf(buf + len, PAGE_SIZE - len, "Wavelength:\t%d nm\n", + wavelength); + trasn_code_byte7 = (struct sff_trasnceiver_codes_byte7 *) + &rdp_context->page_a0[SSF_TRANSCEIVER_CODE_B7]; + + len += scnprintf(buf + len, PAGE_SIZE - len, "Speeds: \t"); + if (*(uint8_t *)trasn_code_byte7 == 0) { + len += scnprintf(buf + len, PAGE_SIZE - len, + "Unknown\n"); + } else { + if (trasn_code_byte7->fc_sp_100MB) + len += scnprintf(buf + len, PAGE_SIZE - len, + "1 "); + if (trasn_code_byte7->fc_sp_200mb) + len += scnprintf(buf + len, PAGE_SIZE - len, + "2 "); + if (trasn_code_byte7->fc_sp_400MB) + len += scnprintf(buf + len, PAGE_SIZE - len, + "4 "); + if (trasn_code_byte7->fc_sp_800MB) + len += scnprintf(buf + len, PAGE_SIZE - len, + "8 "); + if (trasn_code_byte7->fc_sp_1600MB) + len += scnprintf(buf + len, PAGE_SIZE - len, + "16 "); + if (trasn_code_byte7->fc_sp_3200MB) + len += scnprintf(buf + len, PAGE_SIZE - len, + "32 "); + if (trasn_code_byte7->speed_chk_ecc) + len += scnprintf(buf + len, PAGE_SIZE - len, + "64 "); + len += scnprintf(buf + len, PAGE_SIZE - len, "GB\n"); + } + temperature = (rdp_context->page_a2[SFF_TEMPERATURE_B1] << 8 | + 
rdp_context->page_a2[SFF_TEMPERATURE_B0]); + vcc = (rdp_context->page_a2[SFF_VCC_B1] << 8 | + rdp_context->page_a2[SFF_VCC_B0]); + tx_power = (rdp_context->page_a2[SFF_TXPOWER_B1] << 8 | + rdp_context->page_a2[SFF_TXPOWER_B0]); + tx_bias = (rdp_context->page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | + rdp_context->page_a2[SFF_TX_BIAS_CURRENT_B0]); + rx_power = (rdp_context->page_a2[SFF_RXPOWER_B1] << 8 | + rdp_context->page_a2[SFF_RXPOWER_B0]); + + len += scnprintf(buf + len, PAGE_SIZE - len, + "Temperature:\tx%04x C\n", temperature); + len += scnprintf(buf + len, PAGE_SIZE - len, "Vcc:\t\tx%04x V\n", vcc); + len += scnprintf(buf + len, PAGE_SIZE - len, + "TxBiasCurrent:\tx%04x mA\n", tx_bias); + len += scnprintf(buf + len, PAGE_SIZE - len, "TxPower:\tx%04x mW\n", + tx_power); + len += scnprintf(buf + len, PAGE_SIZE - len, "RxPower:\tx%04x mW\n", + rx_power); +out_free_rdp: + kfree(rdp_context); + return len; +} + /** * lpfc_board_mode_show - Return the state of the board * @dev: class device that is converted into a Scsi_host. @@ -2810,6 +2926,7 @@ static DEVICE_ATTR_RO(lpfc_drvr_version); static DEVICE_ATTR_RO(lpfc_enable_fip); static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, lpfc_board_mode_show, lpfc_board_mode_store); +static DEVICE_ATTR_RO(lpfc_xcvr_data); static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL); static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL); @@ -5906,6 +6023,7 @@ static struct attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_fcp_wait_abts_rsp.attr, &dev_attr_nport_evt_cnt.attr, &dev_attr_board_mode.attr, + &dev_attr_lpfc_xcvr_data.attr, &dev_attr_max_vpi.attr, &dev_attr_used_vpi.attr, &dev_attr_max_rpi.attr, diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index d2d207791056ca1b5938969116f96426bb5ba5cc..8928f016d09e2b72b6c05b0bc879343cf56b9833 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -687,3 +687,6 @@ int lpfc_issue_els_qfpa(struct lpfc_vport *vport); void lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp); + +int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, + struct lpfc_rdp_context *rdp_context); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 863b2125fed6cebe9a0ffdade153914af88c8a26..919741bbe267b3799b898021341ccdbfa17d3438 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -952,6 +952,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, uint16_t fcf_index; int rc; u32 ulp_status, ulp_word4, tmo; + bool flogi_in_retry = false; /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { @@ -1022,8 +1023,23 @@ stop_rr_fcf_flogi: phba->hba_flag, phba->fcf.fcf_flag); /* Check for retry */ - if (lpfc_els_retry(phba, cmdiocb, rspiocb)) + if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { + /* Address a timing race with dev_loss. If dev_loss + * is active on this FPort node, put the initial ref + * count back to stop premature node release actions. + */ + lpfc_check_nlp_post_devloss(vport, ndlp); + flogi_in_retry = true; goto out; + } + + /* The FLOGI will not be retried. If the FPort node is not + * registered with the SCSI transport, remove the initial + * reference to trigger node release. 
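Each diagnostic word read above (temperature, vcc, tx/rx power, tx bias) is simply two consecutive page-A2 bytes combined big-endian. A small model of the decode; the offsets below are illustrative stand-ins, not the driver's SFF_*_B1/B0 constants:

#include <stdint.h>
#include <stdio.h>

/* Combine the high/low diagnostic bytes the way the show routine does. */
static uint16_t be16_field(const uint8_t *page, int hi, int lo)
{
	return (uint16_t)((page[hi] << 8) | page[lo]);
}

int main(void)
{
	uint8_t page_a2[256] = { 0 };

	/* Hypothetical offsets for illustration only. */
	enum { TEMP_B1 = 96, TEMP_B0 = 97 };

	page_a2[TEMP_B1] = 0x19;
	page_a2[TEMP_B0] = 0x80;

	printf("raw temperature word: 0x%04x\n",
	       be16_field(page_a2, TEMP_B1, TEMP_B0));
	return 0;
}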
+ */ + if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) && + !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) + lpfc_nlp_put(ndlp); lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT, "0150 FLOGI failure Status:x%x/x%x " @@ -1086,7 +1102,7 @@ stop_rr_fcf_flogi: spin_unlock_irq(shost->host_lock); /* - * The FLogI succeeded. Sync the data for the CPU before + * The FLOGI succeeded. Sync the data for the CPU before * accessing it. */ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); @@ -1108,6 +1124,12 @@ stop_rr_fcf_flogi: vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA | LPFC_VMID_TYPE_PRIO); + /* + * Address a timing race with dev_loss. If dev_loss is active on + * this FPort node, put the initial ref count back to stop premature + * node release actions. + */ + lpfc_check_nlp_post_devloss(vport, ndlp); if (vport->port_state == LPFC_FLOGI) { /* * If Common Service Parameters indicate Nport @@ -1198,7 +1220,9 @@ flogifail: lpfc_issue_clear_la(phba, vport); } out: - phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; + if (!flogi_in_retry) + phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; + lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); } @@ -1365,15 +1389,17 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 1; } + /* Avoid race with FLOGI completion and hba_flags. */ + phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); + rc = lpfc_issue_fabric_iocb(phba, elsiocb); if (rc == IOCB_ERROR) { + phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } - phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); - /* Clear external loopback plug detected flag */ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; @@ -7190,6 +7216,134 @@ rdp_fail: return 1; } +int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, + struct lpfc_rdp_context *rdp_context) +{ + LPFC_MBOXQ_t *mbox = NULL; + int rc; + struct lpfc_dmabuf *mp; + struct lpfc_dmabuf *mpsave; + void *virt; + MAILBOX_t *mb; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, + "7205 failed to allocate mailbox memory"); + return 1; + } + + if (lpfc_sli4_dump_page_a0(phba, mbox)) + goto sfp_fail; + mp = mbox->ctx_buf; + mpsave = mp; + virt = mp->virt; + if (phba->sli_rev < LPFC_SLI_REV4) { + mb = &mbox->u.mb; + mb->un.varDmp.cv = 1; + mb->un.varDmp.co = 1; + mb->un.varWords[2] = 0; + mb->un.varWords[3] = DMP_SFF_PAGE_A0_SIZE / 4; + mb->un.varWords[4] = 0; + mb->un.varWords[5] = 0; + mb->un.varWords[6] = 0; + mb->un.varWords[7] = 0; + mb->un.varWords[8] = 0; + mb->un.varWords[9] = 0; + mb->un.varWords[10] = 0; + mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; + mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; + mbox->mbox_offset_word = 5; + mbox->ctx_buf = virt; + } else { + bf_set(lpfc_mbx_memory_dump_type3_length, + &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); + mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); + mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); + } + mbox->vport = phba->pport; + mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; + + rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); + if (rc == MBX_NOT_FINISHED) { + rc = 1; + goto error; + } + + if (phba->sli_rev == LPFC_SLI_REV4) + mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); + else + mp = mpsave; + + if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { + rc = 1; + goto error; + } + + lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, + DMP_SFF_PAGE_A0_SIZE); + + 
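The reorder above — setting HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING before lpfc_issue_fabric_iocb() and rolling the flags back only on IOCB_ERROR — closes the window where a fast FLOGI completion could run before the flags were published. A schematic of the pattern, with all names made up:

#include <stdbool.h>
#include <stdio.h>

#define FLOGI_ISSUED      0x1
#define FLOGI_OUTSTANDING 0x2

static unsigned int hba_flag;

/* Stub submit; flip the return value to model an IOCB failure. */
static bool submit_flogi(void) { return true; }

int main(void)
{
	/*
	 * Publish the "FLOGI in flight" state before the command can
	 * complete; a completion running between submit and a later
	 * flag update would otherwise observe stale state.
	 */
	hba_flag |= FLOGI_ISSUED | FLOGI_OUTSTANDING;

	if (!submit_flogi()) {
		/* Roll back only if the command never reached the HW. */
		hba_flag &= ~(FLOGI_ISSUED | FLOGI_OUTSTANDING);
		puts("submit failed, flags rolled back");
		return 1;
	}
	printf("flags: 0x%x\n", hba_flag);
	return 0;
}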
memset(mbox, 0, sizeof(*mbox)); + memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE); + INIT_LIST_HEAD(&mp->list); + + /* save address for completion */ + mbox->ctx_buf = mp; + mbox->vport = phba->pport; + + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); + bf_set(lpfc_mbx_memory_dump_type3_type, + &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); + bf_set(lpfc_mbx_memory_dump_type3_link, + &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); + bf_set(lpfc_mbx_memory_dump_type3_page_no, + &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); + if (phba->sli_rev < LPFC_SLI_REV4) { + mb = &mbox->u.mb; + mb->un.varDmp.cv = 1; + mb->un.varDmp.co = 1; + mb->un.varWords[2] = 0; + mb->un.varWords[3] = DMP_SFF_PAGE_A2_SIZE / 4; + mb->un.varWords[4] = 0; + mb->un.varWords[5] = 0; + mb->un.varWords[6] = 0; + mb->un.varWords[7] = 0; + mb->un.varWords[8] = 0; + mb->un.varWords[9] = 0; + mb->un.varWords[10] = 0; + mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; + mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; + mbox->mbox_offset_word = 5; + mbox->ctx_buf = virt; + } else { + bf_set(lpfc_mbx_memory_dump_type3_length, + &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE); + mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); + mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); + } + + mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; + rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); + if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { + rc = 1; + goto error; + } + rc = 0; + + lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2, + DMP_SFF_PAGE_A2_SIZE); + +error: + mbox->ctx_buf = mpsave; + lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + + return rc; + +sfp_fail: + mempool_free(mbox, phba->mbox_mem_pool); + return 1; +} + /* * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. * @vport: pointer to a host virtual N_Port data structure. @@ -9044,15 +9198,10 @@ static int lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { - struct lpfc_dmabuf *pcmd; - uint32_t *lp; uint32_t did; did = get_job_els_rsp64_did(vport->phba, cmdiocb); - pcmd = cmdiocb->cmd_dmabuf; - lp = (uint32_t *)pcmd->virt; - lp++; /* FARP-RSP received from DID */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0600 FARP-RSP received from DID x%x\n", did); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index d38ebd7281b9b96f4485b58aa8af808d91d5f8a1..80375d73b732025102d7861aab9af08b56d6a93c 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -426,10 +426,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) name = (uint8_t *)&ndlp->nlp_portname; phba = vport->phba; - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); - if (phba->sli_rev == LPFC_SLI_REV4) fcf_inuse = lpfc_fcf_inuse(phba); @@ -451,22 +447,36 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID); + + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; + spin_unlock_irqrestore(&ndlp->lock, iflags); return fcf_inuse; } /* Fabric nodes are done. */ if (ndlp->nlp_type & NLP_FABRIC) { spin_lock_irqsave(&ndlp->lock, iflags); - /* In massive vport configuration settings, it's possible - * dev_loss_tmo fired during node recovery. So, check if - * fabric nodes are in discovery states outstanding. 
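lpfc_get_sfp_info_wait() issues two synchronous dumps with one recycled mailbox: page A0 first, then the mailbox is zeroed and reused for page A2, with a single error label owning the cleanup. A compact userspace sketch of that shape; issue_wait() is a stub standing in for lpfc_sli_issue_mbox_wait():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mbox { char payload[64]; };

static int issue_wait(struct mbox *m, int timeout_s)
{
	(void)m; (void)timeout_s;
	return 0;	/* stub: 0 == success */
}

static int get_sfp_info(char *a0, size_t a0_len, char *a2, size_t a2_len)
{
	struct mbox *m = calloc(1, sizeof(*m));
	int rc = 1;

	if (!m)
		return 1;
	if (issue_wait(m, 30))
		goto error;
	memcpy(a0, m->payload, a0_len < sizeof(m->payload) ? a0_len : sizeof(m->payload));

	memset(m, 0, sizeof(*m));	/* recycle for the second dump */

	if (issue_wait(m, 30))
		goto error;
	memcpy(a2, m->payload, a2_len < sizeof(m->payload) ? a2_len : sizeof(m->payload));
	rc = 0;
error:
	free(m);	/* one exit path frees the mailbox in every case */
	return rc;
}

int main(void)
{
	char a0[64], a2[64];

	printf("rc=%d\n", get_sfp_info(a0, sizeof(a0), a2, sizeof(a2)));
	return 0;
}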
+ + /* In massive vport configuration settings or when the FLOGI + * completes with a sequence timeout, it's possible + * dev_loss_tmo fired during node recovery. The driver has to + * account for this race to allow for recovery and keep + * the reference counting correct. */ switch (ndlp->nlp_DID) { case Fabric_DID: fc_vport = vport->fc_vport; - if (fc_vport && - fc_vport->vport_state == FC_VPORT_INITIALIZING) - recovering = true; + if (fc_vport) { + /* NPIV path. */ + if (fc_vport->vport_state == + FC_VPORT_INITIALIZING) + recovering = true; + } else { + /* Physical port path. */ + if (phba->hba_flag & HBA_FLOGI_OUTSTANDING) + recovering = true; + } break; case Fabric_Cntl_DID: if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) @@ -514,6 +524,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) return fcf_inuse; } + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; + spin_unlock_irqrestore(&ndlp->lock, iflags); lpfc_nlp_put(ndlp); return fcf_inuse; } @@ -552,6 +565,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) return fcf_inuse; } + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; + spin_unlock_irqrestore(&ndlp->lock, iflags); if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 5288fc69908a6691e5f65ad94ee16b3fbc61d41c..fb3504dbb8993d80c4cdb19ea137abe335d6d723 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -3162,7 +3162,8 @@ struct lpfc_mbx_memory_dump_type3 { #define SFF_LENGTH_COPPER 18 #define SSF_LENGTH_50UM_OM3 19 #define SSF_VENDOR_NAME 20 -#define SSF_VENDOR_OUI 36 +#define SSF_TRANSCEIVER2 36 +#define SSF_VENDOR_OUI 37 #define SSF_VENDOR_PN 40 #define SSF_VENDOR_REV 56 #define SSF_WAVELENGTH_B1 60 @@ -3281,7 +3282,7 @@ struct sff_trasnceiver_codes_byte6 { struct sff_trasnceiver_codes_byte7 { uint8_t fc_sp_100MB:1; /* 100 MB/sec */ - uint8_t reserve:1; + uint8_t speed_chk_ecc:1; uint8_t fc_sp_200mb:1; /* 200 MB/sec */ uint8_t fc_sp_3200MB:1; /* 3200 MB/sec */ uint8_t fc_sp_400MB:1; /* 400 MB/sec */ diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index b535f1fd301002f1294b1eb7443a4b7d1fcdbd21..25ba20e42825520d902b2af4ed865e01d9bb1914 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -36,7 +36,6 @@ #include #include #include -#include #include #include #include @@ -699,6 +698,8 @@ lpfc_sli4_refresh_params(struct lpfc_hba *phba) return rc; } mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; + phba->sli4_hba.pc_sli4_params.mi_cap = + bf_get(cfg_mi_ver, mbx_sli4_parameters); /* Are we forcing MI off via module parameter? */ if (phba->cfg_enable_mi) @@ -10093,17 +10094,15 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) qmin = phba->sli4_hba.max_cfg_param.max_wq; if (phba->sli4_hba.max_cfg_param.max_cq < qmin) qmin = phba->sli4_hba.max_cfg_param.max_cq; - if (phba->sli4_hba.max_cfg_param.max_eq < qmin) - qmin = phba->sli4_hba.max_cfg_param.max_eq; /* - * Whats left after this can go toward NVME / FCP. - * The minus 4 accounts for ELS, NVME LS, MBOX - * plus one extra. When configured for - * NVMET, FCP io channel WQs are not created. + * Reserve 4 (ELS, NVME LS, MBOX, plus one extra) and + * the remainder can be used for NVME / FCP. 
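The hunk above moves the NLP_IN_DEV_LOSS clear from the top of lpfc_dev_loss_tmo_handler() to each exit path, so concurrent recovery logic keeps seeing the flag while the handler classifies the node. The shape of the change, modeled with a pthread mutex in place of the ndlp spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NLP_IN_DEV_LOSS 0x1

struct node {
	pthread_mutex_t lock;
	unsigned int flags;
};

/* Clear the "in dev_loss" marker under the node lock on every exit
 * path, not once at entry, so racing paths still see it while the
 * handler decides whether the node is recovering. */
static void clear_in_dev_loss(struct node *n)
{
	pthread_mutex_lock(&n->lock);
	n->flags &= ~NLP_IN_DEV_LOSS;
	pthread_mutex_unlock(&n->lock);
}

static bool dev_loss_handler(struct node *n, bool recovering)
{
	clear_in_dev_loss(n);
	return !recovering;	/* true: caller drops the node reference */
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER, NLP_IN_DEV_LOSS };
	bool drop = dev_loss_handler(&n, false);

	printf("drop ref: %d, flags: 0x%x\n", drop, n.flags);
	return 0;
}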
*/ qmin -= 4; + if (phba->sli4_hba.max_cfg_param.max_eq < qmin) + qmin = phba->sli4_hba.max_cfg_param.max_eq; - /* Check to see if there is enough for NVME */ + /* Check to see if there is enough for default cfg */ if ((phba->cfg_irq_chann > qmin) || (phba->cfg_hdw_queue > qmin)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, @@ -13842,6 +13841,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) mbx_sli4_parameters); phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); + sli4_params->mi_cap = bf_get(cfg_mi_ver, mbx_sli4_parameters); /* Check for Extended Pre-Registered SGL support */ phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 99d06dc7ddf6b5e8fa993663a17e191a88546722..182aaae6038689a51daa4d2f0a4259245444509c 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1373,7 +1373,6 @@ static void __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { struct lpfc_sglq *sglq; - size_t start_clean = offsetof(struct lpfc_iocbq, wqe); unsigned long iflag = 0; struct lpfc_sli_ring *pring; @@ -1430,7 +1429,7 @@ out: /* * Clean all volatile data fields, preserve iotag and node struct. */ - memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); + memset_startat(iocbq, 0, wqe); iocbq->sli4_lxritag = NO_XRI; iocbq->sli4_xritag = NO_XRI; iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF | @@ -1453,12 +1452,11 @@ out: static void __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { - size_t start_clean = offsetof(struct lpfc_iocbq, iocb); /* * Clean all volatile data fields, preserve iotag and node struct. */ - memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); + memset_startat(iocbq, 0, iocb); iocbq->sli4_xritag = NO_XRI; list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); } @@ -1848,6 +1846,24 @@ lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, phba->cmf_link_byte_count); bwpcent = div64_u64(bw * 100 + slop, phba->cmf_link_byte_count); + /* Because of bytes adjustment due to shorter timer in + * lpfc_cmf_timer() the cmf_link_byte_count can be shorter and + * may seem like BW is above 100%. 
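The reorder above matters numerically: max_eq carries no WQ reservation, so the four reserved WQs must be subtracted from the WQ/CQ minimum before clamping against the EQ count. With the hypothetical values below, the old order yields 56 usable queues where the new order yields 60:

#include <stdio.h>

int main(void)
{
	int max_wq = 64, max_cq = 64, max_eq = 60, qmin;

	qmin = max_wq < max_cq ? max_wq : max_cq;
	qmin -= 4;			/* ELS, NVME LS, MBOX, one extra */
	if (max_eq < qmin)
		qmin = max_eq;		/* EQs carry no WQ reservation */

	printf("usable hdw queues: %d\n", qmin);	/* 60, not 56 */
	return 0;
}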
+ */ + if (bwpcent > 100) + bwpcent = 100; + + if (phba->cmf_max_bytes_per_interval < bw && + bwpcent > 95) + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6208 Congestion bandwidth " + "limits removed\n"); + else if ((phba->cmf_max_bytes_per_interval > bw) && + ((bwpcent + pcent) <= 100) && ((bwpcent + pcent) > 95)) + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6209 Congestion bandwidth " + "limits in effect\n"); + if (asig) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6237 BW Threshold %lld%% (%lld): " @@ -8150,10 +8166,10 @@ u32 lpfc_rx_monitor_report(struct lpfc_hba *phba, "IO_cnt", "Info", "BWutil(ms)"); } - /* Needs to be _bh because record is called from timer interrupt + /* Needs to be _irq because record is called from timer interrupt * context */ - spin_lock_bh(ring_lock); + spin_lock_irq(ring_lock); while (*head_idx != *tail_idx) { entry = &ring[*head_idx]; @@ -8197,7 +8213,7 @@ u32 lpfc_rx_monitor_report(struct lpfc_hba *phba, if (cnt >= max_read_entries) break; } - spin_unlock_bh(ring_lock); + spin_unlock_irq(ring_lock); return cnt; } @@ -8354,6 +8370,7 @@ no_cmf: phba->cgn_i = NULL; /* Ensure CGN Mode is off */ phba->cmf_active_mode = LPFC_CFG_OFF; + sli4_params->cmf = 0; return 0; } } diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index cbb1aa1cf025bacf16c019a6342da43681a7560d..f927c2a25d5461e6f05285cae802edcc5110d4ae 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -556,6 +556,7 @@ struct lpfc_pc_sli4_params { #define LPFC_MIB3_SUPPORT 3 uint16_t mi_value; #define LPFC_DFLT_MIB_VAL 2 + uint8_t mi_cap; uint8_t mib_bde_cnt; uint8_t cmf; uint8_t cqv; diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 192d5630a44daf3509e54476f3969e1102ae5ffb..41a1128f86514e0888ee8e8fd40d2572aea66d4f 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.2.0.7" +#define LPFC_DRIVER_VERSION "14.2.0.9" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index d265a2d9d08245e2e65a4f62d7d0e810514ac87a..3ceece9883383b78969fac6c87bcca3bbd5a61b1 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2927,15 +2927,14 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd) * Sets the FW busy flag and reduces the host->can_queue if the * cmd has not been completed within the timeout period. 
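The new clamp above protects the percentage computed with div64_u64(): a shortened timer interval can make the byte count exceed the nominal link capacity, pushing the raw ratio past 100%. A userspace rendering of the arithmetic (the slop term here is a guessed rounding aid, not the driver's exact value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bw = 1040, link_bytes = 1000;
	uint64_t slop = link_bytes / 200;	/* assumed rounding aid */
	uint64_t pct = (bw * 100 + slop) / link_bytes;

	if (pct > 100)
		pct = 100;	/* shorter interval can overshoot capacity */
	printf("bw utilisation: %llu%%\n", (unsigned long long)pct);
	return 0;
}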
*/ -static enum -blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) +static enum scsi_timeout_action megasas_reset_timer(struct scsi_cmnd *scmd) { struct megasas_instance *instance; unsigned long flags; if (time_after(jiffies, scmd->jiffies_at_alloc + (scmd_timeout * 2) * HZ)) { - return BLK_EH_DONE; + return SCSI_EH_NOT_HANDLED; } instance = (struct megasas_instance *)scmd->device->host->hostdata; @@ -2949,7 +2948,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) spin_unlock_irqrestore(instance->host->host_lock, flags); } - return BLK_EH_RESET_TIMER; + return SCSI_EH_RESET_TIMER; } /** diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 6650f8c8e9b046fc845735547fb5e10d88e62067..fe70f8f114352bfb0d8d252de39f676fc5ec850f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -80,7 +80,7 @@ static void megasas_fusion_crash_dump(struct megasas_instance *instance); * @ocr_context: If called from OCR context this will * be set to 1, else 0 * - * This function initates a chip reset followed by a wait for controller to + * This function initiates a chip reset followed by a wait for controller to * transition to ready state. * During this, driver will block all access to PCI config space from userspace */ @@ -334,7 +334,7 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance, * * This function is only for fusion controllers. * Update host can queue, if firmware downgrade max supported firmware commands. - * Firmware upgrade case will be skiped because underlying firmware has + * Firmware upgrade case will be skipped because underlying firmware has * more resource than exposed to the OS. * */ @@ -2588,7 +2588,7 @@ static void megasas_stream_detect(struct megasas_instance *instance, if ((io_info->ldStartBlock != current_sd->next_seq_lba) && ((!io_info->isRead) || (!is_read_ahead))) /* - * Once the API availible we need to change this. + * Once the API is available we need to change this. 
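megasas_reset_timer() above is one of several conversions in this series from the block layer's enum blk_eh_timer_return to the SCSI-specific enum scsi_timeout_action. A runnable model with the enum mirrored locally (the kernel value names are real; the adapter-state test is a stand-in):

#include <stdbool.h>
#include <stdio.h>

/* Userspace mirror of the kernel's enum scsi_timeout_action. */
enum scsi_timeout_action {
	SCSI_EH_DONE,		/* driver completed the command itself */
	SCSI_EH_RESET_TIMER,	/* rearm the timer, command still live */
	SCSI_EH_NOT_HANDLED,	/* fall through to generic SCSI EH */
};

static bool firmware_busy = true;	/* stand-in for adapter state */

static enum scsi_timeout_action demo_timed_out(void)
{
	/* The megaraid handler rearms while the FW is merely congested
	 * and defers to common EH once its hard deadline has passed. */
	return firmware_busy ? SCSI_EH_RESET_TIMER : SCSI_EH_NOT_HANDLED;
}

int main(void)
{
	printf("action: %d\n", demo_timed_out());
	return 0;
}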
* At this point we are not allowing any gap */ continue; @@ -4650,7 +4650,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, } /* - * megasas_fusion_smid_lookup : Look for fusion command correpspodning to SCSI + * megasas_fusion_smid_lookup : Look for fusion command corresponding to SCSI * @instance: per adapter struct * * Return Non Zero index, if SMID found in outstanding commands diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 0681daee6c1495aad818e7131a1a44a79bd97b21..e5ecd6ada6cdd851e833fce012d1e6f987bc3c3d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -829,6 +829,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, if ((sas_rphy_add(rphy))) { ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); + sas_rphy_free(rphy); + rphy = NULL; } if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h index 7123a2efbf5836b8610539564882434c85bcc792..8ef174cd4d374b3f12686fcebf303e4d96900249 100644 --- a/drivers/scsi/mvsas/mv_defs.h +++ b/drivers/scsi/mvsas/mv_defs.h @@ -40,6 +40,7 @@ enum driver_configuration { MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ MVS_OAF_SZ = 64, /* Open address frame buffer size */ MVS_QUEUE_SIZE = 64, /* Support Queue depth */ + MVS_RSVD_SLOTS = 4, MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2, }; diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 2fde496fff5f7409c9fbab5e0cad45a8f1af9903..cfe84473a515389f0e88b19b04d390827bec7cb3 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -142,7 +142,7 @@ static void mvs_free(struct mvs_info *mvi) scsi_host_put(mvi->shost); list_for_each_entry(mwq, &mvi->wq_list, entry) cancel_delayed_work(&mwq->work_q); - kfree(mvi->tags); + kfree(mvi->rsvd_tags); kfree(mvi); } @@ -284,10 +284,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name); goto err_out; } - mvi->tags_num = slot_nr; - /* Initialize tags */ - mvs_tag_init(mvi); return 0; err_out: return 1; @@ -369,8 +366,8 @@ static struct mvs_info *mvs_pci_alloc(struct pci_dev *pdev, mvi->sas = sha; mvi->shost = shost; - mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ>>3, GFP_KERNEL); - if (!mvi->tags) + mvi->rsvd_tags = bitmap_zalloc(MVS_RSVD_SLOTS, GFP_KERNEL); + if (!mvi->rsvd_tags) goto err_out; if (MVS_CHIP_DISP->chip_ioremap(mvi)) @@ -471,6 +468,8 @@ static void mvs_post_sas_ha_init(struct Scsi_Host *shost, else can_queue = MVS_CHIP_SLOT_SZ; + can_queue -= MVS_RSVD_SLOTS; + shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG); shost->can_queue = can_queue; mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE; diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index a6867dae0e7c21d4c9f085f608bba79b68164375..9978c424214cda590152c65ca5ea6d8c541dd69b 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -20,44 +20,40 @@ static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) return 0; } -void mvs_tag_clear(struct mvs_info *mvi, u32 tag) +static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) { - void *bitmap = mvi->tags; + void *bitmap = mvi->rsvd_tags; clear_bit(tag, bitmap); } -void mvs_tag_free(struct mvs_info *mvi, u32 tag) +static void mvs_tag_free(struct mvs_info *mvi, u32 tag) { + if (tag >= MVS_RSVD_SLOTS) + return; + mvs_tag_clear(mvi, 
tag); } -void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) +static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) { - void *bitmap = mvi->tags; + void *bitmap = mvi->rsvd_tags; set_bit(tag, bitmap); } -inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) +static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) { unsigned int index, tag; - void *bitmap = mvi->tags; + void *bitmap = mvi->rsvd_tags; - index = find_first_zero_bit(bitmap, mvi->tags_num); + index = find_first_zero_bit(bitmap, MVS_RSVD_SLOTS); tag = index; - if (tag >= mvi->tags_num) + if (tag >= MVS_RSVD_SLOTS) return -SAS_QUEUE_FULL; mvs_tag_set(mvi, tag); *tag_out = tag; return 0; } -void mvs_tag_init(struct mvs_info *mvi) -{ - int i; - for (i = 0; i < mvi->tags_num; ++i) - mvs_tag_clear(mvi, i); -} - static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) { unsigned long i = 0, j = 0, hi = 0; @@ -703,6 +699,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf struct mvs_task_exec_info tei; struct mvs_slot_info *slot; u32 tag = 0xdeadbeef, n_elem = 0; + struct request *rq; int rc = 0; if (!dev->port) { @@ -767,9 +764,14 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf n_elem = task->num_scatter; } - rc = mvs_tag_alloc(mvi, &tag); - if (rc) - goto err_out; + rq = sas_task_find_rq(task); + if (rq) { + tag = rq->tag + MVS_RSVD_SLOTS; + } else { + rc = mvs_tag_alloc(mvi, &tag); + if (rc) + goto err_out; + } slot = &mvi->slot_info[tag]; @@ -864,7 +866,7 @@ int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags) static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) { u32 slot_idx = rx_desc & RXQ_SLOT_MASK; - mvs_tag_clear(mvi, slot_idx); + mvs_tag_free(mvi, slot_idx); } static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, @@ -1190,23 +1192,16 @@ static int mvs_dev_found_notify(struct domain_device *dev, int lock) mvi_device->sas_device = dev; if (parent_dev && dev_is_expander(parent_dev->dev_type)) { int phy_id; - u8 phy_num = parent_dev->ex_dev.num_phys; - struct ex_phy *phy; - for (phy_id = 0; phy_id < phy_num; phy_id++) { - phy = &parent_dev->ex_dev.ex_phy[phy_id]; - if (SAS_ADDR(phy->attached_sas_addr) == - SAS_ADDR(dev->sas_addr)) { - mvi_device->attached_phy = phy_id; - break; - } - } - if (phy_id == phy_num) { + phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev); + if (phy_id < 0) { mv_printk("Error: no attached dev:%016llx" "at ex:%016llx.\n", SAS_ADDR(dev->sas_addr), SAS_ADDR(parent_dev->sas_addr)); - res = -1; + res = phy_id; + } else { + mvi_device->attached_phy = phy_id; } } diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 509d8f32a04ffd23e3c657a59caa8f9148576c86..68df771e29759d251dca0e3354ecca5942b6afee 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -370,8 +370,7 @@ struct mvs_info { u32 chip_id; const struct mvs_chip_info *chip; - int tags_num; - unsigned long *tags; + unsigned long *rsvd_tags; /* further per-slot information */ struct mvs_phy phy[MVS_MAX_PHYS]; struct mvs_port port[MVS_MAX_PHYS]; @@ -424,11 +423,6 @@ struct mvs_task_exec_info { /******************** function prototype *********************/ void mvs_get_sas_addr(void *buf, u32 buflen); -void mvs_tag_clear(struct mvs_info *mvi, u32 tag); -void mvs_tag_free(struct mvs_info *mvi, u32 tag); -void mvs_tag_set(struct mvs_info *mvi, unsigned int tag); -int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out); -void mvs_tag_init(struct mvs_info *mvi); 
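The mvsas rework above splits the tag space: block-layer commands reuse rq->tag offset past MVS_RSVD_SLOTS, and only driver-internal commands still draw from the (now tiny) bitmap. A userspace model of the split:

#include <stdint.h>
#include <stdio.h>

#define RSVD_SLOTS 4	/* driver-internal commands only */
#define HW_SLOTS   64

static uint64_t rsvd_bitmap;	/* bit i set == reserved tag i in use */

/* Internal commands draw from the small reserved region... */
static int rsvd_tag_alloc(void)
{
	for (int t = 0; t < RSVD_SLOTS; t++) {
		if (!(rsvd_bitmap & (1ull << t))) {
			rsvd_bitmap |= 1ull << t;
			return t;
		}
	}
	return -1;	/* queue full */
}

/* ...while block-layer commands reuse the request tag, shifted past
 * the reserved region, so no per-command bitmap search is needed. */
static int rq_to_slot(int rq_tag) { return rq_tag + RSVD_SLOTS; }

int main(void)
{
	printf("internal slot: %d\n", rsvd_tag_alloc());
	printf("rq tag 0 -> slot %d (of %d)\n", rq_to_slot(0), HW_SLOTS);
	return 0;
}

pm8001 receives the same treatment later in the series, with a spinlock guarding its reserved bitmap.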
void mvs_iounmap(void __iomem *regs); int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex); void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard); diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index 05d3ce9b72dba17d0732af4b7da1f985fd94b25f..b3dcb891861865eb3d5940d6253e30bb5cd35f90 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -2109,7 +2109,7 @@ out_return_cmd: return 0; } -static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd) +static enum scsi_timeout_action mvumi_timed_out(struct scsi_cmnd *scmd) { struct mvumi_cmd *cmd = mvumi_priv(scmd)->cmd_priv; struct Scsi_Host *host = scmd->device->host; @@ -2137,7 +2137,7 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd) mvumi_return_cmd(mhba, cmd); spin_unlock_irqrestore(mhba->shost->host_lock, flags); - return BLK_EH_DONE; + return SCSI_EH_NOT_HANDLED; } static int diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 48acab03a8a00320ed1c468663601370d817677c..a5a1406a2bdef584e664ef41cdf5968bb55e105f 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -450,8 +450,6 @@ static int nsp_analyze_sdtr(struct scsi_cmnd *SCpnt) sync_data *sync = &(data->Sync[target]); struct nsp_sync_table *sync_table; unsigned int period, offset; - int i; - nsp_dbg(NSP_DEBUG_SYNC, "in"); @@ -466,7 +464,7 @@ static int nsp_analyze_sdtr(struct scsi_cmnd *SCpnt) sync_table = nsp_sync_table_40M; } - for ( i = 0; sync_table->max_period != 0; i++, sync_table++) { + for (; sync_table->max_period != 0; sync_table++) { if ( period >= sync_table->min_period && period <= sync_table->max_period ) { break; diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 628b08ba6770bff66f861aae9dc09c02a1a48b96..ec1a9ab6181466557ec3d70daba5b0f1f04cb5eb 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1724,7 +1724,14 @@ void pm8001_work_fn(struct work_struct *work) pm8001_free_dev(pm8001_dev); } } - } break; + } + break; + case IO_XFER_ERROR_ABORTED_NCQ_MODE: + { + dev = pm8001_dev->sas_device; + sas_ata_device_link_abort(dev, false); + } + break; } kfree(pw); } @@ -1748,110 +1755,6 @@ int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, return ret; } -static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, - struct pm8001_device *pm8001_ha_dev) -{ - struct pm8001_ccb_info *ccb; - struct sas_task *task; - struct task_abort_req task_abort; - u32 opc = OPC_INB_SATA_ABORT; - int ret; - - pm8001_ha_dev->id |= NCQ_ABORT_ALL_FLAG; - pm8001_ha_dev->id &= ~NCQ_READ_LOG_FLAG; - - task = sas_alloc_slow_task(GFP_ATOMIC); - if (!task) { - pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n"); - return; - } - - task->task_done = pm8001_task_done; - - ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task); - if (!ccb) { - sas_free_task(task); - return; - } - - memset(&task_abort, 0, sizeof(task_abort)); - task_abort.abort_all = cpu_to_le32(1); - task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); - task_abort.tag = cpu_to_le32(ccb->ccb_tag); - - ret = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort, - sizeof(task_abort), 0); - if (ret) { - sas_free_task(task); - pm8001_ccb_free(pm8001_ha, ccb); - } -} - -static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, - struct pm8001_device *pm8001_ha_dev) -{ - struct sata_start_req sata_cmd; - int res; - struct pm8001_ccb_info *ccb; - struct sas_task *task = NULL; - struct host_to_dev_fis fis; - 
struct domain_device *dev; - u32 opc = OPC_INB_SATA_HOST_OPSTART; - - task = sas_alloc_slow_task(GFP_ATOMIC); - if (!task) { - pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n"); - return; - } - task->task_done = pm8001_task_done; - - /* - * Allocate domain device by ourselves as libsas is not going to - * provide any. - */ - dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); - if (!dev) { - sas_free_task(task); - pm8001_dbg(pm8001_ha, FAIL, - "Domain device cannot be allocated\n"); - return; - } - task->dev = dev; - task->dev->lldd_dev = pm8001_ha_dev; - - ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task); - if (!ccb) { - sas_free_task(task); - kfree(dev); - return; - } - - pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; - pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; - - /* construct read log FIS */ - memset(&fis, 0, sizeof(struct host_to_dev_fis)); - fis.fis_type = 0x27; - fis.flags = 0x80; - fis.command = ATA_CMD_READ_LOG_EXT; - fis.lbal = 0x10; - fis.sector_count = 0x1; - - memset(&sata_cmd, 0, sizeof(sata_cmd)); - sata_cmd.tag = cpu_to_le32(ccb->ccb_tag); - sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); - sata_cmd.ncqtag_atap_dir_m = cpu_to_le32((0x1 << 7) | (0x5 << 9)); - memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); - - res = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd, - sizeof(sata_cmd), 0); - if (res) { - sas_free_task(task); - pm8001_ccb_free(pm8001_ha, ccb); - kfree(dev); - } -} - /** * mpi_ssp_completion- process the event that FW response to the SSP request. * @pm8001_ha: our hba card information @@ -2295,12 +2198,13 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) if (t->dev && (t->dev->lldd_dev)) pm8001_dev = t->dev->lldd_dev; } else { - pm8001_dbg(pm8001_ha, FAIL, "task null\n"); + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); return; } - if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG)) - && unlikely(!t || !t->lldd_task || !t->dev)) { + if (pm8001_dev && unlikely(!t || !t->lldd_task || !t->dev)) { pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n"); return; } @@ -2358,15 +2262,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) if (param == 0) { ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_SAM_STAT_GOOD; - /* check if response is for SEND READ LOG */ - if (pm8001_dev && - (pm8001_dev->id & NCQ_READ_LOG_FLAG)) { - pm8001_send_abort_all(pm8001_ha, pm8001_dev); - /* Free the tag */ - pm8001_tag_free(pm8001_ha, tag); - sas_free_task(t); - return; - } } else { u8 len; ts->resp = SAS_TASK_COMPLETE; @@ -2664,9 +2559,10 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { /* find device using device id */ pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); - /* send read log extension */ if (pm8001_dev) - pm8001_send_read_log(pm8001_ha, pm8001_dev); + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_XFER_ERROR_ABORTED_NCQ_MODE); return; } @@ -2675,8 +2571,17 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_dev = ccb->device; if (event) pm8001_dbg(pm8001_ha, FAIL, "sata IO status 0x%x\n", event); - if (unlikely(!t || !t->lldd_task || !t->dev)) + + if (unlikely(!t)) { + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); return; + } + + if (unlikely(!t->lldd_task || !t->dev)) + return; + ts = &t->task_status; pm8001_dbg(pm8001_ha, DEVIO, "port_id:0x%x, 
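The pm8001_send_read_log()/pm8001_send_abort_all() machinery being deleted above hand-rolled READ LOG EXT against a driver-fabricated domain_device. The replacement routes the NCQ error event to sas_ata_device_link_abort(), letting libata's own error handling issue the read log and requeue. A schematic of the new dispatch, with stubbed names:

#include <stdio.h>

enum hba_event { IO_XFER_ERROR_ABORTED_NCQ_MODE = 1 };

/* Stand-in for libsas' sas_ata_device_link_abort(): marking the link
 * aborted makes libata run its NCQ error handling (READ LOG EXT,
 * requeue), replacing the driver's hand-rolled path. */
static void ata_link_abort(const char *dev, int force_reset)
{
	printf("abort link of %s (force_reset=%d)\n", dev, force_reset);
}

static void work_fn(enum hba_event ev, const char *dev)
{
	switch (ev) {
	case IO_XFER_ERROR_ABORTED_NCQ_MODE:
		ata_link_abort(dev, 0);
		break;
	}
}

int main(void)
{
	work_fn(IO_XFER_ERROR_ABORTED_NCQ_MODE, "sata-dev0");
	return 0;
}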
device_id:0x%x, tag:0x%x, event:0x%x\n", @@ -3638,12 +3543,7 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_task_free(pm8001_ha, ccb); mb(); - if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) { - sas_free_task(t); - pm8001_dev->id &= ~NCQ_ABORT_ALL_FLAG; - } else { - t->task_done(t); - } + t->task_done(t); return 0; } @@ -4195,7 +4095,6 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u64 phys_addr; u32 ATAP = 0x0; u32 dir; - unsigned long flags; u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); @@ -4250,39 +4149,6 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.esgl = 0; } - /* Check for read log for failed drive and return */ - if (sata_cmd.sata_fis.command == 0x2f) { - if (((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || - (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || - (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { - struct task_status_struct *ts; - - pm8001_ha_dev->id &= 0xDFFFFFFF; - ts = &task->task_status; - - spin_lock_irqsave(&task->task_state_lock, flags); - ts->resp = SAS_TASK_COMPLETE; - ts->stat = SAS_SAM_STAT_GOOD; - task->task_state_flags &= ~SAS_TASK_STATE_PENDING; - task->task_state_flags |= SAS_TASK_STATE_DONE; - if (unlikely((task->task_state_flags & - SAS_TASK_STATE_ABORTED))) { - spin_unlock_irqrestore(&task->task_state_lock, - flags); - pm8001_dbg(pm8001_ha, FAIL, - "task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n", - task, ts->resp, - ts->stat); - pm8001_ccb_task_free(pm8001_ha, ccb); - } else { - spin_unlock_irqrestore(&task->task_state_lock, - flags); - pm8001_ccb_task_free_done(pm8001_ha, ccb); - return 0; - } - } - } - return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd, sizeof(sata_cmd), 0); } diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 7a7d63aa90e219d4d62e1a9fe7d43f48374386e1..7e589fe3e01020c210795911adce7fc14cd30aa3 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -197,7 +197,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha) } PM8001_CHIP_DISP->chip_iounmap(pm8001_ha); flush_workqueue(pm8001_wq); - bitmap_free(pm8001_ha->tags); + bitmap_free(pm8001_ha->rsvd_tags); kfree(pm8001_ha); } @@ -437,8 +437,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, atomic_set(&pm8001_ha->devices[i].running_req, 0); } pm8001_ha->flags = PM8001F_INIT_TIME; - /* Initialize tags */ - pm8001_tag_init(pm8001_ha); return 0; err_out_nodev: @@ -1211,18 +1209,15 @@ static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha) struct Scsi_Host *shost = pm8001_ha->shost; struct device *dev = pm8001_ha->dev; u32 max_out_io, ccb_count; - u32 can_queue; int i; max_out_io = pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io; ccb_count = min_t(int, PM8001_MAX_CCB, max_out_io); - /* Update to the scsi host*/ - can_queue = ccb_count - PM8001_RESERVE_SLOT; - shost->can_queue = can_queue; + shost->can_queue = ccb_count - PM8001_RESERVE_SLOT; - pm8001_ha->tags = bitmap_zalloc(ccb_count, GFP_KERNEL); - if (!pm8001_ha->tags) + pm8001_ha->rsvd_tags = bitmap_zalloc(PM8001_RESERVE_SLOT, GFP_KERNEL); + if (!pm8001_ha->rsvd_tags) goto err_out; /* Memory region for ccb_info*/ @@ -1247,7 +1242,6 @@ static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha) pm8001_ha->ccb_info[i].task = NULL; pm8001_ha->ccb_info[i].ccb_tag = PM8001_INVALID_TAG; pm8001_ha->ccb_info[i].device = NULL; - ++pm8001_ha->tags_num; } return 0; diff --git a/drivers/scsi/pm8001/pm8001_sas.c 
b/drivers/scsi/pm8001/pm8001_sas.c index 8e3f2f9ddaacd037cb34b5181179fccd7bbdaaff..e5673c774f66d7e3be116063fe4a8b31bf55be66 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -65,9 +65,12 @@ static int pm8001_find_tag(struct sas_task *task, u32 *tag) */ void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) { - void *bitmap = pm8001_ha->tags; + void *bitmap = pm8001_ha->rsvd_tags; unsigned long flags; + if (tag >= PM8001_RESERVE_SLOT) + return; + spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags); __clear_bit(tag, bitmap); spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); @@ -80,29 +83,24 @@ void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) */ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out) { - void *bitmap = pm8001_ha->tags; + void *bitmap = pm8001_ha->rsvd_tags; unsigned long flags; unsigned int tag; spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags); - tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num); - if (tag >= pm8001_ha->tags_num) { + tag = find_first_zero_bit(bitmap, PM8001_RESERVE_SLOT); + if (tag >= PM8001_RESERVE_SLOT) { spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); return -SAS_QUEUE_FULL; } __set_bit(tag, bitmap); spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); + + /* reserved tags are in the lower region of the tagset */ *tag_out = tag; return 0; } -void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha) -{ - int i; - for (i = 0; i < pm8001_ha->tags_num; ++i) - pm8001_tag_free(pm8001_ha, i); -} - /** * pm8001_mem_alloc - allocate memory for pm8001. * @pdev: pci device. @@ -645,22 +643,16 @@ static int pm8001_dev_found_notify(struct domain_device *dev) pm8001_device->dcompletion = &completion; if (parent_dev && dev_is_expander(parent_dev->dev_type)) { int phy_id; - struct ex_phy *phy; - for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys; - phy_id++) { - phy = &parent_dev->ex_dev.ex_phy[phy_id]; - if (SAS_ADDR(phy->attached_sas_addr) - == SAS_ADDR(dev->sas_addr)) { - pm8001_device->attached_phy = phy_id; - break; - } - } - if (phy_id == parent_dev->ex_dev.num_phys) { + + phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev); + if (phy_id < 0) { pm8001_dbg(pm8001_ha, FAIL, "Error: no attached dev:%016llx at ex:%016llx.\n", SAS_ADDR(dev->sas_addr), SAS_ADDR(parent_dev->sas_addr)); - res = -1; + res = phy_id; + } else { + pm8001_device->attached_phy = phy_id; } } else { if (dev->dev_type == SAS_SATA_DEV) { @@ -687,12 +679,6 @@ int pm8001_dev_found(struct domain_device *dev) return pm8001_dev_found_notify(dev); } -void pm8001_task_done(struct sas_task *task) -{ - del_timer(&task->slow_task->timer); - complete(&task->slow_task->completion); -} - #define PM8001_TASK_TIMEOUT 20 /** @@ -983,6 +969,7 @@ int pm8001_query_task(struct sas_task *task) /* mandatory SAM-3, still need free task/ccb info, abort the specified task */ int pm8001_abort_task(struct sas_task *task) { + struct pm8001_ccb_info *ccb = task->lldd_task; unsigned long flags; u32 tag; struct domain_device *dev ; @@ -992,7 +979,7 @@ int pm8001_abort_task(struct sas_task *task) u32 phy_id, port_id; struct sas_task_slow slow_task; - if (unlikely(!task || !task->lldd_task || !task->dev)) + if (!task->lldd_task || !task->dev) return TMF_RESP_FUNC_FAILED; dev = task->dev; @@ -1113,6 +1100,13 @@ int pm8001_abort_task(struct sas_task *task) pm8001_dev, DS_OPERATIONAL); wait_for_completion(&completion); } else { + /* + * Ensure that if we see a completion for the ccb + * associated with the task which we are 
trying to + * abort then we should not touch the sas_task as it + * may race with libsas freeing it when return here. + */ + ccb->task = NULL; ret = sas_execute_internal_abort_single(dev, tag, 0, NULL); } rc = TMF_RESP_FUNC_COMPLETE; diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index b08f52673889c682ec5fb003797fc0c5d1b4af4b..dc1f4d958e03d6dc09ffe03c51fe165f5b7fc19f 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -510,8 +510,7 @@ struct pm8001_hba_info { u32 chip_id; const struct pm8001_chip_info *chip; struct completion *nvmd_completion; - int tags_num; - unsigned long *tags; + unsigned long *rsvd_tags; struct pm8001_phy phy[PM8001_MAX_PHYS]; struct pm8001_port port[PM8001_MAX_PHYS]; u32 id; @@ -535,7 +534,6 @@ struct pm8001_hba_info { bool controller_fatal_error; const struct firmware *fw_image; struct isr_param irq_vector[PM8001_MAX_MSIX_VEC]; - u32 reset_in_progress; u32 non_fatal_count; u32 non_fatal_read_length; u32 max_q_num; @@ -579,10 +577,6 @@ struct pm8001_fw_image_header { #define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10 #define FLASH_UPDATE_DISABLED 0x11 -#define NCQ_READ_LOG_FLAG 0x80000000 -#define NCQ_ABORT_ALL_FLAG 0x40000000 -#define NCQ_2ND_RLE_FLAG 0x20000000 - /* Device states */ #define DS_OPERATIONAL 0x01 #define DS_PORT_IN_RESET 0x02 @@ -636,7 +630,6 @@ extern struct workqueue_struct *pm8001_wq; /******************** function prototype *********************/ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out); -void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha); u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag); void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb); @@ -709,7 +702,6 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb); int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); struct sas_task *pm8001_alloc_task(void); -void pm8001_task_done(struct sas_task *task); void pm8001_free_task(struct sas_task *task); void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag); struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, @@ -742,9 +734,15 @@ pm8001_ccb_alloc(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *dev, struct sas_task *task) { struct pm8001_ccb_info *ccb; + struct request *rq = NULL; u32 tag; - if (pm8001_tag_alloc(pm8001_ha, &tag)) { + if (task) + rq = sas_task_find_rq(task); + + if (rq) { + tag = rq->tag + PM8001_RESERVE_SLOT; + } else if (pm8001_tag_alloc(pm8001_ha, &tag)) { pm8001_dbg(pm8001_ha, FAIL, "Failed to allocate a tag\n"); return NULL; } diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index f8b8624458f73f9b56bb17aafaa67a0aa1558ff6..9584cadc420137940c9033d115c89457c6365bfc 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -1778,113 +1778,6 @@ pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) pm80xx_chip_intx_interrupt_disable(pm8001_ha); } -static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha, - struct pm8001_device *pm8001_ha_dev) -{ - struct pm8001_ccb_info *ccb; - struct sas_task *task; - struct task_abort_req task_abort; - u32 opc = OPC_INB_SATA_ABORT; - int ret; - - pm8001_ha_dev->id |= NCQ_ABORT_ALL_FLAG; - pm8001_ha_dev->id &= ~NCQ_READ_LOG_FLAG; - - task = sas_alloc_slow_task(GFP_ATOMIC); - if (!task) { - pm8001_dbg(pm8001_ha, 
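The ccb->task = NULL assignment above is the core of the fix: once the internal abort is issued, libsas may free the sas_task before pm8001_abort_task() returns, so the completion path must no longer be able to reach it through the CCB. A minimal model of the detach-before-abort ordering:

#include <stdio.h>

struct sas_task { int id; };
struct ccb { struct sas_task *task; };

/* Detach the task from the CCB before issuing the internal abort, so
 * a completion racing with the abort cannot dereference a sas_task
 * that libsas may already have freed. */
static void abort_task(struct ccb *ccb)
{
	ccb->task = NULL;	/* completion path now sees no task */
	/* ...issue internal abort and wait for it... */
}

int main(void)
{
	struct sas_task t = { 1 };
	struct ccb c = { &t };

	abort_task(&c);
	printf("ccb->task: %p\n", (void *)c.task);
	return 0;
}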
FAIL, "cannot allocate task\n"); - return; - } - task->task_done = pm8001_task_done; - - ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task); - if (!ccb) { - sas_free_task(task); - return; - } - - memset(&task_abort, 0, sizeof(task_abort)); - task_abort.abort_all = cpu_to_le32(1); - task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); - task_abort.tag = cpu_to_le32(ccb->ccb_tag); - - ret = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort, - sizeof(task_abort), 0); - pm8001_dbg(pm8001_ha, FAIL, "Executing abort task end\n"); - if (ret) { - sas_free_task(task); - pm8001_ccb_free(pm8001_ha, ccb); - } -} - -static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, - struct pm8001_device *pm8001_ha_dev) -{ - struct sata_start_req sata_cmd; - int res; - struct pm8001_ccb_info *ccb; - struct sas_task *task = NULL; - struct host_to_dev_fis fis; - struct domain_device *dev; - u32 opc = OPC_INB_SATA_HOST_OPSTART; - - task = sas_alloc_slow_task(GFP_ATOMIC); - if (!task) { - pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n"); - return; - } - task->task_done = pm8001_task_done; - - /* - * Allocate domain device by ourselves as libsas is not going to - * provide any. - */ - dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); - if (!dev) { - sas_free_task(task); - pm8001_dbg(pm8001_ha, FAIL, - "Domain device cannot be allocated\n"); - return; - } - - task->dev = dev; - task->dev->lldd_dev = pm8001_ha_dev; - - ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task); - if (!ccb) { - sas_free_task(task); - kfree(dev); - return; - } - - pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; - pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; - - memset(&sata_cmd, 0, sizeof(sata_cmd)); - - /* construct read log FIS */ - memset(&fis, 0, sizeof(struct host_to_dev_fis)); - fis.fis_type = 0x27; - fis.flags = 0x80; - fis.command = ATA_CMD_READ_LOG_EXT; - fis.lbal = 0x10; - fis.sector_count = 0x1; - - sata_cmd.tag = cpu_to_le32(ccb->ccb_tag); - sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); - sata_cmd.ncqtag_atap_dir_m_dad = cpu_to_le32(((0x1 << 7) | (0x5 << 9))); - memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); - - res = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd, - sizeof(sata_cmd), 0); - pm8001_dbg(pm8001_ha, FAIL, "Executing read log end\n"); - if (res) { - sas_free_task(task); - pm8001_ccb_free(pm8001_ha, ccb); - kfree(dev); - } -} - /** * mpi_ssp_completion - process the event that FW response to the SSP request. 
* @pm8001_ha: our hba card information @@ -2396,15 +2289,15 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, if (t->dev && (t->dev->lldd_dev)) pm8001_dev = t->dev->lldd_dev; } else { - pm8001_dbg(pm8001_ha, FAIL, "task null\n"); + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); return; } - if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG)) - && unlikely(!t || !t->lldd_task || !t->dev)) { - pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n"); + + if (pm8001_dev && unlikely(!t->lldd_task || !t->dev)) return; - } ts = &t->task_status; @@ -2461,15 +2354,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, if (param == 0) { ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_SAM_STAT_GOOD; - /* check if response is for SEND READ LOG */ - if (pm8001_dev && - (pm8001_dev->id & NCQ_READ_LOG_FLAG)) { - pm80xx_send_abort_all(pm8001_ha, pm8001_dev); - /* Free the tag */ - pm8001_tag_free(pm8001_ha, tag); - sas_free_task(t); - return; - } } else { u8 len; ts->resp = SAS_TASK_COMPLETE; @@ -2804,21 +2688,27 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { /* find device using device id */ pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); - /* send read log extension */ + /* send read log extension by aborting the link - libata does what we want */ if (pm8001_dev) - pm80xx_send_read_log(pm8001_ha, pm8001_dev); + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_XFER_ERROR_ABORTED_NCQ_MODE); return; } ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; pm8001_dev = ccb->device; - - if (unlikely(!t || !t->lldd_task || !t->dev)) { - pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n"); + if (unlikely(!t)) { + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); return; } + if (unlikely(!t->lldd_task || !t->dev)) + return; + ts = &t->task_status; pm8001_dbg(pm8001_ha, IOERR, "port_id:0x%x, tag:0x%x, event:0x%x\n", port_id, tag, event); @@ -3550,10 +3440,6 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) case HW_EVENT_PHY_DOWN: pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n"); hw_event_phy_down(pm8001_ha, piomb); - if (pm8001_ha->reset_in_progress) { - pm8001_dbg(pm8001_ha, MSG, "Reset in progress\n"); - return 0; - } phy->phy_attached = 0; phy->phy_state = PHY_LINK_DISABLE; break; @@ -4357,25 +4243,12 @@ static int check_enc_sat_cmd(struct sas_task *task) static u32 pm80xx_chip_get_q_index(struct sas_task *task) { - struct scsi_cmnd *scmd = NULL; - u32 blk_tag; - - if (task->uldd_task) { - struct ata_queued_cmd *qc; + struct request *rq = sas_task_find_rq(task); - if (dev_is_sata(task->dev)) { - qc = task->uldd_task; - scmd = qc->scsicmd; - } else { - scmd = task->uldd_task; - } - } - - if (!scmd) + if (!rq) return 0; - blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); - return blk_mq_unique_tag_to_hwq(blk_tag); + return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(rq)); } /** @@ -4550,7 +4423,6 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u32 end_addr_high, end_addr_low; u32 ATAP = 0x0; u32 dir; - unsigned long flags; u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); @@ -4729,40 +4601,6 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, (task->ata_task.atapi_packet[15] << 24))); } - /* Check for read log for failed drive and return */ - if (sata_cmd.sata_fis.command == 0x2f) { - if (pm8001_ha_dev && 
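pm80xx_chip_get_q_index() above shrinks to two block-layer calls because the hardware-queue index is already packed into the unique tag; there is no longer any need to dig through uldd_task for ATA versus SSP commands. A userspace model of that packing (the real layout uses BLK_MQ_UNIQUE_TAG_BITS, assumed to be 16 here):

#include <stdint.h>
#include <stdio.h>

/* Model of blk_mq_unique_tag(): the hw-queue index lives in the
 * upper 16 bits, so recovering it is a shift, not a lookup. */
static uint32_t unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << 16) | tag;
}

static uint16_t unique_tag_to_hwq(uint32_t utag)
{
	return utag >> 16;
}

int main(void)
{
	uint32_t utag = unique_tag(3, 42);

	printf("hwq: %u\n", unique_tag_to_hwq(utag));
	return 0;
}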
((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || - (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || - (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { - struct task_status_struct *ts; - - pm8001_ha_dev->id &= 0xDFFFFFFF; - ts = &task->task_status; - - spin_lock_irqsave(&task->task_state_lock, flags); - ts->resp = SAS_TASK_COMPLETE; - ts->stat = SAS_SAM_STAT_GOOD; - task->task_state_flags &= ~SAS_TASK_STATE_PENDING; - task->task_state_flags |= SAS_TASK_STATE_DONE; - if (unlikely((task->task_state_flags & - SAS_TASK_STATE_ABORTED))) { - spin_unlock_irqrestore(&task->task_state_lock, - flags); - pm8001_dbg(pm8001_ha, FAIL, - "task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n", - task, ts->resp, - ts->stat); - pm8001_ccb_task_free(pm8001_ha, ccb); - return 0; - } else { - spin_unlock_irqrestore(&task->task_state_lock, - flags); - pm8001_ccb_task_free_done(pm8001_ha, ccb); - atomic_dec(&pm8001_ha_dev->running_req); - return 0; - } - } - } trace_pm80xx_request_issue(pm8001_ha->id, ccb->device ? ccb->device->attached_phy : PM8001_MAX_PHYS, ccb->ccb_tag, opc, diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index e045c6e25090281d31da505aac7f06f66b19d193..35e16600fc637defe2a3f67ef838d4f0e12bb559 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2951,7 +2951,6 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf) int i; struct scsi_bd *pbl; u64 *list; - dma_addr_t page; /* Alloc dma memory for BDQ buffers */ for (i = 0; i < QEDF_BDQ_SIZE; i++) { @@ -3012,11 +3011,9 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf) qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size / QEDF_PAGE_SIZE; list = (u64 *)qedf->bdq_pbl_list; - page = qedf->bdq_pbl_list_dma; for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) { *list = qedf->bdq_pbl_dma; list++; - page += QEDF_PAGE_SIZE; } return 0; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 802eec6407d9accf0a9b480487550e8be97940bd..a26a373be9da302290c36fed9fb26f5881459769 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -5136,17 +5136,17 @@ struct secure_flash_update_block_pk { (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \ test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) -#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \ - atomic_inc(&__vha->vref_count); \ - mb(); \ - if (__vha->flags.delete_progress) { \ - atomic_dec(&__vha->vref_count); \ - wake_up(&__vha->vref_waitq); \ - __bail = 1; \ - } else { \ - __bail = 0; \ - } \ -} while (0) +static inline bool qla_vha_mark_busy(scsi_qla_host_t *vha) +{ + atomic_inc(&vha->vref_count); + mb(); + if (vha->flags.delete_progress) { + atomic_dec(&vha->vref_count); + wake_up(&vha->vref_waitq); + return true; + } + return false; +} #define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ atomic_dec(&__vha->vref_count); \ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index e12db95de6883f351bed957f8ce0b35f871ba7d6..ce4c5d728407db70a4a33e0249454d2dde436dde 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -168,7 +168,6 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) struct srb_iocb *abt_iocb; srb_t *sp; int rval = QLA_FUNCTION_FAILED; - uint8_t bail; /* ref: INIT for ABTS command */ sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport, @@ -176,7 +175,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) if (!sp) return QLA_MEMORY_ALLOC_FAILED; - QLA_VHA_MARK_BUSY(vha, bail); + qla_vha_mark_busy(vha); abt_iocb = &sp->u.iocb_cmd; 
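qla_vha_mark_busy() above replaces the QLA_VHA_MARK_BUSY(vha, bail) macro with the same increment-then-check logic, but the bail-out becomes a type-checked return value instead of a write through a caller-supplied variable. A standalone rendering of the pattern (the mb() barrier and waitqueue wakeup are omitted):

#include <stdbool.h>
#include <stdio.h>

struct vha {
	int vref_count;
	bool delete_progress;
};

static inline bool vha_mark_busy(struct vha *vha)
{
	vha->vref_count++;
	if (vha->delete_progress) {
		vha->vref_count--;
		return true;	/* bail: vha is being torn down */
	}
	return false;
}

int main(void)
{
	struct vha vha = { 0, false };

	if (vha_mark_busy(&vha))
		return 1;
	printf("vref_count: %d\n", vha.vref_count);
	return 0;
}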
sp->type = SRB_ABT_CMD; sp->name = "abort"; @@ -2020,14 +2019,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, struct srb_iocb *tm_iocb; srb_t *sp; int rval = QLA_FUNCTION_FAILED; - uint8_t bail; /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; - QLA_VHA_MARK_BUSY(vha, bail); + qla_vha_mark_busy(vha); sp->type = SRB_TM_CMD; sp->name = "tmf"; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), @@ -5546,7 +5544,6 @@ static int qla2x00_configure_local_loop(scsi_qla_host_t *vha) { int rval, rval2; - int found_devs; int found; fc_port_t *fcport, *new_fcport; uint16_t index; @@ -5561,7 +5558,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) if (N2N_TOPO(ha)) return qla2x00_configure_n2n_loop(vha); - found_devs = 0; new_fcport = NULL; entries = MAX_FIBRE_DEVICES_LOOP; @@ -5720,8 +5716,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) /* Base iIDMA settings on HBA port speed. */ fcport->fp_speed = ha->link_data_rate; - - found_devs++; } list_for_each_entry(fcport, &vha->vp_fcports, list) { diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index db17f7f410cddeaf23d899ecea93047f8743d053..5185dc5daf80d47cbf279faf95ce0e0a777603f8 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -225,11 +225,9 @@ static inline srb_t * qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) { srb_t *sp = NULL; - uint8_t bail; struct qla_qpair *qpair; - QLA_VHA_MARK_BUSY(vha, bail); - if (unlikely(bail)) + if (unlikely(qla_vha_mark_busy(vha))) return NULL; qpair = vha->hw->base_qpair; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 2c85f3cce7264d82f3c18ccebeec5eac09656226..7fb28c207ee50ec325e8bbd85954ba797d5691cc 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3284,7 +3284,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->max_cmd_len, host->max_channel, host->max_lun, host->transportt, sht->vendor_id); - INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn); /* Set up the irqs */ @@ -5069,13 +5068,11 @@ struct qla_work_evt * qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) { struct qla_work_evt *e; - uint8_t bail; if (test_bit(UNLOADING, &vha->dpc_flags)) return NULL; - QLA_VHA_MARK_BUSY(vha, bail); - if (bail) + if (qla_vha_mark_busy(vha)) return NULL; e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index bb754a95080231b95eb226b3efe7bcfc4b1d1beb..548f22705ddc8baff5dc6af2c220fdac001ea514 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -6741,6 +6741,9 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) mutex_init(&vha->vha_tgt.tgt_mutex); mutex_init(&vha->vha_tgt.tgt_host_action_mutex); + INIT_LIST_HEAD(&vha->unknown_atio_list); + INIT_DELAYED_WORK(&vha->unknown_atio_work, qlt_unknown_atio_work_fn); + qlt_clear_mode(vha); /* diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 9e849f6b0d0f7df2c0685f1b12e2f63b36596cd7..005502125b270bf0468c47d72935de07860dce3c 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -116,7 +116,7 @@ static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, static int qla4xxx_get_iface_param(struct iscsi_iface *iface, enum iscsi_param_type 
param_type, int param, char *buf); -static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); +static enum scsi_timeout_action qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking); @@ -1871,17 +1871,17 @@ exit_get_stats: return; } -static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) +static enum scsi_timeout_action qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) { struct iscsi_cls_session *session; unsigned long flags; - enum blk_eh_timer_return ret = BLK_EH_DONE; + enum scsi_timeout_action ret = SCSI_EH_NOT_HANDLED; session = starget_to_session(scsi_target(sc->device)); spin_lock_irqsave(&session->lock, flags); if (session->state == ISCSI_SESSION_FAILED) - ret = BLK_EH_RESET_TIMER; + ret = SCSI_EH_RESET_TIMER; spin_unlock_irqrestore(&session->lock, flags); return ret; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index c59eac7a32f2a087f0491b9d594a9e00f7950d47..1426b9b03612f9628e6e85af0484675ab8147f3a 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -563,14 +563,14 @@ int scsi_device_get(struct scsi_device *sdev) { if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL) goto fail; - if (!get_device(&sdev->sdev_gendev)) - goto fail; if (!try_module_get(sdev->host->hostt->module)) - goto fail_put_device; + goto fail; + if (!get_device(&sdev->sdev_gendev)) + goto fail_put_module; return 0; -fail_put_device: - put_device(&sdev->sdev_gendev); +fail_put_module: + module_put(sdev->host->hostt->module); fail: return -ENXIO; } @@ -588,6 +588,8 @@ void scsi_device_put(struct scsi_device *sdev) { struct module *mod = sdev->host->hostt->module; + might_sleep(); + put_device(&sdev->sdev_gendev); module_put(mod); } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index a0797101a8a05ad3352d861e246b3d8adb4843b1..cc6953809a248a2b8def1e71b567fe7f643e2244 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -3785,7 +3785,7 @@ static int resp_write_scat(struct scsi_cmnd *scp, mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return illegal_condition_result; } - lrdp = kzalloc(lbdof_blen, GFP_ATOMIC); + lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN); if (lrdp == NULL) return SCSI_MLQUEUE_HOST_BUSY; if (sdebug_verbose) @@ -4436,7 +4436,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) if (ret) return ret; - arr = kcalloc(lb_size, vnum, GFP_ATOMIC); + arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN); if (!arr) { mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, INSUFF_RES_ASCQ); @@ -4504,7 +4504,7 @@ static int resp_report_zones(struct scsi_cmnd *scp, rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD); - arr = kzalloc(alloc_len, GFP_ATOMIC); + arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN); if (!arr) { mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, INSUFF_RES_ASCQ); @@ -7340,7 +7340,10 @@ clean: kfree(sdbg_devinfo->zstate); kfree(sdbg_devinfo); } - kfree(sdbg_host); + if (sdbg_host->dev.release) + put_device(&sdbg_host->dev); + else + kfree(sdbg_host); pr_warn("%s: failed, errno=%d\n", __func__, -error); return error; } diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index ac5ff0783b4f046b5a63dccf7d50a68c60d964f5..a7960ad2d386a466719e5548a70e7f34b18db5f3 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -328,7 +328,6 @@ void scsi_eh_scmd_add(struct 
scsi_cmnd *scmd) enum blk_eh_timer_return scsi_timeout(struct request *req) { struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); - enum blk_eh_timer_return rtn = BLK_EH_DONE; struct Scsi_Host *host = scmd->device->host; trace_scsi_dispatch_cmd_timeout(scmd); @@ -338,31 +337,30 @@ enum blk_eh_timer_return scsi_timeout(struct request *req) if (host->eh_deadline != -1 && !host->last_reset) host->last_reset = jiffies; - if (host->hostt->eh_timed_out) - rtn = host->hostt->eh_timed_out(scmd); - - if (rtn == BLK_EH_DONE) { - /* - * Set the command to complete first in order to prevent a real - * completion from releasing the command while error handling - * is using it. If the command was already completed, then the - * lower level driver beat the timeout handler, and it is safe - * to return without escalating error recovery. - * - * If timeout handling lost the race to a real completion, the - * block layer may ignore that due to a fake timeout injection, - * so return RESET_TIMER to allow error handling another shot - * at this command. - */ - if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state)) + if (host->hostt->eh_timed_out) { + switch (host->hostt->eh_timed_out(scmd)) { + case SCSI_EH_DONE: + return BLK_EH_DONE; + case SCSI_EH_RESET_TIMER: return BLK_EH_RESET_TIMER; - if (scsi_abort_command(scmd) != SUCCESS) { - set_host_byte(scmd, DID_TIME_OUT); - scsi_eh_scmd_add(scmd); + case SCSI_EH_NOT_HANDLED: + break; } } - return rtn; + /* + * If scsi_done() has already set SCMD_STATE_COMPLETE, do not modify + * *scmd. + */ + if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state)) + return BLK_EH_DONE; + atomic_inc(&scmd->device->iodone_cnt); + if (scsi_abort_command(scmd) != SUCCESS) { + set_host_byte(scmd, DID_TIME_OUT); + scsi_eh_scmd_add(scmd); + } + + return BLK_EH_DONE; } /** diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 2d20da55fb64086ebc66d4702b37ac28c7cc1bea..fdd47565a3115daa33d0d1929108642e83f431c1 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -519,7 +519,7 @@ static int sg_scsi_ioctl(struct request_queue *q, fmode_t mode, return -EFAULT; if (in_len > PAGE_SIZE || out_len > PAGE_SIZE) return -EINVAL; - if (get_user(opcode, sic->data)) + if (get_user(opcode, &sic->data[0])) return -EFAULT; bytes = max(in_len, out_len); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 249757ddd8fea323e3869ba9bbad1947a8cb5d99..9ed1ebcb7443c3b5bec7d474c6f37e5873f5e7cd 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -307,6 +307,18 @@ static void scsi_kick_queue(struct request_queue *q) blk_mq_run_hw_queues(q, false); } +/* + * Kick the queue of SCSI device @sdev if @sdev != current_sdev. Called with + * interrupts disabled. + */ +static void scsi_kick_sdev_queue(struct scsi_device *sdev, void *data) +{ + struct scsi_device *current_sdev = data; + + if (sdev != current_sdev) + blk_mq_run_hw_queues(sdev->request_queue, true); +} + /* * Called for single_lun devices on IO completion. 
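With the scsi_timeout() rework above, LLD ->eh_timed_out() callbacks return the new enum scsi_timeout_action instead of enum blk_eh_timer_return, and SCSI_EH_NOT_HANDLED hands the command to the generic abort path. A hedged sketch of a conforming handler (transport_is_reconnecting() is a hypothetical predicate):

static enum scsi_timeout_action example_eh_timed_out(struct scsi_cmnd *scmd)
{
	/* Keep the timer running while the transport recovers. */
	if (transport_is_reconnecting(scmd->device))
		return SCSI_EH_RESET_TIMER;

	/* Otherwise let the midlayer abort and escalate the command. */
	return SCSI_EH_NOT_HANDLED;
}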
Clear starget_sdev_user, * and call blk_run_queue for all the scsi_devices on the target - @@ -317,7 +329,6 @@ static void scsi_kick_queue(struct request_queue *q) static void scsi_single_lun_run(struct scsi_device *current_sdev) { struct Scsi_Host *shost = current_sdev->host; - struct scsi_device *sdev, *tmp; struct scsi_target *starget = scsi_target(current_sdev); unsigned long flags; @@ -334,22 +345,9 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev) scsi_kick_queue(current_sdev->request_queue); spin_lock_irqsave(shost->host_lock, flags); - if (starget->starget_sdev_user) - goto out; - list_for_each_entry_safe(sdev, tmp, &starget->devices, - same_target_siblings) { - if (sdev == current_sdev) - continue; - if (scsi_device_get(sdev)) - continue; - - spin_unlock_irqrestore(shost->host_lock, flags); - scsi_kick_queue(sdev->request_queue); - spin_lock_irqsave(shost->host_lock, flags); - - scsi_device_put(sdev); - } - out: + if (!starget->starget_sdev_user) + __starget_for_each_device(starget, current_sdev, + scsi_kick_sdev_queue); spin_unlock_irqrestore(shost->host_lock, flags); } @@ -1343,9 +1341,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q, struct scsi_device *sdev, struct scsi_cmnd *cmd) { - if (scsi_host_in_recovery(shost)) - return 0; - if (atomic_read(&shost->host_blocked) > 0) { if (scsi_host_busy(shost) > 0) goto starved; @@ -1469,8 +1464,6 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) struct Scsi_Host *host = cmd->device->host; int rtn = 0; - atomic_inc(&cmd->device->iorequest_cnt); - /* check if the device is still usable */ if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { /* in SDEV_DEL we error all commands. DID_NO_CONNECT @@ -1734,6 +1727,11 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, ret = BLK_STS_RESOURCE; if (!scsi_target_queue_ready(shost, sdev)) goto out_put_budget; + if (unlikely(scsi_host_in_recovery(shost))) { + if (cmd->flags & SCMD_FAIL_IF_RECOVERING) + ret = BLK_STS_OFFLINE; + goto out_dec_target_busy; + } if (!scsi_host_queue_ready(q, shost, sdev, cmd)) goto out_dec_target_busy; @@ -1764,6 +1762,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, goto out_dec_host_busy; } + atomic_inc(&cmd->device->iorequest_cnt); return BLK_STS_OK; out_dec_host_busy: diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index c52de9a973e46622ef521f9418a1b4859c2df5a3..96284a0e13fea26d0d7d35255aa0b6b01585dfa0 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -111,14 +111,14 @@ extern void scsi_evt_thread(struct work_struct *work); /* scsi_proc.c */ #ifdef CONFIG_SCSI_PROC_FS -extern void scsi_proc_hostdir_add(struct scsi_host_template *); -extern void scsi_proc_hostdir_rm(struct scsi_host_template *); +extern int scsi_proc_hostdir_add(const struct scsi_host_template *); +extern void scsi_proc_hostdir_rm(const struct scsi_host_template *); extern void scsi_proc_host_add(struct Scsi_Host *); extern void scsi_proc_host_rm(struct Scsi_Host *); extern int scsi_init_procfs(void); extern void scsi_exit_procfs(void); #else -# define scsi_proc_hostdir_add(sht) do { } while (0) +# define scsi_proc_hostdir_add(sht) 0 # define scsi_proc_hostdir_rm(sht) do { } while (0) # define scsi_proc_host_add(shost) do { } while (0) # define scsi_proc_host_rm(shost) do { } while (0) diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index 95aee1ad1383408482fd4000893b8f141ee5dea5..4a6eb1741be0d2b56725bc7d5427cc59a325c49e 100644 --- a/drivers/scsi/scsi_proc.c +++ 
b/drivers/scsi/scsi_proc.c @@ -43,8 +43,23 @@ static struct proc_dir_entry *proc_scsi; -/* Protect sht->present and sht->proc_dir */ +/* Protects scsi_proc_list */ static DEFINE_MUTEX(global_host_template_mutex); +static LIST_HEAD(scsi_proc_list); + +/** + * struct scsi_proc_entry - (host template, SCSI proc dir) association + * @entry: entry in scsi_proc_list. + * @sht: SCSI host template associated with the procfs directory. + * @proc_dir: procfs directory associated with the SCSI host template. + * @present: Number of SCSI hosts instantiated for @sht. + */ +struct scsi_proc_entry { + struct list_head entry; + const struct scsi_host_template *sht; + struct proc_dir_entry *proc_dir; + unsigned int present; +}; static ssize_t proc_scsi_host_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) @@ -83,6 +98,45 @@ static int proc_scsi_host_open(struct inode *inode, struct file *file) 4 * PAGE_SIZE); } +static struct scsi_proc_entry * +__scsi_lookup_proc_entry(const struct scsi_host_template *sht) +{ + struct scsi_proc_entry *e; + + lockdep_assert_held(&global_host_template_mutex); + + list_for_each_entry(e, &scsi_proc_list, entry) + if (e->sht == sht) + return e; + + return NULL; +} + +static struct scsi_proc_entry * +scsi_lookup_proc_entry(const struct scsi_host_template *sht) +{ + struct scsi_proc_entry *e; + + mutex_lock(&global_host_template_mutex); + e = __scsi_lookup_proc_entry(sht); + mutex_unlock(&global_host_template_mutex); + + return e; +} + +/** + * scsi_template_proc_dir() - returns the procfs dir for a SCSI host template + * @sht: SCSI host template pointer. + */ +struct proc_dir_entry * +scsi_template_proc_dir(const struct scsi_host_template *sht) +{ + struct scsi_proc_entry *e = scsi_lookup_proc_entry(sht); + + return e ? e->proc_dir : NULL; +} +EXPORT_SYMBOL_GPL(scsi_template_proc_dir); + static const struct proc_ops proc_scsi_ops = { .proc_open = proc_scsi_host_open, .proc_release = single_release, @@ -97,35 +151,61 @@ static const struct proc_ops proc_scsi_ops = { * * Sets sht->proc_dir to the new directory. 
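Since sht->proc_dir goes away with this rework, driver code that used to dereference it can query the directory through the newly exported helper instead. A minimal sketch, assuming a driver wants its own entry under its template directory ("stats" and example_proc_ops are illustrative names):

struct proc_dir_entry *dir = scsi_template_proc_dir(shost->hostt);

if (dir)
	proc_create_data("stats", 0444, dir, &example_proc_ops, shost);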
*/ - -void scsi_proc_hostdir_add(struct scsi_host_template *sht) +int scsi_proc_hostdir_add(const struct scsi_host_template *sht) { + struct scsi_proc_entry *e; + int ret; + if (!sht->show_info) - return; + return 0; mutex_lock(&global_host_template_mutex); - if (!sht->present++) { - sht->proc_dir = proc_mkdir(sht->proc_name, proc_scsi); - if (!sht->proc_dir) - printk(KERN_ERR "%s: proc_mkdir failed for %s\n", - __func__, sht->proc_name); + e = __scsi_lookup_proc_entry(sht); + if (!e) { + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) { + ret = -ENOMEM; + goto unlock; + } } + if (e->present++) + goto success; + e->proc_dir = proc_mkdir(sht->proc_name, proc_scsi); + if (!e->proc_dir) { + printk(KERN_ERR "%s: proc_mkdir failed for %s\n", __func__, + sht->proc_name); + ret = -ENOMEM; + goto unlock; + } + e->sht = sht; + list_add_tail(&e->entry, &scsi_proc_list); +success: + e = NULL; + ret = 0; +unlock: mutex_unlock(&global_host_template_mutex); + + kfree(e); + return ret; } /** * scsi_proc_hostdir_rm - remove directory in /proc for a scsi host * @sht: owner of directory */ -void scsi_proc_hostdir_rm(struct scsi_host_template *sht) +void scsi_proc_hostdir_rm(const struct scsi_host_template *sht) { + struct scsi_proc_entry *e; + if (!sht->show_info) return; mutex_lock(&global_host_template_mutex); - if (!--sht->present && sht->proc_dir) { + e = __scsi_lookup_proc_entry(sht); + if (e && !--e->present) { remove_proc_entry(sht->proc_name, proc_scsi); - sht->proc_dir = NULL; + list_del(&e->entry); + kfree(e); } mutex_unlock(&global_host_template_mutex); } @@ -137,20 +217,29 @@ void scsi_proc_hostdir_rm(struct scsi_host_template *sht) */ void scsi_proc_host_add(struct Scsi_Host *shost) { - struct scsi_host_template *sht = shost->hostt; + const struct scsi_host_template *sht = shost->hostt; + struct scsi_proc_entry *e; struct proc_dir_entry *p; char name[10]; - if (!sht->proc_dir) + if (!sht->show_info) return; + e = scsi_lookup_proc_entry(sht); + if (!e) + goto err; + sprintf(name,"%d", shost->host_no); - p = proc_create_data(name, S_IRUGO | S_IWUSR, - sht->proc_dir, &proc_scsi_ops, shost); + p = proc_create_data(name, S_IRUGO | S_IWUSR, e->proc_dir, + &proc_scsi_ops, shost); if (!p) - printk(KERN_ERR "%s: Failed to register host %d in" - "%s\n", __func__, shost->host_no, - sht->proc_name); + goto err; + return; + +err: + shost_printk(KERN_ERR, shost, + "%s: Failed to register host (%s failed)\n", __func__, + e ? 
"proc_create_data()" : "scsi_proc_hostdir_add()"); } /** @@ -159,13 +248,19 @@ void scsi_proc_host_add(struct Scsi_Host *shost) */ void scsi_proc_host_rm(struct Scsi_Host *shost) { + const struct scsi_host_template *sht = shost->hostt; + struct scsi_proc_entry *e; char name[10]; - if (!shost->hostt->proc_dir) + if (!sht->show_info) + return; + + e = scsi_lookup_proc_entry(sht); + if (!e) return; sprintf(name,"%d", shost->host_no); - remove_proc_entry(name, shost->hostt->proc_dir); + remove_proc_entry(name, e->proc_dir); } /** * proc_print_scsidevice - return data about this host diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 0a95fa787fdf4838debff07d09bd06998dc62ae8..7a6904a3928e6f8a20115890827e4f29de6f0339 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1579,7 +1579,8 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel, scsi_complete_async_scans(); if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { - scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); + scsi_probe_and_add_lun(starget, lun, NULL, &sdev, + SCSI_SCAN_RESCAN, hostdata); scsi_autopm_put_host(shost); } mutex_unlock(&shost->scan_mutex); @@ -1918,7 +1919,7 @@ static void do_scsi_scan_host(struct Scsi_Host *shost) msleep(10); } else { scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD, - SCAN_WILD_CARD, 0); + SCAN_WILD_CARD, SCSI_SCAN_INITIAL); } } diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index cac7c902cf70a1716fea0f3ca4fb30bd75fb037d..981d1bab21207166b97f0003830d069bb01ff615 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -441,20 +441,15 @@ static void scsi_device_cls_release(struct device *class_dev) put_device(&sdev->sdev_gendev); } -static void scsi_device_dev_release_usercontext(struct work_struct *work) +static void scsi_device_dev_release(struct device *dev) { - struct scsi_device *sdev; + struct scsi_device *sdev = to_scsi_device(dev); struct device *parent; struct list_head *this, *tmp; struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL; struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL; struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL; unsigned long flags; - struct module *mod; - - sdev = container_of(work, struct scsi_device, ew.work); - - mod = sdev->host->hostt->module; scsi_dh_release_device(sdev); @@ -518,19 +513,6 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) if (parent) put_device(parent); - module_put(mod); -} - -static void scsi_device_dev_release(struct device *dev) -{ - struct scsi_device *sdp = to_scsi_device(dev); - - /* Set module pointer as NULL in case of module unloading */ - if (!try_module_get(sdp->host->hostt->module)) - sdp->host->hostt->module = NULL; - - execute_in_process_context(scsi_device_dev_release_usercontext, - &sdp->ew); } static struct class sdev_class = { diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 8934160c4a33bf013033b76368fb04edb9b732be..0965f8a7134f08079dab58fccd4b1f35bcba128a 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -2530,15 +2530,14 @@ static int fc_vport_match(struct attribute_container *cont, * Notes: * This routine assumes no locks are held on entry. 
*/ -enum blk_eh_timer_return -fc_eh_timed_out(struct scsi_cmnd *scmd) +enum scsi_timeout_action fc_eh_timed_out(struct scsi_cmnd *scmd) { struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); if (rport->port_state == FC_PORTSTATE_BLOCKED) - return BLK_EH_RESET_TIMER; + return SCSI_EH_RESET_TIMER; - return BLK_EH_DONE; + return SCSI_EH_NOT_HANDLED; } EXPORT_SYMBOL(fc_eh_timed_out); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index f473c002fa4d6c2e0712bfc0ada288e1b6847e2b..13cfd3e317cc0690c212cb910e8b1bd6efe23854 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2989,7 +2989,7 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev } static int -iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) +iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) { char *data = (char*)ev + sizeof(*ev); struct iscsi_cls_conn *conn; @@ -3942,7 +3942,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) err = -EINVAL; break; case ISCSI_UEVENT_SET_PARAM: - err = iscsi_set_param(transport, ev); + err = iscsi_if_set_param(transport, ev); break; case ISCSI_UEVENT_CREATE_CONN: case ISCSI_UEVENT_DESTROY_CONN: diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 98a34ed10f1a00c0d214380222deceed18492a0d..87d0fb8dc50327f33abb7d62a22505559018c30e 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -594,13 +594,13 @@ EXPORT_SYMBOL(srp_reconnect_rport); * @scmd: SCSI command. * * If a timeout occurs while an rport is in the blocked state, ask the SCSI - * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core - * handle the timeout (BLK_EH_DONE). + * EH to continue waiting (SCSI_EH_RESET_TIMER). Otherwise let the SCSI core + * handle the timeout (SCSI_EH_NOT_HANDLED). * * Note: This function is called from soft-IRQ context and with the request * queue lock held. */ -enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) +enum scsi_timeout_action srp_timed_out(struct scsi_cmnd *scmd) { struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; @@ -611,7 +611,7 @@ enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) return rport && rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 && i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? 
- BLK_EH_RESET_TIMER : BLK_EH_DONE; + SCSI_EH_RESET_TIMER : SCSI_EH_NOT_HANDLED; } EXPORT_SYMBOL(srp_timed_out); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index eb76ba0550216d7d55d998e4609ed7ea5d4bdb9a..faa2b55d1a21a16cd96aea61adce703ef27f2c7a 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1026,8 +1026,13 @@ static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd) /* flush requests don't perform I/O, zero the S/G table */ memset(&cmd->sdb, 0, sizeof(cmd->sdb)); - cmd->cmnd[0] = SYNCHRONIZE_CACHE; - cmd->cmd_len = 10; + if (cmd->device->use_16_for_sync) { + cmd->cmnd[0] = SYNCHRONIZE_CACHE_16; + cmd->cmd_len = 16; + } else { + cmd->cmnd[0] = SYNCHRONIZE_CACHE; + cmd->cmd_len = 10; + } cmd->transfersize = 0; cmd->allowed = sdkp->max_retries; @@ -1587,9 +1592,12 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) sshdr = &my_sshdr; for (retries = 3; retries > 0; --retries) { - unsigned char cmd[10] = { 0 }; + unsigned char cmd[16] = { 0 }; - cmd[0] = SYNCHRONIZE_CACHE; + if (sdp->use_16_for_sync) + cmd[0] = SYNCHRONIZE_CACHE_16; + else + cmd[0] = SYNCHRONIZE_CACHE; /* * Leave the rest of the command zero to indicate * flush everything. diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index bd15624c63228010ffe68eadd7613a20a6c2287c..b163bf936acc7e7a50aa63b59bedd34bed501123 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -921,9 +921,10 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) return 0; } - /* READ16/WRITE16 is mandatory for ZBC disks */ + /* READ16/WRITE16/SYNC16 is mandatory for ZBC devices */ sdkp->device->use_16_for_rw = 1; sdkp->device->use_10_for_rw = 0; + sdkp->device->use_16_for_sync = 1; if (!blk_queue_is_zoned(q)) { /* diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index e550b12e525a1bdf779da71a2d40a64503e873bb..af27bb0f3133e2c4ebf5392c8d0b6b1261e3e84b 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -1130,7 +1130,7 @@ struct pqi_scsi_dev { u8 phy_id; u8 ncq_prio_enable; u8 ncq_prio_support; - u8 multi_lun_device_lun_count; + u8 lun_count; bool raid_bypass_configured; /* RAID bypass configured */ bool raid_bypass_enabled; /* RAID bypass enabled */ u32 next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW]; @@ -1307,7 +1307,6 @@ struct pqi_ctrl_info { dma_addr_t error_buffer_dma_handle; size_t sg_chain_buffer_length; unsigned int num_queue_groups; - u16 max_hw_queue_index; u16 num_elements_per_iq; u16 num_elements_per_oq; u16 max_inbound_iu_length_per_firmware; @@ -1369,8 +1368,6 @@ struct pqi_ctrl_info { u64 sas_address; struct pqi_io_request *io_request_pool; - u16 next_io_request_slot; - struct pqi_event events[PQI_NUM_SUPPORTED_EVENTS]; struct work_struct event_work; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index b971fbe3b3a173db160702cdcb07095b073bf96e..d0446d4d446564297b8a9985ab9f83b149ae8fe7 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -33,11 +33,11 @@ #define BUILD_TIMESTAMP #endif -#define DRIVER_VERSION "2.1.18-045" +#define DRIVER_VERSION "2.1.20-035" #define DRIVER_MAJOR 2 #define DRIVER_MINOR 1 -#define DRIVER_RELEASE 18 -#define DRIVER_REVISION 45 +#define DRIVER_RELEASE 20 +#define DRIVER_REVISION 35 #define DRIVER_NAME "Microchip SmartPQI Driver (v" \ DRIVER_VERSION BUILD_TIMESTAMP ")" @@ -678,23 +678,36 @@ static inline void pqi_reinit_io_request(struct pqi_io_request *io_request) 
io_request->raid_bypass = false; } -static struct pqi_io_request *pqi_alloc_io_request( - struct pqi_ctrl_info *ctrl_info) +static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) { struct pqi_io_request *io_request; - u16 i = ctrl_info->next_io_request_slot; /* benignly racy */ + u16 i; - while (1) { + if (scmd) { /* SML I/O request */ + u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + + i = blk_mq_unique_tag_to_tag(blk_tag); io_request = &ctrl_info->io_request_pool[i]; - if (atomic_inc_return(&io_request->refcount) == 1) - break; - atomic_dec(&io_request->refcount); - i = (i + 1) % ctrl_info->max_io_slots; + if (atomic_inc_return(&io_request->refcount) > 1) { + atomic_dec(&io_request->refcount); + return NULL; + } + } else { /* IOCTL or driver internal request */ + /* + * benignly racy - may have to wait for an open slot. + * command slot range is scsi_ml_can_queue - + * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)] + */ + i = 0; + while (1) { + io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i]; + if (atomic_inc_return(&io_request->refcount) == 1) + break; + atomic_dec(&io_request->refcount); + i = (i + 1) % PQI_RESERVED_IO_SLOTS; + } } - /* benignly racy */ - ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots; - pqi_reinit_io_request(io_request); return io_request; @@ -1610,9 +1623,7 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, &id_phys->alternate_paths_phys_connector, sizeof(device->phys_connector)); device->bay = id_phys->phys_bay_in_box; - device->multi_lun_device_lun_count = id_phys->multi_lun_device_lun_count; - if (!device->multi_lun_device_lun_count) - device->multi_lun_device_lun_count = 1; + device->lun_count = id_phys->multi_lun_device_lun_count; if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) && id_phys->phy_count) device->phy_id = @@ -1746,7 +1757,7 @@ out: return offline; } -static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, +static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, struct bmic_identify_physical_device *id_phys) { @@ -1763,6 +1774,20 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, return rc; } +static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, + struct bmic_identify_physical_device *id_phys) +{ + int rc; + + rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys); + + if (rc == 0 && device->lun_count == 0) + device->lun_count = 1; + + return rc; +} + static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) { @@ -1897,7 +1922,7 @@ static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi int rc; int lun; - for (lun = 0; lun < device->multi_lun_device_lun_count; lun++) { + for (lun = 0; lun < device->lun_count; lun++) { rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS); if (rc) @@ -2076,6 +2101,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, existing_device->sas_address = new_device->sas_address; existing_device->queue_depth = new_device->queue_depth; existing_device->device_offline = false; + existing_device->lun_count = new_device->lun_count; if (pqi_is_logical_device(existing_device)) { existing_device->is_external_raid_device = new_device->is_external_raid_device; @@ -2108,10 +2134,6 @@ static void pqi_scsi_update_device(struct 
pqi_ctrl_info *ctrl_info, existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type; memcpy(existing_device->box, new_device->box, sizeof(existing_device->box)); memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector)); - - existing_device->multi_lun_device_lun_count = new_device->multi_lun_device_lun_count; - if (existing_device->multi_lun_device_lun_count == 0) - existing_device->multi_lun_device_lun_count = 1; } } @@ -4586,7 +4608,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, goto out; } - io_request = pqi_alloc_io_request(ctrl_info); + io_request = pqi_alloc_io_request(ctrl_info, NULL); put_unaligned_le16(io_request->index, &(((struct pqi_raid_path_request *)request)->request_id)); @@ -5233,7 +5255,6 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) } ctrl_info->num_queue_groups = num_queue_groups; - ctrl_info->max_hw_queue_index = num_queue_groups - 1; /* * Make sure that the max. inbound IU length is an even multiple @@ -5567,7 +5588,9 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, { struct pqi_io_request *io_request; - io_request = pqi_alloc_io_request(ctrl_info); + io_request = pqi_alloc_io_request(ctrl_info, scmd); + if (!io_request) + return SCSI_MLQUEUE_HOST_BUSY; return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, device, scmd, queue_group); @@ -5671,7 +5694,9 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device; device = scmd->device->hostdata; - io_request = pqi_alloc_io_request(ctrl_info); + io_request = pqi_alloc_io_request(ctrl_info, scmd); + if (!io_request) + return SCSI_MLQUEUE_HOST_BUSY; io_request->io_complete_callback = pqi_aio_io_complete; io_request->scmd = scmd; io_request->raid_bypass = raid_bypass; @@ -5743,7 +5768,10 @@ static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request; struct pqi_aio_r1_path_request *r1_request; - io_request = pqi_alloc_io_request(ctrl_info); + io_request = pqi_alloc_io_request(ctrl_info, scmd); + if (!io_request) + return SCSI_MLQUEUE_HOST_BUSY; + io_request->io_complete_callback = pqi_aio_io_complete; io_request->scmd = scmd; io_request->raid_bypass = true; @@ -5801,7 +5829,9 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request; struct pqi_aio_r56_path_request *r56_request; - io_request = pqi_alloc_io_request(ctrl_info); + io_request = pqi_alloc_io_request(ctrl_info, scmd); + if (!io_request) + return SCSI_MLQUEUE_HOST_BUSY; io_request->io_complete_callback = pqi_aio_io_complete; io_request->scmd = scmd; io_request->raid_bypass = true; @@ -5860,13 +5890,10 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) { - u16 hw_queue; - - hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd))); - if (hw_queue > ctrl_info->max_hw_queue_index) - hw_queue = 0; - - return hw_queue; + /* + * We are setting host_tagset = 1 during init. 
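The reworked pqi_alloc_io_request() above relies on host_tagset: blk-mq then hands out tags that are unique across all hardware queues, so the driver can map a SCSI command straight to a pool slot. A minimal sketch of that mapping, assuming ctrl_info->io_request_pool holds at least scsi_ml_can_queue entries:

u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
u16 slot = blk_mq_unique_tag_to_tag(blk_tag);
struct pqi_io_request *io_request = &ctrl_info->io_request_pool[slot];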
+ */ + return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd))); } static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd) @@ -6268,7 +6295,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd struct pqi_scsi_dev *device; device = scmd->device->hostdata; - io_request = pqi_alloc_io_request(ctrl_info); + io_request = pqi_alloc_io_request(ctrl_info, NULL); io_request->io_complete_callback = pqi_lun_reset_complete; io_request->context = &wait; @@ -6484,6 +6511,12 @@ static void pqi_slave_destroy(struct scsi_device *sdev) return; } + device->lun_count--; + if (device->lun_count > 0) { + mutex_unlock(&ctrl_info->scan_mutex); + return; + } + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); list_del(&device->scsi_device_list_entry); spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); @@ -7237,7 +7270,7 @@ static ssize_t pqi_raid_level_show(struct device *dev, return -ENODEV; } - if (pqi_is_logical_device(device)) + if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) raid_level = pqi_raid_level_to_string(device->raid_level); else raid_level = "N/A"; @@ -7405,7 +7438,6 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) shost->max_channel = PQI_MAX_BUS; shost->max_cmd_len = MAX_COMMAND_SIZE; shost->max_lun = PQI_MAX_LUNS_PER_DEVICE; - shost->max_lun = ~0; shost->max_id = ~0; shost->max_sectors = ctrl_info->max_sectors; shost->can_queue = ctrl_info->scsi_ml_can_queue; @@ -7972,7 +8004,7 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) struct pqi_config_table *config_table; struct pqi_config_table_section_header *section; struct pqi_config_table_section_info section_info; - struct pqi_config_table_section_info feature_section_info; + struct pqi_config_table_section_info feature_section_info = {0}; table_length = ctrl_info->config_table_length; if (table_length == 0) @@ -9008,6 +9040,7 @@ static void pqi_pci_remove(struct pci_dev *pci_dev) { struct pqi_ctrl_info *ctrl_info; u16 vendor_id; + int rc; ctrl_info = pci_get_drvdata(pci_dev); if (!ctrl_info) @@ -9019,6 +9052,13 @@ static void pqi_pci_remove(struct pci_dev *pci_dev) else ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL; + if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) { + rc = pqi_flush_cache(ctrl_info, RESTART); + if (rc) + dev_err(&pci_dev->dev, + "unable to flush controller cache during remove\n"); + } + pqi_remove_ctrl(ctrl_info); } @@ -9302,6 +9342,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x1109) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x110b) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x8460) @@ -9402,6 +9446,22 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x0072) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0086) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0087) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0088) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0089) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd227) @@ -9650,6 +9710,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1474) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1475) + }, { 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1480) @@ -9706,6 +9770,14 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14c2) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14c3) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14c4) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14d0) @@ -9942,6 +10014,18 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0623) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1e93, 0x1000) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1e93, 0x1001) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1e93, 0x1002) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index 9b2b5f8c23b9a3d628c7c50067dbb5fd4c762ce2..8fbf3c1b1311d805173e130101abb6a527ccf3fd 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -304,6 +304,9 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid) ret); put_device(&snic->shost->shost_gendev); + spin_lock_irqsave(snic->shost->host_lock, flags); + list_del(&tgt->list); + spin_unlock_irqrestore(snic->shost->host_lock, flags); kfree(tgt); tgt = NULL; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 3c5b7e4227b25e3d44795d26ad70974a32add542..d7a84c0bfaeb73570523b95f6bb4ea82a4d06b66 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1651,13 +1651,13 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) * be unbounded on Azure. Reset the timer unconditionally to give the host a * chance to perform EH. */ -static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd) +static enum scsi_timeout_action storvsc_eh_timed_out(struct scsi_cmnd *scmnd) { #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) if (scmnd->device->host->transportt == fc_transport_template) return fc_eh_timed_out(scmnd); #endif - return BLK_EH_RESET_TIMER; + return SCSI_EH_RESET_TIMER; } static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd) diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 2a79ab16134b15d449adc947abf7d22b1eb169e8..d07d24c06b546fcd28ef93847689fed3e4431fb3 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -731,9 +731,9 @@ static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq) * latencies might be higher than on bare metal. Reset the timer * unconditionally to give the host a chance to perform EH. 
-static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd) +static enum scsi_timeout_action virtscsi_eh_timed_out(struct scsi_cmnd *scmnd) { - return BLK_EH_RESET_TIMER; + return SCSI_EH_RESET_TIMER; } static struct scsi_host_template virtscsi_host_template = { diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c index c0c4f895d76e204561d33156db434ca81763d4c2..400b7b385a443e93574c4ef8d3f814a2078e9bbd 100644 --- a/drivers/slimbus/qcom-ctrl.c +++ b/drivers/slimbus/qcom-ctrl.c @@ -488,7 +488,6 @@ static int qcom_slim_probe(struct platform_device *pdev) { struct qcom_slim_ctrl *ctrl; struct slim_controller *sctrl; - struct resource *slim_mem; int ret, ver; ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL); @@ -519,8 +518,7 @@ static int qcom_slim_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ctrl); dev_set_drvdata(ctrl->dev, ctrl); - slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); - ctrl->base = devm_ioremap_resource(ctrl->dev, slim_mem); + ctrl->base = devm_platform_ioremap_resource_byname(pdev, "ctrl"); if (IS_ERR(ctrl->base)) return PTR_ERR(ctrl->base); @@ -718,7 +716,6 @@ static const struct dev_pm_ops qcom_slim_dev_pm_ops = { static const struct of_device_id qcom_slim_dt_match[] = { { .compatible = "qcom,slim", }, - { .compatible = "qcom,apq8064-slim", }, {} }; diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index 76c5e446d2433c4bcee6666af5ca78db89b55c62..77aa6d26476cd24b5eba6a663671d7677055538c 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -763,7 +763,14 @@ static irqreturn_t qcom_slim_ngd_interrupt(int irq, void *d) { struct qcom_slim_ngd_ctrl *ctrl = d; void __iomem *base = ctrl->ngd->base; - u32 stat = readl(base + NGD_INT_STAT); + u32 stat; + + if (pm_runtime_suspended(ctrl->ctrl.dev)) { + dev_warn_once(ctrl->dev, "Interrupt received while suspended\n"); + return IRQ_NONE; + } + + stat = readl(base + NGD_INT_STAT); if ((stat & NGD_INT_MSG_BUF_CONTE) || (stat & NGD_INT_MSG_TX_INVAL) || (stat & NGD_INT_DEV_ERR) || @@ -912,21 +919,77 @@ static int qcom_slim_ngd_xfer_msg_sync(struct slim_controller *ctrl, DECLARE_COMPLETION_ONSTACK(done); int ret, timeout; - pm_runtime_get_sync(ctrl->dev); + ret = pm_runtime_get_sync(ctrl->dev); + if (ret < 0) + goto pm_put; txn->comp = &done; ret = qcom_slim_ngd_xfer_msg(ctrl, txn); if (ret) - return ret; + goto pm_put; timeout = wait_for_completion_timeout(&done, HZ); if (!timeout) { dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc, txn->mt); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto pm_put; } return 0; + +pm_put: + pm_runtime_put(ctrl->dev); + + return ret; +} + +static int qcom_slim_calc_coef(struct slim_stream_runtime *rt, int *exp) +{ + struct slim_controller *ctrl = rt->dev->ctrl; + int coef; + + if (rt->ratem * ctrl->a_framer->superfreq < rt->rate) + rt->ratem++; + + coef = rt->ratem; + *exp = 0; + + /* + * CRM = C x (2^E) is the formula we are using. + * Here C is the coefficient and E is the exponent. + * CRM is the Channel Rate Multiplier. + * Coefficient should be either 1 or 3 and the exponent + * should be an integer between 0 and 9, inclusive. + */ + while (1) { + while ((coef & 0x1) != 0x1) { + coef >>= 1; + *exp = *exp + 1; + } + + if (coef <= 3) + break; + + coef++; + } + + /* + * we rely on the coef value (1 or 3) to set a bit + * in the slimbus message packet. This bit is + * BIT(5) which is the segment rate coefficient.
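+ * For example (illustrative, assuming a framer superfreq of 4000 Hz): a 48000 Hz stream gives CRM = 12 = 3 x (2^2), so C = 3 and E = 2; C = 3 is returned as coef 1, which makes the caller set BIT(5).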
+ */ + if (coef == 1) { + if (*exp > 9) + return -EIO; + coef = 0; + } else { + if (*exp > 8) + return -EIO; + coef = 1; + } + + return coef; } static int qcom_slim_ngd_enable_stream(struct slim_stream_runtime *rt) @@ -952,16 +1015,22 @@ static int qcom_slim_ngd_enable_stream(struct slim_stream_runtime *rt) struct slim_port *port = &rt->ports[i]; if (txn.msg->num_bytes == 0) { - int seg_interval = SLIM_SLOTS_PER_SUPERFRAME/rt->ratem; - int exp; + int exp = 0, coef = 0; wbuf[txn.msg->num_bytes++] = sdev->laddr; wbuf[txn.msg->num_bytes] = rt->bps >> 2 | (port->ch.aux_fmt << 6); - /* Data channel segment interval not multiple of 3 */ - exp = seg_interval % 3; - if (exp) + /* calculate coef dynamically */ + coef = qcom_slim_calc_coef(rt, &exp); + if (coef < 0) { + dev_err(&sdev->dev, + "%s: error calculating coef %d\n", __func__, + coef); + return -EIO; + } + + if (coef) wbuf[txn.msg->num_bytes] |= BIT(5); txn.msg->num_bytes++; @@ -1136,6 +1205,12 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl) return 0; } + /* + * Reinitialize only when registers are not retained or when enumeration + * is lost for ngd. + */ + reinit_completion(&ctrl->reconf); + writel_relaxed(DEF_NGD_INT_MASK, ngd->base + NGD_INT_EN); rx_msgq = readl_relaxed(ngd->base + NGD_RX_MSGQ_CFG); @@ -1528,7 +1603,6 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct qcom_slim_ngd_ctrl *ctrl; - struct resource *res; int ret; struct pdr_service *pds; @@ -1538,8 +1612,7 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev) dev_set_drvdata(dev, ctrl); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctrl->base = devm_ioremap_resource(dev, res); + ctrl->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(ctrl->base)) return PTR_ERR(ctrl->base); diff --git a/drivers/slimbus/stream.c b/drivers/slimbus/stream.c index 73a2aa362957206c29d0c2afa50fe71275ebca61..1d6b38657917380d878a4c956bbaf4b9634fa6bc 100644 --- a/drivers/slimbus/stream.c +++ b/drivers/slimbus/stream.c @@ -204,7 +204,7 @@ int slim_stream_prepare(struct slim_stream_runtime *rt, { struct slim_controller *ctrl = rt->dev->ctrl; struct slim_port *port; - int num_ports, i, port_id; + int num_ports, i, port_id, prrate; if (rt->ports) { dev_err(&rt->dev->dev, "Stream already Prepared\n"); @@ -221,6 +221,13 @@ int slim_stream_prepare(struct slim_stream_runtime *rt, rt->bps = cfg->bps; rt->direction = cfg->direction; + prrate = slim_get_prate_code(cfg->rate); + if (prrate < 0) { + dev_err(&rt->dev->dev, "Cannot get presence rate for rate %d Hz\n", + cfg->rate); + return prrate; + } + if (cfg->rate % ctrl->a_framer->superfreq) { /* * data rate not exactly multiple of super frame, @@ -241,7 +248,7 @@ int slim_stream_prepare(struct slim_stream_runtime *rt, port = &rt->ports[i]; port->state = SLIM_PORT_DISCONNECTED; port->id = port_id; - port->ch.prrate = slim_get_prate_code(cfg->rate); + port->ch.prrate = prrate; port->ch.id = cfg->chs[i]; port->ch.data_fmt = SLIM_CH_DATA_FMT_NOT_DEFINED; port->ch.aux_fmt = SLIM_CH_AUX_FMT_NOT_APPLICABLE; @@ -407,6 +414,9 @@ int slim_stream_disable(struct slim_stream_runtime *stream) struct slim_controller *ctrl = stream->dev->ctrl; int ret, i; + if (!stream->ports || !stream->num_ports) + return -EINVAL; + if (ctrl->disable_stream) ctrl->disable_stream(stream); @@ -438,6 +448,9 @@ int slim_stream_unprepare(struct slim_stream_runtime *stream) { int i; + if (!stream->ports || !stream->num_ports) + return -EINVAL; + for (i = 0; i < 
stream->num_ports; i++) slim_disconnect_port(stream, &stream->ports[i]); diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c index 09e3c38b8466434b5dfd9eb732494e028670c59d..474b272f9b02d6cf305f59bf238762d9db93827d 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.c +++ b/drivers/soc/mediatek/mtk-pm-domains.c @@ -275,9 +275,9 @@ static int scpsys_power_off(struct generic_pm_domain *genpd) clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks); /* subsys power off */ - regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT); regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT); regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT); + regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT); regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT); regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT); diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index a0ceeede450f1d580175f272e30cf37b2c1036f8..f0475b93ca730624a3fb0a383f3ee58c366bbb5f 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -81,19 +81,31 @@ */ #define MAX_CLK_PERF_LEVEL 32 -#define NUM_AHB_CLKS 2 +#define MAX_CLKS 2 /** * struct geni_wrapper - Data structure to represent the QUP Wrapper Core * @dev: Device pointer of the QUP wrapper core * @base: Base address of this instance of QUP wrapper core - * @ahb_clks: Handle to the primary & secondary AHB clocks + * @clks: Handle to the primary & optional secondary AHB clocks + * @num_clks: Count of clocks * @to_core: Core ICC path */ struct geni_wrapper { struct device *dev; void __iomem *base; - struct clk_bulk_data ahb_clks[NUM_AHB_CLKS]; + struct clk_bulk_data clks[MAX_CLKS]; + unsigned int num_clks; +}; + +/** + * struct geni_se_desc - Data structure to represent the QUP Wrapper resources + * @clks: Name of the primary & optional secondary AHB clocks + * @num_clks: Count of clock names + */ +struct geni_se_desc { + unsigned int num_clks; + const char * const *clks; }; static const char * const icc_path_names[] = {"qup-core", "qup-config", @@ -496,8 +508,7 @@ static void geni_se_clks_off(struct geni_se *se) struct geni_wrapper *wrapper = se->wrapper; clk_disable_unprepare(se->clk); - clk_bulk_disable_unprepare(ARRAY_SIZE(wrapper->ahb_clks), - wrapper->ahb_clks); + clk_bulk_disable_unprepare(wrapper->num_clks, wrapper->clks); } /** @@ -528,15 +539,13 @@ static int geni_se_clks_on(struct geni_se *se) int ret; struct geni_wrapper *wrapper = se->wrapper; - ret = clk_bulk_prepare_enable(ARRAY_SIZE(wrapper->ahb_clks), - wrapper->ahb_clks); + ret = clk_bulk_prepare_enable(wrapper->num_clks, wrapper->clks); if (ret) return ret; ret = clk_prepare_enable(se->clk); if (ret) - clk_bulk_disable_unprepare(ARRAY_SIZE(wrapper->ahb_clks), - wrapper->ahb_clks); + clk_bulk_disable_unprepare(wrapper->num_clks, wrapper->clks); return ret; } @@ -887,11 +896,33 @@ static int geni_se_probe(struct platform_device *pdev) return PTR_ERR(wrapper->base); if (!has_acpi_companion(&pdev->dev)) { - wrapper->ahb_clks[0].id = "m-ahb"; - wrapper->ahb_clks[1].id = "s-ahb"; - ret = devm_clk_bulk_get(dev, NUM_AHB_CLKS, wrapper->ahb_clks); + const struct geni_se_desc *desc; + int i; + + desc = device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + wrapper->num_clks = min_t(unsigned int, desc->num_clks, MAX_CLKS); + + for (i = 0; i < wrapper->num_clks; ++i) + wrapper->clks[i].id = desc->clks[i]; + + ret = 
of_count_phandle_with_args(dev->of_node, "clocks", "#clock-cells"); + if (ret < 0) { + dev_err(dev, "invalid clocks property at %pOF\n", dev->of_node); + return ret; + } + + if (ret < wrapper->num_clks) { + dev_err(dev, "invalid clocks count at %pOF, expected %d entries\n", + dev->of_node, wrapper->num_clks); + return -EINVAL; + } + + ret = devm_clk_bulk_get(dev, wrapper->num_clks, wrapper->clks); if (ret) { - dev_err(dev, "Err getting AHB clks %d\n", ret); + dev_err(dev, "Err getting clks %d\n", ret); return ret; } } @@ -901,8 +932,28 @@ static int geni_se_probe(struct platform_device *pdev) return devm_of_platform_populate(dev); } +static const char * const qup_clks[] = { + "m-ahb", + "s-ahb", +}; + +static const struct geni_se_desc qup_desc = { + .clks = qup_clks, + .num_clks = ARRAY_SIZE(qup_clks), +}; + +static const char * const i2c_master_hub_clks[] = { + "s-ahb", +}; + +static const struct geni_se_desc i2c_master_hub_desc = { + .clks = i2c_master_hub_clks, + .num_clks = ARRAY_SIZE(i2c_master_hub_clks), +}; + static const struct of_device_id geni_se_dt_match[] = { - { .compatible = "qcom,geni-se-qup", }, + { .compatible = "qcom,geni-se-qup", .data = &qup_desc }, + { .compatible = "qcom,geni-se-i2c-master-hub", .data = &i2c_master_hub_desc }, {} }; MODULE_DEVICE_TABLE(of, geni_se_dt_match); diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index 5f53242946369bc660813ad5ab89768c5f8a7244..3658fb0f0c5b1e50a657b2413684b5fb20ddef88 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -95,6 +95,7 @@ config ARCH_TEGRA_210_SOC config ARCH_TEGRA_186_SOC bool "NVIDIA Tegra186 SoC" + depends on !CPU_BIG_ENDIAN select MAILBOX select TEGRA_BPMP select TEGRA_HSP_MBOX @@ -110,6 +111,7 @@ config ARCH_TEGRA_186_SOC config ARCH_TEGRA_194_SOC bool "NVIDIA Tegra194 SoC" + depends on !CPU_BIG_ENDIAN select MAILBOX select PINCTRL_TEGRA194 select TEGRA_BPMP @@ -121,6 +123,7 @@ config ARCH_TEGRA_194_SOC config ARCH_TEGRA_234_SOC bool "NVIDIA Tegra234 SoC" + depends on !CPU_BIG_ENDIAN select MAILBOX select TEGRA_BPMP select TEGRA_HSP_MBOX diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile index 986776787b9eaddf17ef3ca2ff472623eb94fead..ca97414ada7057539cb53cc99aeed10dd17f3e55 100644 --- a/drivers/soundwire/Makefile +++ b/drivers/soundwire/Makefile @@ -20,7 +20,7 @@ soundwire-cadence-y := cadence_master.o obj-$(CONFIG_SOUNDWIRE_CADENCE) += soundwire-cadence.o #Intel driver -soundwire-intel-y := intel.o intel_init.o dmi-quirks.o +soundwire-intel-y := intel.o intel_auxdevice.o intel_init.o dmi-quirks.o obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel.o #Qualcomm driver diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index 93929f19d0831b511fb83c0c4d9c6bd428b8af36..a1de363eba3ff035f7a0028910392fd065c20741 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -1707,47 +1707,45 @@ int cdns_set_sdw_stream(struct snd_soc_dai *dai, void *stream, int direction) { struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); - struct sdw_cdns_dma_data *dma; + struct sdw_cdns_dai_runtime *dai_runtime; + + dai_runtime = cdns->dai_runtime_array[dai->id]; if (stream) { /* first paranoia check */ - if (direction == SNDRV_PCM_STREAM_PLAYBACK) - dma = dai->playback_dma_data; - else - dma = dai->capture_dma_data; - - if (dma) { + if (dai_runtime) { dev_err(dai->dev, - "dma_data already allocated for dai %s\n", + "dai_runtime already allocated for dai %s\n", dai->name); return -EINVAL; } - /* allocate and 
set dma info */ - dma = kzalloc(sizeof(*dma), GFP_KERNEL); - if (!dma) + /* allocate and set dai_runtime info */ + dai_runtime = kzalloc(sizeof(*dai_runtime), GFP_KERNEL); + if (!dai_runtime) return -ENOMEM; - dma->stream_type = SDW_STREAM_PCM; + dai_runtime->stream_type = SDW_STREAM_PCM; - dma->bus = &cdns->bus; - dma->link_id = cdns->instance; + dai_runtime->bus = &cdns->bus; + dai_runtime->link_id = cdns->instance; - dma->stream = stream; + dai_runtime->stream = stream; + dai_runtime->direction = direction; - if (direction == SNDRV_PCM_STREAM_PLAYBACK) - dai->playback_dma_data = dma; - else - dai->capture_dma_data = dma; + cdns->dai_runtime_array[dai->id] = dai_runtime; } else { - /* for NULL stream we release allocated dma_data */ - if (direction == SNDRV_PCM_STREAM_PLAYBACK) { - kfree(dai->playback_dma_data); - dai->playback_dma_data = NULL; - } else { - kfree(dai->capture_dma_data); - dai->capture_dma_data = NULL; + /* second paranoia check */ + if (!dai_runtime) { + dev_err(dai->dev, + "dai_runtime not allocated for dai %s\n", + dai->name); + return -EINVAL; } + + /* for NULL stream we release allocated dai_runtime */ + kfree(dai_runtime); + cdns->dai_runtime_array[dai->id] = NULL; } return 0; } diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h index ca9e805bab88f383fdd5d1a8e2acae535389c0a0..0434d70d4b1f5fc11803b69b56d23b74f566ad68 100644 --- a/drivers/soundwire/cadence_master.h +++ b/drivers/soundwire/cadence_master.h @@ -70,7 +70,7 @@ struct sdw_cdns_stream_config { }; /** - * struct sdw_cdns_dma_data: Cadence DMA data + * struct sdw_cdns_dai_runtime: Cadence DAI runtime data * * @name: SoundWire stream name * @stream: stream runtime @@ -81,8 +81,9 @@ struct sdw_cdns_stream_config { * @hw_params: hw_params to be applied in .prepare step * @suspended: status set when suspended, to be used in .prepare * @paused: status set in .trigger, to be used in suspend + * @direction: stream direction */ -struct sdw_cdns_dma_data { +struct sdw_cdns_dai_runtime { char *name; struct sdw_stream_runtime *stream; struct sdw_cdns_pdi *pdi; @@ -92,6 +93,7 @@ struct sdw_cdns_dma_data { struct snd_pcm_hw_params *hw_params; bool suspended; bool paused; + int direction; }; /** @@ -108,6 +110,7 @@ struct sdw_cdns_dma_data { * @registers: Cadence registers * @link_up: Link status * @msg_count: Messages sent on bus + * @dai_runtime_array: runtime context for each allocated DAI. 
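The new dai_runtime_array replaces the per-direction dma_data pointers with one slot per DAI, indexed by dai->id. A hedged sketch of how a bus driver would size it (num_dais and the allocation site are assumptions, not shown in this hunk):

cdns->dai_runtime_array = devm_kcalloc(cdns->dev, num_dais,
				       sizeof(*cdns->dai_runtime_array),
				       GFP_KERNEL);
if (!cdns->dai_runtime_array)
	return -ENOMEM;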
*/ struct sdw_cdns { struct device *dev; @@ -135,6 +138,8 @@ struct sdw_cdns { struct work_struct work; struct list_head list; + + struct sdw_cdns_dai_runtime **dai_runtime_array; }; #define bus_to_cdns(_bus) container_of(_bus, struct sdw_cdns, bus) diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c index f81cdd83ec26e6b1f59cffee8f48fbdb66c220b8..7969881f126dc1d03fe428dae9c98aa8ce74920d 100644 --- a/drivers/soundwire/dmi-quirks.c +++ b/drivers/soundwire/dmi-quirks.c @@ -90,6 +90,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = { }, .driver_data = (void *)intel_tgl_bios, }, + { + /* quirk used for NUC15 LAPBC710 skew */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "LAPBC710"), + }, + .driver_data = (void *)intel_tgl_bios, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 8c76541d553f116d951f27cf4f8adbf39fcbe9cd..bc9c50bacc494824d7fbbaea01c818219d4764ae 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -8,10 +8,7 @@ #include #include #include -#include -#include #include -#include #include #include #include @@ -22,27 +19,6 @@ #include "bus.h" #include "intel.h" -/* IDA min selected to avoid conflicts with HDaudio/iDISP SDI values */ -#define INTEL_DEV_NUM_IDA_MIN 4 - -#define INTEL_MASTER_SUSPEND_DELAY_MS 3000 -#define INTEL_MASTER_RESET_ITERATIONS 10 - -/* - * debug/config flags for the Intel SoundWire Master. - * - * Since we may have multiple masters active, we can have up to 8 - * flags reused in each byte, with master0 using the ls-byte, etc. - */ - -#define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME BIT(0) -#define SDW_INTEL_MASTER_DISABLE_CLOCK_STOP BIT(1) -#define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE BIT(2) -#define SDW_INTEL_MASTER_DISABLE_MULTI_LINK BIT(3) - -static int md_flags; -module_param_named(sdw_md_flags, md_flags, int, 0444); -MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)"); enum intel_pdi_type { INTEL_PDI_IN = 0, @@ -745,10 +721,10 @@ static int intel_free_stream(struct sdw_intel *sdw, * bank switch routines */ -static int intel_pre_bank_switch(struct sdw_bus *bus) +static int intel_pre_bank_switch(struct sdw_intel *sdw) { - struct sdw_cdns *cdns = bus_to_cdns(bus); - struct sdw_intel *sdw = cdns_to_intel(cdns); + struct sdw_cdns *cdns = &sdw->cdns; + struct sdw_bus *bus = &cdns->bus; /* Write to register only for multi-link */ if (!bus->multi_link) @@ -759,10 +735,10 @@ static int intel_pre_bank_switch(struct sdw_bus *bus) return 0; } -static int intel_post_bank_switch(struct sdw_bus *bus) +static int intel_post_bank_switch(struct sdw_intel *sdw) { - struct sdw_cdns *cdns = bus_to_cdns(bus); - struct sdw_intel *sdw = cdns_to_intel(cdns); + struct sdw_cdns *cdns = &sdw->cdns; + struct sdw_bus *bus = &cdns->bus; void __iomem *shim = sdw->link_res->shim; int sync_reg, ret; @@ -824,15 +800,15 @@ static int intel_hw_params(struct snd_pcm_substream *substream, { struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); struct sdw_intel *sdw = cdns_to_intel(cdns); - struct sdw_cdns_dma_data *dma; + struct sdw_cdns_dai_runtime *dai_runtime; struct sdw_cdns_pdi *pdi; struct sdw_stream_config sconfig; struct sdw_port_config *pconfig; int ch, dir; int ret; - dma = snd_soc_dai_get_dma_data(dai, substream); - if (!dma) + dai_runtime = cdns->dai_runtime_array[dai->id]; + if (!dai_runtime) return -EIO; ch = params_channels(params); @@ -854,10 +830,10 @@ static 
int intel_hw_params(struct snd_pcm_substream *substream, sdw_cdns_config_stream(cdns, ch, dir, pdi); /* store pdi and hw_params, may be needed in prepare step */ - dma->paused = false; - dma->suspended = false; - dma->pdi = pdi; - dma->hw_params = params; + dai_runtime->paused = false; + dai_runtime->suspended = false; + dai_runtime->pdi = pdi; + dai_runtime->hw_params = params; /* Inform DSP about PDI stream number */ ret = intel_params_stream(sdw, substream->stream, dai, params, @@ -869,7 +845,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream, sconfig.direction = dir; sconfig.ch_count = ch; sconfig.frame_rate = params_rate(params); - sconfig.type = dma->stream_type; + sconfig.type = dai_runtime->stream_type; sconfig.bps = snd_pcm_format_width(params_format(params)); @@ -884,7 +860,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream, pconfig->ch_mask = (1 << ch) - 1; ret = sdw_stream_add_master(&cdns->bus, &sconfig, - pconfig, 1, dma->stream); + pconfig, 1, dai_runtime->stream); if (ret) dev_err(cdns->dev, "add master to stream failed:%d\n", ret); @@ -898,19 +874,19 @@ static int intel_prepare(struct snd_pcm_substream *substream, { struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); struct sdw_intel *sdw = cdns_to_intel(cdns); - struct sdw_cdns_dma_data *dma; + struct sdw_cdns_dai_runtime *dai_runtime; int ch, dir; int ret = 0; - dma = snd_soc_dai_get_dma_data(dai, substream); - if (!dma) { - dev_err(dai->dev, "failed to get dma data in %s\n", + dai_runtime = cdns->dai_runtime_array[dai->id]; + if (!dai_runtime) { + dev_err(dai->dev, "failed to get dai runtime in %s\n", __func__); return -EIO; } - if (dma->suspended) { - dma->suspended = false; + if (dai_runtime->suspended) { + dai_runtime->suspended = false; /* * .prepare() is called after system resume, where we @@ -921,21 +897,21 @@ static int intel_prepare(struct snd_pcm_substream *substream, */ /* configure stream */ - ch = params_channels(dma->hw_params); + ch = params_channels(dai_runtime->hw_params); if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) dir = SDW_DATA_DIR_RX; else dir = SDW_DATA_DIR_TX; - intel_pdi_shim_configure(sdw, dma->pdi); - intel_pdi_alh_configure(sdw, dma->pdi); - sdw_cdns_config_stream(cdns, ch, dir, dma->pdi); + intel_pdi_shim_configure(sdw, dai_runtime->pdi); + intel_pdi_alh_configure(sdw, dai_runtime->pdi); + sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi); /* Inform DSP about PDI stream number */ ret = intel_params_stream(sdw, substream->stream, dai, - dma->hw_params, + dai_runtime->hw_params, sdw->instance, - dma->pdi->intel_alh_id); + dai_runtime->pdi->intel_alh_id); } return ret; @@ -946,11 +922,11 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); struct sdw_intel *sdw = cdns_to_intel(cdns); - struct sdw_cdns_dma_data *dma; + struct sdw_cdns_dai_runtime *dai_runtime; int ret; - dma = snd_soc_dai_get_dma_data(dai, substream); - if (!dma) + dai_runtime = cdns->dai_runtime_array[dai->id]; + if (!dai_runtime) return -EIO; /* @@ -959,10 +935,10 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) * DEPREPARED for the first cpu-dai and to RELEASED for the last * cpu-dai. 
*/ - ret = sdw_stream_remove_master(&cdns->bus, dma->stream); + ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream); if (ret < 0) { dev_err(dai->dev, "remove master from stream %s failed: %d\n", - dma->stream->name, ret); + dai_runtime->stream->name, ret); return ret; } @@ -972,8 +948,8 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) return ret; } - dma->hw_params = NULL; - dma->pdi = NULL; + dai_runtime->hw_params = NULL; + dai_runtime->pdi = NULL; return 0; } @@ -996,17 +972,14 @@ static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai, static void *intel_get_sdw_stream(struct snd_soc_dai *dai, int direction) { - struct sdw_cdns_dma_data *dma; - - if (direction == SNDRV_PCM_STREAM_PLAYBACK) - dma = dai->playback_dma_data; - else - dma = dai->capture_dma_data; + struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); + struct sdw_cdns_dai_runtime *dai_runtime; - if (!dma) + dai_runtime = cdns->dai_runtime_array[dai->id]; + if (!dai_runtime) return ERR_PTR(-EINVAL); - return dma->stream; + return dai_runtime->stream; } static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) @@ -1014,7 +987,7 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); struct sdw_intel *sdw = cdns_to_intel(cdns); struct sdw_intel_link_res *res = sdw->link_res; - struct sdw_cdns_dma_data *dma; + struct sdw_cdns_dai_runtime *dai_runtime; int ret = 0; /* @@ -1025,9 +998,9 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn if (res->ops && res->ops->trigger) res->ops->trigger(dai, cmd, substream->stream); - dma = snd_soc_dai_get_dma_data(dai, substream); - if (!dma) { - dev_err(dai->dev, "failed to get dma data in %s\n", + dai_runtime = cdns->dai_runtime_array[dai->id]; + if (!dai_runtime) { + dev_err(dai->dev, "failed to get dai runtime in %s\n", __func__); return -EIO; } @@ -1042,17 +1015,17 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn * the .trigger callback is used to track the suspend case only. 
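+	 * The suspended flag set below is checked by intel_prepare() after
+	 * system resume to restore the PDI and stream configuration.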
*/ - dma->suspended = true; + dai_runtime->suspended = true; ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - dma->paused = true; + dai_runtime->paused = true; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - dma->paused = false; + dai_runtime->paused = false; break; default: break; @@ -1091,27 +1064,21 @@ static int intel_component_dais_suspend(struct snd_soc_component *component) for_each_component_dais(component, dai) { struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); struct sdw_intel *sdw = cdns_to_intel(cdns); - struct sdw_cdns_dma_data *dma; - int stream; + struct sdw_cdns_dai_runtime *dai_runtime; int ret; - dma = dai->playback_dma_data; - stream = SNDRV_PCM_STREAM_PLAYBACK; - if (!dma) { - dma = dai->capture_dma_data; - stream = SNDRV_PCM_STREAM_CAPTURE; - } + dai_runtime = cdns->dai_runtime_array[dai->id]; - if (!dma) + if (!dai_runtime) continue; - if (dma->suspended) + if (dai_runtime->suspended) continue; - if (dma->paused) { - dma->suspended = true; + if (dai_runtime->paused) { + dai_runtime->suspended = true; - ret = intel_free_stream(sdw, stream, dai, sdw->instance); + ret = intel_free_stream(sdw, dai_runtime->direction, dai, sdw->instance); if (ret < 0) return ret; } @@ -1178,6 +1145,7 @@ static int intel_create_dai(struct sdw_cdns *cdns, static int intel_register_dai(struct sdw_intel *sdw) { + struct sdw_cdns_dai_runtime **dai_runtime_array; struct sdw_cdns_stream_config config; struct sdw_cdns *cdns = &sdw->cdns; struct sdw_cdns_streams *stream; @@ -1195,6 +1163,13 @@ static int intel_register_dai(struct sdw_intel *sdw) /* DAIs are created based on total number of PDIs supported */ num_dai = cdns->pcm.num_pdi; + dai_runtime_array = devm_kcalloc(cdns->dev, num_dai, + sizeof(struct sdw_cdns_dai_runtime *), + GFP_KERNEL); + if (!dai_runtime_array) + return -ENOMEM; + cdns->dai_runtime_array = dai_runtime_array; + dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL); if (!dais) return -ENOMEM; @@ -1423,620 +1398,26 @@ static int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop) return 0; } -static int sdw_master_read_intel_prop(struct sdw_bus *bus) -{ - struct sdw_master_prop *prop = &bus->prop; - struct fwnode_handle *link; - char name[32]; - u32 quirk_mask; - - /* Find master handle */ - snprintf(name, sizeof(name), - "mipi-sdw-link-%d-subproperties", bus->link_id); - - link = device_get_named_child_node(bus->dev, name); - if (!link) { - dev_err(bus->dev, "Master node %s not found\n", name); - return -EIO; - } - - fwnode_property_read_u32(link, - "intel-sdw-ip-clock", - &prop->mclk_freq); - - /* the values reported by BIOS are the 2x clock, not the bus clock */ - prop->mclk_freq /= 2; - - fwnode_property_read_u32(link, - "intel-quirk-mask", - &quirk_mask); - - if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE) - prop->hw_disabled = true; +const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops = { + .debugfs_init = intel_debugfs_init, + .debugfs_exit = intel_debugfs_exit, - prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH | - SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY; + .register_dai = intel_register_dai, - return 0; -} + .check_clock_stop = intel_check_clock_stop, + .start_bus = intel_start_bus, + .start_bus_after_reset = intel_start_bus_after_reset, + .start_bus_after_clock_stop = intel_start_bus_after_clock_stop, + .stop_bus = intel_stop_bus, -static int intel_prop_read(struct sdw_bus *bus) -{ - /* Initialize with default handler to read all DisCo properties */ 
- sdw_master_read_prop(bus); + .link_power_up = intel_link_power_up, + .link_power_down = intel_link_power_down, - /* read Intel-specific properties */ - sdw_master_read_intel_prop(bus); + .shim_check_wake = intel_shim_check_wake, + .shim_wake = intel_shim_wake, - return 0; -} - -static struct sdw_master_ops sdw_intel_ops = { - .read_prop = intel_prop_read, - .override_adr = sdw_dmi_override_adr, - .xfer_msg = cdns_xfer_msg, - .xfer_msg_defer = cdns_xfer_msg_defer, - .reset_page_addr = cdns_reset_page_addr, - .set_bus_conf = cdns_bus_conf, .pre_bank_switch = intel_pre_bank_switch, .post_bank_switch = intel_post_bank_switch, - .read_ping_status = cdns_read_ping_status, -}; - -/* - * probe and init (aux_dev_id argument is required by function prototype but not used) - */ -static int intel_link_probe(struct auxiliary_device *auxdev, - const struct auxiliary_device_id *aux_dev_id) - -{ - struct device *dev = &auxdev->dev; - struct sdw_intel_link_dev *ldev = auxiliary_dev_to_sdw_intel_link_dev(auxdev); - struct sdw_intel *sdw; - struct sdw_cdns *cdns; - struct sdw_bus *bus; - int ret; - - sdw = devm_kzalloc(dev, sizeof(*sdw), GFP_KERNEL); - if (!sdw) - return -ENOMEM; - - cdns = &sdw->cdns; - bus = &cdns->bus; - - sdw->instance = auxdev->id; - sdw->link_res = &ldev->link_res; - cdns->dev = dev; - cdns->registers = sdw->link_res->registers; - cdns->instance = sdw->instance; - cdns->msg_count = 0; - - bus->link_id = auxdev->id; - bus->dev_num_ida_min = INTEL_DEV_NUM_IDA_MIN; - bus->clk_stop_timeout = 1; - - sdw_cdns_probe(cdns); - - /* Set ops */ - bus->ops = &sdw_intel_ops; - - /* set driver data, accessed by snd_soc_dai_get_drvdata() */ - auxiliary_set_drvdata(auxdev, cdns); - - /* use generic bandwidth allocation algorithm */ - sdw->cdns.bus.compute_params = sdw_compute_params; - - /* avoid resuming from pm_runtime suspend if it's not required */ - dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND); - - ret = sdw_bus_master_add(bus, dev, dev->fwnode); - if (ret) { - dev_err(dev, "sdw_bus_master_add fail: %d\n", ret); - return ret; - } - - if (bus->prop.hw_disabled) - dev_info(dev, - "SoundWire master %d is disabled, will be ignored\n", - bus->link_id); - /* - * Ignore BIOS err_threshold, it's a really bad idea when dealing - * with multiple hardware synchronized links - */ - bus->prop.err_threshold = 0; - - return 0; -} - -int intel_link_startup(struct auxiliary_device *auxdev) -{ - struct device *dev = &auxdev->dev; - struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev); - struct sdw_intel *sdw = cdns_to_intel(cdns); - struct sdw_bus *bus = &cdns->bus; - int link_flags; - bool multi_link; - u32 clock_stop_quirks; - int ret; - - if (bus->prop.hw_disabled) { - dev_info(dev, - "SoundWire master %d is disabled, ignoring\n", - sdw->instance); - return 0; - } - - link_flags = md_flags >> (bus->link_id * 8); - multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK); - if (!multi_link) { - dev_dbg(dev, "Multi-link is disabled\n"); - } else { - /* - * hardware-based synchronization is required regardless - * of the number of segments used by a stream: SSP-based - * synchronization is gated by gsync when the multi-master - * mode is set. 
- */ - bus->hw_sync_min_links = 1; - } - bus->multi_link = multi_link; - - /* Initialize shim, controller */ - ret = intel_link_power_up(sdw); - if (ret) - goto err_init; - - /* Register DAIs */ - ret = intel_register_dai(sdw); - if (ret) { - dev_err(dev, "DAI registration failed: %d\n", ret); - goto err_power_up; - } - - intel_debugfs_init(sdw); - - /* start bus */ - ret = intel_start_bus(sdw); - if (ret) { - dev_err(dev, "bus start failed: %d\n", ret); - goto err_power_up; - } - - /* Enable runtime PM */ - if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) { - pm_runtime_set_autosuspend_delay(dev, - INTEL_MASTER_SUSPEND_DELAY_MS); - pm_runtime_use_autosuspend(dev); - pm_runtime_mark_last_busy(dev); - - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - } - - clock_stop_quirks = sdw->link_res->clock_stop_quirks; - if (clock_stop_quirks & SDW_INTEL_CLK_STOP_NOT_ALLOWED) { - /* - * To keep the clock running we need to prevent - * pm_runtime suspend from happening by increasing the - * reference count. - * This quirk is specified by the parent PCI device in - * case of specific latency requirements. It will have - * no effect if pm_runtime is disabled by the user via - * a module parameter for testing purposes. - */ - pm_runtime_get_noresume(dev); - } - - /* - * The runtime PM status of Slave devices is "Unsupported" - * until they report as ATTACHED. If they don't, e.g. because - * there are no Slave devices populated or if the power-on is - * delayed or dependent on a power switch, the Master will - * remain active and prevent its parent from suspending. - * - * Conditionally force the pm_runtime core to re-evaluate the - * Master status in the absence of any Slave activity. A quirk - * is provided to e.g. deal with Slaves that may be powered on - * with a delay. A more complete solution would require the - * definition of Master properties. - */ - if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE)) - pm_runtime_idle(dev); - - sdw->startup_done = true; - return 0; - -err_power_up: - intel_link_power_down(sdw); -err_init: - return ret; -} - -static void intel_link_remove(struct auxiliary_device *auxdev) -{ - struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev); - struct sdw_intel *sdw = cdns_to_intel(cdns); - struct sdw_bus *bus = &cdns->bus; - - /* - * Since pm_runtime is already disabled, we don't decrease - * the refcount when the clock_stop_quirk is - * SDW_INTEL_CLK_STOP_NOT_ALLOWED - */ - if (!bus->prop.hw_disabled) { - intel_debugfs_exit(sdw); - sdw_cdns_enable_interrupt(cdns, false); - } - sdw_bus_master_delete(bus); -} - -int intel_link_process_wakeen_event(struct auxiliary_device *auxdev) -{ - struct device *dev = &auxdev->dev; - struct sdw_intel *sdw; - struct sdw_bus *bus; - - sdw = auxiliary_get_drvdata(auxdev); - bus = &sdw->cdns.bus; - - if (bus->prop.hw_disabled || !sdw->startup_done) { - dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", - bus->link_id); - return 0; - } - - if (!intel_shim_check_wake(sdw)) - return 0; - - /* disable WAKEEN interrupt ASAP to prevent interrupt flood */ - intel_shim_wake(sdw, false); - - /* - * resume the Master, which will generate a bus reset and result in - * Slaves re-attaching and be re-enumerated. The SoundWire physical - * device which generated the wake will trigger an interrupt, which - * will in turn cause the corresponding Linux Slave device to be - * resumed and the Slave codec driver to check the status. 
- */ - pm_request_resume(dev); - - return 0; -} - -/* - * PM calls - */ - -static int intel_resume_child_device(struct device *dev, void *data) -{ - int ret; - struct sdw_slave *slave = dev_to_sdw_dev(dev); - - if (!slave->probed) { - dev_dbg(dev, "skipping device, no probed driver\n"); - return 0; - } - if (!slave->dev_num_sticky) { - dev_dbg(dev, "skipping device, never detected on bus\n"); - return 0; - } - - ret = pm_request_resume(dev); - if (ret < 0) - dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret); - - return ret; -} - -static int __maybe_unused intel_pm_prepare(struct device *dev) -{ - struct sdw_cdns *cdns = dev_get_drvdata(dev); - struct sdw_intel *sdw = cdns_to_intel(cdns); - struct sdw_bus *bus = &cdns->bus; - u32 clock_stop_quirks; - int ret; - - if (bus->prop.hw_disabled || !sdw->startup_done) { - dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", - bus->link_id); - return 0; - } - - clock_stop_quirks = sdw->link_res->clock_stop_quirks; - - if (pm_runtime_suspended(dev) && - pm_runtime_suspended(dev->parent) && - ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) || - !clock_stop_quirks)) { - /* - * if we've enabled clock stop, and the parent is suspended, the SHIM registers - * are not accessible and the shim wake cannot be disabled. - * The only solution is to resume the entire bus to full power - */ - - /* - * If any operation in this block fails, we keep going since we don't want - * to prevent system suspend from happening and errors should be recoverable - * on resume. - */ - - /* - * first resume the device for this link. This will also by construction - * resume the PCI parent device. - */ - ret = pm_request_resume(dev); - if (ret < 0) { - dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret); - return 0; - } - - /* - * Continue resuming the entire bus (parent + child devices) to exit - * the clock stop mode. If there are no devices connected on this link - * this is a no-op. - * The resume to full power could have been implemented with a .prepare - * step in SoundWire codec drivers. This would however require a lot - * of code to handle an Intel-specific corner case. It is simpler in - * practice to add a loop at the link level. 
- */ - ret = device_for_each_child(bus->dev, NULL, intel_resume_child_device); - - if (ret < 0) - dev_err(dev, "%s: intel_resume_child_device failed: %d\n", __func__, ret); - } - - return 0; -} - -static int __maybe_unused intel_suspend(struct device *dev) -{ - struct sdw_cdns *cdns = dev_get_drvdata(dev); - struct sdw_intel *sdw = cdns_to_intel(cdns); - struct sdw_bus *bus = &cdns->bus; - u32 clock_stop_quirks; - int ret; - - if (bus->prop.hw_disabled || !sdw->startup_done) { - dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", - bus->link_id); - return 0; - } - - if (pm_runtime_suspended(dev)) { - dev_dbg(dev, "pm_runtime status: suspended\n"); - - clock_stop_quirks = sdw->link_res->clock_stop_quirks; - - if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) || - !clock_stop_quirks) { - - if (pm_runtime_suspended(dev->parent)) { - /* - * paranoia check: this should not happen with the .prepare - * resume to full power - */ - dev_err(dev, "%s: invalid config: parent is suspended\n", __func__); - } else { - intel_shim_wake(sdw, false); - } - } - - return 0; - } - - ret = intel_stop_bus(sdw, false); - if (ret < 0) { - dev_err(dev, "%s: cannot stop bus: %d\n", __func__, ret); - return ret; - } - - return 0; -} - -static int __maybe_unused intel_suspend_runtime(struct device *dev) -{ - struct sdw_cdns *cdns = dev_get_drvdata(dev); - struct sdw_intel *sdw = cdns_to_intel(cdns); - struct sdw_bus *bus = &cdns->bus; - u32 clock_stop_quirks; - int ret; - - if (bus->prop.hw_disabled || !sdw->startup_done) { - dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", - bus->link_id); - return 0; - } - - clock_stop_quirks = sdw->link_res->clock_stop_quirks; - - if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) { - ret = intel_stop_bus(sdw, false); - if (ret < 0) { - dev_err(dev, "%s: cannot stop bus during teardown: %d\n", - __func__, ret); - return ret; - } - } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET || !clock_stop_quirks) { - ret = intel_stop_bus(sdw, true); - if (ret < 0) { - dev_err(dev, "%s: cannot stop bus during clock_stop: %d\n", - __func__, ret); - return ret; - } - } else { - dev_err(dev, "%s clock_stop_quirks %x unsupported\n", - __func__, clock_stop_quirks); - ret = -EINVAL; - } - - return ret; -} - -static int __maybe_unused intel_resume(struct device *dev) -{ - struct sdw_cdns *cdns = dev_get_drvdata(dev); - struct sdw_intel *sdw = cdns_to_intel(cdns); - struct sdw_bus *bus = &cdns->bus; - int link_flags; - int ret; - - if (bus->prop.hw_disabled || !sdw->startup_done) { - dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", - bus->link_id); - return 0; - } - - link_flags = md_flags >> (bus->link_id * 8); - - if (pm_runtime_suspended(dev)) { - dev_dbg(dev, "pm_runtime status was suspended, forcing active\n"); - - /* follow required sequence from runtime_pm.rst */ - pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_mark_last_busy(dev); - pm_runtime_enable(dev); - - link_flags = md_flags >> (bus->link_id * 8); - - if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE)) - pm_runtime_idle(dev); - } - - ret = intel_link_power_up(sdw); - if (ret) { - dev_err(dev, "%s failed: %d\n", __func__, ret); - return ret; - } - - /* - * make sure all Slaves are tagged as UNATTACHED and provide - * reason for reinitialization - */ - sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET); - - ret = intel_start_bus(sdw); - if (ret < 0) { - dev_err(dev, "cannot start bus during resume\n"); - 
intel_link_power_down(sdw); - return ret; - } - - /* - * after system resume, the pm_runtime suspend() may kick in - * during the enumeration, before any children device force the - * master device to remain active. Using pm_runtime_get() - * routines is not really possible, since it'd prevent the - * master from suspending. - * A reasonable compromise is to update the pm_runtime - * counters and delay the pm_runtime suspend by several - * seconds, by when all enumeration should be complete. - */ - pm_runtime_mark_last_busy(dev); - - return 0; -} - -static int __maybe_unused intel_resume_runtime(struct device *dev) -{ - struct sdw_cdns *cdns = dev_get_drvdata(dev); - struct sdw_intel *sdw = cdns_to_intel(cdns); - struct sdw_bus *bus = &cdns->bus; - u32 clock_stop_quirks; - int ret; - - if (bus->prop.hw_disabled || !sdw->startup_done) { - dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", - bus->link_id); - return 0; - } - - /* unconditionally disable WAKEEN interrupt */ - intel_shim_wake(sdw, false); - - clock_stop_quirks = sdw->link_res->clock_stop_quirks; - - if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) { - ret = intel_link_power_up(sdw); - if (ret) { - dev_err(dev, "%s: power_up failed after teardown: %d\n", __func__, ret); - return ret; - } - - /* - * make sure all Slaves are tagged as UNATTACHED and provide - * reason for reinitialization - */ - sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET); - - ret = intel_start_bus(sdw); - if (ret < 0) { - dev_err(dev, "%s: cannot start bus after teardown: %d\n", __func__, ret); - intel_link_power_down(sdw); - return ret; - } - - - } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) { - ret = intel_link_power_up(sdw); - if (ret) { - dev_err(dev, "%s: power_up failed after bus reset: %d\n", __func__, ret); - return ret; - } - - ret = intel_start_bus_after_reset(sdw); - if (ret < 0) { - dev_err(dev, "%s: cannot start bus after reset: %d\n", __func__, ret); - intel_link_power_down(sdw); - return ret; - } - } else if (!clock_stop_quirks) { - - intel_check_clock_stop(sdw); - - ret = intel_link_power_up(sdw); - if (ret) { - dev_err(dev, "%s: power_up failed: %d\n", __func__, ret); - return ret; - } - - ret = intel_start_bus_after_clock_stop(sdw); - if (ret < 0) { - dev_err(dev, "%s: cannot start bus after clock stop: %d\n", __func__, ret); - intel_link_power_down(sdw); - return ret; - } - } else { - dev_err(dev, "%s: clock_stop_quirks %x unsupported\n", - __func__, clock_stop_quirks); - ret = -EINVAL; - } - - return ret; -} - -static const struct dev_pm_ops intel_pm = { - .prepare = intel_pm_prepare, - SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume) - SET_RUNTIME_PM_OPS(intel_suspend_runtime, intel_resume_runtime, NULL) -}; - -static const struct auxiliary_device_id intel_link_id_table[] = { - { .name = "soundwire_intel.link" }, - {}, -}; -MODULE_DEVICE_TABLE(auxiliary, intel_link_id_table); - -static struct auxiliary_driver sdw_intel_drv = { - .probe = intel_link_probe, - .remove = intel_link_remove, - .driver = { - /* auxiliary_driver_register() sets .name to be the modname */ - .pm = &intel_pm, - }, - .id_table = intel_link_id_table }; -module_auxiliary_driver(sdw_intel_drv); +EXPORT_SYMBOL_NS(sdw_intel_cnl_hw_ops, SOUNDWIRE_INTEL); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_DESCRIPTION("Intel Soundwire Link Driver"); diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h index cd93a44dba9a132923adb9cea82e774ad9fdb638..de9883313c8f28a26533dcbc9a1ccd3b17aec88e 100644 --- 
a/drivers/soundwire/intel.h +++ b/drivers/soundwire/intel.h @@ -7,6 +7,7 @@ /** * struct sdw_intel_link_res - Soundwire Intel link resource structure, * typically populated by the controller driver. + * @hw_ops: platform-specific ops * @mmio_base: mmio base of SoundWire registers * @registers: Link IO registers base * @shim: Audio shim pointer @@ -22,6 +23,8 @@ * @list: used to walk-through all masters exposed by the same controller */ struct sdw_intel_link_res { + const struct sdw_intel_hw_ops *hw_ops; + void __iomem *mmio_base; /* not strictly needed, useful for debug */ void __iomem *registers; void __iomem *shim; @@ -47,15 +50,92 @@ struct sdw_intel { #endif }; -int intel_link_startup(struct auxiliary_device *auxdev); -int intel_link_process_wakeen_event(struct auxiliary_device *auxdev); +#define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns) -struct sdw_intel_link_dev { - struct auxiliary_device auxdev; - struct sdw_intel_link_res link_res; -}; +#define INTEL_MASTER_RESET_ITERATIONS 10 + +#define SDW_INTEL_CHECK_OPS(sdw, cb) ((sdw) && (sdw)->link_res && (sdw)->link_res->hw_ops && \ + (sdw)->link_res->hw_ops->cb) +#define SDW_INTEL_OPS(sdw, cb) ((sdw)->link_res->hw_ops->cb) + +static inline void sdw_intel_debugfs_init(struct sdw_intel *sdw) +{ + if (SDW_INTEL_CHECK_OPS(sdw, debugfs_init)) + SDW_INTEL_OPS(sdw, debugfs_init)(sdw); +} + +static inline void sdw_intel_debugfs_exit(struct sdw_intel *sdw) +{ + if (SDW_INTEL_CHECK_OPS(sdw, debugfs_exit)) + SDW_INTEL_OPS(sdw, debugfs_exit)(sdw); +} + +static inline int sdw_intel_register_dai(struct sdw_intel *sdw) +{ + if (SDW_INTEL_CHECK_OPS(sdw, register_dai)) + return SDW_INTEL_OPS(sdw, register_dai)(sdw); + return -ENOTSUPP; +} + +static inline void sdw_intel_check_clock_stop(struct sdw_intel *sdw) +{ + if (SDW_INTEL_CHECK_OPS(sdw, check_clock_stop)) + SDW_INTEL_OPS(sdw, check_clock_stop)(sdw); +} + +static inline int sdw_intel_start_bus(struct sdw_intel *sdw) +{ + if (SDW_INTEL_CHECK_OPS(sdw, start_bus)) + return SDW_INTEL_OPS(sdw, start_bus)(sdw); + return -ENOTSUPP; +} + +static inline int sdw_intel_start_bus_after_reset(struct sdw_intel *sdw) +{ + if (SDW_INTEL_CHECK_OPS(sdw, start_bus_after_reset)) + return SDW_INTEL_OPS(sdw, start_bus_after_reset)(sdw); + return -ENOTSUPP; +} + +static inline int sdw_intel_start_bus_after_clock_stop(struct sdw_intel *sdw) +{ + if (SDW_INTEL_CHECK_OPS(sdw, start_bus_after_clock_stop)) + return SDW_INTEL_OPS(sdw, start_bus_after_clock_stop)(sdw); + return -ENOTSUPP; +} + +static inline int sdw_intel_stop_bus(struct sdw_intel *sdw, bool clock_stop) +{ + if (SDW_INTEL_CHECK_OPS(sdw, stop_bus)) + return SDW_INTEL_OPS(sdw, stop_bus)(sdw, clock_stop); + return -ENOTSUPP; +} + +static inline int sdw_intel_link_power_up(struct sdw_intel *sdw) +{ + if (SDW_INTEL_CHECK_OPS(sdw, link_power_up)) + return SDW_INTEL_OPS(sdw, link_power_up)(sdw); + return -ENOTSUPP; +} + +static inline int sdw_intel_link_power_down(struct sdw_intel *sdw) +{ + if (SDW_INTEL_CHECK_OPS(sdw, link_power_down)) + return SDW_INTEL_OPS(sdw, link_power_down)(sdw); + return -ENOTSUPP; +} + +static inline int sdw_intel_shim_check_wake(struct sdw_intel *sdw) +{ + if (SDW_INTEL_CHECK_OPS(sdw, shim_check_wake)) + return SDW_INTEL_OPS(sdw, shim_check_wake)(sdw); + return -ENOTSUPP; +} -#define auxiliary_dev_to_sdw_intel_link_dev(auxiliary_dev) \ - container_of(auxiliary_dev, struct sdw_intel_link_dev, auxdev) +static inline void sdw_intel_shim_wake(struct sdw_intel *sdw, bool wake_enable) +{ + if (SDW_INTEL_CHECK_OPS(sdw, 
shim_wake)) + SDW_INTEL_OPS(sdw, shim_wake)(sdw, wake_enable); +} #endif /* __SDW_INTEL_LOCAL_H */ diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c new file mode 100644 index 0000000000000000000000000000000000000000..96c6b2112feb6f37042dc8990ccae9d20207fa57 --- /dev/null +++ b/drivers/soundwire/intel_auxdevice.c @@ -0,0 +1,678 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +// Copyright(c) 2015-22 Intel Corporation. + +/* + * Soundwire Intel Manager Driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cadence_master.h" +#include "bus.h" +#include "intel.h" +#include "intel_auxdevice.h" + +/* IDA min selected to avoid conflicts with HDaudio/iDISP SDI values */ +#define INTEL_DEV_NUM_IDA_MIN 4 + +#define INTEL_MASTER_SUSPEND_DELAY_MS 3000 + +/* + * debug/config flags for the Intel SoundWire Master. + * + * Since we may have multiple masters active, we can have up to 8 + * flags reused in each byte, with master0 using the ls-byte, etc. + */ + +#define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME BIT(0) +#define SDW_INTEL_MASTER_DISABLE_CLOCK_STOP BIT(1) +#define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE BIT(2) +#define SDW_INTEL_MASTER_DISABLE_MULTI_LINK BIT(3) + +static int md_flags; +module_param_named(sdw_md_flags, md_flags, int, 0444); +MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)"); + +static int generic_pre_bank_switch(struct sdw_bus *bus) +{ + struct sdw_cdns *cdns = bus_to_cdns(bus); + struct sdw_intel *sdw = cdns_to_intel(cdns); + + return sdw->link_res->hw_ops->pre_bank_switch(sdw); +} + +static int generic_post_bank_switch(struct sdw_bus *bus) +{ + struct sdw_cdns *cdns = bus_to_cdns(bus); + struct sdw_intel *sdw = cdns_to_intel(cdns); + + return sdw->link_res->hw_ops->post_bank_switch(sdw); +} + +static int sdw_master_read_intel_prop(struct sdw_bus *bus) +{ + struct sdw_master_prop *prop = &bus->prop; + struct fwnode_handle *link; + char name[32]; + u32 quirk_mask; + + /* Find master handle */ + snprintf(name, sizeof(name), + "mipi-sdw-link-%d-subproperties", bus->link_id); + + link = device_get_named_child_node(bus->dev, name); + if (!link) { + dev_err(bus->dev, "Master node %s not found\n", name); + return -EIO; + } + + fwnode_property_read_u32(link, + "intel-sdw-ip-clock", + &prop->mclk_freq); + + /* the values reported by BIOS are the 2x clock, not the bus clock */ + prop->mclk_freq /= 2; + + fwnode_property_read_u32(link, + "intel-quirk-mask", + &quirk_mask); + + if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE) + prop->hw_disabled = true; + + prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH | + SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY; + + return 0; +} + +static int intel_prop_read(struct sdw_bus *bus) +{ + /* Initialize with default handler to read all DisCo properties */ + sdw_master_read_prop(bus); + + /* read Intel-specific properties */ + sdw_master_read_intel_prop(bus); + + return 0; +} + +static struct sdw_master_ops sdw_intel_ops = { + .read_prop = intel_prop_read, + .override_adr = sdw_dmi_override_adr, + .xfer_msg = cdns_xfer_msg, + .xfer_msg_defer = cdns_xfer_msg_defer, + .reset_page_addr = cdns_reset_page_addr, + .set_bus_conf = cdns_bus_conf, + .pre_bank_switch = generic_pre_bank_switch, + .post_bank_switch = generic_post_bank_switch, + .read_ping_status = cdns_read_ping_status, +}; + +/* + * probe and init (aux_dev_id argument is required by function prototype but not used) + */ 
+static int intel_link_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *aux_dev_id) + +{ + struct device *dev = &auxdev->dev; + struct sdw_intel_link_dev *ldev = auxiliary_dev_to_sdw_intel_link_dev(auxdev); + struct sdw_intel *sdw; + struct sdw_cdns *cdns; + struct sdw_bus *bus; + int ret; + + sdw = devm_kzalloc(dev, sizeof(*sdw), GFP_KERNEL); + if (!sdw) + return -ENOMEM; + + cdns = &sdw->cdns; + bus = &cdns->bus; + + sdw->instance = auxdev->id; + sdw->link_res = &ldev->link_res; + cdns->dev = dev; + cdns->registers = sdw->link_res->registers; + cdns->instance = sdw->instance; + cdns->msg_count = 0; + + bus->link_id = auxdev->id; + bus->dev_num_ida_min = INTEL_DEV_NUM_IDA_MIN; + bus->clk_stop_timeout = 1; + + sdw_cdns_probe(cdns); + + /* Set ops */ + bus->ops = &sdw_intel_ops; + + /* set driver data, accessed by snd_soc_dai_get_drvdata() */ + auxiliary_set_drvdata(auxdev, cdns); + + /* use generic bandwidth allocation algorithm */ + sdw->cdns.bus.compute_params = sdw_compute_params; + + /* avoid resuming from pm_runtime suspend if it's not required */ + dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND); + + ret = sdw_bus_master_add(bus, dev, dev->fwnode); + if (ret) { + dev_err(dev, "sdw_bus_master_add fail: %d\n", ret); + return ret; + } + + if (bus->prop.hw_disabled) + dev_info(dev, + "SoundWire master %d is disabled, will be ignored\n", + bus->link_id); + /* + * Ignore BIOS err_threshold, it's a really bad idea when dealing + * with multiple hardware synchronized links + */ + bus->prop.err_threshold = 0; + + return 0; +} + +int intel_link_startup(struct auxiliary_device *auxdev) +{ + struct device *dev = &auxdev->dev; + struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev); + struct sdw_intel *sdw = cdns_to_intel(cdns); + struct sdw_bus *bus = &cdns->bus; + int link_flags; + bool multi_link; + u32 clock_stop_quirks; + int ret; + + if (bus->prop.hw_disabled) { + dev_info(dev, + "SoundWire master %d is disabled, ignoring\n", + sdw->instance); + return 0; + } + + link_flags = md_flags >> (bus->link_id * 8); + multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK); + if (!multi_link) { + dev_dbg(dev, "Multi-link is disabled\n"); + } else { + /* + * hardware-based synchronization is required regardless + * of the number of segments used by a stream: SSP-based + * synchronization is gated by gsync when the multi-master + * mode is set. + */ + bus->hw_sync_min_links = 1; + } + bus->multi_link = multi_link; + + /* Initialize shim, controller */ + ret = sdw_intel_link_power_up(sdw); + if (ret) + goto err_init; + + /* Register DAIs */ + ret = sdw_intel_register_dai(sdw); + if (ret) { + dev_err(dev, "DAI registration failed: %d\n", ret); + goto err_power_up; + } + + sdw_intel_debugfs_init(sdw); + + /* start bus */ + ret = sdw_intel_start_bus(sdw); + if (ret) { + dev_err(dev, "bus start failed: %d\n", ret); + goto err_power_up; + } + + /* Enable runtime PM */ + if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) { + pm_runtime_set_autosuspend_delay(dev, + INTEL_MASTER_SUSPEND_DELAY_MS); + pm_runtime_use_autosuspend(dev); + pm_runtime_mark_last_busy(dev); + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } + + clock_stop_quirks = sdw->link_res->clock_stop_quirks; + if (clock_stop_quirks & SDW_INTEL_CLK_STOP_NOT_ALLOWED) { + /* + * To keep the clock running we need to prevent + * pm_runtime suspend from happening by increasing the + * reference count. 
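+		 * pm_runtime_get_noresume() below takes that reference
+		 * without waking the device.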
+ * This quirk is specified by the parent PCI device in + * case of specific latency requirements. It will have + * no effect if pm_runtime is disabled by the user via + * a module parameter for testing purposes. + */ + pm_runtime_get_noresume(dev); + } + + /* + * The runtime PM status of Slave devices is "Unsupported" + * until they report as ATTACHED. If they don't, e.g. because + * there are no Slave devices populated or if the power-on is + * delayed or dependent on a power switch, the Master will + * remain active and prevent its parent from suspending. + * + * Conditionally force the pm_runtime core to re-evaluate the + * Master status in the absence of any Slave activity. A quirk + * is provided to e.g. deal with Slaves that may be powered on + * with a delay. A more complete solution would require the + * definition of Master properties. + */ + if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE)) + pm_runtime_idle(dev); + + sdw->startup_done = true; + return 0; + +err_power_up: + sdw_intel_link_power_down(sdw); +err_init: + return ret; +} + +static void intel_link_remove(struct auxiliary_device *auxdev) +{ + struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev); + struct sdw_intel *sdw = cdns_to_intel(cdns); + struct sdw_bus *bus = &cdns->bus; + + /* + * Since pm_runtime is already disabled, we don't decrease + * the refcount when the clock_stop_quirk is + * SDW_INTEL_CLK_STOP_NOT_ALLOWED + */ + if (!bus->prop.hw_disabled) { + sdw_intel_debugfs_exit(sdw); + sdw_cdns_enable_interrupt(cdns, false); + } + sdw_bus_master_delete(bus); +} + +int intel_link_process_wakeen_event(struct auxiliary_device *auxdev) +{ + struct device *dev = &auxdev->dev; + struct sdw_intel *sdw; + struct sdw_bus *bus; + + sdw = auxiliary_get_drvdata(auxdev); + bus = &sdw->cdns.bus; + + if (bus->prop.hw_disabled || !sdw->startup_done) { + dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", + bus->link_id); + return 0; + } + + if (!sdw_intel_shim_check_wake(sdw)) + return 0; + + /* disable WAKEEN interrupt ASAP to prevent interrupt flood */ + sdw_intel_shim_wake(sdw, false); + + /* + * resume the Master, which will generate a bus reset and result in + * Slaves re-attaching and be re-enumerated. The SoundWire physical + * device which generated the wake will trigger an interrupt, which + * will in turn cause the corresponding Linux Slave device to be + * resumed and the Slave codec driver to check the status. 
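+	 * pm_request_resume() is asynchronous, so this handler returns
+	 * quickly and the actual resume runs from the PM workqueue.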
+ */ + pm_request_resume(dev); + + return 0; +} + +/* + * PM calls + */ + +static int intel_resume_child_device(struct device *dev, void *data) +{ + int ret; + struct sdw_slave *slave = dev_to_sdw_dev(dev); + + if (!slave->probed) { + dev_dbg(dev, "skipping device, no probed driver\n"); + return 0; + } + if (!slave->dev_num_sticky) { + dev_dbg(dev, "skipping device, never detected on bus\n"); + return 0; + } + + ret = pm_request_resume(dev); + if (ret < 0) + dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret); + + return ret; +} + +static int __maybe_unused intel_pm_prepare(struct device *dev) +{ + struct sdw_cdns *cdns = dev_get_drvdata(dev); + struct sdw_intel *sdw = cdns_to_intel(cdns); + struct sdw_bus *bus = &cdns->bus; + u32 clock_stop_quirks; + int ret; + + if (bus->prop.hw_disabled || !sdw->startup_done) { + dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", + bus->link_id); + return 0; + } + + clock_stop_quirks = sdw->link_res->clock_stop_quirks; + + if (pm_runtime_suspended(dev) && + pm_runtime_suspended(dev->parent) && + ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) || + !clock_stop_quirks)) { + /* + * if we've enabled clock stop, and the parent is suspended, the SHIM registers + * are not accessible and the shim wake cannot be disabled. + * The only solution is to resume the entire bus to full power + */ + + /* + * If any operation in this block fails, we keep going since we don't want + * to prevent system suspend from happening and errors should be recoverable + * on resume. + */ + + /* + * first resume the device for this link. This will also by construction + * resume the PCI parent device. + */ + ret = pm_request_resume(dev); + if (ret < 0) { + dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret); + return 0; + } + + /* + * Continue resuming the entire bus (parent + child devices) to exit + * the clock stop mode. If there are no devices connected on this link + * this is a no-op. + * The resume to full power could have been implemented with a .prepare + * step in SoundWire codec drivers. This would however require a lot + * of code to handle an Intel-specific corner case. It is simpler in + * practice to add a loop at the link level. 
+ */ + ret = device_for_each_child(bus->dev, NULL, intel_resume_child_device); + + if (ret < 0) + dev_err(dev, "%s: intel_resume_child_device failed: %d\n", __func__, ret); + } + + return 0; +} + +static int __maybe_unused intel_suspend(struct device *dev) +{ + struct sdw_cdns *cdns = dev_get_drvdata(dev); + struct sdw_intel *sdw = cdns_to_intel(cdns); + struct sdw_bus *bus = &cdns->bus; + u32 clock_stop_quirks; + int ret; + + if (bus->prop.hw_disabled || !sdw->startup_done) { + dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", + bus->link_id); + return 0; + } + + if (pm_runtime_suspended(dev)) { + dev_dbg(dev, "pm_runtime status: suspended\n"); + + clock_stop_quirks = sdw->link_res->clock_stop_quirks; + + if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) || + !clock_stop_quirks) { + + if (pm_runtime_suspended(dev->parent)) { + /* + * paranoia check: this should not happen with the .prepare + * resume to full power + */ + dev_err(dev, "%s: invalid config: parent is suspended\n", __func__); + } else { + sdw_intel_shim_wake(sdw, false); + } + } + + return 0; + } + + ret = sdw_intel_stop_bus(sdw, false); + if (ret < 0) { + dev_err(dev, "%s: cannot stop bus: %d\n", __func__, ret); + return ret; + } + + return 0; +} + +static int __maybe_unused intel_suspend_runtime(struct device *dev) +{ + struct sdw_cdns *cdns = dev_get_drvdata(dev); + struct sdw_intel *sdw = cdns_to_intel(cdns); + struct sdw_bus *bus = &cdns->bus; + u32 clock_stop_quirks; + int ret; + + if (bus->prop.hw_disabled || !sdw->startup_done) { + dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", + bus->link_id); + return 0; + } + + clock_stop_quirks = sdw->link_res->clock_stop_quirks; + + if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) { + ret = sdw_intel_stop_bus(sdw, false); + if (ret < 0) { + dev_err(dev, "%s: cannot stop bus during teardown: %d\n", + __func__, ret); + return ret; + } + } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET || !clock_stop_quirks) { + ret = sdw_intel_stop_bus(sdw, true); + if (ret < 0) { + dev_err(dev, "%s: cannot stop bus during clock_stop: %d\n", + __func__, ret); + return ret; + } + } else { + dev_err(dev, "%s clock_stop_quirks %x unsupported\n", + __func__, clock_stop_quirks); + ret = -EINVAL; + } + + return ret; +} + +static int __maybe_unused intel_resume(struct device *dev) +{ + struct sdw_cdns *cdns = dev_get_drvdata(dev); + struct sdw_intel *sdw = cdns_to_intel(cdns); + struct sdw_bus *bus = &cdns->bus; + int link_flags; + int ret; + + if (bus->prop.hw_disabled || !sdw->startup_done) { + dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", + bus->link_id); + return 0; + } + + link_flags = md_flags >> (bus->link_id * 8); + + if (pm_runtime_suspended(dev)) { + dev_dbg(dev, "pm_runtime status was suspended, forcing active\n"); + + /* follow required sequence from runtime_pm.rst */ + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_mark_last_busy(dev); + pm_runtime_enable(dev); + + link_flags = md_flags >> (bus->link_id * 8); + + if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE)) + pm_runtime_idle(dev); + } + + ret = sdw_intel_link_power_up(sdw); + if (ret) { + dev_err(dev, "%s failed: %d\n", __func__, ret); + return ret; + } + + /* + * make sure all Slaves are tagged as UNATTACHED and provide + * reason for reinitialization + */ + sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET); + + ret = sdw_intel_start_bus(sdw); + if (ret < 0) { + dev_err(dev, "cannot start 
bus during resume\n"); + sdw_intel_link_power_down(sdw); + return ret; + } + + /* + * after system resume, the pm_runtime suspend() may kick in + * during the enumeration, before any children device force the + * master device to remain active. Using pm_runtime_get() + * routines is not really possible, since it'd prevent the + * master from suspending. + * A reasonable compromise is to update the pm_runtime + * counters and delay the pm_runtime suspend by several + * seconds, by when all enumeration should be complete. + */ + pm_runtime_mark_last_busy(dev); + + return 0; +} + +static int __maybe_unused intel_resume_runtime(struct device *dev) +{ + struct sdw_cdns *cdns = dev_get_drvdata(dev); + struct sdw_intel *sdw = cdns_to_intel(cdns); + struct sdw_bus *bus = &cdns->bus; + u32 clock_stop_quirks; + int ret; + + if (bus->prop.hw_disabled || !sdw->startup_done) { + dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n", + bus->link_id); + return 0; + } + + /* unconditionally disable WAKEEN interrupt */ + sdw_intel_shim_wake(sdw, false); + + clock_stop_quirks = sdw->link_res->clock_stop_quirks; + + if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) { + ret = sdw_intel_link_power_up(sdw); + if (ret) { + dev_err(dev, "%s: power_up failed after teardown: %d\n", __func__, ret); + return ret; + } + + /* + * make sure all Slaves are tagged as UNATTACHED and provide + * reason for reinitialization + */ + sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET); + + ret = sdw_intel_start_bus(sdw); + if (ret < 0) { + dev_err(dev, "%s: cannot start bus after teardown: %d\n", __func__, ret); + sdw_intel_link_power_down(sdw); + return ret; + } + + } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) { + ret = sdw_intel_link_power_up(sdw); + if (ret) { + dev_err(dev, "%s: power_up failed after bus reset: %d\n", __func__, ret); + return ret; + } + + ret = sdw_intel_start_bus_after_reset(sdw); + if (ret < 0) { + dev_err(dev, "%s: cannot start bus after reset: %d\n", __func__, ret); + sdw_intel_link_power_down(sdw); + return ret; + } + } else if (!clock_stop_quirks) { + + sdw_intel_check_clock_stop(sdw); + + ret = sdw_intel_link_power_up(sdw); + if (ret) { + dev_err(dev, "%s: power_up failed: %d\n", __func__, ret); + return ret; + } + + ret = sdw_intel_start_bus_after_clock_stop(sdw); + if (ret < 0) { + dev_err(dev, "%s: cannot start bus after clock stop: %d\n", __func__, ret); + sdw_intel_link_power_down(sdw); + return ret; + } + } else { + dev_err(dev, "%s: clock_stop_quirks %x unsupported\n", + __func__, clock_stop_quirks); + ret = -EINVAL; + } + + return ret; +} + +static const struct dev_pm_ops intel_pm = { + .prepare = intel_pm_prepare, + SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume) + SET_RUNTIME_PM_OPS(intel_suspend_runtime, intel_resume_runtime, NULL) +}; + +static const struct auxiliary_device_id intel_link_id_table[] = { + { .name = "soundwire_intel.link" }, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, intel_link_id_table); + +static struct auxiliary_driver sdw_intel_drv = { + .probe = intel_link_probe, + .remove = intel_link_remove, + .driver = { + /* auxiliary_driver_register() sets .name to be the modname */ + .pm = &intel_pm, + }, + .id_table = intel_link_id_table +}; +module_auxiliary_driver(sdw_intel_drv); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Intel Soundwire Link Driver"); diff --git a/drivers/soundwire/intel_auxdevice.h b/drivers/soundwire/intel_auxdevice.h new file mode 100644 index 
0000000000000000000000000000000000000000..a00ecde9556333dabd2e3cc61ad9195e360c0d6d --- /dev/null +++ b/drivers/soundwire/intel_auxdevice.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* Copyright(c) 2015-2022 Intel Corporation. */ + +#ifndef __SDW_INTEL_AUXDEVICE_H +#define __SDW_INTEL_AUXDEVICE_H + +int intel_link_startup(struct auxiliary_device *auxdev); +int intel_link_process_wakeen_event(struct auxiliary_device *auxdev); + +struct sdw_intel_link_dev { + struct auxiliary_device auxdev; + struct sdw_intel_link_res link_res; +}; + +#define auxiliary_dev_to_sdw_intel_link_dev(auxiliary_dev) \ + container_of(auxiliary_dev, struct sdw_intel_link_dev, auxdev) + +#endif /* __SDW_INTEL_AUXDEVICE_H */ diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c index d6842925de61e1c6042c0420467b148919b47f40..cbe56b993c6ce9e0b67cf8add703e95af65020da 100644 --- a/drivers/soundwire/intel_init.c +++ b/drivers/soundwire/intel_init.c @@ -17,6 +17,7 @@ #include #include "cadence_master.h" #include "intel.h" +#include "intel_auxdevice.h" static void intel_link_dev_release(struct device *dev) { @@ -60,6 +61,7 @@ static struct sdw_intel_link_dev *intel_link_dev_register(struct sdw_intel_res * /* Add link information used in the driver probe */ link = &ldev->link_res; + link->hw_ops = res->hw_ops; link->mmio_base = res->mmio_base; link->registers = res->mmio_base + SDW_LINK_BASE + (SDW_LINK_SIZE * link_id); diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index cee2b22231410345e9373b5968b1f9b9c977623b..33542487029005a91f59dc15acb4b76f69ebeb3b 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -25,6 +25,8 @@ #define SWRM_COMP_SW_RESET 0x008 #define SWRM_COMP_STATUS 0x014 +#define SWRM_LINK_MANAGER_EE 0x018 +#define SWRM_EE_CPU 1 #define SWRM_FRM_GEN_ENABLED BIT(0) #define SWRM_COMP_HW_VERSION 0x00 #define SWRM_COMP_CFG_ADDR 0x04 @@ -104,7 +106,6 @@ #define SWRM_REG_VAL_PACK(data, dev, id, reg) \ ((reg) | ((id) << 16) | ((dev) << 20) | ((data) << 24)) -#define SWRM_SPECIAL_CMD_ID 0xF #define MAX_FREQ_NUM 1 #define TIMEOUT_MS 100 #define QCOM_SWRM_MAX_RD_LEN 0x1 @@ -694,7 +695,14 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl) u32p_replace_bits(&val, SWRM_DEF_CMD_NO_PINGS, SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK); ctrl->reg_write(ctrl, SWRM_MCP_CFG_ADDR, val); - ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, SWRM_MCP_BUS_CLK_START); + if (ctrl->version >= 0x01070000) { + ctrl->reg_write(ctrl, SWRM_LINK_MANAGER_EE, SWRM_EE_CPU); + ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, + SWRM_MCP_BUS_CLK_START << SWRM_EE_CPU); + } else { + ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, SWRM_MCP_BUS_CLK_START); + } + /* Configure number of retries of a read/write cmd */ if (ctrl->version > 0x01050001) { /* Only for versions >= 1.5.1 */ @@ -1331,8 +1339,8 @@ static int qcom_swrm_probe(struct platform_device *pdev) } if (data->sw_clk_gate_required) { - ctrl->audio_cgcr = devm_reset_control_get_exclusive(dev, "swr_audio_cgcr"); - if (IS_ERR_OR_NULL(ctrl->audio_cgcr)) { + ctrl->audio_cgcr = devm_reset_control_get_optional_exclusive(dev, "swr_audio_cgcr"); + if (IS_ERR(ctrl->audio_cgcr)) { dev_err(dev, "Failed to get cgcr reset ctrl required for SW gating\n"); ret = PTR_ERR(ctrl->audio_cgcr); goto err_init; @@ -1519,7 +1527,13 @@ static int __maybe_unused swrm_runtime_resume(struct device *dev) } else { reset_control_reset(ctrl->audio_cgcr); - ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, SWRM_MCP_BUS_CLK_START); + if (ctrl->version >= 0x01070000) { + 
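/*
+		 * Controller versions appear to be packed as 0xMMmmpppp,
+		 * so 0x01070000 corresponds to v1.7.0 (compare the
+		 * "versions >= 1.5.1" check against 0x01050001 above). As
+		 * in qcom_swrm_init(), v1.7.0 and later must select the
+		 * CPU execution environment before starting the bus clock.
+		 */
+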
ctrl->reg_write(ctrl, SWRM_LINK_MANAGER_EE, SWRM_EE_CPU); + ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, + SWRM_MCP_BUS_CLK_START << SWRM_EE_CPU); + } else { + ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, SWRM_MCP_BUS_CLK_START); + } ctrl->reg_write(ctrl, SWRM_INTERRUPT_CLEAR, SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET); @@ -1583,6 +1597,7 @@ static const struct of_device_id qcom_swrm_of_match[] = { { .compatible = "qcom,soundwire-v1.3.0", .data = &swrm_v1_3_data }, { .compatible = "qcom,soundwire-v1.5.1", .data = &swrm_v1_5_data }, { .compatible = "qcom,soundwire-v1.6.0", .data = &swrm_v1_6_data }, + { .compatible = "qcom,soundwire-v1.7.0", .data = &swrm_v1_5_data }, {/* sentinel */}, }; diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index 2cf3203b2397e88e9014fd3e705d6cd6e9c795ad..8b6a42ab816fbad49858a200eb931967b96f2cd7 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -22,8 +22,14 @@ #define PMIC_ARB_VERSION_V2_MIN 0x20010000 #define PMIC_ARB_VERSION_V3_MIN 0x30000000 #define PMIC_ARB_VERSION_V5_MIN 0x50000000 +#define PMIC_ARB_VERSION_V7_MIN 0x70000000 #define PMIC_ARB_INT_EN 0x0004 +#define PMIC_ARB_FEATURES 0x0004 +#define PMIC_ARB_FEATURES_PERIPH_MASK GENMASK(10, 0) + +#define PMIC_ARB_FEATURES1 0x0008 + /* PMIC Arbiter channel registers offsets */ #define PMIC_ARB_CMD 0x00 #define PMIC_ARB_CONFIG 0x04 @@ -48,7 +54,6 @@ #define INVALID_EE 0xFF /* Ownership Table */ -#define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N))) #define SPMI_OWNERSHIP_PERIPH2OWNER(X) ((X) & 0x7) /* Channel Status fields */ @@ -91,6 +96,7 @@ enum pmic_arb_channel { /* Maximum number of support PMIC peripherals */ #define PMIC_ARB_MAX_PERIPHS 512 +#define PMIC_ARB_MAX_PERIPHS_V7 1024 #define PMIC_ARB_TIMEOUT_US 1000 #define PMIC_ARB_MAX_TRANS_BYTES (8) @@ -104,12 +110,12 @@ enum pmic_arb_channel { ((((slave_id) & 0xF) << 28) | \ (((periph_id) & 0xFF) << 20) | \ (((irq_id) & 0x7) << 16) | \ - (((apid) & 0x1FF) << 0)) + (((apid) & 0x3FF) << 0)) #define hwirq_to_sid(hwirq) (((hwirq) >> 28) & 0xF) #define hwirq_to_per(hwirq) (((hwirq) >> 20) & 0xFF) #define hwirq_to_irq(hwirq) (((hwirq) >> 16) & 0x7) -#define hwirq_to_apid(hwirq) (((hwirq) >> 0) & 0x1FF) +#define hwirq_to_apid(hwirq) (((hwirq) >> 0) & 0x3FF) struct pmic_arb_ver_ops; @@ -130,13 +136,21 @@ struct apid_data { * @channel: execution environment channel to use for accesses. * @irq: PMIC ARB interrupt. * @ee: the current Execution Environment + * @bus_instance: on v7: 0 = primary SPMI bus, 1 = secondary SPMI bus * @min_apid: minimum APID (used for bounding IRQ search) * @max_apid: maximum APID + * @base_apid: on v7: minimum APID associated with the particular SPMI + * bus instance + * @apid_count: on v5 and v7: number of APIDs associated with the + * particular SPMI bus instance * @mapping_table: in-memory copy of PPID -> APID mapping table. * @domain: irq domain object for PMIC IRQ domain * @spmic: SPMI controller object * @ver_ops: version dependent operations. - * @ppid_to_apid in-memory copy of PPID -> APID mapping table. + * @ppid_to_apid: in-memory copy of PPID -> APID mapping table. 
+ * @last_apid: Highest value APID in use + * @apid_data: Table of data for all APIDs + * @max_periphs: Number of elements in apid_data[] */ struct spmi_pmic_arb { void __iomem *rd_base; @@ -149,8 +163,11 @@ struct spmi_pmic_arb { u8 channel; int irq; u8 ee; + u32 bus_instance; u16 min_apid; u16 max_apid; + u16 base_apid; + int apid_count; u32 *mapping_table; DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS); struct irq_domain *domain; @@ -158,7 +175,8 @@ struct spmi_pmic_arb { const struct pmic_arb_ver_ops *ver_ops; u16 *ppid_to_apid; u16 last_apid; - struct apid_data apid_data[PMIC_ARB_MAX_PERIPHS]; + struct apid_data *apid_data; + int max_periphs; }; /** @@ -180,6 +198,7 @@ struct spmi_pmic_arb { * @irq_clear: on v1 address of PMIC_ARB_SPMI_PIC_IRQ_CLEARn * on v2 address of SPMI_PIC_IRQ_CLEARn. * @apid_map_offset: offset of PMIC_ARB_REG_CHNLn + * @apid_owner: on v2 and later address of SPMI_PERIPHn_2OWNER_TABLE_REG */ struct pmic_arb_ver_ops { const char *ver_str; @@ -196,6 +215,7 @@ struct pmic_arb_ver_ops { void __iomem *(*irq_status)(struct spmi_pmic_arb *pmic_arb, u16 n); void __iomem *(*irq_clear)(struct spmi_pmic_arb *pmic_arb, u16 n); u32 (*apid_map_offset)(u16 n); + void __iomem *(*apid_owner)(struct spmi_pmic_arb *pmic_arb, u16 n); }; static inline void pmic_arb_base_write(struct spmi_pmic_arb *pmic_arb, @@ -627,6 +647,11 @@ static void pmic_arb_chained_irq(struct irq_desc *desc) struct irq_chip *chip = irq_desc_get_chip(desc); int first = pmic_arb->min_apid; int last = pmic_arb->max_apid; + /* + * acc_offset will be non-zero for the secondary SPMI bus instance on + * v7 controllers. + */ + int acc_offset = pmic_arb->base_apid >> 5; u8 ee = pmic_arb->ee; u32 status, enable, handled = 0; int i, id, apid; @@ -637,8 +662,7 @@ static void pmic_arb_chained_irq(struct irq_desc *desc) chained_irq_enter(chip, desc); for (i = first >> 5; i <= last >> 5; ++i) { - status = readl_relaxed( - ver_ops->owner_acc_status(pmic_arb, ee, i)); + status = readl_relaxed(ver_ops->owner_acc_status(pmic_arb, ee, i - acc_offset)); if (status) acc_valid = true; @@ -983,8 +1007,8 @@ static u16 pmic_arb_find_apid(struct spmi_pmic_arb *pmic_arb, u16 ppid) if (offset >= pmic_arb->core_size) break; - regval = readl_relaxed(pmic_arb->cnfg + - SPMI_OWNERSHIP_TABLE_REG(apid)); + regval = readl_relaxed(pmic_arb->ver_ops->apid_owner(pmic_arb, + apid)); apidd->irq_ee = SPMI_OWNERSHIP_PERIPH2OWNER(regval); apidd->write_ee = apidd->irq_ee; @@ -1020,21 +1044,30 @@ static int pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pmic_arb, u16 ppid) static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb) { - struct apid_data *apidd = pmic_arb->apid_data; + struct apid_data *apidd; struct apid_data *prev_apidd; - u16 i, apid, ppid; + u16 i, apid, ppid, apid_max; bool valid, is_irq_ee; u32 regval, offset; /* * In order to allow multiple EEs to write to a single PPID in arbiter - * version 5, there is more than one APID mapped to each PPID. + * version 5 and 7, there is more than one APID mapped to each PPID. * The owner field for each of these mappings specifies the EE which is * allowed to write to the APID. The owner of the last (highest) APID * which has the IRQ owner bit set for a given PPID will receive * interrupts from the PPID. 
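+	 * For example, if APID 40 and APID 41 both map to PPID 0x708 and
+	 * both have the IRQ owner bit set, the EE that owns APID 41
+	 * receives the interrupts.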
+ * + * In arbiter version 7, the APID numbering space is divided between + * the primary bus (0) and secondary bus (1) such that: + * APID = 0 to N-1 are assigned to the primary bus + * APID = N to N+M-1 are assigned to the secondary bus + * where N = number of APIDs supported by the primary bus and + * M = number of APIDs supported by the secondary bus */ - for (i = 0; ; i++, apidd++) { + apidd = &pmic_arb->apid_data[pmic_arb->base_apid]; + apid_max = pmic_arb->base_apid + pmic_arb->apid_count; + for (i = pmic_arb->base_apid; i < apid_max; i++, apidd++) { offset = pmic_arb->ver_ops->apid_map_offset(i); if (offset >= pmic_arb->core_size) break; @@ -1045,8 +1078,8 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb) ppid = (regval >> 8) & PMIC_ARB_PPID_MASK; is_irq_ee = PMIC_ARB_CHAN_IS_IRQ_OWNER(regval); - regval = readl_relaxed(pmic_arb->cnfg + - SPMI_OWNERSHIP_TABLE_REG(i)); + regval = readl_relaxed(pmic_arb->ver_ops->apid_owner(pmic_arb, + i)); apidd->write_ee = SPMI_OWNERSHIP_PERIPH2OWNER(regval); apidd->irq_ee = is_irq_ee ? apidd->write_ee : INVALID_EE; @@ -1145,6 +1178,40 @@ static int pmic_arb_offset_v5(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr, return offset; } +/* + * v7 offset per ee and per apid for observer channels and per apid for + * read/write channels. + */ +static int pmic_arb_offset_v7(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr, + enum pmic_arb_channel ch_type) +{ + u16 apid; + int rc; + u32 offset = 0; + u16 ppid = (sid << 8) | (addr >> 8); + + rc = pmic_arb->ver_ops->ppid_to_apid(pmic_arb, ppid); + if (rc < 0) + return rc; + + apid = rc; + switch (ch_type) { + case PMIC_ARB_CHANNEL_OBS: + offset = 0x8000 * pmic_arb->ee + 0x20 * apid; + break; + case PMIC_ARB_CHANNEL_RW: + if (pmic_arb->apid_data[apid].write_ee != pmic_arb->ee) { + dev_err(&pmic_arb->spmic->dev, "disallowed SPMI write to sid=%u, addr=0x%04X\n", + sid, addr); + return -EPERM; + } + offset = 0x1000 * apid; + break; + } + + return offset; +} + static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc) { return (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7); @@ -1179,6 +1246,12 @@ pmic_arb_owner_acc_status_v5(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n) return pmic_arb->intr + 0x10000 * m + 0x4 * n; } +static void __iomem * +pmic_arb_owner_acc_status_v7(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n) +{ + return pmic_arb->intr + 0x1000 * m + 0x4 * n; +} + static void __iomem * pmic_arb_acc_enable_v1(struct spmi_pmic_arb *pmic_arb, u16 n) { @@ -1197,6 +1270,12 @@ pmic_arb_acc_enable_v5(struct spmi_pmic_arb *pmic_arb, u16 n) return pmic_arb->wr_base + 0x100 + 0x10000 * n; } +static void __iomem * +pmic_arb_acc_enable_v7(struct spmi_pmic_arb *pmic_arb, u16 n) +{ + return pmic_arb->wr_base + 0x100 + 0x1000 * n; +} + static void __iomem * pmic_arb_irq_status_v1(struct spmi_pmic_arb *pmic_arb, u16 n) { @@ -1215,6 +1294,12 @@ pmic_arb_irq_status_v5(struct spmi_pmic_arb *pmic_arb, u16 n) return pmic_arb->wr_base + 0x104 + 0x10000 * n; } +static void __iomem * +pmic_arb_irq_status_v7(struct spmi_pmic_arb *pmic_arb, u16 n) +{ + return pmic_arb->wr_base + 0x104 + 0x1000 * n; +} + static void __iomem * pmic_arb_irq_clear_v1(struct spmi_pmic_arb *pmic_arb, u16 n) { @@ -1233,6 +1318,12 @@ pmic_arb_irq_clear_v5(struct spmi_pmic_arb *pmic_arb, u16 n) return pmic_arb->wr_base + 0x108 + 0x10000 * n; } +static void __iomem * +pmic_arb_irq_clear_v7(struct spmi_pmic_arb *pmic_arb, u16 n) +{ + return pmic_arb->wr_base + 0x108 + 0x1000 * n; +} + static u32 
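Aside (editorial, not part of the patch): the hunks above widen the APID
field of the composed hwirq from 9 to 10 bits because v7 arbiters can
expose up to 1024 APIDs, with the numbering space split between the two
SPMI bus instances. A minimal sketch of the packing the updated macros
imply -- illustrative only, helper names are hypothetical:

#include <stdint.h>

/* Pack SID, peripheral ID, IRQ ID and APID as to_irq_hwirq() does. */
static uint32_t example_to_hwirq(uint8_t sid, uint8_t periph_id,
				 uint8_t irq_id, uint16_t apid)
{
	return ((uint32_t)(sid & 0xF) << 28) |
	       ((uint32_t)(periph_id & 0xFF) << 20) |
	       ((uint32_t)(irq_id & 0x7) << 16) |
	       (apid & 0x3FF);		/* 10 bits now, 9 bits before */
}

/* Recover the APID, mirroring hwirq_to_apid(). */
static uint16_t example_hwirq_to_apid(uint32_t hwirq)
{
	return hwirq & 0x3FF;
}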
pmic_arb_apid_map_offset_v2(u16 n) { return 0x800 + 0x4 * n; @@ -1243,6 +1334,28 @@ static u32 pmic_arb_apid_map_offset_v5(u16 n) return 0x900 + 0x4 * n; } +static u32 pmic_arb_apid_map_offset_v7(u16 n) +{ + return 0x2000 + 0x4 * n; +} + +static void __iomem * +pmic_arb_apid_owner_v2(struct spmi_pmic_arb *pmic_arb, u16 n) +{ + return pmic_arb->cnfg + 0x700 + 0x4 * n; +} + +/* + * For arbiter version 7, APID ownership table registers have independent + * numbering space for each SPMI bus instance, so each is indexed starting from + * 0. + */ +static void __iomem * +pmic_arb_apid_owner_v7(struct spmi_pmic_arb *pmic_arb, u16 n) +{ + return pmic_arb->cnfg + 0x4 * (n - pmic_arb->base_apid); +} + static const struct pmic_arb_ver_ops pmic_arb_v1 = { .ver_str = "v1", .ppid_to_apid = pmic_arb_ppid_to_apid_v1, @@ -1254,6 +1367,7 @@ static const struct pmic_arb_ver_ops pmic_arb_v1 = { .irq_status = pmic_arb_irq_status_v1, .irq_clear = pmic_arb_irq_clear_v1, .apid_map_offset = pmic_arb_apid_map_offset_v2, + .apid_owner = pmic_arb_apid_owner_v2, }; static const struct pmic_arb_ver_ops pmic_arb_v2 = { @@ -1267,6 +1381,7 @@ static const struct pmic_arb_ver_ops pmic_arb_v2 = { .irq_status = pmic_arb_irq_status_v2, .irq_clear = pmic_arb_irq_clear_v2, .apid_map_offset = pmic_arb_apid_map_offset_v2, + .apid_owner = pmic_arb_apid_owner_v2, }; static const struct pmic_arb_ver_ops pmic_arb_v3 = { @@ -1280,6 +1395,7 @@ static const struct pmic_arb_ver_ops pmic_arb_v3 = { .irq_status = pmic_arb_irq_status_v2, .irq_clear = pmic_arb_irq_clear_v2, .apid_map_offset = pmic_arb_apid_map_offset_v2, + .apid_owner = pmic_arb_apid_owner_v2, }; static const struct pmic_arb_ver_ops pmic_arb_v5 = { @@ -1293,6 +1409,21 @@ static const struct pmic_arb_ver_ops pmic_arb_v5 = { .irq_status = pmic_arb_irq_status_v5, .irq_clear = pmic_arb_irq_clear_v5, .apid_map_offset = pmic_arb_apid_map_offset_v5, + .apid_owner = pmic_arb_apid_owner_v2, +}; + +static const struct pmic_arb_ver_ops pmic_arb_v7 = { + .ver_str = "v7", + .ppid_to_apid = pmic_arb_ppid_to_apid_v5, + .non_data_cmd = pmic_arb_non_data_cmd_v2, + .offset = pmic_arb_offset_v7, + .fmt_cmd = pmic_arb_fmt_cmd_v2, + .owner_acc_status = pmic_arb_owner_acc_status_v7, + .acc_enable = pmic_arb_acc_enable_v7, + .irq_status = pmic_arb_irq_status_v7, + .irq_clear = pmic_arb_irq_clear_v7, + .apid_map_offset = pmic_arb_apid_map_offset_v7, + .apid_owner = pmic_arb_apid_owner_v7, }; static const struct irq_domain_ops pmic_arb_irq_domain_ops = { @@ -1319,8 +1450,18 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) pmic_arb = spmi_controller_get_drvdata(ctrl); pmic_arb->spmic = ctrl; + /* + * Please don't replace this with devm_platform_ioremap_resource() or + * devm_ioremap_resource(). These both result in a call to + * devm_request_mem_region() which prevents multiple mappings of this + * register address range. SoCs with PMIC arbiter v7 may define two + * arbiter devices, for the two physical SPMI interfaces, which share + * some register address ranges (i.e. "core", "obsrvr", and "chnls"). + * Ensure that both devices probe successfully by calling devm_ioremap() + * which does not result in a devm_request_mem_region() call. 
+ */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); - core = devm_ioremap_resource(&ctrl->dev, res); + core = devm_ioremap(&ctrl->dev, res->start, resource_size(res)); if (IS_ERR(core)) { err = PTR_ERR(core); goto err_put_ctrl; @@ -1349,12 +1490,15 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) pmic_arb->ver_ops = &pmic_arb_v2; else if (hw_ver < PMIC_ARB_VERSION_V5_MIN) pmic_arb->ver_ops = &pmic_arb_v3; - else + else if (hw_ver < PMIC_ARB_VERSION_V7_MIN) pmic_arb->ver_ops = &pmic_arb_v5; + else + pmic_arb->ver_ops = &pmic_arb_v7; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "obsrvr"); - pmic_arb->rd_base = devm_ioremap_resource(&ctrl->dev, res); + pmic_arb->rd_base = devm_ioremap(&ctrl->dev, res->start, + resource_size(res)); if (IS_ERR(pmic_arb->rd_base)) { err = PTR_ERR(pmic_arb->rd_base); goto err_put_ctrl; @@ -1362,13 +1506,69 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "chnls"); - pmic_arb->wr_base = devm_ioremap_resource(&ctrl->dev, res); + pmic_arb->wr_base = devm_ioremap(&ctrl->dev, res->start, + resource_size(res)); if (IS_ERR(pmic_arb->wr_base)) { err = PTR_ERR(pmic_arb->wr_base); goto err_put_ctrl; } } + pmic_arb->max_periphs = PMIC_ARB_MAX_PERIPHS; + + if (hw_ver >= PMIC_ARB_VERSION_V7_MIN) { + pmic_arb->max_periphs = PMIC_ARB_MAX_PERIPHS_V7; + /* Optional property for v7: */ + of_property_read_u32(pdev->dev.of_node, "qcom,bus-id", + &pmic_arb->bus_instance); + if (pmic_arb->bus_instance > 1) { + err = -EINVAL; + dev_err(&pdev->dev, "invalid bus instance (%u) specified\n", + pmic_arb->bus_instance); + goto err_put_ctrl; + } + + if (pmic_arb->bus_instance == 0) { + pmic_arb->base_apid = 0; + pmic_arb->apid_count = + readl_relaxed(core + PMIC_ARB_FEATURES) & + PMIC_ARB_FEATURES_PERIPH_MASK; + } else { + pmic_arb->base_apid = + readl_relaxed(core + PMIC_ARB_FEATURES) & + PMIC_ARB_FEATURES_PERIPH_MASK; + pmic_arb->apid_count = + readl_relaxed(core + PMIC_ARB_FEATURES1) & + PMIC_ARB_FEATURES_PERIPH_MASK; + } + + if (pmic_arb->base_apid + pmic_arb->apid_count > pmic_arb->max_periphs) { + err = -EINVAL; + dev_err(&pdev->dev, "Unsupported APID count %d detected\n", + pmic_arb->base_apid + pmic_arb->apid_count); + goto err_put_ctrl; + } + } else if (hw_ver >= PMIC_ARB_VERSION_V5_MIN) { + pmic_arb->base_apid = 0; + pmic_arb->apid_count = readl_relaxed(core + PMIC_ARB_FEATURES) & + PMIC_ARB_FEATURES_PERIPH_MASK; + + if (pmic_arb->apid_count > pmic_arb->max_periphs) { + err = -EINVAL; + dev_err(&pdev->dev, "Unsupported APID count %d detected\n", + pmic_arb->apid_count); + goto err_put_ctrl; + } + } + + pmic_arb->apid_data = devm_kcalloc(&ctrl->dev, pmic_arb->max_periphs, + sizeof(*pmic_arb->apid_data), + GFP_KERNEL); + if (!pmic_arb->apid_data) { + err = -ENOMEM; + goto err_put_ctrl; + } + dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n", pmic_arb->ver_ops->ver_str, hw_ver); @@ -1420,7 +1620,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) } pmic_arb->ee = ee; - mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS, + mapping_table = devm_kcalloc(&ctrl->dev, pmic_arb->max_periphs, sizeof(*mapping_table), GFP_KERNEL); if (!mapping_table) { err = -ENOMEM; @@ -1431,7 +1631,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) /* Initialize max_apid/min_apid to the opposite bounds, during * the irq domain translation, we are sure to update these */ pmic_arb->max_apid = 0; - pmic_arb->min_apid = PMIC_ARB_MAX_PERIPHS - 1; + 
pmic_arb->min_apid = pmic_arb->max_periphs - 1; platform_set_drvdata(pdev, ctrl); raw_spin_lock_init(&pmic_arb->lock); diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c index b6abd3770e81cdb1a567568cad4860b68e766d28..b4e19174bef2e4a243d0b7dbb6af73c57033cb2c 100644 --- a/drivers/staging/emxx_udc/emxx_udc.c +++ b/drivers/staging/emxx_udc/emxx_udc.c @@ -1004,10 +1004,7 @@ static int _nbu2ss_in_dma(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, /* MAX Packet Size */ mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPN_MPKT; - if ((DMA_MAX_COUNT * mpkt) < length) - i_write_length = DMA_MAX_COUNT * mpkt; - else - i_write_length = length; + i_write_length = min(DMA_MAX_COUNT * mpkt, length); /*------------------------------------------------------------*/ /* Number of transmission packets */ diff --git a/drivers/staging/fieldbus/dev_core.c b/drivers/staging/fieldbus/dev_core.c index 5aab734606eae6dd872d5e76fc71b23552dc814b..5f54f2674bd19b11c7f0765ad79010868c281be1 100644 --- a/drivers/staging/fieldbus/dev_core.c +++ b/drivers/staging/fieldbus/dev_core.c @@ -28,7 +28,7 @@ static ssize_t online_show(struct device *dev, struct device_attribute *attr, { struct fieldbus_dev *fb = dev_get_drvdata(dev); - return sprintf(buf, "%d\n", !!fb->online); + return sysfs_emit(buf, "%d\n", !!fb->online); } static DEVICE_ATTR_RO(online); @@ -39,7 +39,7 @@ static ssize_t enabled_show(struct device *dev, struct device_attribute *attr, if (!fb->enable_get) return -EINVAL; - return sprintf(buf, "%d\n", !!fb->enable_get(fb)); + return sysfs_emit(buf, "%d\n", !!fb->enable_get(fb)); } static ssize_t enabled_store(struct device *dev, struct device_attribute *attr, @@ -66,11 +66,8 @@ static ssize_t card_name_show(struct device *dev, struct device_attribute *attr, { struct fieldbus_dev *fb = dev_get_drvdata(dev); - /* - * card_name was provided by child driver, could potentially be long. - * protect against buffer overrun. - */ - return snprintf(buf, PAGE_SIZE, "%s\n", fb->card_name); + /* card_name was provided by child driver. 
*/ + return sysfs_emit(buf, "%s\n", fb->card_name); } static DEVICE_ATTR_RO(card_name); @@ -79,7 +76,7 @@ static ssize_t read_area_size_show(struct device *dev, { struct fieldbus_dev *fb = dev_get_drvdata(dev); - return sprintf(buf, "%zu\n", fb->read_area_sz); + return sysfs_emit(buf, "%zu\n", fb->read_area_sz); } static DEVICE_ATTR_RO(read_area_size); @@ -88,7 +85,7 @@ static ssize_t write_area_size_show(struct device *dev, { struct fieldbus_dev *fb = dev_get_drvdata(dev); - return sprintf(buf, "%zu\n", fb->write_area_sz); + return sysfs_emit(buf, "%zu\n", fb->write_area_sz); } static DEVICE_ATTR_RO(write_area_size); @@ -116,7 +113,7 @@ static ssize_t fieldbus_type_show(struct device *dev, break; } - return sprintf(buf, "%s\n", t); + return sysfs_emit(buf, "%s\n", t); } static DEVICE_ATTR_RO(fieldbus_type); diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c index cc6d80554c98297dfa5325c48c4cfe9e79515948..e1a84d6020f482d0a9e4e0dbf26462cb9775d498 100644 --- a/drivers/staging/gdm724x/gdm_tty.c +++ b/drivers/staging/gdm724x/gdm_tty.c @@ -21,7 +21,10 @@ #define MUX_TX_MAX_SIZE 2048 -#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count) +static inline bool gdm_tty_ready(struct gdm *gdm) +{ + return gdm && gdm->tty_dev && gdm->port.count; +} static struct tty_driver *gdm_driver[TTY_MAX_COUNT]; static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR]; @@ -113,7 +116,7 @@ static int gdm_tty_recv_complete(void *data, { struct gdm *gdm = tty_dev->gdm[index]; - if (!GDM_TTY_READY(gdm)) { + if (!gdm_tty_ready(gdm)) { if (complete == RECV_PACKET_PROCESS_COMPLETE) gdm->tty_dev->recv_func(gdm->tty_dev->priv_dev, gdm_tty_recv_complete); @@ -140,7 +143,7 @@ static void gdm_tty_send_complete(void *arg) { struct gdm *gdm = arg; - if (!GDM_TTY_READY(gdm)) + if (!gdm_tty_ready(gdm)) return; tty_port_tty_wakeup(&gdm->port); @@ -154,7 +157,7 @@ static int gdm_tty_write(struct tty_struct *tty, const unsigned char *buf, int sent_len = 0; int sending_len = 0; - if (!GDM_TTY_READY(gdm)) + if (!gdm_tty_ready(gdm)) return -ENODEV; if (!len) @@ -181,7 +184,7 @@ static unsigned int gdm_tty_write_room(struct tty_struct *tty) { struct gdm *gdm = tty->driver_data; - if (!GDM_TTY_READY(gdm)) + if (!gdm_tty_ready(gdm)) return 0; return WRITE_SIZE; diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c index 4c42e393cd3dbd3c29dac8fe8fb075265eceafef..d7ad51ff60c552d54c99769ac5965dbdde589eed 100644 --- a/drivers/staging/greybus/tools/loopback_test.c +++ b/drivers/staging/greybus/tools/loopback_test.c @@ -239,7 +239,6 @@ static void show_loopback_devices(struct loopback_test *t) for (i = 0; i < t->device_count; i++) printf("device[%d] = %s\n", i, t->devices[i].name); - } int open_sysfs(const char *sys_pfx, const char *node, int flags) @@ -274,7 +273,6 @@ float read_sysfs_float_fd(int fd, const char *sys_pfx, const char *node) char buf[SYSFS_MAX_INT]; if (read(fd, buf, sizeof(buf)) < 0) { - fprintf(stderr, "unable to read from %s%s %s\n", sys_pfx, node, strerror(errno)); close(fd); @@ -367,7 +365,6 @@ static int get_results(struct loopback_test *t) r->apbridge_unipro_latency_max - r->apbridge_unipro_latency_min; r->gbphy_firmware_latency_jitter = r->gbphy_firmware_latency_max - r->gbphy_firmware_latency_min; - } /*calculate the aggregate results of all enabled devices */ @@ -407,7 +404,6 @@ static int get_results(struct loopback_test *t) r->apbridge_unipro_latency_max - r->apbridge_unipro_latency_min; 
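Aside: the fieldbus hunks above are part of the tree-wide move from
sprintf()/snprintf() to sysfs_emit() in sysfs show() callbacks;
sysfs_emit() already knows the buffer is a full PAGE_SIZE page, so the
manual bounds handling can go. A hypothetical attribute showing the
resulting pattern:

#include <linux/device.h>
#include <linux/sysfs.h>

/* example_value is a made-up datum; the callback shape is what matters. */
static int example_value = 1;

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* sysfs_emit() bounds the write to the PAGE_SIZE sysfs buffer. */
	return sysfs_emit(buf, "%d\n", example_value);
}
static DEVICE_ATTR_RO(example);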
r->gbphy_firmware_latency_jitter = r->gbphy_firmware_latency_max - r->gbphy_firmware_latency_min; - } return 0; @@ -536,7 +532,6 @@ static int log_results(struct loopback_test *t) fprintf(stderr, "unable to open %s for appending\n", file_name); abort(); } - } for (i = 0; i < t->device_count; i++) { if (!device_enabled(t, i)) @@ -550,10 +545,8 @@ static int log_results(struct loopback_test *t) if (ret == -1) fprintf(stderr, "unable to write %d bytes to csv.\n", len); } - } - if (t->aggregate_output) { len = format_output(t, &t->aggregate_results, "aggregate", data, sizeof(data), &tm); @@ -675,6 +668,7 @@ err: static int close_poll_files(struct loopback_test *t) { int i; + for (i = 0; i < t->poll_count; i++) close(t->fds[i].fd); @@ -740,7 +734,6 @@ static int wait_for_complete(struct loopback_test *t) ts = &t->poll_timeout; while (1) { - ret = ppoll(t->fds, t->poll_count, ts, &mask_old); if (ret <= 0) { stop_tests(t); @@ -780,7 +773,6 @@ static void prepare_devices(struct loopback_test *t) if (t->stop_all || device_enabled(t, i)) write_sysfs_val(t->devices[i].sysfs_entry, "type", 0); - for (i = 0; i < t->device_count; i++) { if (!device_enabled(t, i)) continue; @@ -823,7 +815,6 @@ static int start(struct loopback_test *t) return 0; } - void loopback_run(struct loopback_test *t) { int i; @@ -852,7 +843,6 @@ void loopback_run(struct loopback_test *t) if (ret) goto err; - get_results(t); log_results(t); @@ -861,7 +851,6 @@ void loopback_run(struct loopback_test *t) err: printf("Error running test\n"); - return; } static int sanity_check(struct loopback_test *t) @@ -881,10 +870,8 @@ static int sanity_check(struct loopback_test *t) fprintf(stderr, "Bad device mask %x\n", (1 << i)); return -1; } - } - return 0; } diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c index 62d5397ff1f98a5809fa96064fac53e135dac3cc..c0e4c9266b5f4aa987d5cde8eab20078fa85a385 100644 --- a/drivers/staging/iio/accel/adis16203.c +++ b/drivers/staging/iio/accel/adis16203.c @@ -285,7 +285,7 @@ static int adis16203_probe(struct spi_device *spi) return ret; /* Get the device into a sane initial state */ - ret = adis_initial_startup(st); + ret = __adis_initial_startup(st); if (ret) return ret; diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c index bca857eef92e271b200b54ddfd3480f4af37dcad..337492785f04c98df122b4f4844f739a7ed707ae 100644 --- a/drivers/staging/iio/accel/adis16240.c +++ b/drivers/staging/iio/accel/adis16240.c @@ -414,7 +414,7 @@ static int adis16240_probe(struct spi_device *spi) return ret; /* Get the device into a sane initial state */ - ret = adis_initial_startup(st); + ret = __adis_initial_startup(st); if (ret) return ret; diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c index 5543cc90970704b322b834202b0b83280a0d2fdc..7e3d1a6f30ba96dc25591e4b889fba01b7b96740 100644 --- a/drivers/staging/iio/addac/adt7316-i2c.c +++ b/drivers/staging/iio/addac/adt7316-i2c.c @@ -93,9 +93,9 @@ static int adt7316_i2c_multi_write(void *client, u8 reg, u8 count, u8 *data) * device probe and remove */ -static int adt7316_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int adt7316_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); struct adt7316_bus bus = { .client = client, .irq = client->irq, @@ -138,7 +138,7 @@ static struct i2c_driver adt7316_driver = { .of_match_table = adt7316_of_match, .pm = ADT7316_PM_OPS, }, - .probe = 
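Aside: the i2c conversions here and below move staging drivers from
.probe(), which passed a struct i2c_device_id, to the then-transitional
.probe_new(), which does not; drivers that still need the matched id
fetch it with i2c_client_get_device_id(). A skeletal, hypothetical
driver after such a conversion:

#include <linux/i2c.h>
#include <linux/module.h>

static int example_probe(struct i2c_client *client)
{
	/* Recover the matched id entry when it is still needed; it may
	 * be NULL if the device matched by other means (e.g. OF). */
	const struct i2c_device_id *id = i2c_client_get_device_id(client);

	dev_info(&client->dev, "probed (id=%s)\n", id ? id->name : "none");
	return 0;
}

static const struct i2c_device_id example_id[] = {
	{ "example", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, example_id);

static struct i2c_driver example_driver = {
	.driver = { .name = "example" },
	.probe_new = example_probe,
	.id_table = example_id,
};
module_i2c_driver(example_driver);

MODULE_LICENSE("GPL");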
adt7316_i2c_probe, + .probe_new = adt7316_i2c_probe, .id_table = adt7316_i2c_id, }; module_i2c_driver(adt7316_driver); diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c index 2b4267a87e65e153e9b5fa55cf17d3b9e3677720..285df0e489a62503daa509507787be8b48450d8d 100644 --- a/drivers/staging/iio/frequency/ad9834.c +++ b/drivers/staging/iio/frequency/ad9834.c @@ -331,11 +331,9 @@ static IIO_DEV_ATTR_PHASE(0, 1, 0200, NULL, ad9834_write, AD9834_REG_PHASE1); static IIO_DEV_ATTR_PHASESYMBOL(0, 0200, NULL, ad9834_write, AD9834_PSEL); static IIO_CONST_ATTR_PHASE_SCALE(0, "0.0015339808"); /* 2PI/2^12 rad*/ -static IIO_DEV_ATTR_PINCONTROL_EN(0, 0200, NULL, - ad9834_write, AD9834_PIN_SW); +static IIO_DEV_ATTR_PINCONTROL_EN(0, 0200, NULL, ad9834_write, AD9834_PIN_SW); static IIO_DEV_ATTR_OUT_ENABLE(0, 0200, NULL, ad9834_write, AD9834_RESET); -static IIO_DEV_ATTR_OUTY_ENABLE(0, 1, 0200, NULL, - ad9834_write, AD9834_OPBITEN); +static IIO_DEV_ATTR_OUTY_ENABLE(0, 1, 0200, NULL, ad9834_write, AD9834_OPBITEN); static IIO_DEV_ATTR_OUT_WAVETYPE(0, 0, ad9834_store_wavetype, 0); static IIO_DEV_ATTR_OUT_WAVETYPE(0, 1, ad9834_store_wavetype, 1); diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index f177b20f0f2d983f4244228112d3051f60e9a60e..b3152f7153fbb1cf2199c32b2e384c18d2d87c36 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -674,9 +674,9 @@ static void ad5933_clk_disable(void *data) clk_disable_unprepare(st->mclk); } -static int ad5933_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ad5933_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_client_get_device_id(client); int ret; struct ad5933_state *st; struct iio_dev *indio_dev; @@ -781,7 +781,7 @@ static struct i2c_driver ad5933_driver = { .name = "ad5933", .of_match_table = ad5933_of_match, }, - .probe = ad5933_probe, + .probe_new = ad5933_probe, .id_table = ad5933_id, }; module_i2c_driver(ad5933_driver); diff --git a/drivers/staging/iio/meter/ade7854-i2c.c b/drivers/staging/iio/meter/ade7854-i2c.c index a9a06e8dda51f0d2c00dbf999696dc4f3c75217b..572d714eb0ddb7219000af72c59b519d0299817e 100644 --- a/drivers/staging/iio/meter/ade7854-i2c.c +++ b/drivers/staging/iio/meter/ade7854-i2c.c @@ -61,7 +61,10 @@ static int ade7854_i2c_write_reg(struct device *dev, unlock: mutex_unlock(&st->buf_lock); - return ret < 0 ? ret : 0; + if (ret < 0) + return ret; + + return 0; } static int ade7854_i2c_read_reg(struct device *dev, @@ -109,8 +112,7 @@ unlock: return ret; } -static int ade7854_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ade7854_i2c_probe(struct i2c_client *client) { struct ade7854_state *st; struct iio_dev *indio_dev; @@ -141,7 +143,7 @@ static struct i2c_driver ade7854_i2c_driver = { .driver = { .name = "ade7854", }, - .probe = ade7854_i2c_probe, + .probe_new = ade7854_i2c_probe, .id_table = ade7854_id, }; module_i2c_driver(ade7854_i2c_driver); diff --git a/drivers/staging/ks7010/TODO b/drivers/staging/ks7010/TODO index ab6f39175d992ab07cfb46572e5f2f76f319b5ac..80c97543b97728568466fff5cfe2fc4ab0dac4c7 100644 --- a/drivers/staging/ks7010/TODO +++ b/drivers/staging/ks7010/TODO @@ -27,6 +27,9 @@ Now the TODOs: - fix the 'card removal' event when card is inserted when booting - check what other upstream wireless mechanisms can be used instead of the custom ones here +- Switch to use LIB80211. 
+- Switch to use MAC80211. +- Switch to use CFG80211. Please send any patches to: Greg Kroah-Hartman diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c index 7e8d37c169f0feb284e5fa7ff9214a444155e73f..044c807ca022364f5c0f8978d6e49cb93b483291 100644 --- a/drivers/staging/ks7010/ks_wlan_net.c +++ b/drivers/staging/ks7010/ks_wlan_net.c @@ -1763,8 +1763,8 @@ static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev) } static int ks_wlan_set_stop_request(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); @@ -1772,7 +1772,7 @@ static int ks_wlan_set_stop_request(struct net_device *dev, return -EPERM; /* for SLEEP MODE */ - if (!(*uwrq)) + if (!(uwrq->mode)) return -EINVAL; hostif_sme_enqueue(priv, SME_STOP_REQUEST); @@ -1786,7 +1786,9 @@ static int ks_wlan_set_mlme(struct net_device *dev, { struct ks_wlan_private *priv = netdev_priv(dev); struct iw_mlme *mlme = (struct iw_mlme *)extra; - __u32 mode = 1; + union iwreq_data uwrq; + + uwrq.mode = 1; if (priv->sleep_mode == SLP_SLEEP) return -EPERM; @@ -1799,13 +1801,14 @@ static int ks_wlan_set_mlme(struct net_device *dev, mlme->reason_code == WLAN_REASON_MIC_FAILURE) return 0; - return ks_wlan_set_stop_request(dev, NULL, &mode, NULL); + return ks_wlan_set_stop_request(dev, NULL, &uwrq, NULL); } static int ks_wlan_get_firmware_version(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *uwrq, char *extra) { + struct iw_point *dwrq = &uwrq->data; struct ks_wlan_private *priv = netdev_priv(dev); dwrq->length = priv->version_size + 1; @@ -1814,8 +1817,8 @@ static int ks_wlan_get_firmware_version(struct net_device *dev, } static int ks_wlan_set_preamble(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); @@ -1823,17 +1826,17 @@ static int ks_wlan_set_preamble(struct net_device *dev, return -EPERM; /* for SLEEP MODE */ - if (*uwrq != LONG_PREAMBLE && *uwrq != SHORT_PREAMBLE) + if (uwrq->mode != LONG_PREAMBLE && uwrq->mode != SHORT_PREAMBLE) return -EINVAL; - priv->reg.preamble = *uwrq; + priv->reg.preamble = uwrq->mode; priv->need_commit |= SME_MODE_SET; return -EINPROGRESS; /* Call commit handler */ } static int ks_wlan_get_preamble(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); @@ -1841,37 +1844,37 @@ static int ks_wlan_get_preamble(struct net_device *dev, return -EPERM; /* for SLEEP MODE */ - *uwrq = priv->reg.preamble; + uwrq->mode = priv->reg.preamble; return 0; } static int ks_wlan_set_power_mgmt(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - if (*uwrq != POWER_MGMT_ACTIVE && - *uwrq != POWER_MGMT_SAVE1 && - *uwrq != POWER_MGMT_SAVE2) + if (uwrq->mode != POWER_MGMT_ACTIVE && + uwrq->mode != POWER_MGMT_SAVE1 && + uwrq->mode != POWER_MGMT_SAVE2) return -EINVAL; - if ((*uwrq == POWER_MGMT_SAVE1 || *uwrq == POWER_MGMT_SAVE2) && + if ((uwrq->mode == POWER_MGMT_SAVE1 || 
uwrq->mode == POWER_MGMT_SAVE2) && (priv->reg.operation_mode != MODE_INFRASTRUCTURE)) return -EINVAL; - priv->reg.power_mgmt = *uwrq; + priv->reg.power_mgmt = uwrq->mode; hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST); return 0; } static int ks_wlan_get_power_mgmt(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); @@ -1879,13 +1882,13 @@ static int ks_wlan_get_power_mgmt(struct net_device *dev, return -EPERM; /* for SLEEP MODE */ - *uwrq = priv->reg.power_mgmt; + uwrq->mode = priv->reg.power_mgmt; return 0; } static int ks_wlan_set_scan_type(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); @@ -1893,39 +1896,39 @@ static int ks_wlan_set_scan_type(struct net_device *dev, return -EPERM; /* for SLEEP MODE */ - if (*uwrq != ACTIVE_SCAN && *uwrq != PASSIVE_SCAN) + if (uwrq->mode != ACTIVE_SCAN && uwrq->mode != PASSIVE_SCAN) return -EINVAL; - priv->reg.scan_type = *uwrq; + priv->reg.scan_type = uwrq->mode; return 0; } static int ks_wlan_get_scan_type(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); if (priv->sleep_mode == SLP_SLEEP) return -EPERM; /* for SLEEP MODE */ - *uwrq = priv->reg.scan_type; + uwrq->mode = priv->reg.scan_type; return 0; } static int ks_wlan_set_beacon_lost(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); if (priv->sleep_mode == SLP_SLEEP) return -EPERM; /* for SLEEP MODE */ - if (*uwrq > BEACON_LOST_COUNT_MAX) + if (uwrq->mode > BEACON_LOST_COUNT_MAX) return -EINVAL; - priv->reg.beacon_lost_count = *uwrq; + priv->reg.beacon_lost_count = uwrq->mode; if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) { priv->need_commit |= SME_MODE_SET; @@ -1936,101 +1939,101 @@ static int ks_wlan_set_beacon_lost(struct net_device *dev, } static int ks_wlan_get_beacon_lost(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); if (priv->sleep_mode == SLP_SLEEP) return -EPERM; /* for SLEEP MODE */ - *uwrq = priv->reg.beacon_lost_count; + uwrq->mode = priv->reg.beacon_lost_count; return 0; } static int ks_wlan_set_phy_type(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); if (priv->sleep_mode == SLP_SLEEP) return -EPERM; - if (*uwrq != D_11B_ONLY_MODE && - *uwrq != D_11G_ONLY_MODE && - *uwrq != D_11BG_COMPATIBLE_MODE) + if (uwrq->mode != D_11B_ONLY_MODE && + uwrq->mode != D_11G_ONLY_MODE && + uwrq->mode != D_11BG_COMPATIBLE_MODE) return -EINVAL; /* for SLEEP MODE */ - priv->reg.phy_type = *uwrq; + priv->reg.phy_type = uwrq->mode; priv->need_commit |= SME_MODE_SET; return -EINPROGRESS; /* Call commit handler */ } static int ks_wlan_get_phy_type(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data 
*uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); if (priv->sleep_mode == SLP_SLEEP) return -EPERM; /* for SLEEP MODE */ - *uwrq = priv->reg.phy_type; + uwrq->mode = priv->reg.phy_type; return 0; } static int ks_wlan_set_cts_mode(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); if (priv->sleep_mode == SLP_SLEEP) return -EPERM; /* for SLEEP MODE */ - if (*uwrq != CTS_MODE_FALSE && *uwrq != CTS_MODE_TRUE) + if (uwrq->mode != CTS_MODE_FALSE && uwrq->mode != CTS_MODE_TRUE) return -EINVAL; - priv->reg.cts_mode = (*uwrq == CTS_MODE_FALSE) ? *uwrq : + priv->reg.cts_mode = (uwrq->mode == CTS_MODE_FALSE) ? uwrq->mode : (priv->reg.phy_type == D_11G_ONLY_MODE || priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) ? - *uwrq : !*uwrq; + uwrq->mode : !uwrq->mode; priv->need_commit |= SME_MODE_SET; return -EINPROGRESS; /* Call commit handler */ } static int ks_wlan_get_cts_mode(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); if (priv->sleep_mode == SLP_SLEEP) return -EPERM; /* for SLEEP MODE */ - *uwrq = priv->reg.cts_mode; + uwrq->mode = priv->reg.cts_mode; return 0; } static int ks_wlan_set_sleep_mode(struct net_device *dev, struct iw_request_info *info, - __u32 *uwrq, char *extra) + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); - if (*uwrq != SLP_SLEEP && - *uwrq != SLP_ACTIVE) { - netdev_err(dev, "SET_SLEEP_MODE %d error\n", *uwrq); + if (uwrq->mode != SLP_SLEEP && + uwrq->mode != SLP_ACTIVE) { + netdev_err(dev, "SET_SLEEP_MODE %d error\n", uwrq->mode); return -EINVAL; } - priv->sleep_mode = *uwrq; + priv->sleep_mode = uwrq->mode; netdev_info(dev, "SET_SLEEP_MODE %d\n", priv->sleep_mode); - if (*uwrq == SLP_SLEEP) + if (uwrq->mode == SLP_SLEEP) hostif_sme_enqueue(priv, SME_STOP_REQUEST); hostif_sme_enqueue(priv, SME_SLEEP_REQUEST); @@ -2040,52 +2043,53 @@ static int ks_wlan_set_sleep_mode(struct net_device *dev, static int ks_wlan_get_sleep_mode(struct net_device *dev, struct iw_request_info *info, - __u32 *uwrq, char *extra) + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); - *uwrq = priv->sleep_mode; + uwrq->mode = priv->sleep_mode; return 0; } static int ks_wlan_set_wps_enable(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); if (priv->sleep_mode == SLP_SLEEP) return -EPERM; /* for SLEEP MODE */ - if (*uwrq != 0 && *uwrq != 1) + if (uwrq->mode != 0 && uwrq->mode != 1) return -EINVAL; - priv->wps.wps_enabled = *uwrq; + priv->wps.wps_enabled = uwrq->mode; hostif_sme_enqueue(priv, SME_WPS_ENABLE_REQUEST); return 0; } static int ks_wlan_get_wps_enable(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); if (priv->sleep_mode == SLP_SLEEP) return -EPERM; /* for SLEEP MODE */ - *uwrq = priv->wps.wps_enabled; - netdev_info(dev, "return=%d\n", *uwrq); + uwrq->mode = priv->wps.wps_enabled; + netdev_info(dev, "return=%d\n", uwrq->mode); return 0; } static int ks_wlan_set_wps_probe_req(struct 
net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) + union iwreq_data *uwrq, char *extra) { + struct iw_point *dwrq = &uwrq->data; u8 *p = extra; unsigned char len; struct ks_wlan_private *priv = netdev_priv(dev); @@ -2114,76 +2118,76 @@ static int ks_wlan_set_wps_probe_req(struct net_device *dev, } static int ks_wlan_set_tx_gain(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); if (priv->sleep_mode == SLP_SLEEP) return -EPERM; /* for SLEEP MODE */ - if (*uwrq > 0xFF) + if (uwrq->mode > 0xFF) return -EINVAL; - priv->gain.tx_gain = (u8)*uwrq; + priv->gain.tx_gain = (u8)uwrq->mode; priv->gain.tx_mode = (priv->gain.tx_gain < 0xFF) ? 1 : 0; hostif_sme_enqueue(priv, SME_SET_GAIN); return 0; } static int ks_wlan_get_tx_gain(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); if (priv->sleep_mode == SLP_SLEEP) return -EPERM; /* for SLEEP MODE */ - *uwrq = priv->gain.tx_gain; + uwrq->mode = priv->gain.tx_gain; hostif_sme_enqueue(priv, SME_GET_GAIN); return 0; } static int ks_wlan_set_rx_gain(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); if (priv->sleep_mode == SLP_SLEEP) return -EPERM; /* for SLEEP MODE */ - if (*uwrq > 0xFF) + if (uwrq->mode > 0xFF) return -EINVAL; - priv->gain.rx_gain = (u8)*uwrq; + priv->gain.rx_gain = (u8)uwrq->mode; priv->gain.rx_mode = (priv->gain.rx_gain < 0xFF) ? 
1 : 0; hostif_sme_enqueue(priv, SME_SET_GAIN); return 0; } static int ks_wlan_get_rx_gain(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); if (priv->sleep_mode == SLP_SLEEP) return -EPERM; /* for SLEEP MODE */ - *uwrq = priv->gain.rx_gain; + uwrq->mode = priv->gain.rx_gain; hostif_sme_enqueue(priv, SME_GET_GAIN); return 0; } static int ks_wlan_get_eeprom_cksum(struct net_device *dev, - struct iw_request_info *info, __u32 *uwrq, - char *extra) + struct iw_request_info *info, + union iwreq_data *uwrq, char *extra) { struct ks_wlan_private *priv = netdev_priv(dev); - *uwrq = priv->eeprom_checksum; + uwrq->mode = priv->eeprom_checksum; return 0; } @@ -2302,7 +2306,7 @@ static void print_hif_event(struct net_device *dev, int event) /* get host command history */ static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info, - __u32 *uwrq, char *extra) + union iwreq_data *uwrq, char *extra) { int i, event; struct ks_wlan_private *priv = netdev_priv(dev); @@ -2409,38 +2413,38 @@ static const iw_handler ks_wlan_handler[] = { /* private_handler */ static const iw_handler ks_wlan_private_handler[] = { - (iw_handler)NULL, /* 0 */ - (iw_handler)NULL, /* 1, KS_WLAN_GET_DRIVER_VERSION */ - (iw_handler)NULL, /* 2 */ - (iw_handler)ks_wlan_get_firmware_version,/* 3 KS_WLAN_GET_FIRM_VERSION */ - (iw_handler)ks_wlan_set_wps_enable, /* 4 KS_WLAN_SET_WPS_ENABLE */ - (iw_handler)ks_wlan_get_wps_enable, /* 5 KS_WLAN_GET_WPS_ENABLE */ - (iw_handler)ks_wlan_set_wps_probe_req, /* 6 KS_WLAN_SET_WPS_PROBE_REQ */ - (iw_handler)ks_wlan_get_eeprom_cksum, /* 7 KS_WLAN_GET_CONNECT */ - (iw_handler)ks_wlan_set_preamble, /* 8 KS_WLAN_SET_PREAMBLE */ - (iw_handler)ks_wlan_get_preamble, /* 9 KS_WLAN_GET_PREAMBLE */ - (iw_handler)ks_wlan_set_power_mgmt, /* 10 KS_WLAN_SET_POWER_SAVE */ - (iw_handler)ks_wlan_get_power_mgmt, /* 11 KS_WLAN_GET_POWER_SAVE */ - (iw_handler)ks_wlan_set_scan_type, /* 12 KS_WLAN_SET_SCAN_TYPE */ - (iw_handler)ks_wlan_get_scan_type, /* 13 KS_WLAN_GET_SCAN_TYPE */ - (iw_handler)ks_wlan_set_rx_gain, /* 14 KS_WLAN_SET_RX_GAIN */ - (iw_handler)ks_wlan_get_rx_gain, /* 15 KS_WLAN_GET_RX_GAIN */ - (iw_handler)ks_wlan_hostt, /* 16 KS_WLAN_HOSTT */ - (iw_handler)NULL, /* 17 */ - (iw_handler)ks_wlan_set_beacon_lost, /* 18 KS_WLAN_SET_BECAN_LOST */ - (iw_handler)ks_wlan_get_beacon_lost, /* 19 KS_WLAN_GET_BECAN_LOST */ - (iw_handler)ks_wlan_set_tx_gain, /* 20 KS_WLAN_SET_TX_GAIN */ - (iw_handler)ks_wlan_get_tx_gain, /* 21 KS_WLAN_GET_TX_GAIN */ - (iw_handler)ks_wlan_set_phy_type, /* 22 KS_WLAN_SET_PHY_TYPE */ - (iw_handler)ks_wlan_get_phy_type, /* 23 KS_WLAN_GET_PHY_TYPE */ - (iw_handler)ks_wlan_set_cts_mode, /* 24 KS_WLAN_SET_CTS_MODE */ - (iw_handler)ks_wlan_get_cts_mode, /* 25 KS_WLAN_GET_CTS_MODE */ - (iw_handler)NULL, /* 26 */ - (iw_handler)NULL, /* 27 */ - (iw_handler)ks_wlan_set_sleep_mode, /* 28 KS_WLAN_SET_SLEEP_MODE */ - (iw_handler)ks_wlan_get_sleep_mode, /* 29 KS_WLAN_GET_SLEEP_MODE */ - (iw_handler)NULL, /* 30 */ - (iw_handler)NULL, /* 31 */ + NULL, /* 0 */ + NULL, /* 1, KS_WLAN_GET_DRIVER_VERSION */ + NULL, /* 2 */ + ks_wlan_get_firmware_version, /* 3 KS_WLAN_GET_FIRM_VERSION */ + ks_wlan_set_wps_enable, /* 4 KS_WLAN_SET_WPS_ENABLE */ + ks_wlan_get_wps_enable, /* 5 KS_WLAN_GET_WPS_ENABLE */ + ks_wlan_set_wps_probe_req, /* 6 KS_WLAN_SET_WPS_PROBE_REQ */ + ks_wlan_get_eeprom_cksum, /* 7 KS_WLAN_GET_CONNECT */ + ks_wlan_set_preamble, 
/* 8 KS_WLAN_SET_PREAMBLE */ + ks_wlan_get_preamble, /* 9 KS_WLAN_GET_PREAMBLE */ + ks_wlan_set_power_mgmt, /* 10 KS_WLAN_SET_POWER_SAVE */ + ks_wlan_get_power_mgmt, /* 11 KS_WLAN_GET_POWER_SAVE */ + ks_wlan_set_scan_type, /* 12 KS_WLAN_SET_SCAN_TYPE */ + ks_wlan_get_scan_type, /* 13 KS_WLAN_GET_SCAN_TYPE */ + ks_wlan_set_rx_gain, /* 14 KS_WLAN_SET_RX_GAIN */ + ks_wlan_get_rx_gain, /* 15 KS_WLAN_GET_RX_GAIN */ + ks_wlan_hostt, /* 16 KS_WLAN_HOSTT */ + NULL, /* 17 */ + ks_wlan_set_beacon_lost, /* 18 KS_WLAN_SET_BECAN_LOST */ + ks_wlan_get_beacon_lost, /* 19 KS_WLAN_GET_BECAN_LOST */ + ks_wlan_set_tx_gain, /* 20 KS_WLAN_SET_TX_GAIN */ + ks_wlan_get_tx_gain, /* 21 KS_WLAN_GET_TX_GAIN */ + ks_wlan_set_phy_type, /* 22 KS_WLAN_SET_PHY_TYPE */ + ks_wlan_get_phy_type, /* 23 KS_WLAN_GET_PHY_TYPE */ + ks_wlan_set_cts_mode, /* 24 KS_WLAN_SET_CTS_MODE */ + ks_wlan_get_cts_mode, /* 25 KS_WLAN_GET_CTS_MODE */ + NULL, /* 26 */ + NULL, /* 27 */ + ks_wlan_set_sleep_mode, /* 28 KS_WLAN_SET_SLEEP_MODE */ + ks_wlan_get_sleep_mode, /* 29 KS_WLAN_GET_SLEEP_MODE */ + NULL, /* 30 */ + NULL, /* 31 */ }; static const struct iw_handler_def ks_wlan_handler_def = { @@ -2461,7 +2465,7 @@ static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq, switch (cmd) { case SIOCIWFIRSTPRIV + 20: /* KS_WLAN_SET_STOP_REQ */ - ret = ks_wlan_set_stop_request(dev, NULL, &wrq->u.mode, NULL); + ret = ks_wlan_set_stop_request(dev, NULL, &wrq->u, NULL); break; // All other calls are currently unsupported default: diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c index 97dff82b7a5fbb88280840dad4d4d349cc7d97d0..7a5f80e637a0bb6a11698bf9d4896fc71654c284 100644 --- a/drivers/staging/most/dim2/dim2.c +++ b/drivers/staging/most/dim2/dim2.c @@ -161,7 +161,7 @@ static int try_start_dim_transfer(struct hdm_channel *hdm_ch) struct list_head *head = &hdm_ch->pending_list; struct mbo *mbo; unsigned long flags; - struct dim_ch_state_t st; + struct dim_ch_state st; BUG_ON(!hdm_ch); BUG_ON(!hdm_ch->is_initialized); @@ -259,7 +259,7 @@ static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo) static void service_done_flag(struct dim2_hdm *dev, int ch_idx) { struct hdm_channel *hdm_ch = dev->hch + ch_idx; - struct dim_ch_state_t st; + struct dim_ch_state st; struct list_head *head; struct mbo *mbo; int done_buffers; diff --git a/drivers/staging/most/dim2/hal.c b/drivers/staging/most/dim2/hal.c index 65282c27686287fab02726401aa1ca0aa322722e..a5d40b5b138ab1f15593266ebe0716a21de6f210 100644 --- a/drivers/staging/most/dim2/hal.c +++ b/drivers/staging/most/dim2/hal.c @@ -943,8 +943,8 @@ u8 dim_service_channel(struct dim_channel *ch) return channel_service(ch); } -struct dim_ch_state_t *dim_get_channel_state(struct dim_channel *ch, - struct dim_ch_state_t *state_ptr) +struct dim_ch_state *dim_get_channel_state(struct dim_channel *ch, + struct dim_ch_state *state_ptr) { if (!ch || !state_ptr) return NULL; diff --git a/drivers/staging/most/dim2/hal.h b/drivers/staging/most/dim2/hal.h index 20531449acab1dac2bed3882cbc91a3ed41e18a0..ef10a8741c10b87de8fb6c81ce1b08d01f1f5b6d 100644 --- a/drivers/staging/most/dim2/hal.h +++ b/drivers/staging/most/dim2/hal.h @@ -27,7 +27,7 @@ enum mlb_clk_speed { CLK_8192FS = 7, }; -struct dim_ch_state_t { +struct dim_ch_state { bool ready; /* Shows readiness to enqueue next buffer */ u16 done_buffers; /* Number of completed buffers */ }; @@ -87,8 +87,8 @@ void dim_service_ahb_int_irq(struct dim_channel *const *channels); u8 dim_service_channel(struct dim_channel *ch); -struct 
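Aside on the ks_wlan private handler table converted above: giving every
handler the canonical iw_handler prototype (taking union iwreq_data *)
is what lets the (iw_handler) casts disappear; calling a function
through a mismatched, cast function-pointer type is undefined behavior
and trips indirect-call checking such as kCFI. A self-contained
illustration with simplified stand-in types (not the real wireless
extensions structures):

#include <stdio.h>

union iwreq_like {
	unsigned int mode;	/* the member these handlers actually use */
};

typedef int (*handler_fn)(union iwreq_like *uwrq);

static int set_preamble_like(union iwreq_like *uwrq)
{
	return uwrq->mode <= 1 ? 0 : -1;
}

/* With matching prototypes the table needs no casts, and every
 * indirect call goes through one consistent function type. */
static const handler_fn handlers[] = {
	set_preamble_like,
};

int main(void)
{
	union iwreq_like u = { .mode = 1 };

	printf("%d\n", handlers[0](&u));
	return 0;
}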
dim_ch_state_t *dim_get_channel_state(struct dim_channel *ch, - struct dim_ch_state_t *state_ptr); +struct dim_ch_state *dim_get_channel_state(struct dim_channel *ch, + struct dim_ch_state *state_ptr); u16 dim_dbr_space(struct dim_channel *ch); diff --git a/drivers/staging/most/i2c/i2c.c b/drivers/staging/most/i2c/i2c.c index 285a071f02be18bbc0150d74acdb93a8129f3e02..df53a4c4f850489eff36ce402331cb9d7301572c 100644 --- a/drivers/staging/most/i2c/i2c.c +++ b/drivers/staging/most/i2c/i2c.c @@ -284,7 +284,7 @@ static irqreturn_t most_irq_handler(int irq, void *_dev) * * Register the i2c client device as a MOST interface */ -static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) +static int i2c_probe(struct i2c_client *client) { struct hdm_i2c *dev; int ret, i; @@ -359,7 +359,7 @@ static struct i2c_driver i2c_driver = { .driver = { .name = "hdm_i2c", }, - .probe = i2c_probe, + .probe_new = i2c_probe, .remove = i2c_remove, .id_table = i2c_id, }; diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index a36e36701c74365bdc4888ecf90311a0b85efd56..bbf33b88bb7c2afe59321c5862e42102ab15a52a 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -73,7 +73,6 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev) { int skb_to_free; int qos, queues_per_port; - int total_freed = 0; int total_remaining = 0; unsigned long flags; struct octeon_ethernet *priv = netdev_priv(dev); @@ -87,7 +86,6 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev) MAX_SKB_TO_FREE); skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4); - total_freed += skb_to_free; if (skb_to_free > 0) { struct sk_buff *to_free_list = NULL; diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h index 3f8e5713b8a850f06f9f15de63431105dc1f8442..7a02e59e283fbae84f49af366616a4e13ea24f8a 100644 --- a/drivers/staging/octeon/octeon-stubs.h +++ b/drivers/staging/octeon/octeon-stubs.h @@ -1212,7 +1212,7 @@ static inline void *cvmx_phys_to_ptr(uint64_t physical_address) return (void *)(uintptr_t)(physical_address); } -static inline uint64_t cvmx_ptr_to_phys(void *ptr) +static inline phys_addr_t cvmx_ptr_to_phys(void *ptr) { return (unsigned long)ptr; } diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c index 4fb9b9f1079962c2ef3e946c714c85167df9109c..2fba52e0bd7bde73d44e6c351dcc7284bd04dfe7 100644 --- a/drivers/staging/olpc_dcon/olpc_dcon.c +++ b/drivers/staging/olpc_dcon/olpc_dcon.c @@ -579,7 +579,7 @@ static int dcon_detect(struct i2c_client *client, struct i2c_board_info *info) return 0; } -static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id) +static int dcon_probe(struct i2c_client *client) { struct dcon_priv *dcon; int rc, i, j; @@ -779,7 +779,7 @@ static struct i2c_driver dcon_driver = { }, .class = I2C_CLASS_DDC | I2C_CLASS_HWMON, .id_table = dcon_idtable, - .probe = dcon_probe, + .probe_new = dcon_probe, .remove = dcon_remove, .detect = dcon_detect, .address_list = normal_i2c, diff --git a/drivers/staging/r8188eu/core/rtw_ap.c b/drivers/staging/r8188eu/core/rtw_ap.c index 24eb8dce9bfeb042362098516c1db86b123e548a..e0ca4b6e17ccae2904457f3cacfc690e2ff52f3b 100644 --- a/drivers/staging/r8188eu/core/rtw_ap.c +++ b/drivers/staging/r8188eu/core/rtw_ap.c @@ -1017,10 +1017,9 @@ u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta, return beacon_updated; } -int rtw_sta_flush(struct adapter *padapter) +void 
rtw_sta_flush(struct adapter *padapter) { struct list_head *phead, *plist; - int ret = 0; struct sta_info *psta = NULL; struct sta_priv *pstapriv = &padapter->stapriv; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; @@ -1028,7 +1027,7 @@ int rtw_sta_flush(struct adapter *padapter) u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE) - return ret; + return; spin_lock_bh(&pstapriv->asoc_list_lock); phead = &pstapriv->asoc_list; @@ -1050,8 +1049,6 @@ int rtw_sta_flush(struct adapter *padapter) issue_deauth(padapter, bc_addr, WLAN_REASON_DEAUTH_LEAVING); associated_clients_update(padapter, true); - - return ret; } /* called > TSR LEVEL for USB or SDIO Interface*/ diff --git a/drivers/staging/r8188eu/core/rtw_br_ext.c b/drivers/staging/r8188eu/core/rtw_br_ext.c index 4c5f30792a46cd680fa5479087d85de540ebc887..a7c67014dde0f067e197319a4c5681bdc1ed3e93 100644 --- a/drivers/staging/r8188eu/core/rtw_br_ext.c +++ b/drivers/staging/r8188eu/core/rtw_br_ext.c @@ -50,17 +50,17 @@ static unsigned char *__nat25_find_pppoe_tag(struct pppoe_hdr *ph, unsigned short type) { unsigned char *cur_ptr, *start_ptr; - unsigned short tagLen, tagType; + unsigned short tag_len, tag_type; start_ptr = (unsigned char *)ph->tag; cur_ptr = (unsigned char *)ph->tag; while ((cur_ptr - start_ptr) < ntohs(ph->length)) { /* prevent un-alignment access */ - tagType = (unsigned short)((cur_ptr[0] << 8) + cur_ptr[1]); - tagLen = (unsigned short)((cur_ptr[2] << 8) + cur_ptr[3]); - if (tagType == type) + tag_type = (unsigned short)((cur_ptr[0] << 8) + cur_ptr[1]); + tag_len = (unsigned short)((cur_ptr[2] << 8) + cur_ptr[3]); + if (tag_type == type) return cur_ptr; - cur_ptr = cur_ptr + TAG_HDR_LEN + tagLen; + cur_ptr = cur_ptr + TAG_HDR_LEN + tag_len; } return NULL; } @@ -111,32 +111,32 @@ static int __nat25_has_expired(struct nat25_network_db_entry *fdb) return 0; } -static void __nat25_generate_ipv4_network_addr(unsigned char *networkAddr, - unsigned int *ipAddr) +static void __nat25_generate_ipv4_network_addr(unsigned char *addr, + unsigned int *ip_addr) { - memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN); + memset(addr, 0, MAX_NETWORK_ADDR_LEN); - networkAddr[0] = NAT25_IPV4; - memcpy(networkAddr + 7, (unsigned char *)ipAddr, 4); + addr[0] = NAT25_IPV4; + memcpy(addr + 7, (unsigned char *)ip_addr, 4); } -static void __nat25_generate_pppoe_network_addr(unsigned char *networkAddr, +static void __nat25_generate_pppoe_network_addr(unsigned char *addr, unsigned char *ac_mac, __be16 *sid) { - memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN); + memset(addr, 0, MAX_NETWORK_ADDR_LEN); - networkAddr[0] = NAT25_PPPOE; - memcpy(networkAddr + 1, (unsigned char *)sid, 2); - memcpy(networkAddr + 3, (unsigned char *)ac_mac, 6); + addr[0] = NAT25_PPPOE; + memcpy(addr + 1, (unsigned char *)sid, 2); + memcpy(addr + 3, (unsigned char *)ac_mac, 6); } -static void __nat25_generate_ipv6_network_addr(unsigned char *networkAddr, - unsigned int *ipAddr) +static void __nat25_generate_ipv6_network_addr(unsigned char *addr, + unsigned int *ip_addr) { - memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN); + memset(addr, 0, MAX_NETWORK_ADDR_LEN); - networkAddr[0] = NAT25_IPV6; - memcpy(networkAddr + 1, (unsigned char *)ipAddr, 16); + addr[0] = NAT25_IPV6; + memcpy(addr + 1, (unsigned char *)ip_addr, 16); } static unsigned char *scan_tlv(unsigned char *data, int len, unsigned char tag, unsigned char len8b) @@ -200,40 +200,40 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char 
return 0; } -static int __nat25_network_hash(unsigned char *networkAddr) +static int __nat25_network_hash(unsigned char *addr) { - if (networkAddr[0] == NAT25_IPV4) { + if (addr[0] == NAT25_IPV4) { unsigned long x; - x = networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10]; + x = addr[7] ^ addr[8] ^ addr[9] ^ addr[10]; return x & (NAT25_HASH_SIZE - 1); - } else if (networkAddr[0] == NAT25_IPX) { + } else if (addr[0] == NAT25_IPX) { unsigned long x; - x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^ - networkAddr[6] ^ networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10]; + x = addr[1] ^ addr[2] ^ addr[3] ^ addr[4] ^ addr[5] ^ + addr[6] ^ addr[7] ^ addr[8] ^ addr[9] ^ addr[10]; return x & (NAT25_HASH_SIZE - 1); - } else if (networkAddr[0] == NAT25_APPLE) { + } else if (addr[0] == NAT25_APPLE) { unsigned long x; - x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3]; + x = addr[1] ^ addr[2] ^ addr[3]; return x & (NAT25_HASH_SIZE - 1); - } else if (networkAddr[0] == NAT25_PPPOE) { + } else if (addr[0] == NAT25_PPPOE) { unsigned long x; - x = networkAddr[0] ^ networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^ networkAddr[6] ^ networkAddr[7] ^ networkAddr[8]; + x = addr[0] ^ addr[1] ^ addr[2] ^ addr[3] ^ addr[4] ^ + addr[5] ^ addr[6] ^ addr[7] ^ addr[8]; return x & (NAT25_HASH_SIZE - 1); - } else if (networkAddr[0] == NAT25_IPV6) { + } else if (addr[0] == NAT25_IPV6) { unsigned long x; - x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^ - networkAddr[6] ^ networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10] ^ - networkAddr[11] ^ networkAddr[12] ^ networkAddr[13] ^ networkAddr[14] ^ networkAddr[15] ^ - networkAddr[16]; + x = addr[1] ^ addr[2] ^ addr[3] ^ addr[4] ^ addr[5] ^ addr[6] ^ + addr[7] ^ addr[8] ^ addr[9] ^ addr[10] ^ addr[11] ^ addr[12] ^ + addr[13] ^ addr[14] ^ addr[15] ^ addr[16]; return x & (NAT25_HASH_SIZE - 1); } else { @@ -241,7 +241,7 @@ static int __nat25_network_hash(unsigned char *networkAddr) int i; for (i = 0; i < MAX_NETWORK_ADDR_LEN; i++) - x ^= networkAddr[i]; + x ^= addr[i]; return x & (NAT25_HASH_SIZE - 1); } @@ -269,17 +269,17 @@ static void __network_hash_unlink(struct nat25_network_db_entry *ent) } static void __nat25_db_network_insert(struct adapter *priv, - unsigned char *macAddr, unsigned char *networkAddr) + unsigned char *mac_addr, unsigned char *addr) { struct nat25_network_db_entry *db; int hash; spin_lock_bh(&priv->br_ext_lock); - hash = __nat25_network_hash(networkAddr); + hash = __nat25_network_hash(addr); db = priv->nethash[hash]; while (db) { - if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) { - memcpy(db->macAddr, macAddr, ETH_ALEN); + if (!memcmp(db->networkAddr, addr, MAX_NETWORK_ADDR_LEN)) { + memcpy(db->macAddr, mac_addr, ETH_ALEN); db->ageing_timer = jiffies; spin_unlock_bh(&priv->br_ext_lock); return; @@ -291,8 +291,8 @@ static void __nat25_db_network_insert(struct adapter *priv, spin_unlock_bh(&priv->br_ext_lock); return; } - memcpy(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN); - memcpy(db->macAddr, macAddr, ETH_ALEN); + memcpy(db->networkAddr, addr, MAX_NETWORK_ADDR_LEN); + memcpy(db->macAddr, mac_addr, ETH_ALEN); atomic_set(&db->use_count, 1); db->ageing_timer = jiffies; @@ -366,7 +366,7 @@ void nat25_db_expire(struct adapter *priv) int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method) { unsigned short protocol; - unsigned char 
networkAddr[MAX_NETWORK_ADDR_LEN]; + unsigned char addr[MAX_NETWORK_ADDR_LEN]; unsigned int tmp; if (!skb) @@ -395,9 +395,9 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method) if (iph->saddr == 0) return 0; tmp = be32_to_cpu(iph->saddr); - __nat25_generate_ipv4_network_addr(networkAddr, &tmp); + __nat25_generate_ipv4_network_addr(addr, &tmp); /* record source IP address and , source mac address into db */ - __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr); + __nat25_db_network_insert(priv, skb->data + ETH_ALEN, addr); return 0; default: return -1; @@ -421,8 +421,8 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method) memcpy(arp_ptr, GET_MY_HWADDR(priv), ETH_ALEN); arp_ptr += arp->ar_hln; sender = (unsigned int *)arp_ptr; - __nat25_generate_ipv4_network_addr(networkAddr, sender); - __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr); + __nat25_generate_ipv4_network_addr(addr, sender); + __nat25_db_network_insert(priv, skb->data + ETH_ALEN, addr); return 0; default: return -1; @@ -495,9 +495,9 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method) return -1; } } else { /* session phase */ - __nat25_generate_pppoe_network_addr(networkAddr, skb->data, &ph->sid); + __nat25_generate_pppoe_network_addr(addr, skb->data, &ph->sid); - __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr); + __nat25_db_network_insert(priv, skb->data + ETH_ALEN, addr); if (!priv->ethBrExtInfo.addPPPoETag && priv->pppoe_connection_in_progress && @@ -548,8 +548,8 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method) return -1; case NAT25_INSERT: if (memcmp(&iph->saddr, "\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0", 16)) { - __nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->saddr); - __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr); + __nat25_generate_ipv6_network_addr(addr, (unsigned int *)&iph->saddr); + __nat25_db_network_insert(priv, skb->data + ETH_ALEN, addr); if (iph->nexthdr == IPPROTO_ICMPV6 && skb->len > (ETH_HLEN + sizeof(*iph) + 4)) { @@ -606,17 +606,16 @@ void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb) if (!priv->ethBrExtInfo.dhcp_bcst_disable) { __be16 protocol = *((__be16 *)(skb->data + 2 * ETH_ALEN)); - if (protocol == __constant_htons(ETH_P_IP)) { /* IP */ + if (protocol == htons(ETH_P_IP)) { /* IP */ struct iphdr *iph = (struct iphdr *)(skb->data + ETH_HLEN); if (iph->protocol == IPPROTO_UDP) { /* UDP */ - struct udphdr *udph = (struct udphdr *)((size_t)iph + (iph->ihl << 2)); + struct udphdr *udph = (void *)iph + (iph->ihl << 2); - if ((udph->source == __constant_htons(CLIENT_PORT)) && - (udph->dest == __constant_htons(SERVER_PORT))) { /* DHCP request */ - struct dhcpMessage *dhcph = - (struct dhcpMessage *)((size_t)udph + sizeof(struct udphdr)); - u32 cookie = be32_to_cpu((__be32)dhcph->cookie); + if ((udph->source == htons(CLIENT_PORT)) && + (udph->dest == htons(SERVER_PORT))) { /* DHCP request */ + struct dhcpMessage *dhcph = (void *)udph + sizeof(struct udphdr); + u32 cookie = be32_to_cpu(dhcph->cookie); if (cookie == DHCP_MAGIC) { /* match magic word */ if (!(dhcph->flags & htons(BROADCAST_FLAG))) { @@ -639,19 +638,18 @@ void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb) } } -void *scdb_findEntry(struct adapter *priv, unsigned char *ipAddr) +void *scdb_findEntry(struct adapter *priv, unsigned char *ip_addr) { - unsigned char networkAddr[MAX_NETWORK_ADDR_LEN]; + unsigned char 
addr[MAX_NETWORK_ADDR_LEN]; struct nat25_network_db_entry *db; int hash; - __nat25_generate_ipv4_network_addr(networkAddr, (unsigned int *)ipAddr); - hash = __nat25_network_hash(networkAddr); + __nat25_generate_ipv4_network_addr(addr, (unsigned int *)ip_addr); + hash = __nat25_network_hash(addr); db = priv->nethash[hash]; while (db) { - if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) { + if (!memcmp(db->networkAddr, addr, MAX_NETWORK_ADDR_LEN)) return (void *)db; - } db = db->next_hash; } diff --git a/drivers/staging/r8188eu/core/rtw_cmd.c b/drivers/staging/r8188eu/core/rtw_cmd.c index 3fadace33de6524762861b80980d736d0ae82509..19b2f73d481dbc7f48513d8a75cda3437152570d 100644 --- a/drivers/staging/r8188eu/core/rtw_cmd.c +++ b/drivers/staging/r8188eu/core/rtw_cmd.c @@ -54,7 +54,7 @@ exit: return _SUCCESS; } -u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv) +int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv) { init_completion(&pcmdpriv->enqueue_cmd); /* sema_init(&(pcmdpriv->cmd_done_sema), 0); */ @@ -71,7 +71,7 @@ u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv) GFP_KERNEL); if (!pcmdpriv->cmd_allocated_buf) - return _FAIL; + return -ENOMEM; pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((size_t)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ - 1)); @@ -79,7 +79,7 @@ u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv) if (!pcmdpriv->rsp_allocated_buf) { kfree(pcmdpriv->cmd_allocated_buf); - return _FAIL; + return -ENOMEM; } pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((size_t)(pcmdpriv->rsp_allocated_buf) & 3); @@ -87,13 +87,11 @@ u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv) pcmdpriv->cmd_done_cnt = 0; pcmdpriv->rsp_cnt = 0; - return _SUCCESS; + return 0; } -u32 rtw_init_evt_priv(struct evt_priv *pevtpriv) +int rtw_init_evt_priv(struct evt_priv *pevtpriv) { - u32 res = _SUCCESS; - /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */ atomic_set(&pevtpriv->event_seq, 0); @@ -101,9 +99,9 @@ u32 rtw_init_evt_priv(struct evt_priv *pevtpriv) pevtpriv->c2h_wk_alive = false; pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN + 1); if (!pevtpriv->c2h_queue) - res = _FAIL; + return -ENOMEM; - return res; + return 0; } void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv) @@ -342,33 +340,29 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, return res; } -u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset) +int rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset) { struct cmd_obj *ph2c; struct setdatarate_parm *pbsetdataratepara; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - u8 res = _SUCCESS; ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC); - if (!ph2c) { - res = _FAIL; - goto exit; - } + if (!ph2c) + return -ENOMEM; pbsetdataratepara = kzalloc(sizeof(*pbsetdataratepara), GFP_ATOMIC); if (!pbsetdataratepara) { kfree(ph2c); - res = _FAIL; - goto exit; + return -ENOMEM; } init_h2fwcmd_w_parm_no_rsp(ph2c, pbsetdataratepara, GEN_CMD_CODE(_SetDataRate)); pbsetdataratepara->mac_id = 5; memcpy(pbsetdataratepara->datarates, rateset, NumRates); - res = rtw_enqueue_cmd(pcmdpriv, ph2c); -exit: + if (rtw_enqueue_cmd(pcmdpriv, ph2c) == _FAIL) + return -EPERM; - return res; + return 0; } void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd) diff --git a/drivers/staging/r8188eu/core/rtw_ioctl_set.c b/drivers/staging/r8188eu/core/rtw_ioctl_set.c index 55e6b0f41dc32bb27b2788919b96d240449e3983..785c0dba508fae27dc152078ca5a3a39e3dc3e36 100644 --- 
a/drivers/staging/r8188eu/core/rtw_ioctl_set.c +++ b/drivers/staging/r8188eu/core/rtw_ioctl_set.c @@ -287,7 +287,7 @@ u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter, if ((*pold_state == Ndis802_11Infrastructure) || (*pold_state == Ndis802_11IBSS)) { if (check_fwstate(pmlmepriv, _FW_LINKED)) - rtw_indicate_disconnect(padapter); /* will clr Linked_state; before this function, we must have chked whether issue dis-assoc_cmd or not */ + rtw_indicate_disconnect(padapter); /* will clr Linked_state; before this function, we must have checked whether issue dis-assoc_cmd or not */ } *pold_state = networktype; @@ -314,7 +314,7 @@ u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter, return true; } -u8 rtw_set_802_11_disassociate(struct adapter *padapter) +void rtw_set_802_11_disassociate(struct adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; @@ -328,8 +328,6 @@ u8 rtw_set_802_11_disassociate(struct adapter *padapter) } spin_unlock_bh(&pmlmepriv->lock); - - return true; } u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_ssid *pssid, int ssid_max_num) diff --git a/drivers/staging/r8188eu/core/rtw_led.c b/drivers/staging/r8188eu/core/rtw_led.c index 1e316e6358ea2deda4c80ef0bb8710fc156167e6..48725ce9d369e3db812abdd37f3685a247e9b039 100644 --- a/drivers/staging/r8188eu/core/rtw_led.c +++ b/drivers/staging/r8188eu/core/rtw_led.c @@ -26,47 +26,32 @@ static void ResetLedStatus(struct led_priv *pLed) pLed->BlinkTimes = 0; /* Number of times to toggle led state for blinking. */ - pLed->bLedLinkBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false; } -static void SwLedOn(struct adapter *padapter, struct led_priv *pLed) +static void SwLedOn(struct led_priv *pLed) { - u8 LedCfg; - int res; + struct adapter *padapter = container_of(pLed, struct adapter, ledpriv); if (padapter->bDriverStopped) return; - res = rtw_read8(padapter, REG_LEDCFG2, &LedCfg); - if (res) + if (rtw_write8(padapter, REG_LEDCFG2, BIT(5)) != _SUCCESS) return; - rtw_write8(padapter, REG_LEDCFG2, (LedCfg & 0xf0) | BIT(5) | BIT(6)); /* SW control led0 on. */ pLed->bLedOn = true; } -static void SwLedOff(struct adapter *padapter, struct led_priv *pLed) +static void SwLedOff(struct led_priv *pLed) { - u8 LedCfg; - int res; + struct adapter *padapter = container_of(pLed, struct adapter, ledpriv); if (padapter->bDriverStopped) - goto exit; - - res = rtw_read8(padapter, REG_LEDCFG2, &LedCfg);/* 0x4E */ - if (res) - goto exit; + return; - LedCfg &= 0x90; /* Set to software control. 
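Dropping the pLed->padapter back-pointer in the LED code above works because struct led_priv is embedded in struct adapter as the ledpriv member, so container_of() can recover the enclosing object from the member address alone. A reduced sketch with illustrative demo types:

#include <linux/container_of.h>
#include <linux/types.h>

struct led_priv_demo { bool bLedOn; };

struct adapter_demo {
	int id;
	struct led_priv_demo ledpriv; /* embedded, not a pointer */
};

static struct adapter_demo *led_to_adapter(struct led_priv_demo *led)
{
	/* subtracts offsetof(struct adapter_demo, ledpriv) from the member address */
	return container_of(led, struct adapter_demo, ledpriv);
}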
*/ - rtw_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3))); - res = rtw_read8(padapter, REG_MAC_PINMUX_CFG, &LedCfg); - if (res) - goto exit; + if (rtw_write8(padapter, REG_LEDCFG2, BIT(5) | BIT(3)) != _SUCCESS) + return; - LedCfg &= 0xFE; - rtw_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg); -exit: pLed->bLedOn = false; } @@ -74,19 +59,19 @@ static void blink_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct led_priv *pLed = container_of(dwork, struct led_priv, blink_work); - struct adapter *padapter = pLed->padapter; + struct adapter *padapter = container_of(pLed, struct adapter, ledpriv); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) { - SwLedOff(padapter, pLed); + SwLedOff(pLed); ResetLedStatus(pLed); return; } if (pLed->bLedOn) - SwLedOff(padapter, pLed); + SwLedOff(pLed); else - SwLedOn(padapter, pLed); + SwLedOn(pLed); switch (pLed->CurrLedState) { case LED_BLINK_SLOWLY: @@ -96,26 +81,10 @@ static void blink_work(struct work_struct *work) schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL); break; case LED_BLINK_SCAN: - pLed->BlinkTimes--; - if (pLed->BlinkTimes == 0) { - if (check_fwstate(pmlmepriv, _FW_LINKED)) { - pLed->bLedLinkBlinkInProgress = true; - pLed->CurrLedState = LED_BLINK_NORMAL; - schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL); - } else { - pLed->CurrLedState = LED_BLINK_SLOWLY; - schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL); - } - pLed->bLedScanBlinkInProgress = false; - } else { - schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL); - } - break; case LED_BLINK_TXRX: pLed->BlinkTimes--; if (pLed->BlinkTimes == 0) { if (check_fwstate(pmlmepriv, _FW_LINKED)) { - pLed->bLedLinkBlinkInProgress = true; pLed->CurrLedState = LED_BLINK_NORMAL; schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL); } else { @@ -123,8 +92,11 @@ static void blink_work(struct work_struct *work) schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL); } pLed->bLedBlinkInProgress = false; + pLed->bLedScanBlinkInProgress = false; } else { - schedule_delayed_work(&pLed->blink_work, LED_BLINK_FASTER_INTVL); + schedule_delayed_work(&pLed->blink_work, + pLed->CurrLedState == LED_BLINK_SCAN ? 
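blink_work above follows the standard self-rescheduling delayed-work pattern: the handler recovers its owning structure through to_delayed_work() plus container_of(), performs one step, and queues itself again. A minimal sketch, with names and interval chosen for illustration:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct blinker {
	struct delayed_work work;
	bool on;
};

static void blink_step(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct blinker *b = container_of(dwork, struct blinker, work);

	b->on = !b->on;
	schedule_delayed_work(&b->work, msecs_to_jiffies(500));
}

/*
 * Setup:    INIT_DELAYED_WORK(&b->work, blink_step);
 * Teardown: cancel_delayed_work_sync(&b->work); the _sync variant waits for a
 *           running handler so it cannot rearm after cleanup, which is why the
 *           driver's deinit path uses it while rtw_led_control, which still
 *           wants the work afterwards, uses plain cancel_delayed_work().
 */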
+ LED_BLINK_SCAN_INTVL : LED_BLINK_FASTER_INTVL); } break; case LED_BLINK_WPS: @@ -132,7 +104,6 @@ static void blink_work(struct work_struct *work) break; case LED_BLINK_WPS_STOP: /* WPS success */ if (!pLed->bLedOn) { - pLed->bLedLinkBlinkInProgress = true; pLed->CurrLedState = LED_BLINK_NORMAL; schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL); @@ -150,7 +121,6 @@ void rtl8188eu_InitSwLeds(struct adapter *padapter) { struct led_priv *pledpriv = &padapter->ledpriv; - pledpriv->padapter = padapter; ResetLedStatus(pledpriv); INIT_DELAYED_WORK(&pledpriv->blink_work, blink_work); } @@ -161,7 +131,7 @@ void rtl8188eu_DeInitSwLeds(struct adapter *padapter) cancel_delayed_work_sync(&ledpriv->blink_work); ResetLedStatus(ledpriv); - SwLedOff(padapter, ledpriv); + SwLedOff(ledpriv); } void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction) @@ -170,8 +140,7 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction) struct registry_priv *registry_par; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; - if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) || - (!padapter->hw_init_completed)) + if (!padapter->hw_init_completed) return; if (!pLed->bRegUseLed) @@ -189,23 +158,18 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction) cancel_delayed_work(&pLed->blink_work); - pLed->bLedLinkBlinkInProgress = false; pLed->bLedBlinkInProgress = false; pLed->CurrLedState = LED_BLINK_SLOWLY; schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL); break; case LED_CTL_LINK: - if (!pLed->bLedLinkBlinkInProgress) - return; - if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed)) return; cancel_delayed_work(&pLed->blink_work); pLed->bLedBlinkInProgress = false; - pLed->bLedLinkBlinkInProgress = true; pLed->CurrLedState = LED_BLINK_NORMAL; schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL); @@ -222,7 +186,6 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction) cancel_delayed_work(&pLed->blink_work); - pLed->bLedLinkBlinkInProgress = false; pLed->bLedBlinkInProgress = false; pLed->bLedScanBlinkInProgress = true; @@ -240,7 +203,6 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction) cancel_delayed_work(&pLed->blink_work); - pLed->bLedLinkBlinkInProgress = false; pLed->bLedBlinkInProgress = true; pLed->CurrLedState = LED_BLINK_TXRX; @@ -253,7 +215,6 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction) cancel_delayed_work(&pLed->blink_work); - pLed->bLedLinkBlinkInProgress = false; pLed->bLedBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = true; @@ -263,7 +224,6 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction) case LED_CTL_STOP_WPS: cancel_delayed_work(&pLed->blink_work); - pLed->bLedLinkBlinkInProgress = false; pLed->bLedBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = true; @@ -283,12 +243,11 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction) break; case LED_CTL_POWER_OFF: pLed->CurrLedState = RTW_LED_OFF; - pLed->bLedLinkBlinkInProgress = false; pLed->bLedBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false; cancel_delayed_work(&pLed->blink_work); - SwLedOff(padapter, pLed); + SwLedOff(pLed); break; default: break; diff --git a/drivers/staging/r8188eu/core/rtw_mlme.c b/drivers/staging/r8188eu/core/rtw_mlme.c index 
5ca03d6cac32a2f1b849bab76835321c46d4ee5c..b272123626ac6f3473f71c608fe5b4b5e706d739 100644 --- a/drivers/staging/r8188eu/core/rtw_mlme.c +++ b/drivers/staging/r8188eu/core/rtw_mlme.c @@ -76,19 +76,6 @@ void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwor spin_unlock_bh(&free_queue->lock); } -void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork) -{ - struct __queue *free_queue = &pmlmepriv->free_bss_pool; - - if (!pnetwork) - return; - if (pnetwork->fixed) - return; - list_del_init(&pnetwork->list); - list_add_tail(&pnetwork->list, get_list_head(free_queue)); - pmlmepriv->num_of_scanned--; -} - /* return the wlan_network with the matching addr @@ -224,7 +211,6 @@ int rtw_init_mlme_priv(struct adapter *padapter)/* struct mlme_priv *pmlmepriv) u8 *pbuf; struct wlan_network *pnetwork; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; - int res = _SUCCESS; /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */ @@ -245,10 +231,9 @@ int rtw_init_mlme_priv(struct adapter *padapter)/* struct mlme_priv *pmlmepriv) pbuf = vzalloc(MAX_BSS_CNT * (sizeof(struct wlan_network))); - if (!pbuf) { - res = _FAIL; - goto exit; - } + if (!pbuf) + return -ENOMEM; + pmlmepriv->free_bss_buf = pbuf; pnetwork = (struct wlan_network *)pbuf; @@ -265,9 +250,7 @@ int rtw_init_mlme_priv(struct adapter *padapter)/* struct mlme_priv *pmlmepriv) rtw_init_mlme_timer(padapter); -exit: - - return res; + return 0; } void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv) @@ -311,9 +294,15 @@ exit: static void rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork) { + struct __queue *free_queue = &pmlmepriv->free_bss_pool; - _rtw_free_network_nolock(pmlmepriv, pnetwork); - + if (!pnetwork) + return; + if (pnetwork->fixed) + return; + list_del_init(&pnetwork->list); + list_add_tail(&pnetwork->list, get_list_head(free_queue)); + pmlmepriv->num_of_scanned--; } void rtw_free_network_queue(struct adapter *dev, u8 isfreeall) @@ -1823,22 +1812,6 @@ void rtw_update_registrypriv_dev_network(struct adapter *adapter) pdev_network->Rssi = 0; - switch (pregistrypriv->wireless_mode) { - case WIRELESS_11B: - pdev_network->NetworkTypeInUse = (Ndis802_11DS); - break; - case WIRELESS_11G: - case WIRELESS_11BG: - case WIRELESS_11_24N: - case WIRELESS_11G_24N: - case WIRELESS_11BG_24N: - pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24); - break; - default: - /* TODO */ - break; - } - pdev_network->Configuration.DSConfig = (pregistrypriv->channel); if (cur_network->network.InfrastructureMode == Ndis802_11IBSS) diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c index 07905e2ae8e055fb982c30397387ebd8ab9a6a98..1b9cf7596a76eeae362ac3db3f69306bda75e218 100644 --- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c +++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c @@ -12,24 +12,6 @@ #include "../include/rtl8188e_xmit.h" #include "../include/rtl8188e_dm.h" -/* response function for each management frame subtype, do not reorder */ -static mlme_handler mlme_sta_tbl[] = { - OnAssocReq, - OnAssocRsp, - OnAssocReq, - OnAssocRsp, - OnProbeReq, - OnProbeRsp, - NULL, - NULL, - OnBeacon, - NULL, - OnDisassoc, - OnAuthClient, - OnDeAuth, - OnAction, -}; - static u8 null_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; /************************************************** @@ -137,7 +119,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = { {0x03}, /* 0x41, 
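The rtw_free_network_nolock body inlined above is the usual list-recycling move: unlink the entry with list_del_init() and append it to the free pool with list_add_tail(), while the caller holds the queue lock (that is what the _nolock suffix signals). A standalone sketch that takes the lock itself, with illustrative types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct bss_pool {
	spinlock_t lock;
	struct list_head free_list;
	int num_of_scanned;
};

static void recycle_network(struct bss_pool *pool, struct list_head *entry)
{
	spin_lock_bh(&pool->lock);
	list_del_init(entry);                   /* unlink from the scanned list */
	list_add_tail(entry, &pool->free_list); /* hand it back to the pool */
	pool->num_of_scanned--;
	spin_unlock_bh(&pool->lock);
}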
RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G */ }; -static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03}; /* use the conbination for max channel numbers */ +static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03}; /* use the combination for max channel numbers */ /* * Search the @param channel_num in given @param channel_set @@ -393,47 +375,6 @@ void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext) } } -void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame) -{ - int index; - mlme_handler fct; - struct mlme_priv *pmlmepriv = &padapter->mlmepriv; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data; - struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, hdr->addr2); - - if (!ieee80211_is_mgmt(hdr->frame_control)) - return; - - /* receive the frames that ra(a1) is my address or ra(a1) is bc address. */ - if (memcmp(hdr->addr1, myid(&padapter->eeprompriv), ETH_ALEN) && - !is_broadcast_ether_addr(hdr->addr1)) - return; - - index = (le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE) >> 4; - if (index >= ARRAY_SIZE(mlme_sta_tbl)) - return; - fct = mlme_sta_tbl[index]; - - if (psta) { - if (ieee80211_has_retry(hdr->frame_control)) { - if (precv_frame->attrib.seq_num == psta->RxMgmtFrameSeqNum) - /* drop the duplicate management frame */ - return; - } - psta->RxMgmtFrameSeqNum = precv_frame->attrib.seq_num; - } - - if (ieee80211_is_auth(hdr->frame_control)) { - if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) - fct = OnAuth; - else - fct = OnAuthClient; - } - - if (fct) - fct(padapter, precv_frame); -} - static u32 p2p_listen_state_process(struct adapter *padapter, unsigned char *da) { bool response = true; @@ -448,21 +389,6 @@ static u32 p2p_listen_state_process(struct adapter *padapter, unsigned char *da) return _SUCCESS; } -static void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe) -{ - u8 *pIE; - __le32 *pbuf; - - pIE = pframe + sizeof(struct ieee80211_hdr_3addr); - pbuf = (__le32 *)pIE; - - pmlmeext->TSFValue = le32_to_cpu(*(pbuf + 1)); - - pmlmeext->TSFValue = pmlmeext->TSFValue << 32; - - pmlmeext->TSFValue |= le32_to_cpu(*pbuf); -} - static void correct_TSF(struct adapter *padapter) { u8 reg; @@ -506,7 +432,7 @@ Following are the callback functions for each subtype of the management frames *****************************************************************************/ -unsigned int OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame) +static void OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame) { unsigned int ielen; unsigned char *p; @@ -540,17 +466,17 @@ unsigned int OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame report_survey_event(padapter, precv_frame); p2p_listen_state_process(padapter, get_sa(pframe)); - return _SUCCESS; + return; } } } if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) - return _SUCCESS; + return; if (!check_fwstate(pmlmepriv, _FW_LINKED) && !check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE)) - return _SUCCESS; + return; p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, (int *)&ielen, len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_); @@ -562,7 +488,7 @@ unsigned int OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame if ((ielen != 0 && memcmp((void *)(p + 2), (void *)cur->Ssid.Ssid, cur->Ssid.SsidLength)) || (ielen == 0 && pmlmeinfo->hidden_ssid_mode)) - return _SUCCESS; + return; _issue_probersp: @@ -571,10 +497,9 @@ _issue_probersp: check_fwstate(pmlmepriv, 
WIFI_ADHOC_MASTER_STATE))) issue_probersp(padapter, get_sa(pframe), is_valid_p2p_probereq); } - return _SUCCESS; } -unsigned int OnProbeRsp(struct adapter *padapter, struct recv_frame *precv_frame) +static void OnProbeRsp(struct adapter *padapter, struct recv_frame *precv_frame) { struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct wifidirect_info *pwdinfo = &padapter->wdinfo; @@ -596,7 +521,7 @@ unsigned int OnProbeRsp(struct adapter *padapter, struct recv_frame *precv_frame } } } - return _SUCCESS; + return; } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) { if (pwdinfo->nego_req_info.benable) { if (!memcmp(pwdinfo->nego_req_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN)) { @@ -614,14 +539,13 @@ unsigned int OnProbeRsp(struct adapter *padapter, struct recv_frame *precv_frame } if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) { report_survey_event(padapter, precv_frame); - return _SUCCESS; + return; } - - return _SUCCESS; } -unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame) +static void OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame) { + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data; int cam_idx; struct sta_info *psta; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; @@ -631,86 +555,86 @@ unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame) u8 *pframe = precv_frame->rx_data; uint len = precv_frame->len; struct wlan_bssid_ex *pbss; - int ret = _SUCCESS; + u8 *ie_ptr; + u32 ie_len; + + ie_ptr = (u8 *)&mgmt->u.beacon.variable; + if (precv_frame->len < offsetof(struct ieee80211_mgmt, u.beacon.variable)) + return; + ie_len = precv_frame->len - offsetof(struct ieee80211_mgmt, u.beacon.variable); if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) { report_survey_event(padapter, precv_frame); - return _SUCCESS; + return; } - if (!memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) { - if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) { - /* we should update current network before auth, or some IE is wrong */ - pbss = kmalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC); - if (pbss) { - if (collect_bss_info(padapter, precv_frame, pbss) == _SUCCESS) { - update_network(&pmlmepriv->cur_network.network, pbss, padapter, true); - rtw_get_bcn_info(&pmlmepriv->cur_network); - } - kfree(pbss); - } + if (memcmp(mgmt->bssid, get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) + return; - /* check the vendor of the assoc AP */ - pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe + sizeof(struct ieee80211_hdr_3addr), len - sizeof(struct ieee80211_hdr_3addr)); + if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) { + /* we should update current network before auth, or some IE is wrong */ + pbss = kmalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC); + if (!pbss) + return; - /* update TSF Value */ - update_TSF(pmlmeext, pframe); + if (collect_bss_info(padapter, precv_frame, pbss) == _SUCCESS) { + update_network(&pmlmepriv->cur_network.network, pbss, padapter, true); + rtw_get_bcn_info(&pmlmepriv->cur_network); + } + kfree(pbss); - /* start auth */ - start_clnt_auth(padapter); + /* check the vendor of the assoc AP */ + pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe + sizeof(struct ieee80211_hdr_3addr), len - sizeof(struct ieee80211_hdr_3addr)); - return _SUCCESS; + pmlmeext->TSFValue = le64_to_cpu(mgmt->u.beacon.timestamp); + + /* start auth */ + start_clnt_auth(padapter); + + return; + } + + if (((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) && (pmlmeinfo->state & 
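The new ie_ptr/ie_len computation in OnBeacon leans on the fact that a beacon's fixed fields end exactly at u.beacon.variable in struct ieee80211_mgmt, so a single offsetof() both validates the frame length and locates the information elements. A sketch of that pattern:

#include <linux/ieee80211.h>
#include <linux/stddef.h>

static bool beacon_ies(struct ieee80211_mgmt *mgmt, size_t frame_len,
		       u8 **ies, u32 *ies_len)
{
	const size_t fixed = offsetof(struct ieee80211_mgmt, u.beacon.variable);

	if (frame_len < fixed)
		return false; /* truncated: not even the fixed fields fit */

	*ies = mgmt->u.beacon.variable;
	*ies_len = frame_len - fixed;
	return true;
}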
WIFI_FW_ASSOC_SUCCESS)) { + psta = rtw_get_stainfo(pstapriv, mgmt->sa); + if (!psta) + return; + + if (rtw_check_bcn_info(padapter, pframe, len) != _SUCCESS) { + receive_disconnect(padapter, pmlmeinfo->network.MacAddress, 0); + return; } + /* update WMM, ERP in the beacon */ + /* todo: the timer is used instead of the number of the beacon received */ + if ((sta_rx_pkts(psta) & 0xf) == 0) + update_beacon_info(padapter, ie_ptr, ie_len, psta); + process_p2p_ps_ie(padapter, ie_ptr, ie_len); + } else if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) { + psta = rtw_get_stainfo(pstapriv, mgmt->sa); + if (psta) { + /* update WMM, ERP in the beacon */ + /* todo: the timer is used instead of the number of the beacon received */ + if ((sta_rx_pkts(psta) & 0xf) == 0) + update_beacon_info(padapter, ie_ptr, ie_len, psta); + } else { + /* allocate a new CAM entry for IBSS station */ + cam_idx = allocate_fw_sta_entry(padapter); + if (cam_idx == NUM_STA) + return; - if (((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) && (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)) { - psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe)); - if (psta) { - ret = rtw_check_bcn_info(padapter, pframe, len); - if (!ret) { - receive_disconnect(padapter, - pmlmeinfo->network.MacAddress, 0); - return _SUCCESS; - } - /* update WMM, ERP in the beacon */ - /* todo: the timer is used instead of the number of the beacon received */ - if ((sta_rx_pkts(psta) & 0xf) == 0) - update_beacon_info(padapter, pframe, len, psta); - process_p2p_ps_ie(padapter, (pframe + WLAN_HDR_A3_LEN), (len - WLAN_HDR_A3_LEN)); + /* get supported rate */ + if (update_sta_support_rate(padapter, ie_ptr, ie_len, cam_idx) == _FAIL) { + pmlmeinfo->FW_sta_info[cam_idx].status = 0; + return; } - } else if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) { - psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe)); - if (psta) { - /* update WMM, ERP in the beacon */ - /* todo: the timer is used instead of the number of the beacon received */ - if ((sta_rx_pkts(psta) & 0xf) == 0) - update_beacon_info(padapter, pframe, len, psta); - } else { - /* allocate a new CAM entry for IBSS station */ - cam_idx = allocate_fw_sta_entry(padapter); - if (cam_idx == NUM_STA) - goto _END_ONBEACON_; - - /* get supported rate */ - if (update_sta_support_rate(padapter, (pframe + WLAN_HDR_A3_LEN + _BEACON_IE_OFFSET_), (len - WLAN_HDR_A3_LEN - _BEACON_IE_OFFSET_), cam_idx) == _FAIL) { - pmlmeinfo->FW_sta_info[cam_idx].status = 0; - goto _END_ONBEACON_; - } - /* update TSF Value */ - update_TSF(pmlmeext, pframe); + pmlmeext->TSFValue = le64_to_cpu(mgmt->u.beacon.timestamp); - /* report sta add event */ - report_add_sta_event(padapter, GetAddr2Ptr(pframe), cam_idx); - } + report_add_sta_event(padapter, mgmt->sa, cam_idx); } } - -_END_ONBEACON_: - - return _SUCCESS; } -unsigned int OnAuth(struct adapter *padapter, struct recv_frame *precv_frame) +static void OnAuth(struct adapter *padapter, struct recv_frame *precv_frame) { unsigned int auth_mode, ie_len; u16 seq; @@ -727,7 +651,7 @@ unsigned int OnAuth(struct adapter *padapter, struct recv_frame *precv_frame) uint len = precv_frame->len; if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE) - return _FAIL; + return; sa = GetAddr2Ptr(pframe); @@ -843,7 +767,7 @@ unsigned int OnAuth(struct adapter *padapter, struct recv_frame *precv_frame) if (pstat->state & WIFI_FW_AUTH_SUCCESS) pstat->auth_seq = 0; - return _SUCCESS; + return; auth_fail: @@ -856,27 +780,26 @@ auth_fail: memcpy(pstat->hwaddr, sa, 6); issue_auth(padapter, pstat, (unsigned 
short)status); - return _FAIL; } -unsigned int OnAuthClient(struct adapter *padapter, struct recv_frame *precv_frame) +static void OnAuthClient(struct adapter *padapter, struct recv_frame *precv_frame) { unsigned int seq, len, status, offset; unsigned char *p; - unsigned int go2asoc = 0; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data; u8 *pframe = precv_frame->rx_data; uint pkt_len = precv_frame->len; /* check A1 matches or not */ - if (memcmp(myid(&padapter->eeprompriv), get_da(pframe), ETH_ALEN)) - return _SUCCESS; + if (memcmp(myid(&padapter->eeprompriv), ieee80211_get_DA(hdr), ETH_ALEN)) + return; if (!(pmlmeinfo->state & WIFI_FW_AUTH_STATE)) - return _SUCCESS; + return; - offset = (GetPrivacy(pframe)) ? 4 : 0; + offset = ieee80211_has_protected(hdr->frame_control) ? 4 : 0; seq = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN + offset + 2)); status = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN + offset + 4)); @@ -890,7 +813,7 @@ unsigned int OnAuthClient(struct adapter *padapter, struct recv_frame *precv_fra } set_link_timer(pmlmeext, 1); - goto authclnt_fail; + return; } if (seq == 2) { @@ -900,34 +823,22 @@ unsigned int OnAuthClient(struct adapter *padapter, struct recv_frame *precv_fra pkt_len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_); if (!p) - goto authclnt_fail; + return; memcpy((void *)(pmlmeinfo->chg_txt), (void *)(p + 2), len); pmlmeinfo->auth_seq = 3; issue_auth(padapter, NULL, 0); set_link_timer(pmlmeext, REAUTH_TO); - return _SUCCESS; + return; } else { /* open system */ - go2asoc = 1; + start_clnt_assoc(padapter); } } else if (seq == 4) { if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) - go2asoc = 1; - else - goto authclnt_fail; - } else { - /* this is also illegal */ - goto authclnt_fail; + start_clnt_assoc(padapter); } - - if (go2asoc) { - start_clnt_assoc(padapter); - return _SUCCESS; - } -authclnt_fail: - return _FAIL; } static void UpdateBrateTbl(u8 *mbrate) @@ -970,7 +881,7 @@ static void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen) } } -unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame) +static void OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame) { u16 capab_info; struct rtw_ieee802_11_elems elems; @@ -996,7 +907,7 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame u32 p2pielen = 0; if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE) - return _FAIL; + return; frame_type = GetFrameSubType(pframe); if (frame_type == WIFI_ASSOCREQ) @@ -1005,7 +916,7 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame ie_offset = _REASOCREQ_IE_OFFSET_; if (pkt_len < IEEE80211_3ADDR_LEN + ie_offset) - return _FAIL; + return; pstat = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe)); if (pstat == (struct sta_info *)NULL) { @@ -1359,13 +1270,13 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame report_add_sta_event(padapter, pstat->hwaddr, pstat->aid); } - return _SUCCESS; + return; asoc_class2_error: issue_deauth(padapter, (void *)GetAddr2Ptr(pframe), status); - return _FAIL; + return; OnAssocReqFail: @@ -1375,10 +1286,10 @@ OnAssocReqFail: else issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP); - return _FAIL; + return; } -unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame) +static void OnAssocRsp(struct adapter *padapter, struct 
recv_frame *precv_frame) { struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data; uint i; @@ -1392,13 +1303,13 @@ unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame /* check A1 matches or not */ if (memcmp(myid(&padapter->eeprompriv), mgmt->da, ETH_ALEN)) - return _SUCCESS; + return; if (!(pmlmeinfo->state & (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE))) - return _SUCCESS; + return; if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) - return _SUCCESS; + return; _cancel_timer_ex(&pmlmeext->link_timer); @@ -1451,163 +1362,143 @@ unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame report_assoc_result: report_join_res(padapter, res); - - return _SUCCESS; } -unsigned int OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame) +static void OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame) { + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data; unsigned short reason; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; - u8 *pframe = precv_frame->rx_data; struct wifidirect_info *pwdinfo = &padapter->wdinfo; - /* check A3 */ - if (!(!memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN))) - return _SUCCESS; + if (memcmp(mgmt->bssid, get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) + return; if (pwdinfo->rx_invitereq_info.scan_op_ch_only) { _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey); _set_timer(&pwdinfo->reset_ch_sitesurvey, 10); } - reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN)); + reason = le16_to_cpu(mgmt->u.disassoc.reason_code); if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) { struct sta_info *psta; struct sta_priv *pstapriv = &padapter->stapriv; + u8 updated = 0; - psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe)); - if (psta) { - u8 updated = 0; - - spin_lock_bh(&pstapriv->asoc_list_lock); - if (!list_empty(&psta->asoc_list)) { - list_del_init(&psta->asoc_list); - pstapriv->asoc_list_cnt--; - updated = ap_free_sta(padapter, psta, false, reason); - } - spin_unlock_bh(&pstapriv->asoc_list_lock); + psta = rtw_get_stainfo(pstapriv, mgmt->sa); + if (!psta) + return; - associated_clients_update(padapter, updated); + spin_lock_bh(&pstapriv->asoc_list_lock); + if (!list_empty(&psta->asoc_list)) { + list_del_init(&psta->asoc_list); + pstapriv->asoc_list_cnt--; + updated = ap_free_sta(padapter, psta, false, reason); } + spin_unlock_bh(&pstapriv->asoc_list_lock); - return _SUCCESS; + associated_clients_update(padapter, updated); } else { - int ignore_received_deauth = 0; + bool ignore_received_deauth = false; /* Before sending the auth frame to start the STA/GC mode connection with AP/GO, * we will send the deauth first. * However, Win8.1 with BRCM Wi-Fi will send the deauth with reason code 6 to us after receiving our deauth. * Added the following code to avoid this case. 
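Reading the reason code through mgmt->u.disassoc inside the OnDeAuth handler above is safe because, in struct ieee80211_mgmt, the deauth and disassoc union members each consist of a single __le16 reason_code, so both views alias the same bytes. A sketch that makes that layout assumption explicit at compile time:

#include <linux/build_bug.h>
#include <linux/ieee80211.h>

static u16 mgmt_reason_code(const struct ieee80211_mgmt *mgmt)
{
	/* deauth and disassoc share the same fixed-field layout */
	BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.deauth.reason_code) !=
		     offsetof(struct ieee80211_mgmt, u.disassoc.reason_code));

	return le16_to_cpu(mgmt->u.deauth.reason_code);
}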
*/ - if ((pmlmeinfo->state & WIFI_FW_AUTH_STATE) || - (pmlmeinfo->state & WIFI_FW_ASSOC_STATE)) { + if (pmlmeinfo->state & (WIFI_FW_AUTH_STATE | WIFI_FW_ASSOC_STATE)) { if (reason == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA) { - ignore_received_deauth = 1; + ignore_received_deauth = true; } else if (reason == WLAN_REASON_PREV_AUTH_NOT_VALID) { // TODO: 802.11r - ignore_received_deauth = 1; + ignore_received_deauth = true; } } if (!ignore_received_deauth) - receive_disconnect(padapter, GetAddr3Ptr(pframe), reason); + receive_disconnect(padapter, mgmt->bssid, reason); + + pmlmepriv->LinkDetectInfo.bBusyTraffic = false; } - pmlmepriv->LinkDetectInfo.bBusyTraffic = false; - return _SUCCESS; } -unsigned int OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame) +static void OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame) { + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data; u16 reason; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; - u8 *pframe = precv_frame->rx_data; struct wifidirect_info *pwdinfo = &padapter->wdinfo; + struct sta_info *psta; + struct sta_priv *pstapriv = &padapter->stapriv; + u8 updated = 0; - /* check A3 */ - if (!(!memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN))) - return _SUCCESS; + if (memcmp(mgmt->bssid, get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) + return; if (pwdinfo->rx_invitereq_info.scan_op_ch_only) { _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey); _set_timer(&pwdinfo->reset_ch_sitesurvey, 10); } - reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN)); - - if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) { - struct sta_info *psta; - struct sta_priv *pstapriv = &padapter->stapriv; - - psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe)); - if (psta) { - u8 updated = 0; + reason = le16_to_cpu(mgmt->u.disassoc.reason_code); - spin_lock_bh(&pstapriv->asoc_list_lock); - if (!list_empty(&psta->asoc_list)) { - list_del_init(&psta->asoc_list); - pstapriv->asoc_list_cnt--; - updated = ap_free_sta(padapter, psta, false, reason); - } - spin_unlock_bh(&pstapriv->asoc_list_lock); + if (!check_fwstate(pmlmepriv, WIFI_AP_STATE)) { + receive_disconnect(padapter, mgmt->bssid, reason); + pmlmepriv->LinkDetectInfo.bBusyTraffic = false; + return; + } - associated_clients_update(padapter, updated); - } + psta = rtw_get_stainfo(pstapriv, mgmt->sa); + if (!psta) + return; - return _SUCCESS; - } else { - receive_disconnect(padapter, GetAddr3Ptr(pframe), reason); + spin_lock_bh(&pstapriv->asoc_list_lock); + if (!list_empty(&psta->asoc_list)) { + list_del_init(&psta->asoc_list); + pstapriv->asoc_list_cnt--; + updated = ap_free_sta(padapter, psta, false, reason); } - pmlmepriv->LinkDetectInfo.bBusyTraffic = false; - return _SUCCESS; + spin_unlock_bh(&pstapriv->asoc_list_lock); + + associated_clients_update(padapter, updated); } -unsigned int OnAction_back(struct adapter *padapter, struct recv_frame *precv_frame) +static void OnAction_back(struct adapter *padapter, struct recv_frame *precv_frame) { struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data; struct sta_info *psta = NULL; struct recv_reorder_ctrl *preorder_ctrl; - unsigned char *frame_body; unsigned short tid; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; - u8 *pframe = precv_frame->rx_data; struct sta_priv *pstapriv = 
&padapter->stapriv; - /* check RA matches or not */ - if (memcmp(myid(&padapter->eeprompriv), mgmt->da, ETH_ALEN))/* for if1, sta/ap mode */ - return _SUCCESS; if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE) if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)) - return _SUCCESS; + return; psta = rtw_get_stainfo(pstapriv, mgmt->sa); - if (!psta) - return _SUCCESS; - - frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr)); + return; if (!pmlmeinfo->HT_enable) - return _SUCCESS; + return; /* All union members start with an action code, it's ok to use addba_req. */ switch (mgmt->u.action.u.addba_req.action_code) { case WLAN_ACTION_ADDBA_REQ: - memcpy(&pmlmeinfo->ADDBA_req, &frame_body[2], sizeof(struct ADDBA_request)); tid = u16_get_bits(le16_to_cpu(mgmt->u.action.u.addba_req.capab), IEEE80211_ADDBA_PARAM_TID_MASK); preorder_ctrl = &psta->recvreorder_ctrl[tid]; preorder_ctrl->indicate_seq = 0xffff; preorder_ctrl->enable = pmlmeinfo->bAcceptAddbaReq; - issue_action_BA(padapter, mgmt->sa, WLAN_ACTION_ADDBA_RESP, pmlmeinfo->bAcceptAddbaReq ? - WLAN_STATUS_SUCCESS : WLAN_STATUS_REQUEST_DECLINED); + WLAN_STATUS_SUCCESS : WLAN_STATUS_REQUEST_DECLINED, mgmt); break; case WLAN_ACTION_ADDBA_RESP: tid = u16_get_bits(le16_to_cpu(mgmt->u.action.u.addba_resp.capab), @@ -1636,8 +1527,6 @@ unsigned int OnAction_back(struct adapter *padapter, struct recv_frame *precv_fr default: break; } - - return _SUCCESS; } static int get_reg_classes_full_count(struct p2p_channels *channel_list) @@ -1754,7 +1643,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr) p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */ /* Commented by Albert 20110306 */ - /* According to the P2P Specification, the group negoitation request frame should contain 9 P2P attributes */ + /* According to the P2P Specification, the group negotiation request frame should contain 9 P2P attributes */ /* 1. P2P Capability */ /* 2. Group Owner Intent */ /* 3. Configuration Timeout */ @@ -2109,7 +1998,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */ /* Commented by Albert 20100908 */ - /* According to the P2P Specification, the group negoitation response frame should contain 9 P2P attributes */ + /* According to the P2P Specification, the group negotiation response frame should contain 9 P2P attributes */ /* 1. Status */ /* 2. P2P Capability */ /* 3. Group Owner Intent */ @@ -2405,7 +2294,7 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result) p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */ /* Commented by Albert 20110306 */ - /* According to the P2P Specification, the group negoitation request frame should contain 5 P2P attributes */ + /* According to the P2P Specification, the group negotiation request frame should contain 5 P2P attributes */ /* 1. Status */ /* 2. P2P Capability */ /* 3. 
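OnAction_back above decodes the ADDBA capability word with u16_get_bits(): the TID occupies bits 2..5 of the field, which IEEE80211_ADDBA_PARAM_TID_MASK names, and the helper masks and shifts down by the field's low bit in one step. Sketch:

#include <linux/bitfield.h>
#include <linux/ieee80211.h>

static u8 addba_req_tid(const struct ieee80211_mgmt *mgmt)
{
	u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);

	/* equivalent to (capab & 0x3c) >> 2, but named and typo-proof */
	return u16_get_bits(capab, IEEE80211_ADDBA_PARAM_TID_MASK);
}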
Operating Channel */ @@ -3294,9 +3183,8 @@ void issue_probersp_p2p(struct adapter *padapter, unsigned char *da) dump_mgntframe(padapter, pmgntframe); } -static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack) +inline void issue_probereq_p2p(struct adapter *padapter) { - int ret = _FAIL; struct xmit_frame *pmgntframe; struct pkt_attrib *pattrib; unsigned char *pframe; @@ -3312,7 +3200,7 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack) pmgntframe = alloc_mgtxmitframe(pxmitpriv); if (!pmgntframe) - goto exit; + return; /* update attribute */ pattrib = &pmgntframe->attrib; @@ -3328,20 +3216,16 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack) fctrl = &pwlanhdr->frame_control; *(fctrl) = 0; - if (da) { - memcpy(pwlanhdr->addr1, da, ETH_ALEN); - memcpy(pwlanhdr->addr3, da, ETH_ALEN); + if ((pwdinfo->p2p_info.scan_op_ch_only) || (pwdinfo->rx_invitereq_info.scan_op_ch_only)) { + /* This two flags will be set when this is only the P2P client mode. */ + memcpy(pwlanhdr->addr1, pwdinfo->p2p_peer_interface_addr, ETH_ALEN); + memcpy(pwlanhdr->addr3, pwdinfo->p2p_peer_interface_addr, ETH_ALEN); } else { - if ((pwdinfo->p2p_info.scan_op_ch_only) || (pwdinfo->rx_invitereq_info.scan_op_ch_only)) { - /* This two flags will be set when this is only the P2P client mode. */ - memcpy(pwlanhdr->addr1, pwdinfo->p2p_peer_interface_addr, ETH_ALEN); - memcpy(pwlanhdr->addr3, pwdinfo->p2p_peer_interface_addr, ETH_ALEN); - } else { - /* broadcast probe request frame */ - eth_broadcast_addr(pwlanhdr->addr1); - eth_broadcast_addr(pwlanhdr->addr3); - } + /* broadcast probe request frame */ + eth_broadcast_addr(pwlanhdr->addr1); + eth_broadcast_addr(pwlanhdr->addr3); } + memcpy(pwlanhdr->addr2, mac, ETH_ALEN); SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq); @@ -3567,23 +3451,10 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack) pattrib->last_txcmdsz = pattrib->pktlen; - if (wait_ack) { - ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe); - } else { - dump_mgntframe(padapter, pmgntframe); - ret = _SUCCESS; - } - -exit: - return ret; -} - -inline void issue_probereq_p2p(struct adapter *adapter, u8 *da) -{ - _issue_probereq_p2p(adapter, da, false); + dump_mgntframe(padapter, pmgntframe); } -static s32 rtw_action_public_decache(struct recv_frame *recv_frame, s32 token) +static s32 rtw_action_public_decache(struct recv_frame *recv_frame, u8 token) { struct adapter *adapter = recv_frame->adapter; struct mlme_ext_priv *mlmeext = &adapter->mlmeextpriv; @@ -3592,21 +3463,13 @@ static s32 rtw_action_public_decache(struct recv_frame *recv_frame, s32 token) (recv_frame->attrib.frag_num & 0xf); if (GetRetry(frame)) { - if (token >= 0) { - if ((seq_ctrl == mlmeext->action_public_rxseq) && - (token == mlmeext->action_public_dialog_token)) - return _FAIL; - } else { - if (seq_ctrl == mlmeext->action_public_rxseq) - return _FAIL; - } + if ((seq_ctrl == mlmeext->action_public_rxseq) && + (token == mlmeext->action_public_dialog_token)) + return _FAIL; } mlmeext->action_public_rxseq = seq_ctrl; - - if (token >= 0) - mlmeext->action_public_dialog_token = token; - + mlmeext->action_public_dialog_token = token; return _SUCCESS; } @@ -3872,110 +3735,64 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame) return _SUCCESS; } -static unsigned int on_action_public_vendor(struct recv_frame *precv_frame) +static void on_action_public_vendor(struct recv_frame *precv_frame) { - unsigned int ret = _FAIL; u8 *pframe 
= precv_frame->rx_data; u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr); - if (!memcmp(frame_body + 2, P2P_OUI, 4)) { - ret = on_action_public_p2p(precv_frame); - } - - return ret; + if (!memcmp(frame_body + 2, P2P_OUI, 4)) + on_action_public_p2p(precv_frame); } -static unsigned int on_action_public_default(struct recv_frame *precv_frame) +static void on_action_public_default(struct recv_frame *precv_frame) { - unsigned int ret = _FAIL; u8 *pframe = precv_frame->rx_data; u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr); u8 token; token = frame_body[2]; - if (rtw_action_public_decache(precv_frame, token) == _FAIL) - goto exit; - - ret = _SUCCESS; - -exit: - return ret; + rtw_action_public_decache(precv_frame, token); } -unsigned int on_action_public(struct adapter *padapter, struct recv_frame *precv_frame) +static void on_action_public(struct adapter *padapter, struct recv_frame *precv_frame) { - unsigned int ret = _FAIL; - u8 *pframe = precv_frame->rx_data; - u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr); - u8 category, action; - - /* check RA matches or not */ - if (memcmp(myid(&padapter->eeprompriv), GetAddr1Ptr(pframe), ETH_ALEN)) - goto exit; - - category = frame_body[0]; - if (category != WLAN_CATEGORY_PUBLIC) - goto exit; - - action = frame_body[1]; - switch (action) { - case ACT_PUBLIC_VENDOR: - ret = on_action_public_vendor(precv_frame); - break; - default: - ret = on_action_public_default(precv_frame); - break; - } + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data; -exit: - return ret; + /* All members of the action enum start with action_code. */ + if (mgmt->u.action.u.s1g.action_code == WLAN_PUB_ACTION_VENDOR_SPECIFIC) + on_action_public_vendor(precv_frame); + else + on_action_public_default(precv_frame); } -unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_frame) +static void OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_frame) { u8 *frame_body; - u8 category, OUI_Subtype; + u8 OUI_Subtype; u8 *pframe = precv_frame->rx_data; uint len = precv_frame->len; struct wifidirect_info *pwdinfo = &padapter->wdinfo; - /* check RA matches or not */ - if (memcmp(myid(&padapter->eeprompriv), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */ - return _SUCCESS; - frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr)); - category = frame_body[0]; - if (category != RTW_WLAN_CATEGORY_P2P) - return _SUCCESS; - if (be32_to_cpu(*((__be32 *)(frame_body + 1))) != P2POUI) - return _SUCCESS; + return; len -= sizeof(struct ieee80211_hdr_3addr); OUI_Subtype = frame_body[5]; - switch (OUI_Subtype) { - case P2P_NOTICE_OF_ABSENCE: - break; - case P2P_PRESENCE_REQUEST: + if (OUI_Subtype == P2P_PRESENCE_REQUEST) process_p2p_presence_req(pwdinfo, pframe, len); - break; - case P2P_PRESENCE_RESPONSE: - break; - case P2P_GO_DISC_REQUEST: - break; - default: - break; - } - return _SUCCESS; } -unsigned int OnAction(struct adapter *padapter, struct recv_frame *precv_frame) +static void OnAction(struct adapter *padapter, struct recv_frame *precv_frame) { struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data; + if (!ether_addr_equal(myid(&padapter->eeprompriv), mgmt->da)) + return; + switch (mgmt->u.action.category) { case WLAN_CATEGORY_BACK: OnAction_back(padapter, precv_frame); @@ -3987,7 +3804,6 @@ unsigned int OnAction(struct adapter *padapter, struct recv_frame *precv_frame) OnAction_p2p(padapter, precv_frame); break; } - return _SUCCESS; } struct 
xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv) @@ -4011,9 +3827,66 @@ struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv) return pmgntframe; } +void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame) +{ + mlme_handler mlme_sta_tbl[] = { + OnAssocReq, + OnAssocRsp, + OnAssocReq, + OnAssocRsp, + OnProbeReq, + OnProbeRsp, + NULL, + NULL, + OnBeacon, + NULL, + OnDisassoc, + OnAuthClient, + OnDeAuth, + OnAction, + }; + int index; + mlme_handler fct; + struct mlme_priv *pmlmepriv = &padapter->mlmepriv; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data; + struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, hdr->addr2); + + if (!ieee80211_is_mgmt(hdr->frame_control)) + return; + + /* receive the frames that ra(a1) is my address or ra(a1) is bc address. */ + if (memcmp(hdr->addr1, myid(&padapter->eeprompriv), ETH_ALEN) && + !is_broadcast_ether_addr(hdr->addr1)) + return; + + index = (le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE) >> 4; + if (index >= ARRAY_SIZE(mlme_sta_tbl)) + return; + fct = mlme_sta_tbl[index]; + + if (psta) { + if (ieee80211_has_retry(hdr->frame_control)) { + if (precv_frame->attrib.seq_num == psta->RxMgmtFrameSeqNum) + /* drop the duplicate management frame */ + return; + } + psta->RxMgmtFrameSeqNum = precv_frame->attrib.seq_num; + } + + if (ieee80211_is_auth(hdr->frame_control)) { + if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) + fct = OnAuth; + else + fct = OnAuthClient; + } + + if (fct) + fct(padapter, precv_frame); +} + /**************************************************************************** -Following are some TX fuctions for WiFi MLME +Following are some TX functions for WiFi MLME *****************************************************************************/ @@ -4059,9 +3932,6 @@ void update_mgntframe_attrib(struct adapter *padapter, struct pkt_attrib *pattri void dump_mgntframe(struct adapter *padapter, struct xmit_frame *pmgntframe) { - if (padapter->bSurpriseRemoved || padapter->bDriverStopped) - return; - rtl8188eu_mgnt_xmit(padapter, pmgntframe); } @@ -4091,9 +3961,6 @@ s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmg u32 timeout_ms = 500;/* 500ms */ struct xmit_priv *pxmitpriv = &padapter->xmitpriv; - if (padapter->bSurpriseRemoved || padapter->bDriverStopped) - return -1; - mutex_lock(&pxmitpriv->ack_tx_mutex); pxmitpriv->ack_tx = true; @@ -4588,34 +4455,19 @@ inline void issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps _issue_probereq(padapter, pssid, da, false); } -int issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, - int try_cnt, int wait_ms) +void issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da) { - int ret; - int i = 0; - - do { - ret = _issue_probereq(padapter, pssid, da, wait_ms > 0); - - i++; + int i; - if (padapter->bDriverStopped || padapter->bSurpriseRemoved) + for (i = 0; i < 3; i++) { + if (_issue_probereq(padapter, pssid, da, true) == _FAIL) + msleep(1); + else break; - - if (i < try_cnt && wait_ms > 0 && ret == _FAIL) - msleep(wait_ms); - - } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0))); - - if (ret != _FAIL) { - ret = _SUCCESS; - goto exit; } -exit: - return ret; } -/* if psta == NULL, indiate we are station(client) now... */ +/* if psta == NULL, indicate we are station (client) now... 
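The relocated mgt_dispatcher above turns the management subtype directly into a table index: subtypes live in bits 4..7 of frame_control, so masking with IEEE80211_FCTL_STYPE and shifting right by 4 yields a dense 0..15 index, which is why mlme_sta_tbl must keep its subtype order. A sketch of the lookup:

#include <linux/ieee80211.h>

typedef void (*mgmt_handler)(void *ctx);

static mgmt_handler subtype_handler(__le16 frame_control,
				    mgmt_handler table[16])
{
	u16 index = (le16_to_cpu(frame_control) & IEEE80211_FCTL_STYPE) >> 4;

	return table[index]; /* NULL entries mark unhandled subtypes */
}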
*/ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short status) { struct xmit_frame *pmgntframe; @@ -5014,7 +4866,7 @@ void issue_assocreq(struct adapter *padapter) if (!padapter->registrypriv.wifi_spec) { /* Commented by Kurt 20110629 */ /* In some older APs, WPS handshake */ - /* would be fail if we append vender extensions informations to AP */ + /* would be fail if we append vendor extension information to AP */ if (!memcmp(pIE->data, WPS_OUI, 4)) pIE->Length = 14; } @@ -5169,7 +5021,7 @@ exit: kfree(pmlmepriv->assoc_req); } -/* when wait_ack is ture, this function shoule be called at process context */ +/* when wait_ack is true, this function should be called at process context */ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int wait_ack) { int ret = _FAIL; @@ -5238,7 +5090,7 @@ exit: return ret; } -/* when wait_ms > 0 , this function shoule be called at process context */ +/* when wait_ms > 0, this function should be called at process context */ /* da == NULL for station mode */ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms) { @@ -5247,7 +5099,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; - /* da == NULL, assum it's null data for sta to ap*/ + /* da == NULL, assume it's null data for sta to ap*/ if (!da) da = get_my_bssid(&pmlmeinfo->network); @@ -5271,7 +5123,7 @@ exit: return ret; } -/* when wait_ack is ture, this function shoule be called at process context */ +/* when wait_ack is true, this function should be called at process context */ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int wait_ack) { int ret = _FAIL; @@ -5344,7 +5196,7 @@ exit: return ret; } -/* when wait_ms > 0 , this function shoule be called at process context */ +/* when wait_ms > 0 , this function should be called at process context */ /* da == NULL for station mode */ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int try_cnt, int wait_ms) { @@ -5353,7 +5205,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; - /* da == NULL, assum it's null data for sta to ap*/ + /* da == NULL, assume it's null data for sta to ap*/ if (!da) da = get_my_bssid(&pmlmeinfo->network); @@ -5471,7 +5323,8 @@ exit: return ret; } -void issue_action_BA(struct adapter *padapter, unsigned char *raddr, u8 action, u16 status) +void issue_action_BA(struct adapter *padapter, unsigned char *raddr, u8 action, + u16 status, struct ieee80211_mgmt *mgmt_req) { u16 start_seq; u16 BA_starting_seqctrl = 0; @@ -5540,13 +5393,13 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, u8 action, break; case WLAN_ACTION_ADDBA_RESP: mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; - mgmt->u.action.u.addba_resp.dialog_token = pmlmeinfo->ADDBA_req.dialog_token; + mgmt->u.action.u.addba_resp.dialog_token = mgmt_req->u.action.u.addba_req.dialog_token; mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); - capab = le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f; + capab = le16_to_cpu(mgmt_req->u.action.u.addba_req.capab) & 0x3f; capab |= u16_encode_bits(64, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK); capab |= 
u16_encode_bits(pregpriv->ampdu_amsdu, IEEE80211_ADDBA_PARAM_AMSDU_MASK); mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab); - mgmt->u.action.u.addba_resp.timeout = pmlmeinfo->ADDBA_req.BA_timeout_value; + mgmt->u.action.u.addba_resp.timeout = mgmt_req->u.action.u.addba_req.timeout; pattrib->pktlen = offsetofend(struct ieee80211_mgmt, u.action.u.addba_resp.timeout); break; case WLAN_ACTION_DELBA: @@ -5714,7 +5567,8 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr) if (initiator == 0) { /* recipient */ for (tid = 0; tid < MAXTID; tid++) { if (psta->recvreorder_ctrl[tid].enable) { - issue_action_BA(padapter, addr, WLAN_ACTION_DELBA, (((tid << 1) | initiator) & 0x1F)); + issue_action_BA(padapter, addr, WLAN_ACTION_DELBA, + (((tid << 1) | initiator) & 0x1F), NULL); psta->recvreorder_ctrl[tid].enable = false; psta->recvreorder_ctrl[tid].indicate_seq = 0xffff; } @@ -5722,7 +5576,8 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr) } else if (initiator == 1) { /* originator */ for (tid = 0; tid < MAXTID; tid++) { if (psta->htpriv.agg_enable_bitmap & BIT(tid)) { - issue_action_BA(padapter, addr, WLAN_ACTION_DELBA, (((tid << 1) | initiator) & 0x1F)); + issue_action_BA(padapter, addr, WLAN_ACTION_DELBA, + (((tid << 1) | initiator) & 0x1F), NULL); psta->htpriv.agg_enable_bitmap &= ~BIT(tid); psta->htpriv.candidate_tid_bitmap &= ~BIT(tid); } @@ -5885,7 +5740,7 @@ static void rtw_set_opmode(struct adapter *adapter, u8 mode) /**************************************************************************** -Following are some utitity fuctions for WiFi MLME +Following are some utility functions for WiFi MLME *****************************************************************************/ @@ -5937,7 +5792,7 @@ void rtw_mlme_site_survey_done(struct adapter *adapter) int res; u8 reg; - if ((is_client_associated_to_ap(adapter)) || + if ((r8188eu_is_client_associated_to_ap(adapter)) || ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE)) { /* enable to rx data frame */ rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF); @@ -6008,9 +5863,9 @@ void site_survey(struct adapter *padapter) if (ScanType == SCAN_ACTIVE) { /* obey the channel plan setting... */ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH)) { - issue_probereq_p2p(padapter, NULL); - issue_probereq_p2p(padapter, NULL); - issue_probereq_p2p(padapter, NULL); + issue_probereq_p2p(padapter); + issue_probereq_p2p(padapter); + issue_probereq_p2p(padapter); } else { int i; for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) { @@ -6058,7 +5913,7 @@ void site_survey(struct adapter *padapter) } else { /* 20100721:Interrupt scan operation here. */ /* For SW antenna diversity before link, it needs to switch to another antenna and scan again. */ - /* It compares the scan result and select beter one to do connection. */ + /* It compares the scan result and selects a better one to do connection. */ if (AntDivBeforeLink8188E(padapter)) { pmlmeext->sitesurvey_res.bss_cnt = 0; pmlmeext->sitesurvey_res.channel_idx = -1; @@ -6088,7 +5943,7 @@ void site_survey(struct adapter *padapter) Restore_DM_Func_Flag(padapter); /* Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true); */ - if (is_client_associated_to_ap(padapter)) + if (r8188eu_is_client_associated_to_ap(padapter)) issue_nulldata(padapter, NULL, 0, 3, 500); rtw_mlme_site_survey_done(padapter); @@ -6108,10 +5963,11 @@ void site_survey(struct adapter *padapter) /* collect bss info from Beacon and Probe request/response frames. 
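The ADDBA response above composes its capability word with u16_encode_bits(), the inverse of u16_get_bits(): each value is shifted into the field named by its mask. A sketch mirroring that composition; the 0x3f mask echoes the requester's A-MSDU/policy/TID bits and 64 is the advertised reorder buffer size, both taken from the patch:

#include <linux/bitfield.h>
#include <linux/ieee80211.h>

static __le16 build_addba_resp_capab(u16 req_capab, bool amsdu_enabled)
{
	u16 capab = req_capab & 0x3f; /* keep A-MSDU, policy and TID from the request */

	capab |= u16_encode_bits(64, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);
	capab |= u16_encode_bits(amsdu_enabled, IEEE80211_ADDBA_PARAM_AMSDU_MASK);

	return cpu_to_le16(capab);
}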
*/ u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, struct wlan_bssid_ex *bssid) { + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data; int i; u32 len; u8 *p; - u16 val16, subtype; + u16 val16; u8 *pframe = precv_frame->rx_data; u32 packet_len = precv_frame->len; u8 ie_offset; @@ -6127,23 +5983,18 @@ u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, st memset(bssid, 0, sizeof(struct wlan_bssid_ex)); - subtype = GetFrameSubType(pframe); - - if (subtype == WIFI_BEACON) { + if (ieee80211_is_beacon(mgmt->frame_control)) { bssid->Reserved[0] = 1; ie_offset = _BEACON_IE_OFFSET_; + } else if (ieee80211_is_probe_req(mgmt->frame_control)) { + ie_offset = _PROBEREQ_IE_OFFSET_; + bssid->Reserved[0] = 2; + } else if (ieee80211_is_probe_resp(mgmt->frame_control)) { + ie_offset = _PROBERSP_IE_OFFSET_; + bssid->Reserved[0] = 3; } else { - /* FIXME : more type */ - if (subtype == WIFI_PROBEREQ) { - ie_offset = _PROBEREQ_IE_OFFSET_; - bssid->Reserved[0] = 2; - } else if (subtype == WIFI_PROBERSP) { - ie_offset = _PROBERSP_IE_OFFSET_; - bssid->Reserved[0] = 3; - } else { - bssid->Reserved[0] = 0; - ie_offset = _FIXED_IE_LENGTH_; - } + bssid->Reserved[0] = 0; + ie_offset = _FIXED_IE_LENGTH_; } bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len; @@ -6191,9 +6042,6 @@ u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, st memcpy(bssid->SupportedRates + i, (p + 2), len); } - /* todo: */ - bssid->NetworkTypeInUse = Ndis802_11OFDM24; - if (bssid->IELength < 12) return _FAIL; @@ -6328,7 +6176,7 @@ void start_create_ibss(struct adapter *padapter) /* update wireless mode */ update_wireless_mode(padapter); - /* udpate capability */ + /* update capability */ caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork); update_capinfo(padapter, caps); if (caps & cap_IBSS) {/* adhoc master */ @@ -6378,7 +6226,7 @@ void start_clnt_join(struct adapter *padapter) /* update wireless mode */ update_wireless_mode(padapter); - /* udpate capability */ + /* update capability */ caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork); update_capinfo(padapter, caps); if (caps & cap_ESS) { @@ -6756,9 +6604,6 @@ void report_join_res(struct adapter *padapter, int res) pcmd_obj->cmdsz = cmdsz; pcmd_obj->parmbuf = pevtcmd; - pcmd_obj->rsp = NULL; - pcmd_obj->rspsz = 0; - pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd); pc2h_evt_hdr->len = sizeof(struct joinbss_event); pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss); @@ -6972,7 +6817,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res) /* BCN interval */ rtw_write16(padapter, REG_BCN_INTERVAL, pmlmeinfo->bcn_interval); - /* udpate capability */ + /* update capability */ update_capinfo(padapter, pmlmeinfo->capability); /* WMM, Update EDCA param */ @@ -7064,7 +6909,7 @@ void mlmeext_sta_del_event_callback(struct adapter *padapter) struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; - if (is_client_associated_to_ap(padapter) || is_IBSS_empty(padapter)) { + if (r8188eu_is_client_associated_to_ap(padapter) || r8188eu_is_ibss_empty(padapter)) { mlme_disconnect(padapter); rtw_set_bssid(padapter, null_addr); @@ -7137,7 +6982,7 @@ void linked_status_chk(struct adapter *padapter) rtl8188e_sreset_linked_status_check(padapter); - if (is_client_associated_to_ap(padapter)) { + if (r8188eu_is_client_associated_to_ap(padapter)) { /* linked infrastructure client mode */ int tx_chk = _SUCCESS, 
rx_chk = _SUCCESS; @@ -7165,7 +7010,7 @@ void linked_status_chk(struct adapter *padapter) } if (rx_chk != _SUCCESS) - issue_probereq_ex(padapter, &pmlmeinfo->network.Ssid, psta->hwaddr, 3, 1); + issue_probereq_ex(padapter, &pmlmeinfo->network.Ssid, psta->hwaddr); if ((tx_chk != _SUCCESS && pmlmeinfo->link_count++ == 0xf) || rx_chk != _SUCCESS) { tx_chk = issue_nulldata(padapter, psta->hwaddr, 0, 3, 1); @@ -7209,7 +7054,7 @@ void linked_status_chk(struct adapter *padapter) pmlmeinfo->link_count = 0; } } /* end of if ((psta = rtw_get_stainfo(pstapriv, passoc_res->network.MacAddress)) != NULL) */ - } else if (is_client_associated_to_ibss(padapter)) { + } else if (r8188eu_is_client_associated_to_ibss(padapter)) { /* linked IBSS mode */ /* for each assoc list entry to check the rx pkt counter */ for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) { @@ -7527,7 +7372,7 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf) u8 val8; int res; - if (is_client_associated_to_ap(padapter)) + if (r8188eu_is_client_associated_to_ap(padapter)) issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms / 100, 100); mlme_disconnect(padapter); @@ -7639,7 +7484,7 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf) pmlmeext->sitesurvey_res.scan_mode = pparm->scan_mode; /* issue null data if associating to the AP */ - if (is_client_associated_to_ap(padapter)) { + if (r8188eu_is_client_associated_to_ap(padapter)) { pmlmeext->sitesurvey_res.state = SCAN_TXNULL; issue_nulldata(padapter, NULL, 1, 3, 500); @@ -7783,7 +7628,7 @@ u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf) if (((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && (pmlmeinfo->HT_enable)) || ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) { - issue_action_BA(padapter, pparm->addr, WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid); + issue_action_BA(padapter, pparm->addr, WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid, NULL); _set_timer(&psta->addba_retry_timer, ADDBA_TO); } else { psta->htpriv.candidate_tid_bitmap &= ~BIT(pparm->tid); diff --git a/drivers/staging/r8188eu/core/rtw_p2p.c b/drivers/staging/r8188eu/core/rtw_p2p.c index dc159e58f428852abec8f0fb859eea3d458e9a0d..93d3c9c4399cfe2ae341476f2592947db539a54c 100644 --- a/drivers/staging/r8188eu/core/rtw_p2p.c +++ b/drivers/staging/r8188eu/core/rtw_p2p.c @@ -1453,7 +1453,7 @@ static void pre_tx_invitereq_handler(struct adapter *padapter) set_channel_bwmode(padapter, pwdinfo->invitereq_info.peer_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20); rtw_mlme_under_site_survey(padapter); - issue_probereq_p2p(padapter, NULL); + issue_probereq_p2p(padapter); _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT); } @@ -1464,7 +1464,7 @@ static void pre_tx_provdisc_handler(struct adapter *padapter) set_channel_bwmode(padapter, pwdinfo->tx_prov_disc_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20); rtw_mlme_under_site_survey(padapter); - issue_probereq_p2p(padapter, NULL); + issue_probereq_p2p(padapter); _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT); } @@ -1475,7 +1475,7 @@ static void pre_tx_negoreq_handler(struct adapter *padapter) set_channel_bwmode(padapter, pwdinfo->nego_req_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20); rtw_mlme_under_site_survey(padapter); - issue_probereq_p2p(padapter, NULL); + issue_probereq_p2p(padapter); _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT); } @@ -1505,8 +1505,6 @@ void p2p_protocol_wk_hdl(struct adapter 
*padapter, int intCmdType) void process_p2p_ps_ie(struct adapter *padapter, u8 *IEs, u32 IELength) { - u8 *ies; - u32 ies_len; u8 *p2p_ie; u32 p2p_ielen = 0; u8 noa_attr[MAX_P2P_IE_LEN] = { 0x00 };/* NoA length should be n*(13) + 2 */ @@ -1518,13 +1516,8 @@ void process_p2p_ps_ie(struct adapter *padapter, u8 *IEs, u32 IELength) if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) return; - if (IELength <= _BEACON_IE_OFFSET_) - return; - ies = IEs + _BEACON_IE_OFFSET_; - ies_len = IELength - _BEACON_IE_OFFSET_; - - p2p_ie = rtw_get_p2p_ie(ies, ies_len, NULL, &p2p_ielen); + p2p_ie = rtw_get_p2p_ie(IEs, IELength, NULL, &p2p_ielen); while (p2p_ie) { find_p2p = true; @@ -1579,7 +1572,7 @@ void process_p2p_ps_ie(struct adapter *padapter, u8 *IEs, u32 IELength) } /* Get the next P2P IE */ - p2p_ie = rtw_get_p2p_ie(p2p_ie + p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen); + p2p_ie = rtw_get_p2p_ie(p2p_ie + p2p_ielen, IELength - (p2p_ie - IEs + p2p_ielen), NULL, &p2p_ielen); } if (find_p2p) { @@ -1732,7 +1725,7 @@ static void pre_tx_scan_timer_process(struct timer_list *t) if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) { if (pwdinfo->tx_prov_disc_info.benable) { /* the provision discovery request frame is trigger to send or not */ p2p_protocol_wk_cmd(adapter, P2P_PRE_TX_PROVDISC_PROCESS_WK); - /* issue_probereq_p2p(adapter, NULL); */ + /* issue_probereq_p2p(adapter); */ /* _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT); */ } } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) { diff --git a/drivers/staging/r8188eu/core/rtw_pwrctrl.c b/drivers/staging/r8188eu/core/rtw_pwrctrl.c index 870d81735b8dc5b83d21cf25ef87481bf38189c7..5290ac36f08c13ee3050d7987a52b681ff604aa4 100644 --- a/drivers/staging/r8188eu/core/rtw_pwrctrl.c +++ b/drivers/staging/r8188eu/core/rtw_pwrctrl.c @@ -273,7 +273,7 @@ static s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms) err = -1; break; } - msleep(1); + mdelay(1); } return err; diff --git a/drivers/staging/r8188eu/core/rtw_recv.c b/drivers/staging/r8188eu/core/rtw_recv.c index bb5c3b3888e0828dff2cf9846f3628c61de24c84..631c500dda4279ffb19f80d95490e4e28be256fb 100644 --- a/drivers/staging/r8188eu/core/rtw_recv.c +++ b/drivers/staging/r8188eu/core/rtw_recv.c @@ -779,9 +779,8 @@ static int ap2sta_data_frame( } /* check BSSID */ - if (!memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) || - !memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) || - (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) { + if (is_zero_ether_addr(pattrib->bssid) || is_zero_ether_addr(mybssid) || + (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) { if (!bmcast) issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA); @@ -972,7 +971,7 @@ static void validate_recv_ctrl_frame(struct adapter *padapter, if (psta->sleepq_len == 0) { pstapriv->tim_bitmap &= ~BIT(psta->aid); - /* upate BCN for TIM IE */ + /* update BCN for TIM IE */ /* update_BCNTIM(padapter); */ update_beacon(padapter, _TIM_IE_, NULL, false); } @@ -986,7 +985,7 @@ static void validate_recv_ctrl_frame(struct adapter *padapter, pstapriv->tim_bitmap &= ~BIT(psta->aid); - /* upate BCN for TIM IE */ + /* update BCN for TIM IE */ /* update_BCNTIM(padapter); */ update_beacon(padapter, _TIM_IE_, NULL, false); } @@ -1032,7 +1031,6 @@ static int validate_recv_data_frame(struct adapter *adapter, struct recv_frame *precv_frame) { struct sta_info *psta = NULL; - u8 *ptr = precv_frame->rx_data; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data; struct 
rx_pkt_attrib *pattrib = &precv_frame->attrib; struct security_priv *psecuritypriv = &adapter->securitypriv; @@ -1065,18 +1063,18 @@ static int validate_recv_data_frame(struct adapter *adapter, if (!psta) return _FAIL; - /* psta->rssi = prxcmd->rssi; */ - /* psta->signal_quality = prxcmd->sq; */ precv_frame->psta = psta; pattrib->amsdu = 0; pattrib->ack_policy = 0; /* parsing QC field */ if (pattrib->qos) { + struct ieee80211_qos_hdr *qos_hdr = (struct ieee80211_qos_hdr *)hdr; + pattrib->priority = ieee80211_get_tid(hdr); - pattrib->ack_policy = GetAckpolicy((ptr + 24)); - pattrib->amsdu = GetAMsdu((ptr + 24)); - pattrib->hdrlen = 26; + pattrib->ack_policy = GetAckpolicy(&qos_hdr->qos_ctrl); + pattrib->amsdu = GetAMsdu(&qos_hdr->qos_ctrl); + pattrib->hdrlen = sizeof(*qos_hdr); if (pattrib->priority != 0 && pattrib->priority != 3) adapter->recvpriv.bIsAnyNonBEPkts = true; @@ -1415,7 +1413,6 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe) struct recv_priv *precvpriv = &padapter->recvpriv; struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue; - int ret = _SUCCESS; nr_subframes = 0; @@ -1513,7 +1510,7 @@ exit: prframe->len = 0; rtw_free_recvframe(prframe, pfree_recv_queue);/* free this recv_frame */ - return ret; + return _SUCCESS; } static bool check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num) @@ -1984,13 +1981,13 @@ static void rtw_signal_stat_timer_hdl(struct timer_list *t) } else { if (recvpriv->signal_strength_data.update_req == 0) {/* update_req is clear, means we got rx */ avg_signal_strength = recvpriv->signal_strength_data.avg_val; - /* after avg_vals are accquired, we can re-stat the signal values */ + /* after avg_vals are acquired, we can re-stat the signal values */ recvpriv->signal_strength_data.update_req = 1; } if (recvpriv->signal_qual_data.update_req == 0) {/* update_req is clear, means we got rx */ avg_signal_qual = recvpriv->signal_qual_data.avg_val; - /* after avg_vals are accquired, we can re-stat the signal values */ + /* after avg_vals are acquired, we can re-stat the signal values */ recvpriv->signal_qual_data.update_req = 1; } diff --git a/drivers/staging/r8188eu/core/rtw_security.c b/drivers/staging/r8188eu/core/rtw_security.c index 5bba57d18b5fc601c31d757acef90502bea84146..780019ce1b983e302f45e8116a4efc6cadc6f3ab 100644 --- a/drivers/staging/r8188eu/core/rtw_security.c +++ b/drivers/staging/r8188eu/core/rtw_security.c @@ -954,7 +954,7 @@ static void bitwise_xor(u8 *ina, u8 *inb, u8 *out) } -static int aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen) +static void aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen) { uint qc_exists, a4_exists, i, j, payload_remainder, num_blocks, payload_index; @@ -1083,8 +1083,6 @@ static int aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen) bitwise_xor(aes_out, padded_buffer, chain_buffer); for (j = 0; j < 8; j++) pframe[payload_index++] = chain_buffer[j]; - - return _SUCCESS; } u32 rtw_aes_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe) diff --git a/drivers/staging/r8188eu/core/rtw_sta_mgt.c b/drivers/staging/r8188eu/core/rtw_sta_mgt.c index 98eeb16cab6c89dad9c1b4c3d3e0b767cb5199f4..b4aee86230991f1e80d0b22efab08bc7b082f89b 100644 --- a/drivers/staging/r8188eu/core/rtw_sta_mgt.c +++ b/drivers/staging/r8188eu/core/rtw_sta_mgt.c @@ -45,7 +45,7 @@ static void _rtw_init_stainfo(struct sta_info *psta) psta->keep_alive_trycnt = 0; } -u32 _rtw_init_sta_priv(struct sta_priv *pstapriv) +int _rtw_init_sta_priv(struct sta_priv *pstapriv) { 
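/* Returns 0 on success or -ENOMEM when the stainfo buffer cannot be allocated, following the usual kernel error-code convention. */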
struct sta_info *psta; s32 i; @@ -53,7 +53,7 @@ u32 _rtw_init_sta_priv(struct sta_priv *pstapriv) pstapriv->pallocated_stainfo_buf = vzalloc(sizeof(struct sta_info) * NUM_STA + 4); if (!pstapriv->pallocated_stainfo_buf) - return _FAIL; + return -ENOMEM; pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 - ((size_t)(pstapriv->pallocated_stainfo_buf) & 3); @@ -93,7 +93,7 @@ u32 _rtw_init_sta_priv(struct sta_priv *pstapriv) pstapriv->expire_to = 3; /* 3*2 = 6 sec */ pstapriv->max_num_sta = NUM_STA; - return _SUCCESS; + return 0; } inline int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta) @@ -242,7 +242,7 @@ exit: } /* using pstapriv->sta_hash_lock to protect */ -u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta) +void rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta) { int i; struct __queue *pfree_sta_queue; @@ -252,7 +252,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta) struct sta_priv *pstapriv = &padapter->stapriv; if (!psta) - goto exit; + return; pfree_sta_queue = &pstapriv->free_sta_queue; @@ -356,10 +356,6 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta) spin_lock_bh(&pfree_sta_queue->lock); list_add_tail(&psta->list, get_list_head(pfree_sta_queue)); spin_unlock_bh(&pfree_sta_queue->lock); - -exit: - - return _SUCCESS; } /* free all stainfo which in sta_hash[all] */ @@ -404,7 +400,7 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr) if (!hwaddr) return NULL; - if (IS_MCAST(hwaddr)) + if (is_multicast_ether_addr(hwaddr)) addr = bc_addr; else addr = hwaddr; diff --git a/drivers/staging/r8188eu/core/rtw_wlan_util.c b/drivers/staging/r8188eu/core/rtw_wlan_util.c index e50631848cab6bba2b13def50e0816e87bd4c933..f1ebb5358cb9a7f226e872bcf8e7661180bdf7dd 100644 --- a/drivers/staging/r8188eu/core/rtw_wlan_util.c +++ b/drivers/staging/r8188eu/core/rtw_wlan_util.c @@ -331,35 +331,35 @@ u16 get_beacon_interval(struct wlan_bssid_ex *bss) return le16_to_cpu(val); } -int is_client_associated_to_ap(struct adapter *padapter) +bool r8188eu_is_client_associated_to_ap(struct adapter *padapter) { struct mlme_ext_priv *pmlmeext; struct mlme_ext_info *pmlmeinfo; if (!padapter) - return _FAIL; + return false; pmlmeext = &padapter->mlmeextpriv; pmlmeinfo = &pmlmeext->mlmext_info; if ((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE)) return true; - else - return _FAIL; + + return false; } -int is_client_associated_to_ibss(struct adapter *padapter) +bool r8188eu_is_client_associated_to_ibss(struct adapter *padapter) { struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; if ((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE)) return true; - else - return _FAIL; + + return false; } -int is_IBSS_empty(struct adapter *padapter) +bool r8188eu_is_ibss_empty(struct adapter *padapter) { unsigned int i; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; @@ -367,7 +367,7 @@ int is_IBSS_empty(struct adapter *padapter) for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) { if (pmlmeinfo->FW_sta_info[i].status == 1) - return _FAIL; + return false; } return true; } @@ -874,9 +874,10 @@ void VCS_update(struct adapter *padapter, struct sta_info *psta) int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len) { + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)pframe; unsigned int len; unsigned char *p; - unsigned 
short val16, subtype; + unsigned short val16; struct wlan_network *cur_network = &Adapter->mlmepriv.cur_network; /* u8 wpa_ie[255], rsn_ie[255]; */ u16 wpa_len = 0, rsn_len = 0; @@ -893,7 +894,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len) unsigned short ht_cap_info; unsigned char ht_info_infos_0; - if (!is_client_associated_to_ap(Adapter)) + if (!r8188eu_is_client_associated_to_ap(Adapter)) return true; len = packet_len - sizeof(struct ieee80211_hdr_3addr); @@ -908,9 +909,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len) if (!bssid) return _FAIL; - subtype = GetFrameSubType(pframe) >> 4; - - if (subtype == WIFI_BEACON) + if (ieee80211_is_beacon(mgmt->frame_control)) bssid->Reserved[0] = 1; bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len; @@ -1035,16 +1034,13 @@ _mismatch: return _FAIL; } -void update_beacon_info(struct adapter *padapter, u8 *pframe, uint pkt_len, struct sta_info *psta) +void update_beacon_info(struct adapter *padapter, u8 *ie_ptr, uint ie_len, struct sta_info *psta) { unsigned int i; - unsigned int len; struct ndis_802_11_var_ie *pIE; - len = pkt_len - (_BEACON_IE_OFFSET_ + WLAN_HDR_A3_LEN); - - for (i = 0; i < len;) { - pIE = (struct ndis_802_11_var_ie *)(pframe + (_BEACON_IE_OFFSET_ + WLAN_HDR_A3_LEN) + i); + for (i = 0; i < ie_len;) { + pIE = (struct ndis_802_11_var_ie *)(ie_ptr + i); switch (pIE->ElementID) { case _HT_EXTRA_INFO_IE_: /* HT info */ diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c index 873d2c5c36344a761f7eb0ae0a40b58957a1dad5..34494f08c0cdf92cf1e19068da49d1dd43169c69 100644 --- a/drivers/staging/r8188eu/core/rtw_xmit.c +++ b/drivers/staging/r8188eu/core/rtw_xmit.c @@ -38,7 +38,7 @@ static int rtw_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *px { pxmitbuf->pallocated_buf = kzalloc(alloc_sz, GFP_KERNEL); if (!pxmitbuf->pallocated_buf) - return _FAIL; + return -ENOMEM; pxmitbuf->pbuf = (u8 *)ALIGN((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ); pxmitbuf->dma_transfer_addr = 0; @@ -46,10 +46,10 @@ static int rtw_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *px pxmitbuf->pxmit_urb = usb_alloc_urb(0, GFP_KERNEL); if (!pxmitbuf->pxmit_urb) { kfree(pxmitbuf->pallocated_buf); - return _FAIL; + return -ENOMEM; } - return _SUCCESS; + return 0; } static void rtw_xmit_resource_free(struct adapter *padapter, struct xmit_buf *pxmitbuf, @@ -59,12 +59,11 @@ static void rtw_xmit_resource_free(struct adapter *padapter, struct xmit_buf *px kfree(pxmitbuf->pallocated_buf); } -s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) +int _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) { int i; struct xmit_buf *pxmitbuf; struct xmit_frame *pxframe; - int res = _SUCCESS; u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ; u32 num_xmit_extbuf = NR_XMIT_EXTBUFF; @@ -97,7 +96,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) if (!pxmitpriv->pallocated_frame_buf) { pxmitpriv->pxmit_frame_buf = NULL; - res = _FAIL; goto exit; } pxmitpriv->pxmit_frame_buf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_frame_buf), 4); @@ -132,10 +130,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) pxmitpriv->pallocated_xmitbuf = vzalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4); - if (!pxmitpriv->pallocated_xmitbuf) { - res = _FAIL; + if (!pxmitpriv->pallocated_xmitbuf) goto free_frame_buf; - } pxmitpriv->pxmitbuf = (u8 
*)ALIGN((size_t)(pxmitpriv->pallocated_xmitbuf), 4); /* pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - */ @@ -151,11 +147,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) pxmitbuf->ext_tag = false; /* Tx buf allocation may fail sometimes, so sleep and retry. */ - res = rtw_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ)); - if (res == _FAIL) { + if (rtw_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ))) { msleep(10); - res = rtw_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ)); - if (res == _FAIL) + if (rtw_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ))) goto free_xmitbuf; } @@ -172,10 +166,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) pxmitpriv->pallocated_xmit_extbuf = vzalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4); - if (!pxmitpriv->pallocated_xmit_extbuf) { - res = _FAIL; + if (!pxmitpriv->pallocated_xmit_extbuf) goto free_xmitbuf; - } pxmitpriv->pxmit_extbuf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4); @@ -188,11 +180,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) pxmitbuf->padapter = padapter; pxmitbuf->ext_tag = true; - res = rtw_xmit_resource_alloc(padapter, pxmitbuf, max_xmit_extbuf_size + XMITBUF_ALIGN_SZ); - if (res == _FAIL) { - res = _FAIL; + if (rtw_xmit_resource_alloc(padapter, pxmitbuf, max_xmit_extbuf_size + XMITBUF_ALIGN_SZ)) goto free_xmit_extbuf; - } list_add_tail(&pxmitbuf->list, &pxmitpriv->free_xmit_extbuf_queue.queue); pxmitbuf++; @@ -200,10 +189,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf; - if (rtw_alloc_hwxmits(padapter)) { - res = _FAIL; + if (rtw_alloc_hwxmits(padapter)) goto free_xmit_extbuf; - } rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); @@ -226,7 +213,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) rtl8188eu_init_xmit_priv(padapter); - return _SUCCESS; + return 0; free_xmit_extbuf: pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf; @@ -246,7 +233,7 @@ free_xmitbuf: free_frame_buf: vfree(pxmitpriv->pallocated_frame_buf); exit: - return res; + return -ENOMEM; } static void rtw_pkt_complete(struct adapter *padapter, struct sk_buff *pkt) @@ -476,8 +463,7 @@ static uint rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen) { uint len; - len = rtw_remainder_len(pfile); - len = (rlen > len) ? 
len : rlen; + len = min(rtw_remainder_len(pfile), rlen); if (rmem) skb_copy_bits(pfile->pkt, pfile->buf_len - pfile->pkt_len, rmem, len); @@ -1622,14 +1608,14 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb) spin_lock_bh(&padapter->br_ext_lock); if (!(skb->data[0] & 1) && br_port && memcmp(skb->data + ETH_ALEN, padapter->br_mac, ETH_ALEN) && - *((__be16 *)(skb->data + ETH_ALEN * 2)) != __constant_htons(ETH_P_8021Q) && - *((__be16 *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_IP) && + *((__be16 *)(skb->data + ETH_ALEN * 2)) != htons(ETH_P_8021Q) && + *((__be16 *)(skb->data + ETH_ALEN * 2)) == htons(ETH_P_IP) && !memcmp(padapter->scdb_mac, skb->data + ETH_ALEN, ETH_ALEN) && padapter->scdb_entry) { memcpy(skb->data + ETH_ALEN, GET_MY_HWADDR(padapter), ETH_ALEN); padapter->scdb_entry->ageing_timer = jiffies; spin_unlock_bh(&padapter->br_ext_lock); } else { - if (*((__be16 *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_8021Q)) { + if (*((__be16 *)(skb->data + ETH_ALEN * 2)) == htons(ETH_P_8021Q)) { is_vlan_tag = 1; vlan_hdr = *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2)); for (i = 0; i < 6; i++) @@ -1637,10 +1623,10 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb) skb_pull(skb, 4); } if (!memcmp(skb->data + ETH_ALEN, padapter->br_mac, ETH_ALEN) && - (*((__be16 *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_IP))) + (*((__be16 *)(skb->data + ETH_ALEN * 2)) == htons(ETH_P_IP))) memcpy(padapter->br_ip, skb->data + WLAN_ETHHDR_LEN + 12, 4); - if (*((__be16 *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_IP)) { + if (*((__be16 *)(skb->data + ETH_ALEN * 2)) == htons(ETH_P_IP)) { if (memcmp(padapter->scdb_mac, skb->data + ETH_ALEN, ETH_ALEN)) { padapter->scdb_entry = (struct nat25_network_db_entry *)scdb_findEntry(padapter, skb->data + WLAN_ETHHDR_LEN + 12); @@ -1669,7 +1655,7 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb) skb_push(skb, 4); for (i = 0; i < 6; i++) *((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2)); - *((__be16 *)(skb->data + ETH_ALEN * 2)) = __constant_htons(ETH_P_8021Q); + *((__be16 *)(skb->data + ETH_ALEN * 2)) = htons(ETH_P_8021Q); *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2)) = vlan_hdr; } @@ -1708,7 +1694,7 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb) skb_push(skb, 4); for (i = 0; i < 6; i++) *((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2)); - *((__be16 *)(skb->data + ETH_ALEN * 2)) = __constant_htons(ETH_P_8021Q); + *((__be16 *)(skb->data + ETH_ALEN * 2)) = htons(ETH_P_8021Q); *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2)) = vlan_hdr; } } diff --git a/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c b/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c index 525deab10820b780460afac6bf6e8eb83dbe5f3d..26e710ef5134d2009a640b6b3c31754b4038f554 100644 --- a/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c +++ b/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c @@ -69,7 +69,7 @@ void ODM_TxPwrTrackAdjust88E(struct odm_dm_struct *dm_odm, u8 Type,/* 0 = OFDM, /*----------------------------------------------------------------------------- * Function: odm_TxPwrTrackSetPwr88E() * - * Overview: 88E change all channel tx power accordign to flag. + * Overview: 88E change all channel tx power according to flag. * OFDM & CCK are all different. 
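* (CCK and OFDM/MCS rates are programmed through separate TxAGC registers on this chip, which is why the tracking code takes the OFDM/CCK Type flag above and compensates the two paths independently.)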
* * Input: NONE @@ -583,7 +583,7 @@ static bool phy_SimularityCompare_8188E( tmp2 = resulta[c2][i]; } - diff = (tmp1 > tmp2) ? (tmp1 - tmp2) : (tmp2 - tmp1); + diff = abs(tmp1 - tmp2); if (diff > MAX_TOLERANCE) { if ((i == 2 || i == 6) && !sim_bitmap) { @@ -882,14 +882,6 @@ void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery) if (RegE94 != 0) patha_fill_iqk(adapt, pathaok, result, final_candidate, (RegEA4 == 0)); -/* To Fix BSOD when final_candidate is 0xff */ -/* by sherry 20120321 */ - if (final_candidate < 4) { - for (i = 0; i < IQK_Matrix_REG_NUM; i++) - dm_odm->RFCalibrateInfo.IQKMatrixRegSetting.Value[0][i] = result[final_candidate][i]; - dm_odm->RFCalibrateInfo.IQKMatrixRegSetting.bIQKDone = true; - } - _PHY_SaveADDARegisters(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup_recover, 9); } diff --git a/drivers/staging/r8188eu/hal/hal_intf.c b/drivers/staging/r8188eu/hal/hal_intf.c index 37935aef71eab131fdb366b30b36074dd71c7756..13790e32f11c2a3a36481585ef6a2f95e652c169 100644 --- a/drivers/staging/r8188eu/hal/hal_intf.c +++ b/drivers/staging/r8188eu/hal/hal_intf.c @@ -6,24 +6,19 @@ #include "../include/drv_types.h" #include "../include/hal_intf.h" -uint rtw_hal_init(struct adapter *adapt) +uint rtw_hal_init(struct adapter *adapt) { - uint status = _SUCCESS; - adapt->hw_init_completed = false; - status = rtl8188eu_hal_init(adapt); + if (rtl8188eu_hal_init(adapt) != _SUCCESS) + return _FAIL; - if (status == _SUCCESS) { - adapt->hw_init_completed = true; + adapt->hw_init_completed = true; - if (adapt->registrypriv.notch_filter == 1) - hal_notch_filter_8188e(adapt, 1); - } else { - adapt->hw_init_completed = false; - } + if (adapt->registrypriv.notch_filter == 1) + hal_notch_filter_8188e(adapt, 1); - return status; + return _SUCCESS; } uint rtw_hal_deinit(struct adapter *adapt) diff --git a/drivers/staging/r8188eu/hal/odm_RTL8188E.c b/drivers/staging/r8188eu/hal/odm_RTL8188E.c index c8a3c521bd60789b80082cebe52c92b62d2a85d3..f3f4074d4316763d7d432a9a61ab1bfa1cd4e915 100644 --- a/drivers/staging/r8188eu/hal/odm_RTL8188E.c +++ b/drivers/staging/r8188eu/hal/odm_RTL8188E.c @@ -194,12 +194,12 @@ static void odm_HWAntDiv(struct odm_dm_struct *dm_odm) for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) { pEntry = dm_odm->pODM_StaInfo[i]; if (IS_STA_VALID(pEntry)) { - /* 2 Caculate RSSI per Antenna */ + /* 2 Calculate RSSI per Antenna */ Main_RSSI = (dm_fat_tbl->MainAnt_Cnt[i] != 0) ? (dm_fat_tbl->MainAnt_Sum[i] / dm_fat_tbl->MainAnt_Cnt[i]) : 0; Aux_RSSI = (dm_fat_tbl->AuxAnt_Cnt[i] != 0) ? (dm_fat_tbl->AuxAnt_Sum[i] / dm_fat_tbl->AuxAnt_Cnt[i]) : 0; TargetAnt = (Main_RSSI >= Aux_RSSI) ? MAIN_ANT : AUX_ANT; /* 2 Select MaxRSSI for DIG */ - LocalMaxRSSI = (Main_RSSI > Aux_RSSI) ? Main_RSSI : Aux_RSSI; + LocalMaxRSSI = max(Main_RSSI, Aux_RSSI); if ((LocalMaxRSSI > AntDivMaxRSSI) && (LocalMaxRSSI < 40)) AntDivMaxRSSI = LocalMaxRSSI; if (LocalMaxRSSI > MaxRSSI) @@ -211,7 +211,7 @@ static void odm_HWAntDiv(struct odm_dm_struct *dm_odm) else if ((dm_fat_tbl->RxIdleAnt == AUX_ANT) && (Aux_RSSI == 0)) Aux_RSSI = Main_RSSI; - LocalMinRSSI = (Main_RSSI > Aux_RSSI) ? 
Aux_RSSI : Main_RSSI; + LocalMinRSSI = min(Main_RSSI, Aux_RSSI); if (LocalMinRSSI < MinRSSI) { MinRSSI = LocalMinRSSI; RxIdleAnt = TargetAnt; diff --git a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c index 158260547f2b27942032364385cc4c780a120251..73855bca76fe0c00105ba5eea93b96cea235d9d2 100644 --- a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c +++ b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c @@ -355,7 +355,7 @@ void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 PwrState) if (PwrState) { rtw_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON); - /* 1.2V Power: From VDDON with Power Cut(0x0000h[15]), defualt valid */ + /* 1.2V Power: From VDDON with Power Cut(0x0000h[15]), default valid */ res = rtw_read16(pAdapter, REG_SYS_ISO_CTRL, &tmpV16); if (res) return; @@ -676,11 +676,7 @@ s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy) return status; } -void -Hal_EfuseParseIDCode88E( - struct adapter *padapter, - u8 *hwinfo - ) +void Hal_EfuseParseIDCode88E(struct adapter *padapter, u8 *hwinfo) { struct eeprom_priv *pEEPROM = &padapter->eeprompriv; struct net_device *netdev = padapter->pnetdev; diff --git a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c index 532c63bce0bf02f1ae639adcee9275606444c261..b7f3c7a670fb41c26b7816256e937f5c1334269d 100644 --- a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c +++ b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c @@ -23,7 +23,7 @@ static u32 phy_calculate_bit_shift(u32 bitmask) /** * Function: PHY_QueryBBReg * -* OverView: Read "sepcific bits" from BB register +* Overview: Read "specific bits" from BB register * * Input: * struct adapter *Adapter, @@ -56,7 +56,7 @@ rtl8188e_PHY_QueryBBReg( /** * Function: PHY_SetBBReg * -* OverView: Write "Specific bits" to BB register (page 8~) +* Overview: Write "Specific bits" to BB register (page 8~) * * Input: * struct adapter *Adapter, @@ -94,7 +94,7 @@ void rtl8188e_PHY_SetBBReg(struct adapter *Adapter, u32 RegAddr, u32 BitMask, u3 /** * Function: phy_RFSerialRead * -* OverView: Read regster from RF chips +* Overview: Read register from RF chips * * Input: * struct adapter *Adapter, @@ -160,7 +160,7 @@ phy_RFSerialRead( /** * Function: phy_RFSerialWrite * -* OverView: Write data to RF register (page 8~) +* Overview: Write data to RF register (page 8~) * * Input: * struct adapter *Adapter, @@ -235,7 +235,7 @@ phy_RFSerialWrite( /** * Function: PHY_QueryRFReg * -* OverView: Query "Specific bits" to RF register (page 8~) +* Overview: Query "Specific bits" from RF register (page 8~) * * Input: * struct adapter *Adapter, @@ -261,7 +261,7 @@ u32 rtl8188e_PHY_QueryRFReg(struct adapter *Adapter, u32 RegAddr, u32 BitMask) /** * Function: PHY_SetRFReg * -* OverView: Write "Specific bits" to RF register (page 8~) +* Overview: Write "Specific bits" to RF register (page 8~) * * Input: * struct adapter *Adapter, @@ -335,7 +335,7 @@ s32 PHY_MACConfig8188E(struct adapter *Adapter) /** * Function: phy_InitBBRFRegisterDefinition * -* OverView: Initialize Register definition offset for Radio Path A/B/C/D +* Overview: Initialize Register definition offset for Radio Path A/B/C/D * * Input: * struct adapter *Adapter, @@ -363,7 +363,7 @@ phy_InitBBRFRegisterDefinition( /* RF Interface (Output and) Enable */ pHalData->PHYRegDef.rfintfe = rFPGA0_XA_RFInterfaceOE; /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */ - /* Addr of LSSI. Wirte RF register by driver */ + /* Addr of LSSI.
Write RF register by driver */ pHalData->PHYRegDef.rf3wireOffset = rFPGA0_XA_LSSIParameter; /* LSSI Parameter */ /* RF parameter */ diff --git a/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c index dff0cba751df45433b6e231224d08fb8254ed292..d1ac2960f1c444d23cff38fe88eb9397446ae63d 100644 --- a/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c +++ b/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c @@ -66,28 +66,25 @@ void update_recvframe_attrib_88e(struct recv_frame *precvframe, struct recv_stat if (pattrib->pkt_rpt_type == NORMAL_RX) { pattrib->pkt_len = le32_to_cpu(prxstat->rxdw0) & 0x00003fff; + pattrib->icv_err = (le32_to_cpu(prxstat->rxdw0) >> 15) & 0x1; pattrib->drvinfo_sz = ((le32_to_cpu(prxstat->rxdw0) >> 16) & 0xf) * 8; - + pattrib->encrypt = (u8)((le32_to_cpu(prxstat->rxdw0) >> 20) & 0x7); + pattrib->qos = (le32_to_cpu(prxstat->rxdw0) >> 23) & 0x1; + pattrib->shift_sz = (le32_to_cpu(prxstat->rxdw0) >> 24) & 0x3; pattrib->physt = (le32_to_cpu(prxstat->rxdw0) >> 26) & 0x1; - pattrib->bdecrypted = (le32_to_cpu(prxstat->rxdw0) & BIT(27)) ? 0 : 1; - pattrib->encrypt = (le32_to_cpu(prxstat->rxdw0) >> 20) & 0x7; - pattrib->qos = (le32_to_cpu(prxstat->rxdw0) >> 23) & 0x1; pattrib->priority = (le32_to_cpu(prxstat->rxdw1) >> 8) & 0xf; - pattrib->amsdu = (le32_to_cpu(prxstat->rxdw1) >> 13) & 0x1; + pattrib->mdata = (le32_to_cpu(prxstat->rxdw1) >> 26) & 0x1; + pattrib->mfrag = (le32_to_cpu(prxstat->rxdw1) >> 27) & 0x1; pattrib->seq_num = le32_to_cpu(prxstat->rxdw2) & 0x00000fff; pattrib->frag_num = (le32_to_cpu(prxstat->rxdw2) >> 12) & 0xf; - pattrib->mfrag = (le32_to_cpu(prxstat->rxdw1) >> 27) & 0x1; - pattrib->mdata = (le32_to_cpu(prxstat->rxdw1) >> 26) & 0x1; pattrib->mcs_rate = le32_to_cpu(prxstat->rxdw3) & 0x3f; pattrib->rxht = (le32_to_cpu(prxstat->rxdw3) >> 6) & 0x1; - pattrib->icv_err = (le32_to_cpu(prxstat->rxdw0) >> 15) & 0x1; - pattrib->shift_sz = (le32_to_cpu(prxstat->rxdw0) >> 24) & 0x3; } else if (pattrib->pkt_rpt_type == TX_REPORT1) { /* CCX */ pattrib->pkt_len = TX_RPT1_PKT_LEN; } else if (pattrib->pkt_rpt_type == TX_REPORT2) { @@ -108,33 +105,34 @@ void update_recvframe_attrib_88e(struct recv_frame *precvframe, struct recv_stat */ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe, struct phy_stat *pphy_status) { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precvframe->rx_data; struct adapter *padapter = precvframe->adapter; struct rx_pkt_attrib *pattrib = &precvframe->attrib; struct hal_data_8188e *pHalData = &padapter->haldata; struct phy_info *pPHYInfo = &pattrib->phy_info; u8 *wlanhdr = precvframe->rx_data; - __le16 fc = *(__le16 *)wlanhdr; struct odm_per_pkt_info pkt_info; u8 *sa = NULL; struct sta_priv *pstapriv; struct sta_info *psta; - pkt_info.bPacketMatchBSSID = ((!ieee80211_is_ctl(fc)) && + pkt_info.bPacketMatchBSSID = ((!ieee80211_is_ctl(hdr->frame_control)) && !pattrib->icv_err && !pattrib->crc_err && !memcmp(get_hdr_bssid(wlanhdr), get_bssid(&padapter->mlmepriv), ETH_ALEN)); pkt_info.bPacketToSelf = pkt_info.bPacketMatchBSSID && - (!memcmp(get_da(wlanhdr), - myid(&padapter->eeprompriv), ETH_ALEN)); + ether_addr_equal(ieee80211_get_DA(hdr), + myid(&padapter->eeprompriv)); - pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID && ieee80211_is_beacon(fc); + pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID && + ieee80211_is_beacon(hdr->frame_control); if (pkt_info.bPacketBeacon) { if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE)) sa = padapter->mlmepriv.cur_network.network.MacAddress; /* to do 
Ad-hoc */ } else { - sa = get_sa(wlanhdr); + sa = ieee80211_get_SA(hdr); } pstapriv = &padapter->stapriv; diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c index 8e4a5acc0b18885d3c93d4d3664e8597c1760263..6d1f56d1f9d72a9539d848e6a5d3816b642213fd 100644 --- a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c +++ b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c @@ -149,7 +149,6 @@ static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw) static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bagg_pkt) { - int pull = 0; uint qsel; u8 data_rate, pwr_status, offset; struct adapter *adapt = pxmitframe->padapter; @@ -295,7 +294,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag ODM_SetTxAntByTxInfo_88E(&haldata->odmpriv, pmem, pattrib->mac_id); rtl8188eu_cal_txdesc_chksum(ptxdesc); - return pull; + return 0; } /* for non-agg data frame or management frame */ diff --git a/drivers/staging/r8188eu/include/Hal8188EPhyReg.h b/drivers/staging/r8188eu/include/Hal8188EPhyReg.h index 8b8c75a1f1490fe1a571030fd36a91777b6ffc2e..da2329be4474a07f60c06b7383a9298213ada1ea 100644 --- a/drivers/staging/r8188eu/include/Hal8188EPhyReg.h +++ b/drivers/staging/r8188eu/include/Hal8188EPhyReg.h @@ -92,7 +92,7 @@ #define rFPGA0_AdDaClockEn 0x888 #define rFPGA0_AnalogParameter4 0x88c -#define rFPGA0_XA_LSSIReadBack 0x8a0 /* Tranceiver LSSI Readback */ +#define rFPGA0_XA_LSSIReadBack 0x8a0 /* Transceiver LSSI Readback */ #define rFPGA0_XB_LSSIReadBack 0x8a4 #define rFPGA0_XC_LSSIReadBack 0x8a8 #define rFPGA0_XD_LSSIReadBack 0x8ac @@ -167,7 +167,7 @@ /* RxIQ DC offset, Rx digital filter, DC notch filter */ #define rOFDM0_XARxAFE 0xc10 -#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imblance matrix */ +#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imbalance matrix */ #define rOFDM0_XBRxAFE 0xc18 #define rOFDM0_XBRxIQImbalance 0xc1c #define rOFDM0_XCRxAFE 0xc20 diff --git a/drivers/staging/r8188eu/include/drv_types.h b/drivers/staging/r8188eu/include/drv_types.h index 1bd0c8f3a35886308c9e0fad4530d72116cfaa00..8fef5759c36aed1502ac9fbaebf698bf3d9c64e3 100644 --- a/drivers/staging/r8188eu/include/drv_types.h +++ b/drivers/staging/r8188eu/include/drv_types.h @@ -167,7 +167,6 @@ struct adapter { s32 bDriverStopped; s32 bSurpriseRemoved; - s32 bCardDisableWOHSM; u8 hw_init_completed; s8 signal_strength; diff --git a/drivers/staging/r8188eu/include/odm.h b/drivers/staging/r8188eu/include/odm.h index f131e17167bfb7fe1b52fa84a67c0d57ba2e3b0e..8cea166b7b735ca09335b1f3b6177f1ddc51e94a 100644 --- a/drivers/staging/r8188eu/include/odm.h +++ b/drivers/staging/r8188eu/include/odm.h @@ -80,7 +80,6 @@ struct odm_rate_adapt { #define HP_THERMAL_NUM 8 #define AVG_THERMAL_NUM 8 -#define IQK_Matrix_REG_NUM 8 struct odm_phy_dbg_info { /* ODM Write,debug info */ @@ -119,7 +118,8 @@ enum odm_ability_def { ODM_BB_PWR_TRA = BIT(8), }; -# define ODM_ITRF_USB 0x2 +#define ODM_ITRF_USB 0x2 +#define ODM_CE 0x04 /* ODM_CMNINFO_WM_MODE */ enum odm_wireless_mode { @@ -163,11 +163,6 @@ struct odm_ra_info { u8 PTSmoothFactor; }; -struct ijk_matrix_regs_set { - bool bIQKDone; - s32 Value[1][IQK_Matrix_REG_NUM]; -}; - struct odm_rf_cal { /* for tx power tracking */ u32 RegA24; /* for TempCCK */ @@ -205,7 +200,6 @@ struct odm_rf_cal { u8 ThermalValue_HP[HP_THERMAL_NUM]; u8 ThermalValue_HP_index; - struct ijk_matrix_regs_set IQKMatrixRegSetting; u8 Delta_IQK; u8 Delta_LCK; diff --git a/drivers/staging/r8188eu/include/odm_RTL8188E.h 
b/drivers/staging/r8188eu/include/odm_RTL8188E.h index 3c6471f1a893f65ba3d3105e1e1b4de861b48740..4f16af24859138a0b2c0892c4cc4b1ce87968910 100644 --- a/drivers/staging/r8188eu/include/odm_RTL8188E.h +++ b/drivers/staging/r8188eu/include/odm_RTL8188E.h @@ -11,6 +11,13 @@ #define MAIN_ANT_CGCS_RX 0 #define AUX_ANT_CGCS_RX 1 +#define SET_TX_DESC_ANTSEL_A_88E(__ptxdesc, __value) \ + le32p_replace_bits((__le32 *)(__ptxdesc + 8), __value, BIT(24)) +#define SET_TX_DESC_ANTSEL_B_88E(__ptxdesc, __value) \ + le32p_replace_bits((__le32 *)(__ptxdesc + 8), __value, BIT(25)) +#define SET_TX_DESC_ANTSEL_C_88E(__ptxdesc, __value) \ + le32p_replace_bits((__le32 *)(__ptxdesc + 28), __value, BIT(29)) + void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *pDM_Odm); void ODM_AntennaDiversity_88E(struct odm_dm_struct *pDM_Odm); diff --git a/drivers/staging/r8188eu/include/odm_types.h b/drivers/staging/r8188eu/include/odm_types.h deleted file mode 100644 index 76302df4b330b014508c658cbe90b329bf5579ae..0000000000000000000000000000000000000000 --- a/drivers/staging/r8188eu/include/odm_types.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/* Copyright(c) 2007 - 2011 Realtek Corporation. */ - -#ifndef __ODM_TYPES_H__ -#define __ODM_TYPES_H__ - -#define ODM_CE 0x04 /* BIT(2) */ - -#define SET_TX_DESC_ANTSEL_A_88E(__ptxdesc, __value) \ - le32p_replace_bits((__le32 *)(__ptxdesc + 8), __value, BIT(24)) -#define SET_TX_DESC_ANTSEL_B_88E(__ptxdesc, __value) \ - le32p_replace_bits((__le32 *)(__ptxdesc + 8), __value, BIT(25)) -#define SET_TX_DESC_ANTSEL_C_88E(__ptxdesc, __value) \ - le32p_replace_bits((__le32 *)(__ptxdesc + 28), __value, BIT(29)) - -#endif /* __ODM_TYPES_H__ */ diff --git a/drivers/staging/r8188eu/include/osdep_intf.h b/drivers/staging/r8188eu/include/osdep_intf.h index 36511c469546147530c25baecb8647e3405f9a12..6d66cb57225e7a7984683491dacb120cb56988a5 100644 --- a/drivers/staging/r8188eu/include/osdep_intf.h +++ b/drivers/staging/r8188eu/include/osdep_intf.h @@ -43,10 +43,10 @@ int netdev_open(struct net_device *pnetdev); int netdev_close(struct net_device *pnetdev); u8 rtw_init_drv_sw(struct adapter *padapter); -u8 rtw_free_drv_sw(struct adapter *padapter); -u8 rtw_reset_drv_sw(struct adapter *padapter); +void rtw_free_drv_sw(struct adapter *padapter); +void rtw_reset_drv_sw(struct adapter *padapter); -u32 rtw_start_drv_threads(struct adapter *padapter); +int rtw_start_drv_threads(struct adapter *padapter); void rtw_stop_drv_threads (struct adapter *padapter); void rtw_cancel_all_timer(struct adapter *padapter); diff --git a/drivers/staging/r8188eu/include/osdep_service.h b/drivers/staging/r8188eu/include/osdep_service.h index 72990a1cdc6615cefbcd50867cd0493206fae752..f8ed04f32cae3e0216afc40d010e6ea127a6c5fb 100644 --- a/drivers/staging/r8188eu/include/osdep_service.h +++ b/drivers/staging/r8188eu/include/osdep_service.h @@ -53,7 +53,7 @@ static inline struct list_head *get_list_head(struct __queue *queue) return (&(queue->queue)); } -static inline void _set_timer(struct timer_list *ptimer,u32 delay_time) +static inline void _set_timer(struct timer_list *ptimer, u32 delay_time) { mod_timer(ptimer, jiffies + msecs_to_jiffies(delay_time)); } @@ -66,7 +66,7 @@ static inline int rtw_netif_queue_stopped(struct net_device *pnetdev) netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3)); } -extern int RTW_STATUS_CODE(int error_code); +int RTW_STATUS_CODE(int error_code); void *rtw_malloc2d(int h, int w, int size); @@ -108,7 +108,7 @@ void rtw_free_netdev(struct 
net_device *netdev); #define FUNC_ADPT_FMT "%s(%s)" #define FUNC_ADPT_ARG(adapter) __func__, adapter->pnetdev->name -#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)),(sig), 1) +#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1) /* Macros for handling unaligned memory accesses */ diff --git a/drivers/staging/r8188eu/include/rtl8188e_hal.h b/drivers/staging/r8188eu/include/rtl8188e_hal.h index ed4091e7cc7e6c0a21f0ebbe49ef6f8e03397a7a..feeb37c228977c5e2ecd504e7ec433ab7377693f 100644 --- a/drivers/staging/r8188eu/include/rtl8188e_hal.h +++ b/drivers/staging/r8188eu/include/rtl8188e_hal.h @@ -14,7 +14,6 @@ #include "rtl8188e_xmit.h" #include "rtl8188e_cmd.h" #include "rtw_efuse.h" -#include "odm_types.h" #include "odm.h" #include "odm_HWConfig.h" #include "odm_RegDefine11N.h" @@ -88,7 +87,7 @@ struct txpowerinfo24g { /* 9bytes + 1byt + 5bytes and pre 1byte. */ /* For worst case: */ /* | 2byte|----8bytes----|1byte|--7bytes--| 92D */ -/* PG data exclude header, dummy 7 bytes frome CP test and reserved 1byte. */ +/* PG data exclude header, dummy 7 bytes from CP test and reserved 1byte. */ #define EFUSE_OOB_PROTECT_BYTES_88E 18 #define EFUSE_PROTECT_BYTES_BANK 16 @@ -165,9 +164,9 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *hwinfo, void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail); -void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter,u8 *PROMContent, +void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool AutoLoadFail); -void Hal_ReadThermalMeter_88E(struct adapter * dapter, u8 *PROMContent, +void Hal_ReadThermalMeter_88E(struct adapter *padapter, u8 *PROMContent, bool AutoloadFail); void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoadFail); diff --git a/drivers/staging/r8188eu/include/rtl8188e_spec.h b/drivers/staging/r8188eu/include/rtl8188e_spec.h index e34619140e33cb759bd7ab8d3b3ce33cc9867ae6..3fa3b3e5dd6469ef1fb5b2ef95f8a6b39e9d1fe1 100644 --- a/drivers/staging/r8188eu/include/rtl8188e_spec.h +++ b/drivers/staging/r8188eu/include/rtl8188e_spec.h @@ -4,7 +4,7 @@ #ifndef __RTL8188E_SPEC_H__ #define __RTL8188E_SPEC_H__ -/* 8192C Regsiter offset definition */ +/* 8192C Register offset definition */ #define HAL_PS_TIMER_INT_DELAY 50 /* 50 microseconds */ #define HAL_92C_NAV_UPPER_UNIT 128 /* micro-second */ @@ -674,7 +674,7 @@ Current IOREG MAP #define REG_USB_HRPWM 0xFE58 #define REG_USB_HCPWM 0xFE57 -/* 8192C Regsiter Bit and Content definition */ +/* 8192C Register Bit and Content definition */ /* 0x0000h ~ 0x00FFh System Configuration */ /* 2 SYS_ISO_CTRL */ @@ -900,12 +900,12 @@ Current IOREG MAP #define HQSEL_HIQ BIT(5) /* For normal driver, 0x10C */ -#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14) -#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12) -#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10) -#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8 ) -#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6 ) -#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4 ) +#define _TXDMA_HIQ_MAP(x) (((x) & 0x3) << 14) +#define _TXDMA_MGQ_MAP(x) (((x) & 0x3) << 12) +#define _TXDMA_BKQ_MAP(x) (((x) & 0x3) << 10) +#define _TXDMA_BEQ_MAP(x) (((x) & 0x3) << 8) +#define _TXDMA_VIQ_MAP(x) (((x) & 0x3) << 6) +#define _TXDMA_VOQ_MAP(x) (((x) & 0x3) << 4) #define QUEUE_LOW 1 #define QUEUE_NORMAL 2 @@ -1135,7 +1135,7 @@ Current IOREG MAP #define EEPROM_Default_CrystalCap_88E 0x20 #define EEPROM_Default_ThermalMeter_88E 0x18 -/* New EFUSE deafult value */ +/* New EFUSE default value */ #define 
EEPROM_DEFAULT_24G_INDEX 0x2D #define EEPROM_DEFAULT_24G_HT20_DIFF 0X02 #define EEPROM_DEFAULT_24G_OFDM_DIFF 0X04 diff --git a/drivers/staging/r8188eu/include/rtw_ap.h b/drivers/staging/r8188eu/include/rtw_ap.h index 8b4134eb3095629e934ec2a3f1fdad4ae6e98989..89b02c97e041d373f5fe34e5b50acf469e12add1 100644 --- a/drivers/staging/r8188eu/include/rtw_ap.h +++ b/drivers/staging/r8188eu/include/rtw_ap.h @@ -26,7 +26,7 @@ u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta); void sta_info_update(struct adapter *padapter, struct sta_info *psta); u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta, bool active, u16 reason); -int rtw_sta_flush(struct adapter *padapter); +void rtw_sta_flush(struct adapter *padapter); void start_ap_mode(struct adapter *padapter); void stop_ap_mode(struct adapter *padapter); void update_bmc_sta(struct adapter *padapter); diff --git a/drivers/staging/r8188eu/include/rtw_cmd.h b/drivers/staging/r8188eu/include/rtw_cmd.h index 9a76aa85de940b3815c2948281960a5fcdb87a3f..c330a4435b31a411887c5f0b7c2e9784dfb5590a 100644 --- a/drivers/staging/r8188eu/include/rtw_cmd.h +++ b/drivers/staging/r8188eu/include/rtw_cmd.h @@ -6,15 +6,10 @@ #include "wlan_bssdef.h" #include "rtw_rf.h" -#include "rtw_led.h" - -#define C2H_MEM_SZ (16*1024) #include "osdep_service.h" #include "ieee80211.h" /* */ -#define FREE_CMDOBJ_SZ 128 - #define MAX_CMDSZ 1024 #define MAX_RSPSZ 512 #define MAX_EVTSZ 1024 @@ -82,10 +77,10 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd); int rtw_cmd_thread(void *context); -u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv); +int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv); void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv); -u32 rtw_init_evt_priv(struct evt_priv *pevtpriv); +int rtw_init_evt_priv(struct evt_priv *pevtpriv); void rtw_free_evt_priv(struct evt_priv *pevtpriv); void rtw_evt_notify_isr(struct evt_priv *pevtpriv); u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType); @@ -445,8 +440,7 @@ struct getrfintfs_parm { u8 rfintfs; }; -struct Tx_Beacon_param -{ +struct Tx_Beacon_param { struct wlan_bssid_ex network; }; @@ -455,7 +449,7 @@ struct Tx_Beacon_param mac[0] == 0 ==> CMD mode, return H2C_SUCCESS. 
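(Presumably a parameter-marshalling self-test: the fixed values below, e.g. s0 == 0x1234 and w0 == 0x78563412, let the handler verify byte order and structure packing across the command interface.)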
- The following condition must be ture under CMD mode + The following condition must be true under CMD mode mac[1] == mac[4], mac[2] == mac[3], mac[0]=mac[5]= 0; s0 == 0x1234, s1 == 0xabcd, w0 == 0x78563412, w1 == 0x5aa5def7; s2 == (b1 << 8 | b0); @@ -508,7 +502,7 @@ struct drvextra_cmd_parm { unsigned char *pbuf; }; -/*------------------- Below are used for RF/BB tunning ---------------------*/ +/*------------------- Below are used for RF/BB tuning ---------------------*/ struct setantenna_parm { u8 tx_antset; @@ -592,14 +586,14 @@ struct setratable_parm { }; struct getratable_parm { - uint rsvd; + uint rsvd; }; struct getratable_rsp { - u8 ss_ForceUp[NumRates]; - u8 ss_ULevel[NumRates]; - u8 ss_DLevel[NumRates]; - u8 count_judge[NumRates]; + u8 ss_ForceUp[NumRates]; + u8 ss_ULevel[NumRates]; + u8 ss_DLevel[NumRates]; + u8 count_judge[NumRates]; }; /* to get TX,RX retry count */ @@ -682,26 +676,22 @@ struct set_ch_parm { }; /*H2C Handler index: 59 */ -struct SetChannelPlan_param -{ +struct SetChannelPlan_param { u8 channel_plan; }; /*H2C Handler index: 60 */ -struct LedBlink_param -{ +struct LedBlink_param { struct LED_871x *pLed; }; /*H2C Handler index: 61 */ -struct SetChannelSwitch_param -{ +struct SetChannelSwitch_param { u8 new_ch_no; }; /*H2C Handler index: 62 */ -struct TDLSoption_param -{ +struct TDLSoption_param { u8 addr[ETH_ALEN]; u8 option; }; @@ -730,31 +720,31 @@ Result: #define H2C_CMD_OVERFLOW 0x06 #define H2C_RESERVED 0x07 -u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num); -u8 rtw_createbss_cmd(struct adapter *padapter); +u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num); +u8 rtw_createbss_cmd(struct adapter *padapter); u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key); u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue); -u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network* pnetwork); +u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork); u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue); -u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra networktype); -u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset); -u8 rtw_setrfintfs_cmd(struct adapter *padapter, u8 mode); +u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra networktype); +int rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset); +u8 rtw_setrfintfs_cmd(struct adapter *padapter, u8 mode); -u8 rtw_gettssi_cmd(struct adapter *padapter, u8 offset,u8 *pval); -u8 rtw_setfwdig_cmd(struct adapter*padapter, u8 type); -u8 rtw_setfwra_cmd(struct adapter*padapter, u8 type); +u8 rtw_gettssi_cmd(struct adapter *padapter, u8 offset, u8 *pval); +u8 rtw_setfwdig_cmd(struct adapter *padapter, u8 type); +u8 rtw_setfwra_cmd(struct adapter *padapter, u8 type); -u8 rtw_addbareq_cmd(struct adapter*padapter, u8 tid, u8 *addr); +u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr); u8 rtw_dynamic_chk_wk_cmd(struct adapter *adapter); -u8 rtw_lps_ctrl_wk_cmd(struct adapter*padapter, u8 lps_ctrl_type, u8 enqueue); -u8 rtw_rpt_timer_cfg_cmd(struct adapter*padapter, u16 minRptTime); +u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue); +u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 minRptTime); - u8 rtw_antenna_select_cmd(struct adapter*padapter, u8 antenna,u8 enqueue); -u8 rtw_ps_cmd(struct adapter*padapter); +u8 
rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue); +u8 rtw_ps_cmd(struct adapter *padapter); -u8 rtw_chk_hi_queue_cmd(struct adapter*padapter); +u8 rtw_chk_hi_queue_cmd(struct adapter *padapter); u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan); @@ -859,8 +849,7 @@ enum rtw_h2c_cmd { #define _SetRFReg_CMD_ _Write_RFREG_CMD_ #ifdef _RTW_CMD_C_ -static struct _cmd_callback rtw_cmd_callback[] = -{ +static struct _cmd_callback rtw_cmd_callback[] = { {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/ {GEN_CMD_CODE(_Write_MACREG), NULL}, {GEN_CMD_CODE(_Read_BBREG), &rtw_getbbrfreg_cmdrsp_callback}, diff --git a/drivers/staging/r8188eu/include/rtw_io.h b/drivers/staging/r8188eu/include/rtw_io.h index 925c7967ac04ea8b1e11ea9991f69b53480e6e41..e9744694204bc7086eef9d546d601acd86415a3b 100644 --- a/drivers/staging/r8188eu/include/rtw_io.h +++ b/drivers/staging/r8188eu/include/rtw_io.h @@ -209,7 +209,7 @@ struct io_priv { }; uint ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue); -void sync_ioreq_enqueue(struct io_req *preq,struct io_queue *ioqueue); +void sync_ioreq_enqueue(struct io_req *preq, struct io_queue *ioqueue); uint sync_ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue); uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue); struct io_req *alloc_ioreq(struct io_queue *pio_q); @@ -285,18 +285,4 @@ void bus_sync_io(struct io_queue *pio_q); u32 _ioreq2rwmem(struct io_queue *pio_q); void dev_power_down(struct adapter *Adapter, u8 bpwrup); -#define PlatformEFIOWrite1Byte(_a,_b,_c) \ - rtw_write8(_a,_b,_c) -#define PlatformEFIOWrite2Byte(_a,_b,_c) \ - rtw_write16(_a,_b,_c) -#define PlatformEFIOWrite4Byte(_a,_b,_c) \ - rtw_write32(_a,_b,_c) - -#define PlatformEFIORead1Byte(_a,_b) \ - rtw_read8(_a,_b) -#define PlatformEFIORead2Byte(_a,_b) \ - rtw_read16(_a,_b) -#define PlatformEFIORead4Byte(_a,_b) \ - rtw_read32(_a,_b) - #endif /* _RTL8711_IO_H_ */ diff --git a/drivers/staging/r8188eu/include/rtw_ioctl_set.h b/drivers/staging/r8188eu/include/rtw_ioctl_set.h index 7365079c704f2ff2dbd000bd49af7ec488d8ad14..c3eb2479f27b49613a53782df2c3aed3e7813c96 100644 --- a/drivers/staging/r8188eu/include/rtw_ioctl_set.h +++ b/drivers/staging/r8188eu/include/rtw_ioctl_set.h @@ -10,10 +10,10 @@ typedef u8 NDIS_802_11_PMKID_VALUE[16]; u8 rtw_set_802_11_authentication_mode(struct adapter *adapt, enum ndis_802_11_auth_mode authmode); -u8 rtw_set_802_11_bssid(struct adapter*adapter, u8 *bssid); +u8 rtw_set_802_11_bssid(struct adapter *adapter, u8 *bssid); u8 rtw_set_802_11_add_wep(struct adapter *adapter, struct ndis_802_11_wep *wep); -u8 rtw_set_802_11_disassociate(struct adapter *adapter); -u8 rtw_set_802_11_bssid_list_scan(struct adapter*adapter, +void rtw_set_802_11_disassociate(struct adapter *adapter); +u8 rtw_set_802_11_bssid_list_scan(struct adapter *adapter, struct ndis_802_11_ssid *pssid, int ssid_max_num); u8 rtw_set_802_11_infrastructure_mode(struct adapter *adapter, diff --git a/drivers/staging/r8188eu/include/rtw_led.h b/drivers/staging/r8188eu/include/rtw_led.h index 8520f022a67f25d2d3c02cd6ca16f6ae3b335078..ea5f5edd90131066f1a687924deb381052f17116 100644 --- a/drivers/staging/r8188eu/include/rtw_led.h +++ b/drivers/staging/r8188eu/include/rtw_led.h @@ -33,8 +33,6 @@ enum LED_STATE_871x { }; struct led_priv { - struct adapter *padapter; - bool bRegUseLed; enum LED_STATE_871x CurrLedState; /* Current LED state. */ @@ -47,7 +45,6 @@ struct led_priv { u32 BlinkTimes; /* Number of times to toggle led state for blinking. 
*/ - bool bLedLinkBlinkInProgress; bool bLedScanBlinkInProgress; struct delayed_work blink_work; }; diff --git a/drivers/staging/r8188eu/include/rtw_mlme.h b/drivers/staging/r8188eu/include/rtw_mlme.h index b69989cbab21d6da14b00bc65db399022b0ba09e..3ff653ff1d816d7d541e706dce09d8fb8e1780cd 100644 --- a/drivers/staging/r8188eu/include/rtw_mlme.h +++ b/drivers/staging/r8188eu/include/rtw_mlme.h @@ -101,17 +101,17 @@ struct rt_link_detect { struct profile_info { u8 ssidlen; - u8 ssid[ WLAN_SSID_MAXLEN ]; - u8 peermac[ ETH_ALEN ]; + u8 ssid[WLAN_SSID_MAXLEN]; + u8 peermac[ETH_ALEN]; }; struct tx_invite_req_info { u8 token; u8 benable; - u8 go_ssid[ WLAN_SSID_MAXLEN ]; + u8 go_ssid[WLAN_SSID_MAXLEN]; u8 ssidlen; - u8 go_bssid[ ETH_ALEN ]; - u8 peer_macaddr[ ETH_ALEN ]; + u8 go_bssid[ETH_ALEN]; + u8 peer_macaddr[ETH_ALEN]; u8 operating_ch; /* This information will be set by using the * p2p_set op_ch=x */ u8 peer_ch; /* The listen channel for peer P2P device */ @@ -154,9 +154,9 @@ struct tx_nego_req_info { }; struct group_id_info { - u8 go_device_addr[ ETH_ALEN ]; /* The GO's device address of + u8 go_device_addr[ETH_ALEN]; /* The GO's device address of * this P2P group */ - u8 ssid[ WLAN_SSID_MAXLEN ]; /* The SSID of this P2P group */ + u8 ssid[WLAN_SSID_MAXLEN]; /* The SSID of this P2P group */ }; struct scan_limit_info { @@ -443,11 +443,6 @@ static inline bool check_fwstate(struct mlme_priv *pmlmepriv, int state) return false; } -static inline int get_fwstate(struct mlme_priv *pmlmepriv) -{ - return pmlmepriv->fw_state; -} - /* * No Limit on the calling context, * therefore set it to be the critical section... @@ -459,7 +454,7 @@ static inline void set_fwstate(struct mlme_priv *pmlmepriv, int state) { pmlmepriv->fw_state |= state; /* FOR HW integration */ - if (_FW_UNDER_SURVEY==state) + if (_FW_UNDER_SURVEY == state) pmlmepriv->bScanInProcess = true; } @@ -467,7 +462,7 @@ static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, int state) { pmlmepriv->fw_state &= ~state; /* FOR HW integration */ - if (_FW_UNDER_SURVEY==state) + if (_FW_UNDER_SURVEY == state) pmlmepriv->bScanInProcess = false; } @@ -528,7 +523,7 @@ void rtw_indicate_scan_done(struct adapter *padapter); int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len); int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, - uint in_len, uint initial_out_len); + uint in_len, uint initial_out_len); void rtw_init_registrypriv_dev_network(struct adapter *adapter); void rtw_update_registrypriv_dev_network(struct adapter *adapter); @@ -544,10 +539,8 @@ struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv); void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork, u8 isfreeall); -void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, - struct wlan_network *pnetwork); -struct wlan_network* _rtw_find_network(struct __queue *scanned_queue, u8 *addr); +struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr); void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall); diff --git a/drivers/staging/r8188eu/include/rtw_mlme_ext.h b/drivers/staging/r8188eu/include/rtw_mlme_ext.h index b322d0848db9b3ce4880541256e768ab56e525dc..589de7c54d9300d6df64d7c8aadedbe08196fad6 100644 --- a/drivers/staging/r8188eu/include/rtw_mlme_ext.h +++ b/drivers/staging/r8188eu/include/rtw_mlme_ext.h @@ -184,7 +184,7 @@ enum SCAN_STATE { SCAN_STATE_MAX, }; -typedef unsigned int (*mlme_handler)(struct adapter *adapt, struct recv_frame *frame); +typedef 
void (*mlme_handler)(struct adapter *adapt, struct recv_frame *frame); struct ss_res { int state; @@ -285,7 +285,6 @@ struct mlme_ext_info { u8 bwmode_updated; u8 hidden_ssid_mode; - struct ADDBA_request ADDBA_req; struct WMM_para_element WMM_param; struct HT_caps_element HT_caps; struct HT_info_element HT_info; @@ -388,7 +387,7 @@ struct mlme_ext_priv { void init_mlme_ext_priv(struct adapter *adapter); int init_hw_mlme_ext(struct adapter *padapter); void free_mlme_ext_priv (struct mlme_ext_priv *pmlmeext); -extern struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv); +struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv); unsigned char networktype_to_raid(unsigned char network_type); u8 judge_network_type(struct adapter *padapter, unsigned char *rate, int len); @@ -432,9 +431,9 @@ void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src, u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork); u16 get_beacon_interval(struct wlan_bssid_ex *bss); -int is_client_associated_to_ap(struct adapter *padapter); -int is_client_associated_to_ibss(struct adapter *padapter); -int is_IBSS_empty(struct adapter *padapter); +bool r8188eu_is_client_associated_to_ap(struct adapter *padapter); +bool r8188eu_is_client_associated_to_ibss(struct adapter *padapter); +bool r8188eu_is_ibss_empty(struct adapter *padapter); unsigned char check_assoc_AP(u8 *pframe, uint len); @@ -448,8 +447,7 @@ void HTOnAssocRsp(struct adapter *padapter); void ERP_IE_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE); void VCS_update(struct adapter *padapter, struct sta_info *psta); -void update_beacon_info(struct adapter *padapter, u8 *pframe, uint len, - struct sta_info *psta); +void update_beacon_info(struct adapter *padapter, u8 *ie_ptr, uint ie_len, struct sta_info *psta); int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len); void update_IOT_info(struct adapter *padapter); void update_capinfo(struct adapter *adapter, u16 updatecap); @@ -479,11 +477,11 @@ void report_survey_event(struct adapter *padapter, struct recv_frame *precv_fram void report_surveydone_event(struct adapter *padapter); void report_del_sta_event(struct adapter *padapter, unsigned char *addr, unsigned short reason); -void report_add_sta_event(struct adapter *padapter, unsigned char* addr, +void report_add_sta_event(struct adapter *padapter, unsigned char *addr, int cam_idx); void beacon_timing_control(struct adapter *padapter); -extern u8 set_tx_beacon_cmd(struct adapter*padapter); +u8 set_tx_beacon_cmd(struct adapter *padapter); unsigned int setup_beacon_frame(struct adapter *padapter, unsigned char *beacon_frame); void update_mgnt_tx_rate(struct adapter *padapter, u8 rate); @@ -499,10 +497,10 @@ void issue_probersp_p2p(struct adapter *padapter, unsigned char *da); void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidlen, u8 *pdev_raddr); void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr); -void issue_probereq_p2p(struct adapter *padapter, u8 *da); +void issue_probereq_p2p(struct adapter *padapter); void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialogToken, u8 success); -void issue_p2p_invitation_request(struct adapter *padapter, u8* raddr); +void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr); void issue_beacon(struct adapter *padapter, int timeout_ms); void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p_probereq); @@ -513,8 +511,7 @@ void issue_auth(struct adapter 
*padapter, struct sta_info *psta, unsigned short status); void issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da); -s32 issue_probereq_ex(struct adapter *adapter, struct ndis_802_11_ssid *pssid, - u8* da, int try_cnt, int wait_ms); +void issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da); int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms); int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, @@ -523,7 +520,8 @@ int issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason); int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int try_cnt, int wait_ms); -void issue_action_BA(struct adapter *padapter, unsigned char *raddr, u8 action, u16 status); +void issue_action_BA(struct adapter *padapter, unsigned char *raddr, u8 action, + u16 status, struct ieee80211_mgmt *mgmt_req); unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr); unsigned int send_beacon(struct adapter *padapter); bool get_beacon_valid_bit(struct adapter *adapter); @@ -536,34 +534,6 @@ void start_clnt_auth(struct adapter *padapter); void start_clnt_join(struct adapter *padapter); void start_create_ibss(struct adapter *padapter); -unsigned int OnAssocReq(struct adapter *padapter, - struct recv_frame *precv_frame); -unsigned int OnAssocRsp(struct adapter *padapter, - struct recv_frame *precv_frame); -unsigned int OnProbeReq(struct adapter *padapter, - struct recv_frame *precv_frame); -unsigned int OnProbeRsp(struct adapter *padapter, - struct recv_frame *precv_frame); -unsigned int OnBeacon(struct adapter *padapter, - struct recv_frame *precv_frame); -unsigned int OnDisassoc(struct adapter *padapter, - struct recv_frame *precv_frame); -unsigned int OnAuth(struct adapter *padapter, - struct recv_frame *precv_frame); -unsigned int OnAuthClient(struct adapter *padapter, - struct recv_frame *precv_frame); -unsigned int OnDeAuth(struct adapter *padapter, - struct recv_frame *precv_frame); -unsigned int OnAction(struct adapter *padapter, - struct recv_frame *precv_frame); - -unsigned int OnAction_back(struct adapter *padapter, - struct recv_frame *precv_frame); -unsigned int on_action_public(struct adapter *padapter, - struct recv_frame *precv_frame); -unsigned int OnAction_p2p(struct adapter *padapter, - struct recv_frame *precv_frame); - void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res); void mlmeext_sta_del_event_callback(struct adapter *padapter); void mlmeext_sta_add_event_callback(struct adapter *padapter, @@ -729,7 +699,7 @@ enum rtw_c2h_event { GEN_EVT_CODE(_Survey), /*8*/ GEN_EVT_CODE(_SurveyDone), /*9*/ - GEN_EVT_CODE(_JoinBss) , /*10*/ + GEN_EVT_CODE(_JoinBss), /*10*/ GEN_EVT_CODE(_AddSTA), GEN_EVT_CODE(_DelSTA), GEN_EVT_CODE(_AtimDone), diff --git a/drivers/staging/r8188eu/include/rtw_recv.h b/drivers/staging/r8188eu/include/rtw_recv.h index 7768b0c5988c9f43dd2c7201a62771c7b1451caa..12026431a3d29b0f0857ab659f0a492164886ab5 100644 --- a/drivers/staging/r8188eu/include/rtw_recv.h +++ b/drivers/staging/r8188eu/include/rtw_recv.h @@ -92,7 +92,7 @@ struct rx_pkt_attrib { u8 privacy; /* in frame_ctrl field */ u8 bdecrypted; u8 encrypt; /* when 0 indicate no encrypt. 
when non-zero, - * indicate the encrypt algorith */ + * indicate the encrypt algorithm */ u8 iv_len; u8 icv_len; u8 crc_err; @@ -175,7 +175,7 @@ struct recv_priv { u8 *precv_buf; /* 4 alignment */ struct __queue free_recv_buf_queue; u32 free_recv_buf_queue_cnt; - /* For display the phy informatiom */ + /* For display the phy information */ u8 is_signal_dbg; /* for debug */ u8 signal_strength_dbg; /* for debug */ s8 rssi; diff --git a/drivers/staging/r8188eu/include/rtw_xmit.h b/drivers/staging/r8188eu/include/rtw_xmit.h index 82efcd54af3f910007ab4b1493d522cf09d94a16..6e7ebea5362d88488f7760f3ae0c00c7f843522e 100644 --- a/drivers/staging/r8188eu/include/rtw_xmit.h +++ b/drivers/staging/r8188eu/include/rtw_xmit.h @@ -116,7 +116,7 @@ struct pkt_attrib { u32 last_txcmdsz; u8 nr_frags; u8 encrypt; /* when 0 indicate no encrypt. when non-zero, - * indicate the encrypt algorith */ + * indicate the encrypt algorithm */ u8 iv_len; u8 icv_len; u8 iv[18]; @@ -351,7 +351,7 @@ s32 rtw_txframes_pending(struct adapter *padapter); s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, struct pkt_attrib *pattrib); void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry); -s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); +int _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv); int rtw_alloc_hwxmits(struct adapter *padapter); void rtw_free_hwxmits(struct adapter *padapter); diff --git a/drivers/staging/r8188eu/include/sta_info.h b/drivers/staging/r8188eu/include/sta_info.h index 4112c837bcefbd30098e5d22a7006d908c908f69..e42f4b4c6e247f56ce88c3f7a814ba0c1a9aa42e 100644 --- a/drivers/staging/r8188eu/include/sta_info.h +++ b/drivers/staging/r8188eu/include/sta_info.h @@ -295,19 +295,19 @@ static inline u32 wifi_mac_hash(u8 *mac) return x; } -extern u32 _rtw_init_sta_priv(struct sta_priv *pstapriv); -extern void _rtw_free_sta_priv(struct sta_priv *pstapriv); +int _rtw_init_sta_priv(struct sta_priv *pstapriv); +void _rtw_free_sta_priv(struct sta_priv *pstapriv); #define stainfo_offset_valid(offset) (offset < NUM_STA && offset >= 0) int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta); struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int off); -extern struct sta_info *rtw_alloc_stainfo(struct sta_priv *stapriv, u8 *hwaddr); -extern u32 rtw_free_stainfo(struct adapter *adapt, struct sta_info *psta); -extern void rtw_free_all_stainfo(struct adapter *adapt); -extern struct sta_info *rtw_get_stainfo(struct sta_priv *stapriv, u8 *hwaddr); -extern u32 rtw_init_bcmc_stainfo(struct adapter *adapt); -extern struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter); -extern u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr); +struct sta_info *rtw_alloc_stainfo(struct sta_priv *stapriv, u8 *hwaddr); +void rtw_free_stainfo(struct adapter *adapt, struct sta_info *psta); +void rtw_free_all_stainfo(struct adapter *adapt); +struct sta_info *rtw_get_stainfo(struct sta_priv *stapriv, u8 *hwaddr); +u32 rtw_init_bcmc_stainfo(struct adapter *adapt); +struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter); +u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr); #endif /* _STA_INFO_H_ */ diff --git a/drivers/staging/r8188eu/include/wifi.h b/drivers/staging/r8188eu/include/wifi.h index 0254310bdf4401aabd9f9a63689b2a064f6480e1..254a4bc1a1419cbc9ed2f4839a1cbaed7969b4b9 100644 --- a/drivers/staging/r8188eu/include/wifi.h +++ b/drivers/staging/r8188eu/include/wifi.h 
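The sta_info.h hunk just above reflects two kernel conventions: `extern` is implicit on function declarations in headers, so it is dropped, and status returns migrate from the driver-private u32 _SUCCESS/_FAIL pair to plain int with 0/negative-errno semantics (as in the _rtw_init_xmit_priv() change). A minimal sketch of that pattern, with demo_* names invented for illustration and not taken from the patch:

/* demo.h: no `extern` needed on prototypes */
int demo_init_sta_priv(struct demo_sta_priv *priv);

/* demo.c: report success/failure via 0 / -errno, not _SUCCESS/_FAIL */
#include <linux/slab.h>

struct demo_sta_priv {
	void *sta_buf;		/* illustrative allocation target */
};

int demo_init_sta_priv(struct demo_sta_priv *priv)
{
	priv->sta_buf = kzalloc(1024, GFP_KERNEL);
	if (!priv->sta_buf)
		return -ENOMEM;	/* was: return _FAIL; */

	return 0;		/* was: return _SUCCESS; */
}

Callers can then test the result directly, as the rtw_init_drv_sw() hunks later in this patch do with `if (fn())` instead of `if (fn() == _FAIL)`.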
@@ -140,7 +140,6 @@ enum WIFI_REG_DOMAIN { #define _PWRMGT_ BIT(12) #define _MORE_DATA_ BIT(13) #define _PRIVACY_ BIT(14) -#define _ORDER_ BIT(15) #define SetToDs(pbuf) \ *(__le16 *)(pbuf) |= cpu_to_le16(_TO_DS_) @@ -171,9 +170,6 @@ enum WIFI_REG_DOMAIN { #define SetPrivacy(pbuf) \ *(__le16 *)(pbuf) |= cpu_to_le16(_PRIVACY_) -#define GetPrivacy(pbuf) \ - (((*(__le16 *)(pbuf)) & cpu_to_le16(_PRIVACY_)) != 0) - #define GetFrameType(pbuf) \ (le16_to_cpu(*(__le16 *)(pbuf)) & (BIT(3) | BIT(2))) @@ -187,17 +183,6 @@ enum WIFI_REG_DOMAIN { *(__le16 *)(pbuf) |= cpu_to_le16(type); \ } while (0) -#define GetTupleCache(pbuf) \ - (cpu_to_le16(*(unsigned short *)((size_t)(pbuf) + 22))) - -#define SetFragNum(pbuf, num) \ - do { \ - *(unsigned short *)((size_t)(pbuf) + 22) = \ - ((*(unsigned short *)((size_t)(pbuf) + 22)) & \ - le16_to_cpu(~(0x000f))) | \ - cpu_to_le16(0x0f & (num)); \ - } while (0) - #define SetSeqNum(pbuf, num) \ do { \ *(__le16 *)((size_t)(pbuf) + 22) = \ @@ -221,13 +206,6 @@ enum WIFI_REG_DOMAIN { #define GetAMsdu(pbuf) (((le16_to_cpu(*(__le16 *)pbuf)) >> 7) & 0x1) -#define SetAMsdu(pbuf, amsdu) \ - *(__le16 *)(pbuf) |= cpu_to_le16((amsdu & 1) << 7) - -#define GetTid(pbuf) (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + \ - (((GetToDs(pbuf)<<1) | GetFrDs(pbuf)) == 3 ? \ - 30 : 24))) & 0x000f) - #define GetAddr1Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 4)) #define GetAddr2Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 10)) @@ -236,33 +214,6 @@ enum WIFI_REG_DOMAIN { #define GetAddr4Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 24)) -static inline bool IS_MCAST(unsigned char *da) -{ - return (*da) & 0x01; -} - -static inline unsigned char *get_da(unsigned char *pframe) -{ - unsigned char *da; - unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe); - - switch (to_fr_ds) { - case 0x00: /* ToDs=0, FromDs=0 */ - da = GetAddr1Ptr(pframe); - break; - case 0x01: /* ToDs=0, FromDs=1 */ - da = GetAddr1Ptr(pframe); - break; - case 0x02: /* ToDs=1, FromDs=0 */ - da = GetAddr3Ptr(pframe); - break; - default: /* ToDs=1, FromDs=1 */ - da = GetAddr3Ptr(pframe); - break; - } - return da; -} - static inline unsigned char *get_sa(unsigned char *pframe) { unsigned char *sa; @@ -415,14 +366,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe) Below is the definition for 802.11n ------------------------------------------------------------------------------*/ -#define SetOrderBit(pbuf) \ - do { \ - *(unsigned short *)(pbuf) |= cpu_to_le16(_ORDER_); \ - } while (0) - -#define GetOrderBit(pbuf) \ - (((*(unsigned short *)(pbuf)) & le16_to_cpu(_ORDER_)) != 0) - /** * struct rtw_ieee80211_bar - HT Block Ack Request * @@ -484,14 +427,6 @@ struct WMM_para_element { struct AC_param ac_param[4]; } __packed; -struct ADDBA_request { - unsigned char action_code; - unsigned char dialog_token; - __le16 BA_para_set; - __le16 BA_timeout_value; - __le16 BA_starting_seqctrl; -} __packed; - #define MAX_AMPDU_FACTOR_64K 3 /* Spatial Multiplexing Power Save Modes */ @@ -701,7 +636,7 @@ struct ADDBA_request { #define P2P_WILDCARD_SSID_LEN 7 -/* default value, used when: (1)p2p disabed or (2)p2p enabled +/* default value, used when: (1)p2p disabled or (2)p2p enabled * but only do 1 scan phase */ #define P2P_FINDPHASE_EX_NONE 0 /* used when p2p enabled and want to do 1 scan phase and @@ -766,11 +701,11 @@ enum P2P_STATE { P2P_STATE_TX_PROVISION_DIS_REQ = 6, P2P_STATE_RX_PROVISION_DIS_RSP = 7, P2P_STATE_RX_PROVISION_DIS_REQ = 8, - /* Doing the group owner negoitation handshake */ + /* Doing the group 
owner negotiation handshake */ P2P_STATE_GONEGO_ING = 9, - /* finish the group negoitation handshake with success */ + /* finish the group negotiation handshake with success */ P2P_STATE_GONEGO_OK = 10, - /* finish the group negoitation handshake with failure */ + /* finish the group negotiation handshake with failure */ P2P_STATE_GONEGO_FAIL = 11, /* receiving the P2P Inviation request and match with the profile. */ P2P_STATE_RECV_INVITE_REQ_MATCH = 12, @@ -790,9 +725,9 @@ enum P2P_STATE { P2P_STATE_RECV_INVITE_REQ_JOIN = 19, /* recveing the P2P Inviation response with failure */ P2P_STATE_RX_INVITE_RESP_FAIL = 20, - /* receiving p2p negoitation response with information is not available */ + /* receiving p2p negotiation response with information is not available */ P2P_STATE_RX_INFOR_NOREADY = 21, - /* sending p2p negoitation response with information is not available */ + /* sending p2p negotiation response with information is not available */ P2P_STATE_TX_INFOR_NOREADY = 22, }; diff --git a/drivers/staging/r8188eu/include/wlan_bssdef.h b/drivers/staging/r8188eu/include/wlan_bssdef.h index 81bda91a4136bab4eb32aa32113044b5f0f3d98c..ffeafa19ef264f34b10f709bb5ecaabd84f6731b 100644 --- a/drivers/staging/r8188eu/include/wlan_bssdef.h +++ b/drivers/staging/r8188eu/include/wlan_bssdef.h @@ -17,14 +17,6 @@ struct ndis_802_11_ssid { u8 Ssid[32]; }; -enum NDIS_802_11_NETWORK_TYPE { - Ndis802_11FH, - Ndis802_11DS, - Ndis802_11OFDM5, - Ndis802_11OFDM24, - Ndis802_11NetworkTypeMax /* dummy upper bound */ -}; - struct ndis_802_11_config_fh { u32 Length; /* Length of structure */ u32 HopPattern; /* As defined by 802.11, MSB set */ @@ -185,20 +177,6 @@ struct ndis_802_11_status_ind { /* MIC check time, 60 seconds. */ #define MIC_CHECK_TIME 60000000 -struct ndis_802_11_auth_evt { - struct ndis_802_11_status_ind Status; - struct ndis_802_11_auth_req Request[1]; -}; - -struct ndis_802_11_test { - u32 Length; - u32 Type; - union { - struct ndis_802_11_auth_evt AuthenticationEvent; - NDIS_802_11_RSSI RssiTrigger; - } tt; -}; - #ifndef Ndis802_11APMode #define Ndis802_11APMode (Ndis802_11InfrastructureMax+1) #endif @@ -233,7 +211,6 @@ struct wlan_bssid_ex { struct ndis_802_11_ssid Ssid; u32 Privacy; NDIS_802_11_RSSI Rssi;/* in dBM,raw data ,get from PHY) */ - enum NDIS_802_11_NETWORK_TYPE NetworkTypeInUse; struct ndis_802_11_config Configuration; enum ndis_802_11_network_infra InfrastructureMode; unsigned char SupportedRates[NDIS_802_11_LENGTH_RATES_EX]; @@ -288,34 +265,6 @@ enum UAPSD_MAX_SP { #define NUM_PRE_AUTH_KEY 16 #define NUM_PMKID_CACHE NUM_PRE_AUTH_KEY -/* -* WPA2 -*/ - -struct pmkid_candidate { - unsigned char BSSID[ETH_ALEN]; - u32 Flags; -}; - -struct ndis_802_11_pmkid_list { - u32 Version; /* Version of the structure */ - u32 NumCandidates; /* No. 
of pmkid candidates */ - struct pmkid_candidate CandidateList[1]; -}; - -struct ndis_802_11_auth_encrypt { - enum ndis_802_11_auth_mode AuthModeSupported; - enum ndis_802_11_wep_status EncryptStatusSupported; -}; - -struct ndis_802_11_cap { - u32 Length; - u32 Version; - u32 NoOfPMKIDs; - u32 NoOfAuthEncryptPairsSupported; - struct ndis_802_11_auth_encrypt AuthenticationEncryptionSupported[1]; -}; - u8 key_2char2num(u8 hch, u8 lch); u8 key_char2num(u8 ch); u8 str_2char2num(u8 hch, u8 lch); diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c index 2de2e1e32738a7569cba0fd82e71497a8d1e1c3c..8e9b7b0664bc31577e93e0812107c5b09b098888 100644 --- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c @@ -1011,7 +1011,6 @@ static int rtw_wx_set_mlme(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - int ret = 0; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); struct iw_mlme *mlme = (struct iw_mlme *)extra; @@ -1020,17 +1019,15 @@ static int rtw_wx_set_mlme(struct net_device *dev, switch (mlme->cmd) { case IW_MLME_DEAUTH: - if (!rtw_set_802_11_disassociate(padapter)) - ret = -1; + rtw_set_802_11_disassociate(padapter); break; case IW_MLME_DISASSOC: - if (!rtw_set_802_11_disassociate(padapter)) - ret = -1; + rtw_set_802_11_disassociate(padapter); break; default: return -EOPNOTSUPP; } - return ret; + return 0; } static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, @@ -1340,7 +1337,7 @@ static int rtw_wx_set_rate(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *extra) { - int i, ret = 0; + int i; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); u8 datarates[NumRates]; u32 target_rate = wrqu->bitrate.value; @@ -1408,10 +1405,7 @@ set_rate: } } - if (rtw_setdatarate_cmd(padapter, datarates) != _SUCCESS) - ret = -1; - - return ret; + return rtw_setdatarate_cmd(padapter, datarates); } static int rtw_wx_get_rate(struct net_device *dev, @@ -2647,7 +2641,7 @@ static int rtw_p2p_connect(struct net_device *dev, u32 peer_channel = 0; /* Commented by Albert 20110304 */ - /* The input data contains two informations. */ + /* The input data contains two information items. */ /* 1. First information is the MAC address which wants to formate with */ /* 2. Second information is the WPS PINCode or "pbc" string for push button method */ /* Format: 00:E0:4C:00:00:05 */ @@ -2721,7 +2715,7 @@ static void rtw_p2p_invite_req(struct net_device *dev, uint p2pielen = 0, attr_contentlen = 0; struct tx_invite_req_info *pinvite_req_info = &pwdinfo->invitereq_info; - /* The input data contains two informations. */ + /* The input data contains two information items. */ /* 1. First information is the P2P device address which you want to send to. */ /* 2. Second information is the group id which combines with GO's mac address, space and GO's ssid. */ /* Command line sample: iwpriv wlan0 p2p_set invite ="00:11:22:33:44:55 00:E0:4C:00:00:05 DIRECT-xy" */ @@ -2845,7 +2839,7 @@ static void rtw_p2p_prov_disc(struct net_device *dev, u8 *p2pie; uint p2pielen = 0, attr_contentlen = 0; - /* The input data contains two informations. */ + /* The input data contains two information items. */ /* 1. First information is the MAC address which wants to issue the provisioning discovery request frame. */ /* 2.
Second information is the WPS configuration method which wants to discovery */ /* Format: 00:E0:4C:00:00:05_display */ @@ -2979,8 +2973,6 @@ static int rtw_p2p_set(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - int ret = 0; - if (!memcmp(extra, "enable =", 7)) { rtw_wext_p2p_enable(dev, info, wrqu, &extra[7]); } else if (!memcmp(extra, "setDN =", 6)) { @@ -3027,7 +3019,7 @@ static int rtw_p2p_set(struct net_device *dev, rtw_p2p_set_persistent(dev, info, wrqu, &extra[11]); } - return ret; + return 0; } static int rtw_p2p_get2(struct net_device *dev, @@ -3568,7 +3560,7 @@ static int rtw_wx_set_priv(struct net_device *dev, if ((_VENDOR_SPECIFIC_IE_ == probereq_wpsie[0]) && (!memcmp(&probereq_wpsie[2], wps_oui, 4))) { - cp_sz = probereq_wpsie_len > MAX_WPS_IE_LEN ? MAX_WPS_IE_LEN : probereq_wpsie_len; + cp_sz = min(probereq_wpsie_len, MAX_WPS_IE_LEN); pmlmepriv->wps_probe_req_ie_len = 0; kfree(pmlmepriv->wps_probe_req_ie); diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c index 6a45315d01a29b9c607147298430830120190240..2f59bb99479607a9918c28b09fe9f9ef2beae018 100644 --- a/drivers/staging/r8188eu/os_dep/os_intfs.c +++ b/drivers/staging/r8188eu/os_dep/os_intfs.c @@ -363,18 +363,16 @@ struct net_device *rtw_init_netdev(struct adapter *old_padapter) return pnetdev; } -u32 rtw_start_drv_threads(struct adapter *padapter) +int rtw_start_drv_threads(struct adapter *padapter) { - u32 _status = _SUCCESS; - padapter->cmdThread = kthread_run(rtw_cmd_thread, padapter, "RTW_CMD_THREAD"); if (IS_ERR(padapter->cmdThread)) - _status = _FAIL; - else - /* wait for rtw_cmd_thread() to start running */ - wait_for_completion(&padapter->cmdpriv.start_cmd_thread); + return PTR_ERR(padapter->cmdThread); - return _status; + /* wait for rtw_cmd_thread() to start running */ + wait_for_completion(&padapter->cmdpriv.start_cmd_thread); + + return 0; } void rtw_stop_drv_threads(struct adapter *padapter) @@ -407,7 +405,7 @@ static void rtw_init_default_value(struct adapter *padapter) pmlmepriv->htpriv.ampdu_enable = false;/* set to disabled */ /* security_priv */ - psecuritypriv->binstallGrpkey = _FAIL; + psecuritypriv->binstallGrpkey = false; psecuritypriv->sw_encrypt = pregistrypriv->software_encrypt; psecuritypriv->sw_decrypt = pregistrypriv->software_decrypt; psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */ @@ -433,7 +431,7 @@ static void rtw_init_default_value(struct adapter *padapter) padapter->bShowGetP2PState = 1; } -u8 rtw_reset_drv_sw(struct adapter *padapter) +void rtw_reset_drv_sw(struct adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; @@ -455,25 +453,23 @@ u8 rtw_reset_drv_sw(struct adapter *padapter) padapter->mlmeextpriv.sitesurvey_res.state = SCAN_DISABLE; rtw_set_signal_stat_timer(&padapter->recvpriv); - - return _SUCCESS; } u8 rtw_init_drv_sw(struct adapter *padapter) { - if ((rtw_init_cmd_priv(&padapter->cmdpriv)) == _FAIL) { + if (rtw_init_cmd_priv(&padapter->cmdpriv)) { dev_err(dvobj_to_dev(padapter->dvobj), "rtw_init_cmd_priv failed\n"); return _FAIL; } padapter->cmdpriv.padapter = padapter; - if ((rtw_init_evt_priv(&padapter->evtpriv)) == _FAIL) { + if (rtw_init_evt_priv(&padapter->evtpriv)) { dev_err(dvobj_to_dev(padapter->dvobj), "rtw_init_evt_priv failed\n"); goto free_cmd_priv; } - if (rtw_init_mlme_priv(padapter) == _FAIL) { + if (rtw_init_mlme_priv(padapter)) { dev_err(dvobj_to_dev(padapter->dvobj), "rtw_init_mlme_priv failed\n"); goto free_evt_priv; } 
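The rtw_start_drv_threads() conversion above follows the standard kthread error-handling pattern: kthread_run() returns either a valid task pointer or an ERR_PTR()-encoded errno, so failures are detected with IS_ERR() and propagated with PTR_ERR() rather than mapped onto the driver-private _SUCCESS/_FAIL codes. A minimal, self-contained sketch of the same pattern (the demo_* names are hypothetical, not part of the patch):

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct demo_priv {
	struct task_struct *cmd_thread;
	struct completion thread_started;
};

static int demo_cmd_thread(void *data)
{
	struct demo_priv *priv = data;

	complete(&priv->thread_started);	/* let the starter proceed */

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();			/* real command handling would go here */
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

/* Returns 0 or a negative errno, like the reworked rtw_start_drv_threads(). */
static int demo_start_thread(struct demo_priv *priv)
{
	init_completion(&priv->thread_started);

	priv->cmd_thread = kthread_run(demo_cmd_thread, priv, "demo_cmd");
	if (IS_ERR(priv->cmd_thread))
		return PTR_ERR(priv->cmd_thread);

	/* wait for the thread to start running, as the driver does with
	 * its start_cmd_thread completion */
	wait_for_completion(&priv->thread_started);
	return 0;
}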
@@ -484,7 +480,7 @@ u8 rtw_init_drv_sw(struct adapter *padapter) init_mlme_ext_priv(padapter); - if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL) { + if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter)) { dev_err(dvobj_to_dev(padapter->dvobj), "_rtw_init_xmit_priv failed\n"); goto free_mlme_ext; } @@ -494,7 +490,7 @@ goto free_xmit_priv; } - if (_rtw_init_sta_priv(&padapter->stapriv) == _FAIL) { + if (_rtw_init_sta_priv(&padapter->stapriv)) { dev_err(dvobj_to_dev(padapter->dvobj), "_rtw_init_sta_priv failed\n"); goto free_recv_priv; } @@ -550,7 +546,7 @@ void rtw_cancel_all_timer(struct adapter *padapter) _cancel_timer_ex(&padapter->recvpriv.signal_stat_timer); } -u8 rtw_free_drv_sw(struct adapter *padapter) +void rtw_free_drv_sw(struct adapter *padapter) { /* we can call rtw_p2p_enable here, but: */ /* 1. rtw_p2p_enable may have IO operation */ @@ -587,8 +583,6 @@ u8 rtw_free_drv_sw(struct adapter *padapter) /* clear pbuddystruct adapter to avoid access wrong pointer. */ if (padapter->pbuddy_adapter) padapter->pbuddy_adapter->pbuddy_adapter = NULL; - - return _SUCCESS; } void netdev_br_init(struct net_device *netdev) @@ -624,7 +618,6 @@ static int _netdev_open(struct net_device *pnetdev) if (!padapter->bup) { padapter->bDriverStopped = false; padapter->bSurpriseRemoved = false; - padapter->bCardDisableWOHSM = false; status = rtw_hal_init(padapter); if (status == _FAIL) @@ -632,8 +625,7 @@ netdev_dbg(pnetdev, "MAC Address = %pM\n", pnetdev->dev_addr); - status = rtw_start_drv_threads(padapter); - if (status == _FAIL) { + if (rtw_start_drv_threads(padapter)) { pr_info("Initialize driver software resource Failed!\n"); goto netdev_open_error; } @@ -690,7 +682,6 @@ static int ips_netdrv_open(struct adapter *padapter) padapter->bDriverStopped = false; padapter->bSurpriseRemoved = false; - padapter->bCardDisableWOHSM = false; status = rtw_hal_init(padapter); if (status == _FAIL) @@ -722,13 +713,11 @@ int rtw_ips_pwr_up(struct adapter *padapter) void rtw_ips_pwr_down(struct adapter *padapter) { - padapter->bCardDisableWOHSM = true; padapter->net_closed = true; rtw_led_control(padapter, LED_CTL_POWER_OFF); rtw_ips_dev_unload(padapter); - padapter->bCardDisableWOHSM = false; } static void rtw_fifo_cleanup(struct adapter *adapter) diff --git a/drivers/staging/rtl8192e/TODO b/drivers/staging/rtl8192e/TODO index d51f159d1adff92c16d54c049806b60dbe26e14a..7221ae65d63ebfb6f1251016af3da96a1392f9d2 100644 --- a/drivers/staging/rtl8192e/TODO +++ b/drivers/staging/rtl8192e/TODO @@ -1,2 +1,18 @@ -* merge into drivers/net/wireless/rtllib/rtl8192e +To-do list: + +* merge into drivers/net/wireless/realtek/rtlwifi/rtl8192* * clean up function naming +* Correct the coding style according to Linux guidelines; please read the document + at https://www.kernel.org/doc/html/latest/process/coding-style.html. +* Remove unnecessary debugging/printing macros; for those that are still needed + use the proper kernel API (pr_debug(), dev_dbg(), netdev_dbg()); see the sketch below. +* Remove dead code such as unused functions, variables, fields, etc. +* Use in-kernel API and remove unnecessary wrappers where possible. +* Fix bugs due to code that sleeps in atomic context. +* Remove the HAL layer and migrate its functionality into the relevant parts of + the driver. +* Switch to use LIB80211. +* Switch to use MAC80211. +* Switch to use CFG80211.
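As a hypothetical illustration of the debugging-macro item above (the DRV_TRACE macro shape and the demo function are invented for the example, not taken from the driver), the conversion typically looks like:

#include <linux/netdevice.h>

/* Before: a home-grown macro that prints unconditionally */
#define DRV_TRACE(fmt, ...) \
	printk(KERN_DEBUG "rtl8192e: " fmt, ##__VA_ARGS__)

/* After: netdev_dbg() names the device in the message and can be
 * enabled/disabled at runtime via dynamic debug */
static void demo_report_link(struct net_device *dev, bool linked)
{
	netdev_dbg(dev, "link state: %s\n", linked ? "up" : "down");
}

pr_debug() and dev_dbg() play the same role in code paths that have no net_device at hand.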
+* Improve the error handling of various functions, particularly those that use + existing kernel APIs. diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h index 53fd79a281892dd4ebf33f3cf9b10085dffec9c4..ac192254a4bb363b05977b50a4d70d3f75d5d91c 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h +++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h @@ -154,21 +154,6 @@ struct tx_fwinfo_8190pci { }; -struct log_int_8190 { - u32 nIMR_COMDOK; - u32 nIMR_MGNTDOK; - u32 nIMR_HIGH; - u32 nIMR_VODOK; - u32 nIMR_VIDOK; - u32 nIMR_BEDOK; - u32 nIMR_BKDOK; - u32 nIMR_ROK; - u32 nIMR_RCOK; - u32 nIMR_TBDOK; - u32 nIMR_BDOK; - u32 nIMR_RXFOVW; -}; - struct phy_ofdm_rx_status_rxsc_sgien_exintfflag { u8 reserved:4; u8 rxsc:2; diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c index 18e4e5d84878a4b89b17082dd453dfa4dc71685a..f02e67f68e237ec3bbddd321ceeff4b08525748e 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c @@ -224,8 +224,6 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val) u8 acm = pAciAifsn->f.acm; u8 AcmCtrl = rtl92e_readb(dev, AcmHwCtrl); - AcmCtrl = AcmCtrl | ((priv->AcmMethod == 2) ? 0x0 : 0x1); - if (acm) { switch (eACI) { case AC0_BE: @@ -474,10 +472,10 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev) priv->rf_chip = RF_8256; - if (priv->RegChannelPlan == 0xf) + if (priv->reg_chnl_plan == 0xf) priv->ChannelPlan = priv->eeprom_ChannelPlan; else - priv->ChannelPlan = priv->RegChannelPlan; + priv->ChannelPlan = priv->reg_chnl_plan; if (priv->eeprom_vid == 0x1186 && priv->eeprom_did == 0x3304) priv->CustomerID = RT_CID_DLINK; @@ -503,7 +501,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev) priv->ChannelPlan = 0x0; break; case EEPROM_CID_Nettronix: - priv->ScanDelay = 100; priv->CustomerID = RT_CID_Nettronix; break; case EEPROM_CID_Pronet: @@ -618,15 +615,12 @@ bool rtl92e_start_adapter(struct net_device *dev) start: rtl92e_reset_desc_ring(dev); priv->Rf_Mode = RF_OP_By_SW_3wire; - if (priv->ResetProgress == RESET_TYPE_NORESET) { + if (priv->rst_progress == RESET_TYPE_NORESET) { rtl92e_writeb(dev, ANAPAR, 0x37); mdelay(500); } priv->pFirmware->status = FW_STATUS_0_INIT; - if (priv->RegRfOff) - priv->rtllib->rf_power_state = rf_off; - ulRegRead = rtl92e_readl(dev, CPU_GEN); if (priv->pFirmware->status == FW_STATUS_0_INIT) ulRegRead |= CPU_GEN_SYSTEM_RESET; @@ -654,7 +648,7 @@ start: } priv->LoopbackMode = RTL819X_NO_LOOPBACK; - if (priv->ResetProgress == RESET_TYPE_NORESET) { + if (priv->rst_progress == RESET_TYPE_NORESET) { ulRegRead = rtl92e_readl(dev, CPU_GEN); if (priv->LoopbackMode == RTL819X_NO_LOOPBACK) ulRegRead = (ulRegRead & CPU_GEN_NO_LOOPBACK_MSK) | @@ -703,7 +697,7 @@ start: rtl92e_writeb(dev, ACK_TIMEOUT, 0x30); - if (priv->ResetProgress == RESET_TYPE_NORESET) + if (priv->rst_progress == RESET_TYPE_NORESET) rtl92e_set_wireless_mode(dev, priv->rtllib->mode); rtl92e_cam_reset(dev); { @@ -743,7 +737,7 @@ start: } } - if (priv->ResetProgress == RESET_TYPE_NORESET) { + if (priv->rst_progress == RESET_TYPE_NORESET) { rtStatus = rtl92e_config_phy(dev); if (!rtStatus) { netdev_info(dev, "RF Config failed\n"); @@ -756,9 +750,7 @@ start: rtl92e_writeb(dev, 0x87, 0x0); - if (priv->RegRfOff) { - rtl92e_set_rf_state(dev, rf_off, RF_CHANGE_BY_SW); - } else if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_PS) { + if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_PS) { 
rtl92e_set_rf_state(dev, rf_off, priv->rtllib->rf_off_reason); } else if (priv->rtllib->rf_off_reason >= RF_CHANGE_BY_IPS) { rtl92e_set_rf_state(dev, rf_off, priv->rtllib->rf_off_reason); @@ -772,7 +764,7 @@ start: else priv->Rf_Mode = RF_OP_By_SW_3wire; - if (priv->ResetProgress == RESET_TYPE_NORESET) { + if (priv->rst_progress == RESET_TYPE_NORESET) { rtl92e_dm_init_txpower_tracking(dev); if (priv->IC_Cut >= IC_VersionCut_D) { @@ -801,7 +793,7 @@ start: } priv->CCKPresentAttentuation_40Mdefault = 0; priv->CCKPresentAttentuation_difference = 0; - priv->CCKPresentAttentuation = + priv->cck_present_attn = priv->CCKPresentAttentuation_20Mdefault; priv->btxpower_tracking = false; } @@ -865,7 +857,7 @@ void rtl92e_link_change(struct net_device *dev) reg = rtl92e_readl(dev, RCR); if (priv->rtllib->state == RTLLIB_LINKED) { - if (ieee->IntelPromiscuousModeInfo.bPromiscuousOn) + if (ieee->intel_promiscuous_md_info.promiscuous_on) ; else priv->ReceiveConfig = reg |= RCR_CBSSID; @@ -1112,9 +1104,8 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc, if (cb_desc->bHwSec) { static u8 tmp; - if (!tmp) { + if (!tmp) tmp = 1; - } switch (priv->rtllib->pairwise_key_type) { case KEY_TYPE_WEP40: case KEY_TYPE_WEP104: @@ -1143,8 +1134,8 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc, cb_desc->priority); pdesc->TxFWInfoSize = sizeof(struct tx_fwinfo_8190pci); - pdesc->DISFB = cb_desc->bTxDisableRateFallBack; - pdesc->USERATE = cb_desc->bTxUseDriverAssingedRate; + pdesc->DISFB = cb_desc->tx_dis_rate_fallback; + pdesc->USERATE = cb_desc->tx_use_drv_assinged_rate; pdesc->FirstSeg = 1; pdesc->LastSeg = 1; @@ -1935,7 +1926,7 @@ void rtl92e_stop_adapter(struct net_device *dev, bool reset) if (!reset) { mdelay(150); - priv->bHwRfOffAction = 2; + priv->hw_rf_off_action = 2; if (!priv->rtllib->bSupportRemoteWakeUp) { rtl92e_set_rf_off(dev); @@ -1955,8 +1946,6 @@ void rtl92e_stop_adapter(struct net_device *dev, bool reset) for (i = 0; i < MAX_QUEUE_SIZE; i++) skb_queue_purge(&priv->rtllib->skb_waitQ[i]); - for (i = 0; i < MAX_QUEUE_SIZE; i++) - skb_queue_purge(&priv->rtllib->skb_aggQ[i]); skb_queue_purge(&priv->skb_queue); } @@ -1965,7 +1954,7 @@ void rtl92e_update_ratr_table(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_device *ieee = priv->rtllib; - u8 *pMcsRate = ieee->dot11HTOperationalRateSet; + u8 *pMcsRate = ieee->dot11ht_oper_rate_set; u32 ratr_value = 0; u16 rate_config = 0; u8 rate_index = 0; @@ -1985,7 +1974,7 @@ void rtl92e_update_ratr_table(struct net_device *dev) break; case IEEE_N_24G: case IEEE_N_5G: - if (ieee->pHTInfo->peer_mimo_ps == 0) { + if (ieee->ht_info->peer_mimo_ps == 0) { ratr_value &= 0x0007F007; } else { if (priv->rf_type == RF_1T2R) @@ -1998,11 +1987,11 @@ void rtl92e_update_ratr_table(struct net_device *dev) break; } ratr_value &= 0x0FFFFFFF; - if (ieee->pHTInfo->cur_tx_bw40mhz && - ieee->pHTInfo->bCurShortGI40MHz) + if (ieee->ht_info->cur_tx_bw40mhz && + ieee->ht_info->bCurShortGI40MHz) ratr_value |= 0x80000000; - else if (!ieee->pHTInfo->cur_tx_bw40mhz && - ieee->pHTInfo->bCurShortGI20MHz) + else if (!ieee->ht_info->cur_tx_bw40mhz && + ieee->ht_info->bCurShortGI20MHz) ratr_value |= 0x80000000; rtl92e_writel(dev, RATR0+rate_index*4, ratr_value); rtl92e_writeb(dev, UFWP, 1); @@ -2136,7 +2125,7 @@ bool rtl92e_is_rx_stuck(struct net_device *dev) SlotIndex = (priv->SilentResetRxSlotIndex++)%SilentResetRxSoltNum; - if (priv->RxCounter == RegRxCounter) { + if (priv->rx_ctr == RegRxCounter) { 
priv->SilentResetRxStuckEvent[SlotIndex] = 1; for (i = 0; i < SilentResetRxSoltNum; i++) @@ -2154,7 +2143,7 @@ bool rtl92e_is_rx_stuck(struct net_device *dev) priv->SilentResetRxStuckEvent[SlotIndex] = 0; } - priv->RxCounter = RegRxCounter; + priv->rx_ctr = RegRxCounter; return bStuck; } diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c index 1b592258e64061c477ddbb5cf29c77b879b2b0f2..a813eded4cb3f4747f4b13e19e1c9a6e4d367fa3 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c @@ -522,9 +522,8 @@ static bool _rtl92e_bb_config_para_file(struct net_device *dev) rtStatus = rtl92e_check_bb_and_rf(dev, (enum hw90_block)eCheckItem, (enum rf90_radio_path)0); - if (!rtStatus) { + if (!rtStatus) return rtStatus; - } } rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0); _rtl92e_phy_config_bb(dev, BaseBand_Config_PHY_REG); @@ -1009,16 +1008,16 @@ static void _rtl92e_cck_tx_power_track_bw_switch_tssi(struct net_device *dev) switch (priv->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: - priv->CCKPresentAttentuation = + priv->cck_present_attn = priv->CCKPresentAttentuation_20Mdefault + priv->CCKPresentAttentuation_difference; - if (priv->CCKPresentAttentuation > + if (priv->cck_present_attn > (CCKTxBBGainTableLength-1)) - priv->CCKPresentAttentuation = + priv->cck_present_attn = CCKTxBBGainTableLength-1; - if (priv->CCKPresentAttentuation < 0) - priv->CCKPresentAttentuation = 0; + if (priv->cck_present_attn < 0) + priv->cck_present_attn = 0; if (priv->rtllib->current_network.channel == 14 && !priv->bcck_in_ch14) { @@ -1034,16 +1033,16 @@ static void _rtl92e_cck_tx_power_track_bw_switch_tssi(struct net_device *dev) break; case HT_CHANNEL_WIDTH_20_40: - priv->CCKPresentAttentuation = + priv->cck_present_attn = priv->CCKPresentAttentuation_40Mdefault + priv->CCKPresentAttentuation_difference; - if (priv->CCKPresentAttentuation > + if (priv->cck_present_attn > (CCKTxBBGainTableLength - 1)) - priv->CCKPresentAttentuation = + priv->cck_present_attn = CCKTxBBGainTableLength-1; - if (priv->CCKPresentAttentuation < 0) - priv->CCKPresentAttentuation = 0; + if (priv->cck_present_attn < 0) + priv->cck_present_attn = 0; if (priv->rtllib->current_network.channel == 14 && !priv->bcck_in_ch14) { @@ -1304,8 +1303,8 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev, enum rt_rf_power_state rf_power_state) { struct r8192_priv *priv = rtllib_priv(dev); - struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) - (&(priv->rtllib->PowerSaveControl)); + struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *) + (&priv->rtllib->pwr_save_ctrl); bool bResult = true; u8 i = 0, QueueID = 0; struct rtl8192_tx_ring *ring = NULL; @@ -1319,13 +1318,12 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev, switch (rf_power_state) { case rf_on: if ((priv->rtllib->rf_power_state == rf_off) && - RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC)) { + RT_IN_PS_LEVEL(psc, RT_RF_OFF_LEVL_HALT_NIC)) { bool rtstatus; u32 InitilizeCount = 3; do { InitilizeCount--; - priv->RegRfOff = false; rtstatus = rtl92e_enable_nic(dev); } while (!rtstatus && (InitilizeCount > 0)); @@ -1337,14 +1335,14 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev, return false; } - RT_CLEAR_PS_LEVEL(pPSC, + RT_CLEAR_PS_LEVEL(psc, RT_RF_OFF_LEVL_HALT_NIC); } else { rtl92e_writeb(dev, ANAPAR, 0x37); mdelay(1); rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x4, 0x1); - priv->bHwRfOffAction = 0; + 
priv->hw_rf_off_action = 0; rtl92e_set_bb_reg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x1); @@ -1379,9 +1377,8 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev, i++; } - if (i >= MAX_DOZE_WAITING_TIMES_9x) { + if (i >= MAX_DOZE_WAITING_TIMES_9x) break; - } } rtl92e_set_rf_off(dev); break; @@ -1398,16 +1395,15 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev, i++; } - if (i >= MAX_DOZE_WAITING_TIMES_9x) { + if (i >= MAX_DOZE_WAITING_TIMES_9x) break; - } } - if (pPSC->RegRfPsLevel & RT_RF_OFF_LEVL_HALT_NIC && - !RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC)) { + if (psc->RegRfPsLevel & RT_RF_OFF_LEVL_HALT_NIC && + !RT_IN_PS_LEVEL(psc, RT_RF_OFF_LEVL_HALT_NIC)) { rtl92e_disable_nic(dev); - RT_SET_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC); - } else if (!(pPSC->RegRfPsLevel & + RT_SET_PS_LEVEL(psc, RT_RF_OFF_LEVL_HALT_NIC); + } else if (!(psc->RegRfPsLevel & RT_RF_OFF_LEVL_HALT_NIC)) { rtl92e_set_rf_off(dev); } @@ -1454,7 +1450,7 @@ bool rtl92e_set_rf_power_state(struct net_device *dev, bool bResult = false; if (rf_power_state == priv->rtllib->rf_power_state && - priv->bHwRfOffAction == 0) { + priv->hw_rf_off_action == 0) { return bResult; } diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c index 41faeb4b9b9b434989d6c2badbf090c9045a0954..a4d65b4d99c28fc38bf52e1bdf4db27b402b6f2e 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c @@ -17,7 +17,7 @@ void rtl92e_cam_reset(struct net_device *dev) { u32 ulcommand = 0; - ulcommand |= BIT31|BIT30; + ulcommand |= BIT31 | BIT30; rtl92e_writel(dev, RWCAM, ulcommand); } @@ -40,9 +40,8 @@ void rtl92e_enable_hw_security_config(struct net_device *dev) SECR_value |= SCR_TxUseDK; } - ieee->hwsec_active = 1; - if ((ieee->pHTInfo->iot_action & HT_IOT_ACT_PURE_N_MODE) || !hwwep) { + if ((ieee->ht_info->iot_action & HT_IOT_ACT_PURE_N_MODE) || !hwwep) { ieee->hwsec_active = 0; SECR_value &= ~SCR_RxDecEnable; } @@ -81,17 +80,15 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex, enum rt_rf_power_state rt_state; rt_state = priv->rtllib->rf_power_state; - if (priv->rtllib->PowerSaveControl.bInactivePs) { - if (rt_state == rf_off) { - if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) { - netdev_warn(dev, "%s(): RF is OFF.\n", - __func__); - return; - } - mutex_lock(&priv->rtllib->ips_mutex); - rtl92e_ips_leave(dev); - mutex_unlock(&priv->rtllib->ips_mutex); + if (rt_state == rf_off) { + if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) { + netdev_warn(dev, "%s(): RF is OFF.\n", + __func__); + return; } + mutex_lock(&priv->rtllib->ips_mutex); + rtl92e_ips_leave(dev); + mutex_unlock(&priv->rtllib->ips_mutex); } priv->rtllib->is_set_key = true; if (EntryNo >= TOTAL_CAM_ENTRY) { @@ -100,33 +97,33 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex, } if (DefaultKey) - usConfig |= BIT15 | (KeyType<<2); + usConfig |= BIT15 | (KeyType << 2); else - usConfig |= BIT15 | (KeyType<<2) | KeyIndex; + usConfig |= BIT15 | (KeyType << 2) | KeyIndex; for (i = 0; i < CAM_CONTENT_COUNT; i++) { TargetCommand = i + CAM_CONTENT_COUNT * EntryNo; - TargetCommand |= BIT31|BIT16; + TargetCommand |= BIT31 | BIT16; if (i == 0) { - TargetContent = (u32)(*(MacAddr+0)) << 16 | - (u32)(*(MacAddr+1)) << 24 | + TargetContent = (u32)(*(MacAddr + 0)) << 16 | + (u32)(*(MacAddr + 1)) << 24 | (u32)usConfig; rtl92e_writel(dev, WCAMI, TargetContent); rtl92e_writel(dev, RWCAM, TargetCommand); } else if (i == 1) { - TargetContent = 
(u32)(*(MacAddr+2)) | - (u32)(*(MacAddr+3)) << 8 | - (u32)(*(MacAddr+4)) << 16 | - (u32)(*(MacAddr+5)) << 24; + TargetContent = (u32)(*(MacAddr + 2)) | + (u32)(*(MacAddr + 3)) << 8 | + (u32)(*(MacAddr + 4)) << 16 | + (u32)(*(MacAddr + 5)) << 24; rtl92e_writel(dev, WCAMI, TargetContent); rtl92e_writel(dev, RWCAM, TargetCommand); } else { if (KeyContent != NULL) { rtl92e_writel(dev, WCAMI, - (u32)(*(KeyContent+i-2))); + (u32)(*(KeyContent + i - 2))); rtl92e_writel(dev, RWCAM, TargetCommand); udelay(100); } diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index 89bc989cffbae1e14e11bf471df90bf0a39c4d3e..f8fbe78ccad9fe7769dad534cec3efcd4056002a 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -307,7 +307,7 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap) u8 cur_slot_time = priv->slot_time; if ((cap & WLAN_CAPABILITY_SHORT_SLOT_TIME) && - (!priv->rtllib->pHTInfo->current_rt2rt_long_slot_time)) { + (!priv->rtllib->ht_info->current_rt2rt_long_slot_time)) { if (cur_slot_time != SHORT_SLOT_TIME) { slot_time_val = SHORT_SLOT_TIME; priv->rtllib->SetHwRegHandler(dev, @@ -339,10 +339,10 @@ static void _rtl92e_update_beacon(void *data) struct rtllib_device *ieee = priv->rtllib; struct rtllib_network *net = &ieee->current_network; - if (ieee->pHTInfo->bCurrentHTSupport) + if (ieee->ht_info->bCurrentHTSupport) HT_update_self_and_peer_setting(ieee, net); - ieee->pHTInfo->current_rt2rt_long_slot_time = net->bssht.bd_rt2rt_long_slot_time; - ieee->pHTInfo->RT2RT_HT_Mode = net->bssht.rt2rt_ht_mode; + ieee->ht_info->current_rt2rt_long_slot_time = net->bssht.bd_rt2rt_long_slot_time; + ieee->ht_info->RT2RT_HT_Mode = net->bssht.rt2rt_ht_mode; _rtl92e_update_cap(dev, net->capability); } @@ -494,8 +494,8 @@ static void _rtl92e_prepare_beacon(struct tasklet_struct *t) tcb_desc->queue_index = BEACON_QUEUE; tcb_desc->data_rate = 2; tcb_desc->RATRIndex = 7; - tcb_desc->bTxDisableRateFallBack = 1; - tcb_desc->bTxUseDriverAssingedRate = 1; + tcb_desc->tx_dis_rate_fallback = 1; + tcb_desc->tx_use_drv_assinged_rate = 1; skb_push(pnewskb, priv->rtllib->tx_headroom); pdesc = &ring->desc[0]; @@ -607,13 +607,13 @@ static void _rtl92e_refresh_support_rate(struct r8192_priv *priv) if (ieee->mode == WIRELESS_MODE_N_24G || ieee->mode == WIRELESS_MODE_N_5G) { - memcpy(ieee->Regdot11HTOperationalRateSet, - ieee->RegHTSuppRateSet, 16); - memcpy(ieee->Regdot11TxHTOperationalRateSet, - ieee->RegHTSuppRateSet, 16); + memcpy(ieee->reg_dot11ht_oper_rate_set, + ieee->reg_ht_supp_rate_set, 16); + memcpy(ieee->reg_dot11tx_ht_oper_rate_set, + ieee->reg_ht_supp_rate_set, 16); } else { - memset(ieee->Regdot11HTOperationalRateSet, 0, 16); + memset(ieee->reg_dot11ht_oper_rate_set, 0, 16); } } @@ -642,19 +642,19 @@ static u8 _rtl92e_get_supported_wireless_mode(struct net_device *dev) void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode) { struct r8192_priv *priv = rtllib_priv(dev); - u8 bSupportMode = _rtl92e_get_supported_wireless_mode(dev); + u8 support_mode = _rtl92e_get_supported_wireless_mode(dev); if ((wireless_mode == WIRELESS_MODE_AUTO) || - ((wireless_mode & bSupportMode) == 0)) { - if (bSupportMode & WIRELESS_MODE_N_24G) { + ((wireless_mode & support_mode) == 0)) { + if (support_mode & WIRELESS_MODE_N_24G) { wireless_mode = WIRELESS_MODE_N_24G; - } else if (bSupportMode & WIRELESS_MODE_N_5G) { + } else if (support_mode & WIRELESS_MODE_N_5G) { wireless_mode = WIRELESS_MODE_N_5G; - } else 
if ((bSupportMode & WIRELESS_MODE_A)) { + } else if ((support_mode & WIRELESS_MODE_A)) { wireless_mode = WIRELESS_MODE_A; - } else if ((bSupportMode & WIRELESS_MODE_G)) { + } else if ((support_mode & WIRELESS_MODE_G)) { wireless_mode = WIRELESS_MODE_G; - } else if ((bSupportMode & WIRELESS_MODE_B)) { + } else if ((support_mode & WIRELESS_MODE_B)) { wireless_mode = WIRELESS_MODE_B; } else { netdev_info(dev, @@ -672,9 +672,9 @@ void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode) if ((wireless_mode == WIRELESS_MODE_N_24G) || (wireless_mode == WIRELESS_MODE_N_5G)) { - priv->rtllib->pHTInfo->bEnableHT = 1; + priv->rtllib->ht_info->enable_ht = 1; } else { - priv->rtllib->pHTInfo->bEnableHT = 0; + priv->rtllib->ht_info->enable_ht = 0; } _rtl92e_refresh_support_rate(priv); } @@ -682,11 +682,10 @@ void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode) static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset) { struct r8192_priv *priv = rtllib_priv(dev); - struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) - (&priv->rtllib->PowerSaveControl); + struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *) + (&priv->rtllib->pwr_save_ctrl); bool init_status; - priv->bDriverIsGoingToUnload = false; priv->bdisable_nic = false; priv->up = 1; @@ -701,7 +700,7 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset) return -1; } - RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC); + RT_CLEAR_PS_LEVEL(psc, RT_RF_OFF_LEVL_HALT_NIC); priv->bfirst_init = false; if (priv->polling_timer_on == 0) @@ -724,7 +723,7 @@ static int _rtl92e_sta_down(struct net_device *dev, bool shutdownrf) { struct r8192_priv *priv = rtllib_priv(dev); unsigned long flags = 0; - u8 RFInProgressTimeOut = 0; + u8 rf_in_progress_timeout = 0; if (priv->up == 0) return -1; @@ -735,7 +734,6 @@ static int _rtl92e_sta_down(struct net_device *dev, bool shutdownrf) if (priv->rtllib->state == RTLLIB_LINKED) rtl92e_leisure_ps_leave(dev); - priv->bDriverIsGoingToUnload = true; priv->up = 0; priv->rtllib->ieee_up = 0; priv->bfirst_after_down = true; @@ -757,12 +755,12 @@ static int _rtl92e_sta_down(struct net_device *dev, bool shutdownrf) spin_lock_irqsave(&priv->rf_ps_lock, flags); while (priv->rf_change_in_progress) { spin_unlock_irqrestore(&priv->rf_ps_lock, flags); - if (RFInProgressTimeOut > 100) { + if (rf_in_progress_timeout > 100) { spin_lock_irqsave(&priv->rf_ps_lock, flags); break; } mdelay(1); - RFInProgressTimeOut++; + rf_in_progress_timeout++; spin_lock_irqsave(&priv->rf_ps_lock, flags); } priv->rf_change_in_progress = true; @@ -821,10 +819,10 @@ static void _rtl92e_init_priv_handler(struct net_device *dev) static void _rtl92e_init_priv_constant(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); - struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) - &priv->rtllib->PowerSaveControl; + struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *) + &priv->rtllib->pwr_save_ctrl; - pPSC->RegMaxLPSAwakeIntvl = 5; + psc->reg_max_lps_awake_intvl = 5; } static void _rtl92e_init_priv_variable(struct net_device *dev) @@ -832,15 +830,12 @@ static void _rtl92e_init_priv_variable(struct net_device *dev) struct r8192_priv *priv = rtllib_priv(dev); u8 i; - priv->AcmMethod = eAcmWay2_SW; priv->dot11_current_preamble_mode = PREAMBLE_AUTO; priv->rtllib->status = 0; priv->polling_timer_on = 0; priv->up_first_time = 1; priv->blinked_ingpio = false; - priv->bDriverIsGoingToUnload = false; priv->being_init_adapter = false; - priv->initialized_at_probe = false; 
priv->bdisable_nic = false; priv->bfirst_init = false; priv->txringcount = 64; @@ -848,12 +843,12 @@ static void _rtl92e_init_priv_variable(struct net_device *dev) priv->rxringcount = MAX_RX_COUNT; priv->irq_enabled = 0; priv->chan = 1; - priv->RegChannelPlan = 0xf; + priv->reg_chnl_plan = 0xf; priv->rtllib->mode = WIRELESS_MODE_AUTO; priv->rtllib->iw_mode = IW_MODE_INFRA; - priv->rtllib->bNetPromiscuousMode = false; - priv->rtllib->IntelPromiscuousModeInfo.bPromiscuousOn = false; - priv->rtllib->IntelPromiscuousModeInfo.bFilterSourceStationFrame = + priv->rtllib->net_promiscuous_md = false; + priv->rtllib->intel_promiscuous_md_info.promiscuous_on = false; + priv->rtllib->intel_promiscuous_md_info.fltr_src_sta_frame = false; priv->rtllib->ieee_up = 0; priv->retry_rts = DEFAULT_RETRY_RTS; @@ -864,32 +859,21 @@ static void _rtl92e_init_priv_variable(struct net_device *dev) priv->promisc = (dev->flags & IFF_PROMISC) ? 1 : 0; priv->bcck_in_ch14 = false; priv->bfsync_processing = false; - priv->CCKPresentAttentuation = 0; + priv->cck_present_attn = 0; priv->rfa_txpowertrackingindex = 0; priv->rfc_txpowertrackingindex = 0; priv->CckPwEnl = 6; - priv->ScanDelay = 50; - priv->ResetProgress = RESET_TYPE_NORESET; - priv->bForcedSilentReset = false; - priv->bDisableNormalResetCheck = false; + priv->rst_progress = RESET_TYPE_NORESET; priv->force_reset = false; memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32); - - memset(&priv->InterruptLog, 0, sizeof(struct log_int_8190)); - priv->RxCounter = 0; + priv->rx_ctr = 0; priv->rtllib->wx_set_enc = 0; priv->hw_radio_off = false; - priv->RegRfOff = false; - priv->isRFOff = false; - priv->bInPowerSaveMode = false; priv->rtllib->rf_off_reason = 0; priv->rf_change_in_progress = false; - priv->bHwRfOffAction = 0; + priv->hw_rf_off_action = 0; priv->SetRFPowerStateInProgress = false; - priv->rtllib->PowerSaveControl.bInactivePs = true; - priv->rtllib->PowerSaveControl.bIPSModeBackup = false; - priv->rtllib->PowerSaveControl.bLeisurePs = true; - priv->rtllib->PowerSaveControl.bFwCtrlLPS = false; + priv->rtllib->pwr_save_ctrl.bLeisurePs = true; priv->rtllib->LPSDelayCnt = 0; priv->rtllib->sta_sleep = LPS_IS_WAKE; priv->rtllib->rf_power_state = rf_on; @@ -916,8 +900,6 @@ static void _rtl92e_init_priv_variable(struct net_device *dev) for (i = 0; i < MAX_QUEUE_SIZE; i++) skb_queue_head_init(&priv->rtllib->skb_waitQ[i]); - for (i = 0; i < MAX_QUEUE_SIZE; i++) - skb_queue_head_init(&priv->rtllib->skb_aggQ[i]); } static void _rtl92e_init_priv_lock(struct r8192_priv *priv) @@ -1147,8 +1129,8 @@ static void _rtl92e_if_silent_reset(struct net_device *dev) struct rtllib_device *ieee = priv->rtllib; unsigned long flag; - if (priv->ResetProgress == RESET_TYPE_NORESET) { - priv->ResetProgress = RESET_TYPE_SILENT; + if (priv->rst_progress == RESET_TYPE_NORESET) { + priv->rst_progress = RESET_TYPE_SILENT; spin_lock_irqsave(&priv->rf_ps_lock, flag); if (priv->rf_change_in_progress) { @@ -1245,10 +1227,8 @@ RESET_START: rtl92e_cam_restore(dev); rtl92e_dm_restore_state(dev); END: - priv->ResetProgress = RESET_TYPE_NORESET; + priv->rst_progress = RESET_TYPE_NORESET; priv->reset_count++; - - priv->bForcedSilentReset = false; priv->bResetInProgress = false; rtl92e_writeb(dev, UFWP, 1); @@ -1264,15 +1244,15 @@ static void _rtl92e_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum, *TotalRxBcnNum = 0; *TotalRxDataNum = 0; - SlotIndex = (priv->rtllib->LinkDetectInfo.SlotIndex++) % - (priv->rtllib->LinkDetectInfo.SlotNum); - 
priv->rtllib->LinkDetectInfo.RxBcnNum[SlotIndex] = - priv->rtllib->LinkDetectInfo.NumRecvBcnInPeriod; - priv->rtllib->LinkDetectInfo.RxDataNum[SlotIndex] = - priv->rtllib->LinkDetectInfo.NumRecvDataInPeriod; - for (i = 0; i < priv->rtllib->LinkDetectInfo.SlotNum; i++) { - *TotalRxBcnNum += priv->rtllib->LinkDetectInfo.RxBcnNum[i]; - *TotalRxDataNum += priv->rtllib->LinkDetectInfo.RxDataNum[i]; + SlotIndex = (priv->rtllib->link_detect_info.SlotIndex++) % + (priv->rtllib->link_detect_info.SlotNum); + priv->rtllib->link_detect_info.RxBcnNum[SlotIndex] = + priv->rtllib->link_detect_info.NumRecvBcnInPeriod; + priv->rtllib->link_detect_info.RxDataNum[SlotIndex] = + priv->rtllib->link_detect_info.NumRecvDataInPeriod; + for (i = 0; i < priv->rtllib->link_detect_info.SlotNum; i++) { + *TotalRxBcnNum += priv->rtllib->link_detect_info.RxBcnNum[i]; + *TotalRxDataNum += priv->rtllib->link_detect_info.RxDataNum[i]; } } @@ -1285,8 +1265,8 @@ static void _rtl92e_watchdog_wq_cb(void *data) enum reset_type ResetType = RESET_TYPE_NORESET; static u8 check_reset_cnt; unsigned long flags; - struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) - (&priv->rtllib->PowerSaveControl); + struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *) + (&priv->rtllib->pwr_save_ctrl); bool bBusyTraffic = false; bool bHigherBusyTraffic = false; bool bHigherBusyRxTraffic = false; @@ -1309,31 +1289,31 @@ static void _rtl92e_watchdog_wq_cb(void *data) RTLLIB_NOLINK) && (ieee->rf_power_state == rf_on) && !ieee->is_set_key && (!ieee->proto_stoppping) && !ieee->wx_set_enc) { - if ((ieee->PowerSaveControl.ReturnPoint == + if ((ieee->pwr_save_ctrl.ReturnPoint == IPS_CALLBACK_NONE) && - (!ieee->bNetPromiscuousMode)) { + (!ieee->net_promiscuous_md)) { rtl92e_ips_enter(dev); } } } if ((ieee->state == RTLLIB_LINKED) && (ieee->iw_mode == - IW_MODE_INFRA) && (!ieee->bNetPromiscuousMode)) { - if (ieee->LinkDetectInfo.NumRxOkInPeriod > 100 || - ieee->LinkDetectInfo.NumTxOkInPeriod > 100) + IW_MODE_INFRA) && (!ieee->net_promiscuous_md)) { + if (ieee->link_detect_info.NumRxOkInPeriod > 100 || + ieee->link_detect_info.NumTxOkInPeriod > 100) bBusyTraffic = true; - if (ieee->LinkDetectInfo.NumRxOkInPeriod > 4000 || - ieee->LinkDetectInfo.NumTxOkInPeriod > 4000) { + if (ieee->link_detect_info.NumRxOkInPeriod > 4000 || + ieee->link_detect_info.NumTxOkInPeriod > 4000) { bHigherBusyTraffic = true; - if (ieee->LinkDetectInfo.NumRxOkInPeriod > 5000) + if (ieee->link_detect_info.NumRxOkInPeriod > 5000) bHigherBusyRxTraffic = true; else bHigherBusyRxTraffic = false; } - if (((ieee->LinkDetectInfo.NumRxUnicastOkInPeriod + - ieee->LinkDetectInfo.NumTxOkInPeriod) > 8) || - (ieee->LinkDetectInfo.NumRxUnicastOkInPeriod > 2)) + if (((ieee->link_detect_info.NumRxUnicastOkInPeriod + + ieee->link_detect_info.NumTxOkInPeriod) > 8) || + (ieee->link_detect_info.NumRxUnicastOkInPeriod > 2)) bEnterPS = false; else bEnterPS = true; @@ -1350,13 +1330,13 @@ static void _rtl92e_watchdog_wq_cb(void *data) rtl92e_leisure_ps_leave(dev); } - ieee->LinkDetectInfo.NumRxOkInPeriod = 0; - ieee->LinkDetectInfo.NumTxOkInPeriod = 0; - ieee->LinkDetectInfo.NumRxUnicastOkInPeriod = 0; - ieee->LinkDetectInfo.bBusyTraffic = bBusyTraffic; + ieee->link_detect_info.NumRxOkInPeriod = 0; + ieee->link_detect_info.NumTxOkInPeriod = 0; + ieee->link_detect_info.NumRxUnicastOkInPeriod = 0; + ieee->link_detect_info.bBusyTraffic = bBusyTraffic; - ieee->LinkDetectInfo.bHigherBusyTraffic = bHigherBusyTraffic; - ieee->LinkDetectInfo.bHigherBusyRxTraffic = bHigherBusyRxTraffic; + 
ieee->link_detect_info.bHigherBusyTraffic = bHigherBusyTraffic; + ieee->link_detect_info.bHigherBusyRxTraffic = bHigherBusyRxTraffic; if (ieee->state == RTLLIB_LINKED && ieee->iw_mode == IW_MODE_INFRA) { u32 TotalRxBcnNum = 0; @@ -1397,28 +1377,26 @@ static void _rtl92e_watchdog_wq_cb(void *data) priv->check_roaming_cnt = 0; } - ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0; - ieee->LinkDetectInfo.NumRecvDataInPeriod = 0; + ieee->link_detect_info.NumRecvBcnInPeriod = 0; + ieee->link_detect_info.NumRecvDataInPeriod = 0; } spin_lock_irqsave(&priv->tx_lock, flags); if ((check_reset_cnt++ >= 3) && (!ieee->is_roaming) && - (!priv->rf_change_in_progress) && (!pPSC->bSwRfProcessing)) { + (!priv->rf_change_in_progress) && (!psc->bSwRfProcessing)) { ResetType = _rtl92e_if_check_reset(dev); check_reset_cnt = 3; } spin_unlock_irqrestore(&priv->tx_lock, flags); - if (!priv->bDisableNormalResetCheck && ResetType == RESET_TYPE_NORMAL) { - priv->ResetProgress = RESET_TYPE_NORMAL; + if (ResetType == RESET_TYPE_NORMAL) { + priv->rst_progress = RESET_TYPE_NORMAL; return; } - if (((priv->force_reset) || (!priv->bDisableNormalResetCheck && - ResetType == RESET_TYPE_SILENT))) + if ((priv->force_reset || ResetType == RESET_TYPE_SILENT)) _rtl92e_if_silent_reset(dev); priv->force_reset = false; - priv->bForcedSilentReset = false; priv->bResetInProgress = false; } @@ -1554,8 +1532,8 @@ static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } tcb_desc->RATRIndex = 7; - tcb_desc->bTxDisableRateFallBack = 1; - tcb_desc->bTxUseDriverAssingedRate = 1; + tcb_desc->tx_dis_rate_fallback = 1; + tcb_desc->tx_use_drv_assinged_rate = 1; tcb_desc->bTxEnableFwCalcDur = 1; skb_push(skb, priv->rtllib->tx_headroom); ret = _rtl92e_tx(dev, skb); @@ -2205,7 +2183,6 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev) if (inta & IMR_ROK) { priv->stats.rxint++; - priv->InterruptLog.nIMR_ROK++; tasklet_schedule(&priv->irq_rx_tasklet); } @@ -2229,25 +2206,25 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev) if (inta & IMR_BKDOK) { priv->stats.txbkokint++; - priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++; + priv->rtllib->link_detect_info.NumTxOkInPeriod++; _rtl92e_tx_isr(dev, BK_QUEUE); } if (inta & IMR_BEDOK) { priv->stats.txbeokint++; - priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++; + priv->rtllib->link_detect_info.NumTxOkInPeriod++; _rtl92e_tx_isr(dev, BE_QUEUE); } if (inta & IMR_VIDOK) { priv->stats.txviokint++; - priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++; + priv->rtllib->link_detect_info.NumTxOkInPeriod++; _rtl92e_tx_isr(dev, VI_QUEUE); } if (inta & IMR_VODOK) { priv->stats.txvookint++; - priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++; + priv->rtllib->link_detect_info.NumTxOkInPeriod++; _rtl92e_tx_isr(dev, VO_QUEUE); } @@ -2437,8 +2414,8 @@ bool rtl92e_enable_nic(struct net_device *dev) { bool init_status = true; struct r8192_priv *priv = rtllib_priv(dev); - struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) - (&priv->rtllib->PowerSaveControl); + struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *) + (&priv->rtllib->pwr_save_ctrl); if (!priv->up) { netdev_warn(dev, "%s(): Driver is already down!\n", __func__); @@ -2453,7 +2430,7 @@ bool rtl92e_enable_nic(struct net_device *dev) priv->bdisable_nic = false; return false; } - RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC); + RT_CLEAR_PS_LEVEL(psc, RT_RF_OFF_LEVL_HALT_NIC); priv->bfirst_init = false; rtl92e_irq_enable(dev); diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h 
index 7021f9c435d96b40d7823a3adf9d304cdda7a33d..cceb77492363ae93941ab2309dfe2035b44d1f89 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h @@ -234,15 +234,6 @@ struct rt_stats { u32 CurrentShowTxate; }; -struct channel_access_setting { - u16 SIFS_Timer; - u16 DIFS_Timer; - u16 SlotTimeTimer; - u16 EIFS_Timer; - u16 CWminIndex; - u16 CWmaxIndex; -}; - struct init_gain { u8 xaagccore1; u8 xbagccore1; @@ -309,9 +300,7 @@ struct r8192_priv { bool bfirst_init; bool bfirst_after_down; - bool initialized_at_probe; bool being_init_adapter; - bool bDriverIsGoingToUnload; int irq; short irq_enabled; @@ -323,16 +312,11 @@ struct r8192_priv { struct delayed_work txpower_tracking_wq; struct delayed_work rfpath_check_wq; struct delayed_work gpio_change_rf_wq; - - struct channel_access_setting ChannelAccessSetting; - struct rtl819x_ops *ops; struct rtllib_device *rtllib; struct work_struct reset_wq; - struct log_int_8190 InterruptLog; - enum rt_customer_id CustomerID; @@ -341,8 +325,6 @@ struct r8192_priv { struct bb_reg_definition PHYRegDef[4]; struct rate_adaptive rate_adaptive; - enum acm_method AcmMethod; - struct rt_firmware *pFirmware; enum rtl819x_loopback LoopbackMode; @@ -410,8 +392,6 @@ struct r8192_priv { short chan; short sens; short max_sens; - - u8 ScanDelay; bool ps_force; u32 irq_mask[2]; @@ -470,13 +450,9 @@ struct r8192_priv { bool bTXPowerDataReadFromEEPORM; - u16 RegChannelPlan; + u16 reg_chnl_plan; u16 ChannelPlan; - - bool RegRfOff; - bool isRFOff; - bool bInPowerSaveMode; - u8 bHwRfOffAction; + u8 hw_rf_off_action; bool rf_change_in_progress; bool SetRFPowerStateInProgress; @@ -490,7 +466,7 @@ struct r8192_priv { u8 CCKPresentAttentuation_20Mdefault; u8 CCKPresentAttentuation_40Mdefault; s8 CCKPresentAttentuation_difference; - s8 CCKPresentAttentuation; + s8 cck_present_attn; long undecorated_smoothed_pwdb; u32 MCSTxPowerLevelOriginalOffset[6]; @@ -543,11 +519,9 @@ struct r8192_priv { u32 reset_count; - enum reset_type ResetProgress; - bool bForcedSilentReset; - bool bDisableNormalResetCheck; + enum reset_type rst_progress; u16 TxCounter; - u16 RxCounter; + u16 rx_ctr; bool bResetInProgress; bool force_reset; bool force_lps; diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c index 702551056227dc3eede31dbeb3e42ffe382f0ff6..a18393c8a83394c2a8ce8314b10bca87bbda1797 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c @@ -267,10 +267,8 @@ static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev) "PATH=/usr/bin:/bin", NULL}; - if (priv->ResetProgress == RESET_TYPE_SILENT) { + if (priv->rst_progress == RESET_TYPE_SILENT) return; - } - if (priv->rtllib->state != RTLLIB_LINKED) return; call_usermodehelper(ac_dc_script, argv, envp, UMH_WAIT_PROC); @@ -323,16 +321,15 @@ void rtl92e_init_adaptive_rate(struct net_device *dev) static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); - struct rt_hi_throughput *pHTInfo = priv->rtllib->pHTInfo; + struct rt_hi_throughput *ht_info = priv->rtllib->ht_info; struct rate_adaptive *pra = &priv->rate_adaptive; u32 currentRATR, targetRATR = 0; u32 LowRSSIThreshForRA = 0, HighRSSIThreshForRA = 0; bool bshort_gi_enabled = false; static u8 ping_rssi_state; - if (!priv->up) { + if (!priv->up) return; - } if (pra->rate_adaptive_disabled) return; @@ -343,10 +340,10 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev) if 
(priv->rtllib->state == RTLLIB_LINKED) { - bshort_gi_enabled = (pHTInfo->cur_tx_bw40mhz && - pHTInfo->bCurShortGI40MHz) || - (!pHTInfo->cur_tx_bw40mhz && - pHTInfo->bCurShortGI20MHz); + bshort_gi_enabled = (ht_info->cur_tx_bw40mhz && + ht_info->bCurShortGI40MHz) || + (!ht_info->cur_tx_bw40mhz && + ht_info->bCurShortGI20MHz); pra->upper_rssi_threshold_ratr = (pra->upper_rssi_threshold_ratr & (~BIT31)) | @@ -631,9 +628,9 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev) for (j = 0; j <= 30; j++) { - tx_cmd.Op = TXCMD_SET_TX_PWR_TRACKING; - tx_cmd.Length = 4; - tx_cmd.Value = Value; + tx_cmd.op = TXCMD_SET_TX_PWR_TRACKING; + tx_cmd.length = 4; + tx_cmd.value = Value; rtl92e_send_cmd_pkt(dev, DESC_PACKET_TYPE_NORMAL, (u8 *)&tx_cmd, sizeof(struct dcmd_txcmd)); mdelay(1); @@ -719,21 +716,21 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev) } if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20) - priv->CCKPresentAttentuation = + priv->cck_present_attn = priv->CCKPresentAttentuation_20Mdefault + priv->CCKPresentAttentuation_difference; else - priv->CCKPresentAttentuation = + priv->cck_present_attn = priv->CCKPresentAttentuation_40Mdefault + priv->CCKPresentAttentuation_difference; - if (priv->CCKPresentAttentuation > (CCKTxBBGainTableLength-1)) - priv->CCKPresentAttentuation = CCKTxBBGainTableLength-1; - if (priv->CCKPresentAttentuation < 0) - priv->CCKPresentAttentuation = 0; + if (priv->cck_present_attn > (CCKTxBBGainTableLength-1)) + priv->cck_present_attn = CCKTxBBGainTableLength-1; + if (priv->cck_present_attn < 0) + priv->cck_present_attn = 0; - if (priv->CCKPresentAttentuation > -1 && - priv->CCKPresentAttentuation < CCKTxBBGainTableLength) { + if (priv->cck_present_attn > -1 && + priv->cck_present_attn < CCKTxBBGainTableLength) { if (priv->rtllib->current_network.channel == 14 && !priv->bcck_in_ch14) { priv->bcck_in_ch14 = true; @@ -777,9 +774,8 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev) tmpRegA = rtl92e_get_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord); for (i = 0; i < OFDM_Table_Length; i++) { - if (tmpRegA == OFDMSwingTable[i]) { + if (tmpRegA == OFDMSwingTable[i]) priv->OFDM_index[0] = i; - } } TempCCk = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1, bMaskByte2); @@ -967,7 +963,7 @@ static void _rtl92e_dm_cck_tx_power_adjust_tssi(struct net_device *dev, { u32 TempVal; struct r8192_priv *priv = rtllib_priv(dev); - u8 attenuation = priv->CCKPresentAttentuation; + u8 attenuation = priv->cck_present_attn; TempVal = 0; if (!bInCH14) { @@ -1066,9 +1062,8 @@ void rtl92e_dm_restore_state(struct net_device *dev) u32 reg_ratr = priv->rate_adaptive.last_ratr; u32 ratr_value; - if (!priv->up) { + if (!priv->up) return; - } if (priv->rate_adaptive.rate_adaptive_disabled) return; @@ -1143,8 +1138,8 @@ static void _rtl92e_dm_dig_init(struct net_device *dev) dm_digtable.dig_state = DM_STA_DIG_MAX; dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX; - dm_digtable.CurSTAConnectState = DIG_STA_DISCONNECT; - dm_digtable.PreSTAConnectState = DIG_STA_DISCONNECT; + dm_digtable.cur_sta_connect_state = DIG_STA_DISCONNECT; + dm_digtable.pre_sta_connect_state = DIG_STA_DISCONNECT; dm_digtable.rssi_low_thresh = DM_DIG_THRESH_LOW; dm_digtable.rssi_high_thresh = DM_DIG_THRESH_HIGH; @@ -1212,9 +1207,9 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_driver(struct net_device *dev) } if (priv->rtllib->state == RTLLIB_LINKED) - dm_digtable.CurSTAConnectState = DIG_STA_CONNECT; + dm_digtable.cur_sta_connect_state = DIG_STA_CONNECT; 
else - dm_digtable.CurSTAConnectState = DIG_STA_DISCONNECT; + dm_digtable.cur_sta_connect_state = DIG_STA_DISCONNECT; dm_digtable.rssi_val = priv->undecorated_smoothed_pwdb; @@ -1223,7 +1218,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_driver(struct net_device *dev) _rtl92e_dm_cs_ratio(dev); if (dm_digtable.dig_algorithm_switch) dm_digtable.dig_algorithm_switch = 0; - dm_digtable.PreSTAConnectState = dm_digtable.CurSTAConnectState; + dm_digtable.pre_sta_connect_state = dm_digtable.cur_sta_connect_state; } @@ -1373,8 +1368,8 @@ static void _rtl92e_dm_initial_gain(struct net_device *dev) return; } - if (dm_digtable.PreSTAConnectState == dm_digtable.CurSTAConnectState) { - if (dm_digtable.CurSTAConnectState == DIG_STA_CONNECT) { + if (dm_digtable.pre_sta_connect_state == dm_digtable.cur_sta_connect_state) { + if (dm_digtable.cur_sta_connect_state == DIG_STA_CONNECT) { long gain_range = dm_digtable.rssi_val + 10 - dm_digtable.backoff_val; gain_range = clamp_t(long, gain_range, @@ -1424,8 +1419,8 @@ static void _rtl92e_dm_pd_th(struct net_device *dev) reset_cnt = 0; } - if (dm_digtable.PreSTAConnectState == dm_digtable.CurSTAConnectState) { - if (dm_digtable.CurSTAConnectState == DIG_STA_CONNECT) { + if (dm_digtable.pre_sta_connect_state == dm_digtable.cur_sta_connect_state) { + if (dm_digtable.cur_sta_connect_state == DIG_STA_CONNECT) { if (dm_digtable.rssi_val >= dm_digtable.rssi_high_power_highthresh) dm_digtable.curpd_thstate = @@ -1492,8 +1487,8 @@ static void _rtl92e_dm_cs_ratio(struct net_device *dev) reset_cnt = 0; } - if (dm_digtable.PreSTAConnectState == dm_digtable.CurSTAConnectState) { - if (dm_digtable.CurSTAConnectState == DIG_STA_CONNECT) { + if (dm_digtable.pre_sta_connect_state == dm_digtable.cur_sta_connect_state) { + if (dm_digtable.cur_sta_connect_state == DIG_STA_CONNECT) { if (dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh) dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER; else if (dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh) @@ -1537,7 +1532,7 @@ void rtl92e_dm_init_edca_turbo(struct net_device *dev) static void _rtl92e_dm_check_edca_turbo(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); - struct rt_hi_throughput *pHTInfo = priv->rtllib->pHTInfo; + struct rt_hi_throughput *ht_info = priv->rtllib->ht_info; static unsigned long lastTxOkCnt; static unsigned long lastRxOkCnt; @@ -1548,18 +1543,18 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev) goto dm_CheckEdcaTurbo_EXIT; if (priv->rtllib->state != RTLLIB_LINKED) goto dm_CheckEdcaTurbo_EXIT; - if (priv->rtllib->pHTInfo->iot_action & HT_IOT_ACT_DISABLE_EDCA_TURBO) + if (priv->rtllib->ht_info->iot_action & HT_IOT_ACT_DISABLE_EDCA_TURBO) goto dm_CheckEdcaTurbo_EXIT; if (!priv->rtllib->bis_any_nonbepkts) { curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt; curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt; - if (pHTInfo->iot_action & HT_IOT_ACT_EDCA_BIAS_ON_RX) { + if (ht_info->iot_action & HT_IOT_ACT_EDCA_BIAS_ON_RX) { if (curTxOkCnt > 4*curRxOkCnt) { if (priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA) { rtl92e_writel(dev, EDCAPARA_BE, - edca_setting_UL[pHTInfo->IOTPeer]); + edca_setting_UL[ht_info->IOTPeer]); priv->bis_cur_rdlstate = false; } } else { @@ -1567,10 +1562,10 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev) !priv->bcurrent_turbo_EDCA) { if (priv->rtllib->mode == WIRELESS_MODE_G) rtl92e_writel(dev, EDCAPARA_BE, - edca_setting_DL_GMode[pHTInfo->IOTPeer]); + edca_setting_DL_GMode[ht_info->IOTPeer]); else rtl92e_writel(dev, 
EDCAPARA_BE, - edca_setting_DL[pHTInfo->IOTPeer]); + edca_setting_DL[ht_info->IOTPeer]); priv->bis_cur_rdlstate = true; } } @@ -1581,17 +1576,17 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev) !priv->bcurrent_turbo_EDCA) { if (priv->rtllib->mode == WIRELESS_MODE_G) rtl92e_writel(dev, EDCAPARA_BE, - edca_setting_DL_GMode[pHTInfo->IOTPeer]); + edca_setting_DL_GMode[ht_info->IOTPeer]); else rtl92e_writel(dev, EDCAPARA_BE, - edca_setting_DL[pHTInfo->IOTPeer]); + edca_setting_DL[ht_info->IOTPeer]); priv->bis_cur_rdlstate = true; } } else { if (priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA) { rtl92e_writel(dev, EDCAPARA_BE, - edca_setting_UL[pHTInfo->IOTPeer]); + edca_setting_UL[ht_info->IOTPeer]); priv->bis_cur_rdlstate = false; } @@ -1626,23 +1621,23 @@ static void _rtl92e_dm_init_cts_to_self(struct net_device *dev) static void _rtl92e_dm_cts_to_self(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv((struct net_device *)dev); - struct rt_hi_throughput *pHTInfo = priv->rtllib->pHTInfo; + struct rt_hi_throughput *ht_info = priv->rtllib->ht_info; static unsigned long lastTxOkCnt; static unsigned long lastRxOkCnt; unsigned long curTxOkCnt = 0; unsigned long curRxOkCnt = 0; if (!priv->rtllib->bCTSToSelfEnable) { - pHTInfo->iot_action &= ~HT_IOT_ACT_FORCED_CTS2SELF; + ht_info->iot_action &= ~HT_IOT_ACT_FORCED_CTS2SELF; return; } - if (pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) { + if (ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) { curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt; curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt; if (curRxOkCnt > 4*curTxOkCnt) - pHTInfo->iot_action &= ~HT_IOT_ACT_FORCED_CTS2SELF; + ht_info->iot_action &= ~HT_IOT_ACT_FORCED_CTS2SELF; else - pHTInfo->iot_action |= HT_IOT_ACT_FORCED_CTS2SELF; + ht_info->iot_action |= HT_IOT_ACT_FORCED_CTS2SELF; lastTxOkCnt = priv->stats.txbytesunicast; lastRxOkCnt = priv->stats.rxbytesunicast; @@ -1653,10 +1648,10 @@ static void _rtl92e_dm_cts_to_self(struct net_device *dev) static void _rtl92e_dm_init_wa_broadcom_iot(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv((struct net_device *)dev); - struct rt_hi_throughput *pHTInfo = priv->rtllib->pHTInfo; + struct rt_hi_throughput *ht_info = priv->rtllib->ht_info; - pHTInfo->bWAIotBroadcom = false; - pHTInfo->WAIotTH = WAIotTHVal; + ht_info->bWAIotBroadcom = false; + ht_info->WAIotTH = WAIotTHVal; } static void _rtl92e_dm_check_rf_ctrl_gpio(void *data) @@ -1698,7 +1693,7 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data) if (bActuallySet) { mdelay(1000); - priv->bHwRfOffAction = 1; + priv->hw_rf_off_action = 1; rtl92e_set_rf_state(dev, rf_power_state_to_set, RF_CHANGE_BY_HW); if (priv->hw_radio_off) argv[1] = "RFOFF"; @@ -1997,7 +1992,7 @@ static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t) if (priv->rtllib->state == RTLLIB_LINKED && priv->rtllib->bfsync_enable && - (priv->rtllib->pHTInfo->iot_action & HT_IOT_ACT_CDD_FSYNC)) { + (priv->rtllib->ht_info->iot_action & HT_IOT_ACT_CDD_FSYNC)) { u32 rate_bitmap; for (rate_index = 0; rate_index <= 27; rate_index++) { @@ -2126,8 +2121,8 @@ static void _rtl92e_dm_end_sw_fsync(struct net_device *dev) static void _rtl92e_dm_start_sw_fsync(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); - u32 rateIndex; - u32 rateBitmap; + u32 rate_index; + u32 rate_bitmap; priv->rate_record = 0; priv->ContinueDiffCount = 0; @@ -2141,12 +2136,12 @@ static void _rtl92e_dm_start_sw_fsync(struct net_device *dev) priv->rtllib->fsync_firstdiff_ratethreshold = 200; 
priv->rtllib->fsync_seconddiff_ratethreshold = 200; } - for (rateIndex = 0; rateIndex <= 27; rateIndex++) { - rateBitmap = 1 << rateIndex; - if (priv->rtllib->fsync_rate_bitmap & rateBitmap) + for (rate_index = 0; rate_index <= 27; rate_index++) { + rate_bitmap = 1 << rate_index; + if (priv->rtllib->fsync_rate_bitmap & rate_bitmap) priv->rate_record += priv->stats.received_rate_histogram[1] - [rateIndex]; + [rate_index]; } if (timer_pending(&priv->fsync_timer)) del_timer_sync(&priv->fsync_timer); @@ -2168,7 +2163,7 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev) static u32 reset_cnt; if (priv->rtllib->state == RTLLIB_LINKED && - priv->rtllib->pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) { + priv->rtllib->ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) { if (priv->rtllib->bfsync_enable == 0) { switch (priv->rtllib->fsync_state) { case Default_Fsync: @@ -2293,7 +2288,7 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev) priv->bDynamicTxLowPower = false; return; } - if ((priv->rtllib->pHTInfo->IOTPeer == HT_IOT_PEER_ATHEROS) && + if ((priv->rtllib->ht_info->IOTPeer == HT_IOT_PEER_ATHEROS) && (priv->rtllib->mode == IEEE_G)) { txhipower_threshold = TX_POWER_ATHEROAP_THRESH_HIGH; txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW; diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h index 51e295d389a839311e522700ca3c287954beecbf..1d4d7d98a8599bdb3a98118f473bb4456abe9527 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h @@ -66,8 +66,8 @@ struct dig_t { u8 dig_state; u8 dig_highpwr_state; - u8 CurSTAConnectState; - u8 PreSTAConnectState; + u8 cur_sta_connect_state; + u8 pre_sta_connect_state; u8 curpd_thstate; u8 prepd_thstate; @@ -152,9 +152,9 @@ enum dm_cck_rx_path_method { struct dcmd_txcmd { - u32 Op; - u32 Length; - u32 Value; + u32 op; + u32 length; + u32 value; }; /*------------------------------Define structure----------------------------*/ diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c index 59532ed2156d36a81e84c4c15ced49c8a43f8e6d..db57c655c6953aeab2a04ae67a3b45da4c2d8095 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c @@ -79,6 +79,6 @@ u32 rtl92e_eeprom_read(struct net_device *dev, u32 addr) ret = _rtl92e_eeprom_xfer(dev, (addr & 0x3F) | (0x6 << 6), 9); rtl92e_writeb(dev, EPROM_CMD, - (EPROM_CMD_NORMAL<<EPROM_CMD_OPERATING_MODE_SHIFT)); + (EPROM_CMD_NORMAL << EPROM_CMD_OPERATING_MODE_SHIFT)); return ret; } diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c --- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c @@ ... @@ static void _rtl92e_ps_update_rf_state(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); - struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) - &(priv->rtllib->PowerSaveControl); + struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *) + &priv->rtllib->pwr_save_ctrl; - pPSC->bSwRfProcessing = true; - rtl92e_set_rf_state(dev, pPSC->eInactivePowerState, RF_CHANGE_BY_IPS); + psc->bSwRfProcessing = true; + rtl92e_set_rf_state(dev, psc->eInactivePowerState, RF_CHANGE_BY_IPS); - pPSC->bSwRfProcessing = false; + psc->bSwRfProcessing = false; } void rtl92e_ips_enter(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); - struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) - &(priv->rtllib->PowerSaveControl); + struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *) + &priv->rtllib->pwr_save_ctrl; enum rt_rf_power_state rt_state; - if (pPSC->bInactivePs) { - rt_state = priv->rtllib->rf_power_state; - if (rt_state == rf_on && !pPSC->bSwRfProcessing && - (priv->rtllib->state != RTLLIB_LINKED) && - (priv->rtllib->iw_mode != IW_MODE_MASTER)) { - pPSC->eInactivePowerState = rf_off; - priv->isRFOff = true; - priv->bInPowerSaveMode = true; - _rtl92e_ps_update_rf_state(dev); - 
} + rt_state = priv->rtllib->rf_power_state; + if (rt_state == rf_on && !psc->bSwRfProcessing && + (priv->rtllib->state != RTLLIB_LINKED) && + (priv->rtllib->iw_mode != IW_MODE_MASTER)) { + psc->eInactivePowerState = rf_off; + _rtl92e_ps_update_rf_state(dev); } } void rtl92e_ips_leave(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); - struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) - &(priv->rtllib->PowerSaveControl); + struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *) + &priv->rtllib->pwr_save_ctrl; enum rt_rf_power_state rt_state; - if (pPSC->bInactivePs) { - rt_state = priv->rtllib->rf_power_state; - if (rt_state != rf_on && !pPSC->bSwRfProcessing && - priv->rtllib->rf_off_reason <= RF_CHANGE_BY_IPS) { - pPSC->eInactivePowerState = rf_on; - priv->bInPowerSaveMode = false; - _rtl92e_ps_update_rf_state(dev); - } + rt_state = priv->rtllib->rf_power_state; + if (rt_state != rf_on && !psc->bSwRfProcessing && + priv->rtllib->rf_off_reason <= RF_CHANGE_BY_IPS) { + psc->eInactivePowerState = rf_on; + _rtl92e_ps_update_rf_state(dev); } } @@ -165,18 +158,15 @@ void rtl92e_rtllib_ips_leave_wq(struct net_device *dev) enum rt_rf_power_state rt_state; rt_state = priv->rtllib->rf_power_state; - - if (priv->rtllib->PowerSaveControl.bInactivePs) { - if (rt_state == rf_off) { - if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) { - netdev_warn(dev, "%s(): RF is OFF.\n", - __func__); - return; - } - netdev_info(dev, "=========>%s(): rtl92e_ips_leave\n", + if (rt_state == rf_off) { + if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) { + netdev_warn(dev, "%s(): RF is OFF.\n", __func__); - schedule_work(&priv->rtllib->ips_leave_wq); + return; } + netdev_info(dev, "=========>%s(): rtl92e_ips_leave\n", + __func__); + schedule_work(&priv->rtllib->ips_leave_wq); } } @@ -216,8 +206,8 @@ static bool _rtl92e_ps_set_mode(struct net_device *dev, u8 rtPsMode) void rtl92e_leisure_ps_enter(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); - struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) - &(priv->rtllib->PowerSaveControl); + struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *) + &priv->rtllib->pwr_save_ctrl; if (!((priv->rtllib->iw_mode == IW_MODE_INFRA) && (priv->rtllib->state == RTLLIB_LINKED)) @@ -225,38 +215,31 @@ void rtl92e_leisure_ps_enter(struct net_device *dev) (priv->rtllib->iw_mode == IW_MODE_MASTER)) return; - if (pPSC->bLeisurePs) { - if (pPSC->LpsIdleCount >= RT_CHECK_FOR_HANG_PERIOD) { + if (psc->bLeisurePs) { + if (psc->LpsIdleCount >= RT_CHECK_FOR_HANG_PERIOD) { if (priv->rtllib->ps == RTLLIB_PS_DISABLED) { - if (!pPSC->bFwCtrlLPS) { - if (priv->rtllib->SetFwCmdHandler) - priv->rtllib->SetFwCmdHandler( - dev, FW_CMD_LPS_ENTER); - } + if (priv->rtllib->SetFwCmdHandler) + priv->rtllib->SetFwCmdHandler(dev, FW_CMD_LPS_ENTER); _rtl92e_ps_set_mode(dev, RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST); } } else - pPSC->LpsIdleCount++; + psc->LpsIdleCount++; } } void rtl92e_leisure_ps_leave(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); - struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) - &(priv->rtllib->PowerSaveControl); + struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *) + &priv->rtllib->pwr_save_ctrl; - if (pPSC->bLeisurePs) { + if (psc->bLeisurePs) { if (priv->rtllib->ps != RTLLIB_PS_DISABLED) { _rtl92e_ps_set_mode(dev, RTLLIB_PS_DISABLED); - - if (!pPSC->bFwCtrlLPS) { - if (priv->rtllib->SetFwCmdHandler) - priv->rtllib->SetFwCmdHandler(dev, - FW_CMD_LPS_LEAVE); - } + if 
(priv->rtllib->SetFwCmdHandler) + priv->rtllib->SetFwCmdHandler(dev, FW_CMD_LPS_LEAVE); } } } diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c index 4920cb49e3811799cc933da7bce1d0be3b159ee5..bf0030144e5d8c590b214276087e37798181f95b 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c @@ -159,21 +159,21 @@ static int _rtl92e_wx_adapter_power_status(struct net_device *dev, union iwreq_data *wrqu, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); - struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) - (&priv->rtllib->PowerSaveControl); + struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *) + (&priv->rtllib->pwr_save_ctrl); struct rtllib_device *ieee = priv->rtllib; mutex_lock(&priv->wx_mutex); if (*extra || priv->force_lps) { priv->ps_force = false; - pPSC->bLeisurePs = true; + psc->bLeisurePs = true; } else { if (priv->rtllib->state == RTLLIB_LINKED) rtl92e_leisure_ps_leave(dev); priv->ps_force = true; - pPSC->bLeisurePs = false; + psc->bLeisurePs = false; ieee->ps = *extra; } @@ -188,15 +188,15 @@ static int _rtl92e_wx_set_lps_awake_interval(struct net_device *dev, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); - struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) - (&priv->rtllib->PowerSaveControl); + struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *) + (&priv->rtllib->pwr_save_ctrl); mutex_lock(&priv->wx_mutex); netdev_info(dev, "%s(): set lps awake interval ! extra is %d\n", __func__, *extra); - pPSC->RegMaxLPSAwakeIntvl = *extra; + psc->reg_max_lps_awake_intvl = *extra; mutex_unlock(&priv->wx_mutex); return 0; } @@ -251,23 +251,21 @@ static int _rtl92e_wx_set_mode(struct net_device *dev, rt_state = priv->rtllib->rf_power_state; mutex_lock(&priv->wx_mutex); if (wrqu->mode == IW_MODE_ADHOC || wrqu->mode == IW_MODE_MONITOR || - ieee->bNetPromiscuousMode) { - if (priv->rtllib->PowerSaveControl.bInactivePs) { - if (rt_state == rf_off) { - if (priv->rtllib->rf_off_reason > - RF_CHANGE_BY_IPS) { - netdev_warn(dev, "%s(): RF is OFF.\n", - __func__); - mutex_unlock(&priv->wx_mutex); - return -1; - } - netdev_info(dev, - "=========>%s(): rtl92e_ips_leave\n", + ieee->net_promiscuous_md) { + if (rt_state == rf_off) { + if (priv->rtllib->rf_off_reason > + RF_CHANGE_BY_IPS) { + netdev_warn(dev, "%s(): RF is OFF.\n", __func__); - mutex_lock(&priv->rtllib->ips_mutex); - rtl92e_ips_leave(dev); - mutex_unlock(&priv->rtllib->ips_mutex); + mutex_unlock(&priv->wx_mutex); + return -1; } + netdev_info(dev, + "=========>%s(): rtl92e_ips_leave\n", + __func__); + mutex_lock(&priv->rtllib->ips_mutex); + rtl92e_ips_leave(dev); + mutex_unlock(&priv->rtllib->ips_mutex); } } ret = rtllib_wx_set_mode(priv->rtllib, a, wrqu, b); @@ -395,7 +393,7 @@ static int _rtl92e_wx_set_scan(struct net_device *dev, rt_state = priv->rtllib->rf_power_state; if (!priv->up) return -ENETDOWN; - if (priv->rtllib->LinkDetectInfo.bBusyTraffic == true) + if (priv->rtllib->link_detect_info.bBusyTraffic == true) return -EAGAIN; if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { @@ -414,19 +412,17 @@ static int _rtl92e_wx_set_scan(struct net_device *dev, priv->rtllib->FirstIe_InScan = true; if (priv->rtllib->state != RTLLIB_LINKED) { - if (priv->rtllib->PowerSaveControl.bInactivePs) { - if (rt_state == rf_off) { - if (priv->rtllib->rf_off_reason > - RF_CHANGE_BY_IPS) { - netdev_warn(dev, "%s(): RF is OFF.\n", - __func__); - mutex_unlock(&priv->wx_mutex); - return -1; - } - 
mutex_lock(&priv->rtllib->ips_mutex); - rtl92e_ips_leave(dev); - mutex_unlock(&priv->rtllib->ips_mutex); + if (rt_state == rf_off) { + if (priv->rtllib->rf_off_reason > + RF_CHANGE_BY_IPS) { + netdev_warn(dev, "%s(): RF is OFF.\n", + __func__); + mutex_unlock(&priv->wx_mutex); + return -1; } + mutex_lock(&priv->rtllib->ips_mutex); + rtl92e_ips_leave(dev); + mutex_unlock(&priv->rtllib->ips_mutex); } rtllib_stop_scan(priv->rtllib); if (priv->rtllib->LedControlHandler) @@ -919,7 +915,7 @@ static int _rtl92e_wx_set_encode_ext(struct net_device *dev, key, 0); } else { if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) && - ieee->pHTInfo->bCurrentHTSupport) + ieee->ht_info->bCurrentHTSupport) rtl92e_writeb(dev, 0x173, 1); rtl92e_set_key(dev, 4, idx, alg, (u8 *)ieee->ap_mac_addr, 0, key); @@ -1018,29 +1014,29 @@ static int _rtl92e_wx_set_promisc_mode(struct net_device *dev, u32 info_buf[3]; u32 oid; - u32 bPromiscuousOn; - u32 bFilterSourceStationFrame; + u32 promiscuous_on; + u32 fltr_src_sta_frame; if (copy_from_user(info_buf, wrqu->data.pointer, sizeof(info_buf))) return -EFAULT; oid = info_buf[0]; - bPromiscuousOn = info_buf[1]; - bFilterSourceStationFrame = info_buf[2]; + promiscuous_on = info_buf[1]; + fltr_src_sta_frame = info_buf[2]; if (oid == OID_RT_INTEL_PROMISCUOUS_MODE) { - ieee->IntelPromiscuousModeInfo.bPromiscuousOn = - (bPromiscuousOn) ? (true) : (false); - ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame = - (bFilterSourceStationFrame) ? (true) : (false); - (bPromiscuousOn) ? + ieee->intel_promiscuous_md_info.promiscuous_on = + (promiscuous_on) ? (true) : (false); + ieee->intel_promiscuous_md_info.fltr_src_sta_frame = + (fltr_src_sta_frame) ? (true) : (false); + (promiscuous_on) ? (rtllib_EnableIntelPromiscuousMode(dev, false)) : (rtllib_DisableIntelPromiscuousMode(dev, false)); netdev_info(dev, "=======>%s(), on = %d, filter src sta = %d\n", - __func__, bPromiscuousOn, - bFilterSourceStationFrame); + __func__, promiscuous_on, + fltr_src_sta_frame); } else { return -1; } @@ -1058,8 +1054,8 @@ static int _rtl92e_wx_get_promisc_mode(struct net_device *dev, mutex_lock(&priv->wx_mutex); snprintf(extra, 45, "PromiscuousMode:%d, FilterSrcSTAFrame:%d", - ieee->IntelPromiscuousModeInfo.bPromiscuousOn, - ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame); + ieee->intel_promiscuous_md_info.promiscuous_on, + ieee->intel_promiscuous_md_info.fltr_src_sta_frame); wrqu->data.length = strlen(extra) + 1; mutex_unlock(&priv->wx_mutex); diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c index 19d13b3fcecf72d78521d122a24cbaa8cc2441d3..acc19514bca60a1b8663a8dea5cd4ba90e87ff0e 100644 --- a/drivers/staging/rtl8192e/rtl819x_BAProc.c +++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c @@ -62,6 +62,7 @@ void ResetBaEntry(struct ba_record *pBA) pBA->dialog_token = 0; pBA->ba_start_seq_ctrl.short_data = 0; } + static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst, struct ba_record *pBA, u16 StatusCode, u8 type) @@ -111,7 +112,7 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst, tag += 2; if (type == ACT_ADDBAREQ) { - memcpy(tag, (u8 *)&(pBA->ba_start_seq_ctrl), 2); + memcpy(tag, (u8 *)&pBA->ba_start_seq_ctrl, 2); tag += 2; } @@ -159,7 +160,6 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst, *tag++ = ACT_CAT_BA; *tag++ = ACT_DELBA; - put_unaligned_le16(DelbaParamSet.short_data, tag); tag += 2; @@ -180,11 +180,10 @@ static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst, 
skb = rtllib_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ); - if (skb) { + if (skb) softmac_mgmt_xmit(skb, ieee); - } else { + else netdev_dbg(ieee->dev, "Failed to generate ADDBAReq packet.\n"); - } } static void rtllib_send_ADDBARsp(struct rtllib_device *ieee, u8 *dst, @@ -245,17 +244,17 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb) pBaStartSeqCtrl = (union sequence_control *)(req + 7); if (!ieee->current_network.qos_data.active || - !ieee->pHTInfo->bCurrentHTSupport || - (ieee->pHTInfo->iot_action & HT_IOT_ACT_REJECT_ADDBA_REQ)) { + !ieee->ht_info->bCurrentHTSupport || + (ieee->ht_info->iot_action & HT_IOT_ACT_REJECT_ADDBA_REQ)) { rc = ADDBA_STATUS_REFUSED; netdev_warn(ieee->dev, "Failed to reply on ADDBA_REQ as some capability is not ready(%d, %d)\n", ieee->current_network.qos_data.active, - ieee->pHTInfo->bCurrentHTSupport); + ieee->ht_info->bCurrentHTSupport); goto OnADDBAReq_Fail; } - if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst, - (u8)(pBaParamSet->field.tid), RX_DIR, true)) { + if (!GetTs(ieee, (struct ts_common_info **)&pTS, dst, + (u8)(pBaParamSet->field.tid), RX_DIR, true)) { rc = ADDBA_STATUS_REFUSED; netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__); goto OnADDBAReq_Fail; @@ -278,7 +277,7 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb) pBA->ba_start_seq_ctrl = *pBaStartSeqCtrl; if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev) || - (ieee->pHTInfo->iot_action & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT)) + (ieee->ht_info->iot_action & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT)) pBA->ba_param_set.field.buffer_size = 1; else pBA->ba_param_set.field.buffer_size = 32; @@ -327,18 +326,18 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb) pBaTimeoutVal = (u16 *)(tag + 7); if (!ieee->current_network.qos_data.active || - !ieee->pHTInfo->bCurrentHTSupport || - !ieee->pHTInfo->bCurrentAMPDUEnable) { + !ieee->ht_info->bCurrentHTSupport || + !ieee->ht_info->bCurrentAMPDUEnable) { netdev_warn(ieee->dev, "reject to ADDBA_RSP as some capability is not ready(%d, %d, %d)\n", ieee->current_network.qos_data.active, - ieee->pHTInfo->bCurrentHTSupport, - ieee->pHTInfo->bCurrentAMPDUEnable); + ieee->ht_info->bCurrentHTSupport, + ieee->ht_info->bCurrentAMPDUEnable); ReasonCode = DELBA_REASON_UNKNOWN_BA; goto OnADDBARsp_Reject; } - if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst, + if (!GetTs(ieee, (struct ts_common_info **)&pTS, dst, (u8)(pBaParamSet->field.tid), TX_DIR, false)) { netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__); ReasonCode = DELBA_REASON_UNKNOWN_BA; @@ -375,7 +374,6 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb) goto OnADDBARsp_Reject; } - pAdmittedBA->dialog_token = *pDialogToken; pAdmittedBA->ba_timeout_value = *pBaTimeoutVal; pAdmittedBA->ba_start_seq_ctrl = pPendingBA->ba_start_seq_ctrl; @@ -415,11 +413,11 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb) } if (!ieee->current_network.qos_data.active || - !ieee->pHTInfo->bCurrentHTSupport) { + !ieee->ht_info->bCurrentHTSupport) { netdev_warn(ieee->dev, "received DELBA while QOS or HT is not supported(%d, %d)\n", ieee->current_network. 
qos_data.active, - ieee->pHTInfo->bCurrentHTSupport); + ieee->ht_info->bCurrentHTSupport); return -1; } @@ -435,7 +433,7 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb) struct rx_ts_record *pRxTs; if (!GetTs(ieee, (struct ts_common_info **)&pRxTs, dst, - (u8)pDelBaParamSet->field.tid, RX_DIR, false)) { + (u8)pDelBaParamSet->field.tid, RX_DIR, false)) { netdev_warn(ieee->dev, "%s(): can't get TS for RXTS. dst:%pM TID:%d\n", __func__, dst, diff --git a/drivers/staging/rtl8192e/rtl819x_HT.h b/drivers/staging/rtl8192e/rtl819x_HT.h index 76bc9c5a6d83d0e3a2ffa02726373cba926b3e30..22e4f126ed565cfebbbcaea3c910e5e045c7bc73 100644 --- a/drivers/staging/rtl8192e/rtl819x_HT.h +++ b/drivers/staging/rtl8192e/rtl819x_HT.h @@ -96,7 +96,7 @@ enum ht_aggre_mode { struct rt_hi_throughput { - u8 bEnableHT; + u8 enable_ht; u8 bCurrentHTSupport; u8 bRegBW40MHz; diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c index ef3dca51cf99b3af2fea79c5ab454ddbc3b889b4..fe30a291e64c9dacd0f54ecee03b1e9e6e6080d6 100644 --- a/drivers/staging/rtl8192e/rtl819x_HTProc.c +++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c @@ -69,47 +69,48 @@ static u8 LINKSYS_MARVELL_4400N[3] = {0x00, 0x14, 0xa4}; void HTUpdateDefaultSetting(struct rtllib_device *ieee) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; - pHTInfo->bRegShortGI20MHz = 1; - pHTInfo->bRegShortGI40MHz = 1; + struct rt_hi_throughput *ht_info = ieee->ht_info; - pHTInfo->bRegBW40MHz = 1; + ht_info->bRegShortGI20MHz = 1; + ht_info->bRegShortGI40MHz = 1; - if (pHTInfo->bRegBW40MHz) - pHTInfo->bRegSuppCCK = 1; + ht_info->bRegBW40MHz = 1; + + if (ht_info->bRegBW40MHz) + ht_info->bRegSuppCCK = 1; else - pHTInfo->bRegSuppCCK = true; + ht_info->bRegSuppCCK = true; - pHTInfo->nAMSDU_MaxSize = 7935UL; - pHTInfo->bAMSDU_Support = 0; + ht_info->nAMSDU_MaxSize = 7935UL; + ht_info->bAMSDU_Support = 0; - pHTInfo->bAMPDUEnable = 1; - pHTInfo->AMPDU_Factor = 2; - pHTInfo->MPDU_Density = 0; + ht_info->bAMPDUEnable = 1; + ht_info->AMPDU_Factor = 2; + ht_info->MPDU_Density = 0; - pHTInfo->self_mimo_ps = 3; - if (pHTInfo->self_mimo_ps == 2) - pHTInfo->self_mimo_ps = 3; - ieee->bTxDisableRateFallBack = 0; - ieee->bTxUseDriverAssingedRate = 0; + ht_info->self_mimo_ps = 3; + if (ht_info->self_mimo_ps == 2) + ht_info->self_mimo_ps = 3; + ieee->tx_dis_rate_fallback = 0; + ieee->tx_use_drv_assinged_rate = 0; ieee->bTxEnableFwCalcDur = 1; - pHTInfo->reg_rt2rt_aggregation = 1; + ht_info->reg_rt2rt_aggregation = 1; - pHTInfo->reg_rx_reorder_enable = 1; - pHTInfo->rx_reorder_win_size = 64; - pHTInfo->rx_reorder_pending_time = 30; + ht_info->reg_rx_reorder_enable = 1; + ht_info->rx_reorder_win_size = 64; + ht_info->rx_reorder_pending_time = 30; } static u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; - u8 is40MHz = (pHTInfo->bCurBW40MHz) ? 1 : 0; - u8 isShortGI = (pHTInfo->bCurBW40MHz) ? - ((pHTInfo->bCurShortGI40MHz) ? 1 : 0) : - ((pHTInfo->bCurShortGI20MHz) ? 1 : 0); + u8 is40MHz = (ht_info->bCurBW40MHz) ? 1 : 0; + u8 isShortGI = (ht_info->bCurBW40MHz) ? + ((ht_info->bCurShortGI40MHz) ? 1 : 0) : + ((ht_info->bCurShortGI20MHz) ? 
1 : 0); return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate & 0x7f)]; } @@ -151,8 +152,8 @@ bool IsHTHalfNmodeAPs(struct rtllib_device *ieee) (net->ralink_cap_exist)) retValue = true; else if (!memcmp(net->bssid, UNKNOWN_BORADCOM, 3) || - !memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) || - !memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) || + !memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) || + !memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) || (net->broadcom_cap_exist)) retValue = true; else if (net->bssht.bd_rt2rt_aggregation) @@ -165,45 +166,45 @@ bool IsHTHalfNmodeAPs(struct rtllib_device *ieee) static void HTIOTPeerDetermine(struct rtllib_device *ieee) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; struct rtllib_network *net = &ieee->current_network; if (net->bssht.bd_rt2rt_aggregation) { - pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK; + ht_info->IOTPeer = HT_IOT_PEER_REALTEK; if (net->bssht.rt2rt_ht_mode & RT_HT_CAP_USE_92SE) - pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK_92SE; + ht_info->IOTPeer = HT_IOT_PEER_REALTEK_92SE; if (net->bssht.rt2rt_ht_mode & RT_HT_CAP_USE_SOFTAP) - pHTInfo->IOTPeer = HT_IOT_PEER_92U_SOFTAP; + ht_info->IOTPeer = HT_IOT_PEER_92U_SOFTAP; } else if (net->broadcom_cap_exist) { - pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM; + ht_info->IOTPeer = HT_IOT_PEER_BROADCOM; } else if (!memcmp(net->bssid, UNKNOWN_BORADCOM, 3) || !memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) || !memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)) { - pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM; + ht_info->IOTPeer = HT_IOT_PEER_BROADCOM; } else if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) || (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) || (memcmp(net->bssid, PCI_RALINK, 3) == 0) || (memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) || (memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) || net->ralink_cap_exist) { - pHTInfo->IOTPeer = HT_IOT_PEER_RALINK; + ht_info->IOTPeer = HT_IOT_PEER_RALINK; } else if ((net->atheros_cap_exist) || (memcmp(net->bssid, DLINK_ATHEROS_1, 3) == 0) || (memcmp(net->bssid, DLINK_ATHEROS_2, 3) == 0)) { - pHTInfo->IOTPeer = HT_IOT_PEER_ATHEROS; + ht_info->IOTPeer = HT_IOT_PEER_ATHEROS; } else if ((memcmp(net->bssid, CISCO_BROADCOM, 3) == 0) || net->cisco_cap_exist) { - pHTInfo->IOTPeer = HT_IOT_PEER_CISCO; + ht_info->IOTPeer = HT_IOT_PEER_CISCO; } else if ((memcmp(net->bssid, LINKSYS_MARVELL_4400N, 3) == 0) || net->marvell_cap_exist) { - pHTInfo->IOTPeer = HT_IOT_PEER_MARVELL; + ht_info->IOTPeer = HT_IOT_PEER_MARVELL; } else if (net->airgo_cap_exist) { - pHTInfo->IOTPeer = HT_IOT_PEER_AIRGO; + ht_info->IOTPeer = HT_IOT_PEER_AIRGO; } else { - pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN; + ht_info->IOTPeer = HT_IOT_PEER_UNKNOWN; } - netdev_dbg(ieee->dev, "IOTPEER: %x\n", pHTInfo->IOTPeer); + netdev_dbg(ieee->dev, "IOTPEER: %x\n", ht_info->IOTPeer); } static u8 HTIOTActIsDisableMCS14(struct rtllib_device *ieee, u8 *PeerMacAddr) @@ -232,7 +233,7 @@ static u8 HTIOTActIsMgntUseCCK6M(struct rtllib_device *ieee, { u8 retValue = 0; - if (ieee->pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) + if (ieee->ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) retValue = 1; return retValue; @@ -242,49 +243,49 @@ static u8 HTIOTActIsCCDFsync(struct rtllib_device *ieee) { u8 retValue = 0; - if (ieee->pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) + if (ieee->ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) retValue = 1; return retValue; } static void HTIOTActDetermineRaFunc(struct rtllib_device 
*ieee, bool bPeerRx2ss) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; - pHTInfo->iot_ra_func &= HT_IOT_RAFUNC_DISABLE_ALL; + ht_info->iot_ra_func &= HT_IOT_RAFUNC_DISABLE_ALL; - if (pHTInfo->IOTPeer == HT_IOT_PEER_RALINK && !bPeerRx2ss) - pHTInfo->iot_ra_func |= HT_IOT_RAFUNC_PEER_1R; + if (ht_info->IOTPeer == HT_IOT_PEER_RALINK && !bPeerRx2ss) + ht_info->iot_ra_func |= HT_IOT_RAFUNC_PEER_1R; - if (pHTInfo->iot_action & HT_IOT_ACT_AMSDU_ENABLE) - pHTInfo->iot_ra_func |= HT_IOT_RAFUNC_TX_AMSDU; + if (ht_info->iot_action & HT_IOT_ACT_AMSDU_ENABLE) + ht_info->iot_ra_func |= HT_IOT_RAFUNC_TX_AMSDU; } -void HTResetIOTSetting(struct rt_hi_throughput *pHTInfo) +void HTResetIOTSetting(struct rt_hi_throughput *ht_info) { - pHTInfo->iot_action = 0; - pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN; - pHTInfo->iot_ra_func = 0; + ht_info->iot_action = 0; + ht_info->IOTPeer = HT_IOT_PEER_UNKNOWN; + ht_info->iot_ra_func = 0; } void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap, u8 *len, u8 IsEncrypt, bool bAssoc) { - struct rt_hi_throughput *pHT = ieee->pHTInfo; + struct rt_hi_throughput *pHT = ieee->ht_info; struct ht_capab_ele *pCapELE = NULL; if (!posHTCap || !pHT) { netdev_warn(ieee->dev, - "%s(): posHTCap and pHTInfo are null\n", __func__); + "%s(): posHTCap and ht_info are null\n", __func__); return; } memset(posHTCap, 0, *len); if ((bAssoc) && (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)) { - u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; + static const u8 EWC11NHTCap[] = { 0x00, 0x90, 0x4c, 0x33 }; memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap)); - pCapELE = (struct ht_capab_ele *)&(posHTCap[4]); + pCapELE = (struct ht_capab_ele *)&posHTCap[4]; *len = 30 + 2; } else { pCapELE = (struct ht_capab_ele *)posHTCap; @@ -322,7 +323,7 @@ void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap, pCapELE->MPDUDensity = 0; } - memcpy(pCapELE->MCS, ieee->Regdot11HTOperationalRateSet, 16); + memcpy(pCapELE->MCS, ieee->reg_dot11ht_oper_rate_set, 16); memset(&pCapELE->ExtHTCapInfo, 0, 2); memset(pCapELE->TxBFCap, 0, 4); @@ -351,7 +352,7 @@ void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap, void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo, u8 *len, u8 IsEncrypt) { - struct rt_hi_throughput *pHT = ieee->pHTInfo; + struct rt_hi_throughput *pHT = ieee->ht_info; struct ht_info_ele *pHTInfoEle = (struct ht_info_ele *)posHTInfo; if (!posHTInfo || !pHTInfoEle) { @@ -488,7 +489,7 @@ static u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS, u8 i; for (i = 0; i <= 15; i++) - pOperateMCS[i] = ieee->Regdot11TxHTOperationalRateSet[i] & + pOperateMCS[i] = ieee->reg_dot11tx_ht_oper_rate_set[i] & pSupportMCS[i]; HT_PickMCSRate(ieee, pOperateMCS); @@ -508,163 +509,159 @@ void HTSetConnectBwMode(struct rtllib_device *ieee, void HTOnAssocRsp(struct rtllib_device *ieee) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; struct ht_capab_ele *pPeerHTCap = NULL; struct ht_info_ele *pPeerHTInfo = NULL; u16 nMaxAMSDUSize = 0; u8 *pMcsFilter = NULL; - static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; - static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; + static const u8 EWC11NHTCap[] = { 0x00, 0x90, 0x4c, 0x33 }; + static const u8 EWC11NHTInfo[] = { 0x00, 0x90, 0x4c, 0x34 }; - if (!pHTInfo->bCurrentHTSupport) { + if (!ht_info->bCurrentHTSupport) { netdev_warn(ieee->dev, "%s(): HT_DISABLE\n", __func__); return; } 
netdev_dbg(ieee->dev, "%s(): HT_ENABLE\n", __func__); - if (!memcmp(pHTInfo->PeerHTCapBuf, EWC11NHTCap, sizeof(EWC11NHTCap))) - pPeerHTCap = (struct ht_capab_ele *)(&pHTInfo->PeerHTCapBuf[4]); + if (!memcmp(ht_info->PeerHTCapBuf, EWC11NHTCap, sizeof(EWC11NHTCap))) + pPeerHTCap = (struct ht_capab_ele *)(&ht_info->PeerHTCapBuf[4]); else - pPeerHTCap = (struct ht_capab_ele *)(pHTInfo->PeerHTCapBuf); + pPeerHTCap = (struct ht_capab_ele *)(ht_info->PeerHTCapBuf); - if (!memcmp(pHTInfo->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo))) + if (!memcmp(ht_info->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo))) pPeerHTInfo = (struct ht_info_ele *) - (&pHTInfo->PeerHTInfoBuf[4]); + (&ht_info->PeerHTInfoBuf[4]); else - pPeerHTInfo = (struct ht_info_ele *)(pHTInfo->PeerHTInfoBuf); + pPeerHTInfo = (struct ht_info_ele *)(ht_info->PeerHTInfoBuf); #ifdef VERBOSE_DEBUG print_hex_dump_bytes("%s: ", __func__, DUMP_PREFIX_NONE, pPeerHTCap, sizeof(struct ht_capab_ele)); #endif HTSetConnectBwMode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth), - (enum ht_extchnl_offset)(pPeerHTInfo->ExtChlOffset)); - pHTInfo->cur_tx_bw40mhz = ((pPeerHTInfo->RecommemdedTxWidth == 1) ? + (enum ht_extchnl_offset)(pPeerHTInfo->ExtChlOffset)); + ht_info->cur_tx_bw40mhz = ((pPeerHTInfo->RecommemdedTxWidth == 1) ? true : false); - pHTInfo->bCurShortGI20MHz = ((pHTInfo->bRegShortGI20MHz) ? + ht_info->bCurShortGI20MHz = ((ht_info->bRegShortGI20MHz) ? ((pPeerHTCap->ShortGI20Mhz == 1) ? true : false) : false); - pHTInfo->bCurShortGI40MHz = ((pHTInfo->bRegShortGI40MHz) ? + ht_info->bCurShortGI40MHz = ((ht_info->bRegShortGI40MHz) ? ((pPeerHTCap->ShortGI40Mhz == 1) ? true : false) : false); - pHTInfo->bCurSuppCCK = ((pHTInfo->bRegSuppCCK) ? + ht_info->bCurSuppCCK = ((ht_info->bRegSuppCCK) ? ((pPeerHTCap->DssCCk == 1) ? true : false) : false); - pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support; + ht_info->bCurrent_AMSDU_Support = ht_info->bAMSDU_Support; nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize == 0) ? 
3839 : 7935; - if (pHTInfo->nAMSDU_MaxSize > nMaxAMSDUSize) - pHTInfo->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize; + if (ht_info->nAMSDU_MaxSize > nMaxAMSDUSize) + ht_info->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize; else - pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize; + ht_info->nCurrent_AMSDU_MaxSize = ht_info->nAMSDU_MaxSize; - pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable; + ht_info->bCurrentAMPDUEnable = ht_info->bAMPDUEnable; if (ieee->rtllib_ap_sec_type && - (ieee->rtllib_ap_sec_type(ieee) & (SEC_ALG_WEP | SEC_ALG_TKIP))) { - if ((pHTInfo->IOTPeer == HT_IOT_PEER_ATHEROS) || - (pHTInfo->IOTPeer == HT_IOT_PEER_UNKNOWN)) - pHTInfo->bCurrentAMPDUEnable = false; + (ieee->rtllib_ap_sec_type(ieee) & (SEC_ALG_WEP | SEC_ALG_TKIP))) { + if ((ht_info->IOTPeer == HT_IOT_PEER_ATHEROS) || + (ht_info->IOTPeer == HT_IOT_PEER_UNKNOWN)) + ht_info->bCurrentAMPDUEnable = false; } - if (!pHTInfo->reg_rt2rt_aggregation) { - if (pHTInfo->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor) - pHTInfo->CurrentAMPDUFactor = + if (!ht_info->reg_rt2rt_aggregation) { + if (ht_info->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor) + ht_info->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor; else - pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor; + ht_info->CurrentAMPDUFactor = ht_info->AMPDU_Factor; } else { if (ieee->current_network.bssht.bd_rt2rt_aggregation) { if (ieee->pairwise_key_type != KEY_TYPE_NA) - pHTInfo->CurrentAMPDUFactor = + ht_info->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor; else - pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_64K; + ht_info->CurrentAMPDUFactor = HT_AGG_SIZE_64K; } else { - if (pPeerHTCap->MaxRxAMPDUFactor < HT_AGG_SIZE_32K) - pHTInfo->CurrentAMPDUFactor = - pPeerHTCap->MaxRxAMPDUFactor; - else - pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_32K; + ht_info->CurrentAMPDUFactor = min_t(u32, pPeerHTCap->MaxRxAMPDUFactor, + HT_AGG_SIZE_32K); } } - if (pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity) - pHTInfo->current_mpdu_density = pHTInfo->MPDU_Density; - else - pHTInfo->current_mpdu_density = pPeerHTCap->MPDUDensity; - if (pHTInfo->iot_action & HT_IOT_ACT_TX_USE_AMSDU_8K) { - pHTInfo->bCurrentAMPDUEnable = false; - pHTInfo->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE; + ht_info->current_mpdu_density = max_t(u8, ht_info->MPDU_Density, + pPeerHTCap->MPDUDensity); + if (ht_info->iot_action & HT_IOT_ACT_TX_USE_AMSDU_8K) { + ht_info->bCurrentAMPDUEnable = false; + ht_info->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE; } - pHTInfo->cur_rx_reorder_enable = pHTInfo->reg_rx_reorder_enable; + ht_info->cur_rx_reorder_enable = ht_info->reg_rx_reorder_enable; if (pPeerHTCap->MCS[0] == 0) pPeerHTCap->MCS[0] = 0xff; HTIOTActDetermineRaFunc(ieee, ((pPeerHTCap->MCS[1]) != 0)); - HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11HTOperationalRateSet); + HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11ht_oper_rate_set); - pHTInfo->peer_mimo_ps = pPeerHTCap->MimoPwrSave; - if (pHTInfo->peer_mimo_ps == MIMO_PS_STATIC) + ht_info->peer_mimo_ps = pPeerHTCap->MimoPwrSave; + if (ht_info->peer_mimo_ps == MIMO_PS_STATIC) pMcsFilter = MCS_FILTER_1SS; else pMcsFilter = MCS_FILTER_ALL; ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, - ieee->dot11HTOperationalRateSet, pMcsFilter); + ieee->dot11ht_oper_rate_set, + pMcsFilter); ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate; - pHTInfo->current_op_mode = pPeerHTInfo->OptMode; + ht_info->current_op_mode = pPeerHTInfo->OptMode; } void HTInitializeHTInfo(struct rtllib_device *ieee) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput 
*ht_info = ieee->ht_info; - pHTInfo->bCurrentHTSupport = false; + ht_info->bCurrentHTSupport = false; - pHTInfo->bCurBW40MHz = false; - pHTInfo->cur_tx_bw40mhz = false; + ht_info->bCurBW40MHz = false; + ht_info->cur_tx_bw40mhz = false; - pHTInfo->bCurShortGI20MHz = false; - pHTInfo->bCurShortGI40MHz = false; - pHTInfo->forced_short_gi = false; + ht_info->bCurShortGI20MHz = false; + ht_info->bCurShortGI40MHz = false; + ht_info->forced_short_gi = false; - pHTInfo->bCurSuppCCK = true; + ht_info->bCurSuppCCK = true; - pHTInfo->bCurrent_AMSDU_Support = false; - pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize; - pHTInfo->current_mpdu_density = pHTInfo->MPDU_Density; - pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor; + ht_info->bCurrent_AMSDU_Support = false; + ht_info->nCurrent_AMSDU_MaxSize = ht_info->nAMSDU_MaxSize; + ht_info->current_mpdu_density = ht_info->MPDU_Density; + ht_info->CurrentAMPDUFactor = ht_info->AMPDU_Factor; - memset((void *)(&(pHTInfo->SelfHTCap)), 0, - sizeof(pHTInfo->SelfHTCap)); - memset((void *)(&(pHTInfo->SelfHTInfo)), 0, - sizeof(pHTInfo->SelfHTInfo)); - memset((void *)(&(pHTInfo->PeerHTCapBuf)), 0, - sizeof(pHTInfo->PeerHTCapBuf)); - memset((void *)(&(pHTInfo->PeerHTInfoBuf)), 0, - sizeof(pHTInfo->PeerHTInfoBuf)); + memset((void *)(&ht_info->SelfHTCap), 0, + sizeof(ht_info->SelfHTCap)); + memset((void *)(&ht_info->SelfHTInfo), 0, + sizeof(ht_info->SelfHTInfo)); + memset((void *)(&ht_info->PeerHTCapBuf), 0, + sizeof(ht_info->PeerHTCapBuf)); + memset((void *)(&ht_info->PeerHTInfoBuf), 0, + sizeof(ht_info->PeerHTInfoBuf)); - pHTInfo->sw_bw_in_progress = false; + ht_info->sw_bw_in_progress = false; - pHTInfo->ePeerHTSpecVer = HT_SPEC_VER_IEEE; + ht_info->ePeerHTSpecVer = HT_SPEC_VER_IEEE; - pHTInfo->current_rt2rt_aggregation = false; - pHTInfo->current_rt2rt_long_slot_time = false; - pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0; + ht_info->current_rt2rt_aggregation = false; + ht_info->current_rt2rt_long_slot_time = false; + ht_info->RT2RT_HT_Mode = (enum rt_ht_capability)0; - pHTInfo->IOTPeer = 0; - pHTInfo->iot_action = 0; - pHTInfo->iot_ra_func = 0; + ht_info->IOTPeer = 0; + ht_info->iot_action = 0; + ht_info->iot_ra_func = 0; { - u8 *RegHTSuppRateSets = &(ieee->RegHTSuppRateSet[0]); + u8 *RegHTSuppRateSets = &ieee->reg_ht_supp_rate_set[0]; RegHTSuppRateSets[0] = 0xFF; RegHTSuppRateSets[1] = 0xFF; @@ -690,130 +687,130 @@ void HTInitializeBssDesc(struct bss_ht *pBssHT) void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee, struct rtllib_network *pNetwork) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; u8 bIOTAction = 0; - /* unmark bEnableHT flag here is the same reason why unmarked in + /* unmark enable_ht flag here is the same reason why unmarked in * function rtllib_softmac_new_net. 
WB 2008.09.10 */ if (pNetwork->bssht.bd_support_ht) { - pHTInfo->bCurrentHTSupport = true; - pHTInfo->ePeerHTSpecVer = pNetwork->bssht.bd_ht_spec_ver; + ht_info->bCurrentHTSupport = true; + ht_info->ePeerHTSpecVer = pNetwork->bssht.bd_ht_spec_ver; if (pNetwork->bssht.bd_ht_cap_len > 0 && - pNetwork->bssht.bd_ht_cap_len <= sizeof(pHTInfo->PeerHTCapBuf)) - memcpy(pHTInfo->PeerHTCapBuf, + pNetwork->bssht.bd_ht_cap_len <= sizeof(ht_info->PeerHTCapBuf)) + memcpy(ht_info->PeerHTCapBuf, pNetwork->bssht.bd_ht_cap_buf, pNetwork->bssht.bd_ht_cap_len); if (pNetwork->bssht.bd_ht_info_len > 0 && pNetwork->bssht.bd_ht_info_len <= - sizeof(pHTInfo->PeerHTInfoBuf)) - memcpy(pHTInfo->PeerHTInfoBuf, + sizeof(ht_info->PeerHTInfoBuf)) + memcpy(ht_info->PeerHTInfoBuf, pNetwork->bssht.bd_ht_info_buf, pNetwork->bssht.bd_ht_info_len); - if (pHTInfo->reg_rt2rt_aggregation) { - pHTInfo->current_rt2rt_aggregation = + if (ht_info->reg_rt2rt_aggregation) { + ht_info->current_rt2rt_aggregation = pNetwork->bssht.bd_rt2rt_aggregation; - pHTInfo->current_rt2rt_long_slot_time = + ht_info->current_rt2rt_long_slot_time = pNetwork->bssht.bd_rt2rt_long_slot_time; - pHTInfo->RT2RT_HT_Mode = pNetwork->bssht.rt2rt_ht_mode; + ht_info->RT2RT_HT_Mode = pNetwork->bssht.rt2rt_ht_mode; } else { - pHTInfo->current_rt2rt_aggregation = false; - pHTInfo->current_rt2rt_long_slot_time = false; - pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0; + ht_info->current_rt2rt_aggregation = false; + ht_info->current_rt2rt_long_slot_time = false; + ht_info->RT2RT_HT_Mode = (enum rt_ht_capability)0; } HTIOTPeerDetermine(ieee); - pHTInfo->iot_action = 0; + ht_info->iot_action = 0; bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid); if (bIOTAction) - pHTInfo->iot_action |= HT_IOT_ACT_DISABLE_MCS14; + ht_info->iot_action |= HT_IOT_ACT_DISABLE_MCS14; bIOTAction = HTIOTActIsDisableMCS15(ieee); if (bIOTAction) - pHTInfo->iot_action |= HT_IOT_ACT_DISABLE_MCS15; + ht_info->iot_action |= HT_IOT_ACT_DISABLE_MCS15; bIOTAction = HTIOTActIsDisableMCSTwoSpatialStream(ieee); if (bIOTAction) - pHTInfo->iot_action |= HT_IOT_ACT_DISABLE_ALL_2SS; + ht_info->iot_action |= HT_IOT_ACT_DISABLE_ALL_2SS; bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid); if (bIOTAction) - pHTInfo->iot_action |= HT_IOT_ACT_DISABLE_EDCA_TURBO; + ht_info->iot_action |= HT_IOT_ACT_DISABLE_EDCA_TURBO; bIOTAction = HTIOTActIsMgntUseCCK6M(ieee, pNetwork); if (bIOTAction) - pHTInfo->iot_action |= HT_IOT_ACT_MGNT_USE_CCK_6M; + ht_info->iot_action |= HT_IOT_ACT_MGNT_USE_CCK_6M; bIOTAction = HTIOTActIsCCDFsync(ieee); if (bIOTAction) - pHTInfo->iot_action |= HT_IOT_ACT_CDD_FSYNC; + ht_info->iot_action |= HT_IOT_ACT_CDD_FSYNC; } else { - pHTInfo->bCurrentHTSupport = false; - pHTInfo->current_rt2rt_aggregation = false; - pHTInfo->current_rt2rt_long_slot_time = false; - pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0; + ht_info->bCurrentHTSupport = false; + ht_info->current_rt2rt_aggregation = false; + ht_info->current_rt2rt_long_slot_time = false; + ht_info->RT2RT_HT_Mode = (enum rt_ht_capability)0; - pHTInfo->iot_action = 0; - pHTInfo->iot_ra_func = 0; + ht_info->iot_action = 0; + ht_info->iot_ra_func = 0; } } void HT_update_self_and_peer_setting(struct rtllib_device *ieee, struct rtllib_network *pNetwork) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; struct ht_info_ele *pPeerHTInfo = (struct ht_info_ele *)pNetwork->bssht.bd_ht_info_buf; - if (pHTInfo->bCurrentHTSupport) { + if (ht_info->bCurrentHTSupport) { if 
(pNetwork->bssht.bd_ht_info_len != 0) - pHTInfo->current_op_mode = pPeerHTInfo->OptMode; + ht_info->current_op_mode = pPeerHTInfo->OptMode; } } EXPORT_SYMBOL(HT_update_self_and_peer_setting); void HTUseDefaultSetting(struct rtllib_device *ieee) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; - if (pHTInfo->bEnableHT) { - pHTInfo->bCurrentHTSupport = true; - pHTInfo->bCurSuppCCK = pHTInfo->bRegSuppCCK; + if (ht_info->enable_ht) { + ht_info->bCurrentHTSupport = true; + ht_info->bCurSuppCCK = ht_info->bRegSuppCCK; - pHTInfo->bCurBW40MHz = pHTInfo->bRegBW40MHz; - pHTInfo->bCurShortGI20MHz = pHTInfo->bRegShortGI20MHz; + ht_info->bCurBW40MHz = ht_info->bRegBW40MHz; + ht_info->bCurShortGI20MHz = ht_info->bRegShortGI20MHz; - pHTInfo->bCurShortGI40MHz = pHTInfo->bRegShortGI40MHz; + ht_info->bCurShortGI40MHz = ht_info->bRegShortGI40MHz; if (ieee->iw_mode == IW_MODE_ADHOC) ieee->current_network.qos_data.active = ieee->current_network.qos_data.supported; - pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support; - pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize; + ht_info->bCurrent_AMSDU_Support = ht_info->bAMSDU_Support; + ht_info->nCurrent_AMSDU_MaxSize = ht_info->nAMSDU_MaxSize; - pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable; - pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor; + ht_info->bCurrentAMPDUEnable = ht_info->bAMPDUEnable; + ht_info->CurrentAMPDUFactor = ht_info->AMPDU_Factor; - pHTInfo->current_mpdu_density = pHTInfo->current_mpdu_density; + ht_info->current_mpdu_density = ht_info->current_mpdu_density; - HTFilterMCSRate(ieee, ieee->Regdot11TxHTOperationalRateSet, - ieee->dot11HTOperationalRateSet); + HTFilterMCSRate(ieee, ieee->reg_dot11tx_ht_oper_rate_set, + ieee->dot11ht_oper_rate_set); ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, - ieee->dot11HTOperationalRateSet, - MCS_FILTER_ALL); + ieee->dot11ht_oper_rate_set, + MCS_FILTER_ALL); ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate; } else { - pHTInfo->bCurrentHTSupport = false; + ht_info->bCurrentHTSupport = false; } } u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame) { - if (ieee->pHTInfo->bCurrentHTSupport) { + if (ieee->ht_info->bCurrentHTSupport) { if ((IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1) { netdev_dbg(ieee->dev, "HT CONTROL FILED EXIST!!\n"); return true; @@ -824,13 +821,13 @@ u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame) static void HTSetConnectBwModeCallback(struct rtllib_device *ieee) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; - if (pHTInfo->bCurBW40MHz) { - if (pHTInfo->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_UPPER) + if (ht_info->bCurBW40MHz) { + if (ht_info->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_UPPER) ieee->set_chan(ieee->dev, ieee->current_network.channel + 2); - else if (pHTInfo->CurSTAExtChnlOffset == + else if (ht_info->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_LOWER) ieee->set_chan(ieee->dev, ieee->current_network.channel - 2); @@ -839,29 +836,29 @@ static void HTSetConnectBwModeCallback(struct rtllib_device *ieee) ieee->current_network.channel); ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20_40, - pHTInfo->CurSTAExtChnlOffset); + ht_info->CurSTAExtChnlOffset); } else { ieee->set_chan(ieee->dev, ieee->current_network.channel); ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT); } - pHTInfo->sw_bw_in_progress = false; + ht_info->sw_bw_in_progress = false; } void HTSetConnectBwMode(struct rtllib_device 
*ieee, enum ht_channel_width bandwidth, enum ht_extchnl_offset Offset) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; - if (!pHTInfo->bRegBW40MHz) + if (!ht_info->bRegBW40MHz) return; if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) bandwidth = HT_CHANNEL_WIDTH_20; - if (pHTInfo->sw_bw_in_progress) { + if (ht_info->sw_bw_in_progress) { pr_info("%s: sw_bw_in_progress!!\n", __func__); return; } @@ -871,21 +868,21 @@ void HTSetConnectBwMode(struct rtllib_device *ieee, Offset = HT_EXTCHNL_OFFSET_NO_EXT; if (Offset == HT_EXTCHNL_OFFSET_UPPER || Offset == HT_EXTCHNL_OFFSET_LOWER) { - pHTInfo->bCurBW40MHz = true; - pHTInfo->CurSTAExtChnlOffset = Offset; + ht_info->bCurBW40MHz = true; + ht_info->CurSTAExtChnlOffset = Offset; } else { - pHTInfo->bCurBW40MHz = false; - pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT; + ht_info->bCurBW40MHz = false; + ht_info->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT; } } else { - pHTInfo->bCurBW40MHz = false; - pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT; + ht_info->bCurBW40MHz = false; + ht_info->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT; } - netdev_dbg(ieee->dev, "%s():pHTInfo->bCurBW40MHz:%x\n", __func__, - pHTInfo->bCurBW40MHz); + netdev_dbg(ieee->dev, "%s():ht_info->bCurBW40MHz:%x\n", __func__, + ht_info->bCurBW40MHz); - pHTInfo->sw_bw_in_progress = true; + ht_info->sw_bw_in_progress = true; HTSetConnectBwModeCallback(ieee); } diff --git a/drivers/staging/rtl8192e/rtl819x_Qos.h b/drivers/staging/rtl8192e/rtl819x_Qos.h index 5073f9f40fdc2c3c82d43be2103d1fad2d041670..c010eb0d6036689912f64e5ab37493585bd15e5d 100644 --- a/drivers/staging/rtl8192e/rtl819x_Qos.h +++ b/drivers/staging/rtl8192e/rtl819x_Qos.h @@ -97,13 +97,6 @@ enum direction_value { DIR_BI_DIR = 3, }; -enum acm_method { - eAcmWay0_SwAndHw = 0, - eAcmWay1_HW = 1, - eAcmWay2_SW = 2, -}; - - struct acm { u64 UsedTime; u64 MediumTime; diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c index 05c7e822f37278eb66656ebdede07badd35a9011..68c131afc2ba4e326b11b591a66067f0a1d3ea2e 100644 --- a/drivers/staging/rtl8192e/rtl819x_TSProc.c +++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c @@ -83,7 +83,7 @@ static void RxPktPendingTimeout(struct timer_list *t) if (bPktInBuf && (pRxTs->rx_timeout_indicate_seq == 0xffff)) { pRxTs->rx_timeout_indicate_seq = pRxTs->rx_indicate_seq; mod_timer(&pRxTs->rx_pkt_pending_timer, jiffies + - msecs_to_jiffies(ieee->pHTInfo->rx_reorder_pending_time) + msecs_to_jiffies(ieee->ht_info->rx_reorder_pending_time) ); } spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h index 3c72ed2a30a410d1edd5010749634d896ce50bf1..1152fbf4338377853d5b976f28f8afb90dca2848 100644 --- a/drivers/staging/rtl8192e/rtllib.h +++ b/drivers/staging/rtl8192e/rtllib.h @@ -111,11 +111,11 @@ static inline void *netdev_priv_rsl(struct net_device *dev) #define SUPPORT_CKIP_MIC 0x08 #define SUPPORT_CKIP_PK 0x10 #define RT_RF_OFF_LEVL_HALT_NIC BIT3 -#define RT_IN_PS_LEVEL(pPSC, _PS_FLAG) \ - ((pPSC->CurPsLevel & _PS_FLAG) ? true : false) -#define RT_CLEAR_PS_LEVEL(pPSC, _PS_FLAG) \ - (pPSC->CurPsLevel &= (~(_PS_FLAG))) -#define RT_SET_PS_LEVEL(pPSC, _PS_FLAG) (pPSC->CurPsLevel |= _PS_FLAG) +#define RT_IN_PS_LEVEL(psc, _PS_FLAG) \ + ((psc->CurPsLevel & _PS_FLAG) ? 
true : false) +#define RT_CLEAR_PS_LEVEL(psc, _PS_FLAG) \ + (psc->CurPsLevel &= (~(_PS_FLAG))) +#define RT_SET_PS_LEVEL(psc, _PS_FLAG) (psc->CurPsLevel |= _PS_FLAG) /* defined for skb cb field */ /* At most 28 byte */ @@ -126,8 +126,8 @@ struct cb_desc { u8 bFirstSeg:1; u8 bLastSeg:1; u8 bEncrypt:1; - u8 bTxDisableRateFallBack:1; - u8 bTxUseDriverAssingedRate:1; + u8 tx_dis_rate_fallback:1; + u8 tx_use_drv_assinged_rate:1; u8 bHwSec:1; u8 nStuckCount; @@ -1250,23 +1250,17 @@ enum rt_rf_power_state { }; struct rt_pwr_save_ctrl { - - bool bInactivePs; - bool bIPSModeBackup; bool bSwRfProcessing; enum rt_rf_power_state eInactivePowerState; enum ips_callback_function ReturnPoint; bool bLeisurePs; u8 LpsIdleCount; - u8 RegMaxLPSAwakeIntvl; + u8 reg_max_lps_awake_intvl; u8 LPSAwakeIntvl; u32 CurPsLevel; u32 RegRfPsLevel; - - bool bFwCtrlLPS; - }; #define RT_RF_CHANGE_SOURCE u32 @@ -1390,8 +1384,8 @@ struct rt_pmkid_list { }; struct rt_intel_promisc_mode { - bool bPromiscuousOn; - bool bFilterSourceStationFrame; + bool promiscuous_on; + bool fltr_src_sta_frame; }; @@ -1438,17 +1432,17 @@ struct rtllib_device { RT_RF_CHANGE_SOURCE rf_off_reason; bool is_set_key; bool wx_set_enc; - struct rt_hi_throughput *pHTInfo; + struct rt_hi_throughput *ht_info; spinlock_t reorder_spinlock; - u8 Regdot11HTOperationalRateSet[16]; - u8 Regdot11TxHTOperationalRateSet[16]; - u8 dot11HTOperationalRateSet[16]; - u8 RegHTSuppRateSet[16]; + u8 reg_dot11ht_oper_rate_set[16]; + u8 reg_dot11tx_ht_oper_rate_set[16]; + u8 dot11ht_oper_rate_set[16]; + u8 reg_ht_supp_rate_set[16]; u8 HTCurrentOperaRate; u8 HTHighestOperaRate; - u8 bTxDisableRateFallBack; - u8 bTxUseDriverAssingedRate; + u8 tx_dis_rate_fallback; + u8 tx_use_drv_assinged_rate; u8 bTxEnableFwCalcDur; atomic_t atm_swbw; @@ -1476,8 +1470,8 @@ struct rtllib_device { int scan_age; int iw_mode; /* operating mode (IW_MODE_*) */ - bool bNetPromiscuousMode; - struct rt_intel_promisc_mode IntelPromiscuousModeInfo; + bool net_promiscuous_md; + struct rt_intel_promisc_mode intel_promiscuous_md_info; spinlock_t lock; spinlock_t wpax_suitlist_lock; @@ -1630,7 +1624,6 @@ struct rtllib_device { int mgmt_queue_tail; u8 AsocRetryCount; struct sk_buff_head skb_waitQ[MAX_QUEUE_SIZE]; - struct sk_buff_head skb_aggQ[MAX_QUEUE_SIZE]; bool bdynamic_txpower_enable; @@ -1649,9 +1642,9 @@ struct rtllib_device { struct bandwidth_autoswitch bandwidth_auto_switch; bool FwRWRF; - struct rt_link_detect LinkDetectInfo; + struct rt_link_detect link_detect_info; bool bIsAggregateFrame; - struct rt_pwr_save_ctrl PowerSaveControl; + struct rt_pwr_save_ctrl pwr_save_ctrl; /* used if IEEE_SOFTMAC_TX_QUEUE is set */ struct tx_pending tx_pending; @@ -2095,7 +2088,7 @@ u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet, extern u8 MCS_FILTER_ALL[]; extern u16 MCS_DATA_RATE[2][2][77]; u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame); -void HTResetIOTSetting(struct rt_hi_throughput *pHTInfo); +void HTResetIOTSetting(struct rt_hi_throughput *ht_info); bool IsHTHalfNmodeAPs(struct rtllib_device *ieee); u16 TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate); int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb); diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c index 8bc95651e384f1130c0b28cc91607d76ab62a9eb..9fdfcc017ee69a08f3c4520b0699d8745d6619af 100644 --- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c +++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c @@ -62,7 +62,7 @@ static void *rtllib_tkip_init(int 
key_idx) return NULL; priv = kzalloc(sizeof(*priv), GFP_ATOMIC); - if (priv == NULL) + if (!priv) goto fail; priv->key_idx = key_idx; @@ -91,7 +91,6 @@ fail: return NULL; } - static void rtllib_tkip_deinit(void *priv) { struct rtllib_tkip_data *_priv = priv; @@ -103,49 +102,41 @@ static void rtllib_tkip_deinit(void *priv) kfree_sensitive(priv); } - static inline u16 RotR1(u16 val) { return (val >> 1) | (val << 15); } - static inline u8 Lo8(u16 val) { return val & 0xff; } - static inline u8 Hi8(u16 val) { return val >> 8; } - static inline u16 Lo16(u32 val) { return val & 0xffff; } - static inline u16 Hi16(u32 val) { return val >> 16; } - static inline u16 Mk16(u8 hi, u8 lo) { return lo | (hi << 8); } - static inline u16 Mk16_le(u16 *v) { return *v; } - static const u16 Sbox[256] = { 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A, @@ -181,17 +172,14 @@ static const u16 Sbox[256] = { 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, }; - static inline u16 _S_(u16 v) { u16 t = Sbox[Hi8(v)]; return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8)); } - #define PHASE1_LOOP_COUNT 8 - static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32) { int i, j; @@ -213,7 +201,6 @@ static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32) } } - static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK, u16 IV16) { @@ -263,7 +250,6 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK, #endif } - static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct rtllib_tkip_data *tkey = priv; @@ -285,14 +271,14 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) if (!tcb_desc->bHwSec) { if (!tkey->tx_phase1_done) { tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, - tkey->tx_iv32); + tkey->tx_iv32); tkey->tx_phase1_done = 1; } tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); - } else + } else { tkey->tx_phase1_done = 1; - + } len = skb->len - hdr_len; pos = skb_push(skb, 8); @@ -336,8 +322,6 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) if (!tcb_desc->bHwSec) return ret; return 0; - - } static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) @@ -389,8 +373,8 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) { if ((iv32 < tkey->rx_iv32 || - (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) && - tkey->initialized) { + (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) && + tkey->initialized) { if (net_ratelimit()) { netdev_dbg(skb->dev, "Replay detected: STA= %pM previous TSC %08x%04x received TSC %08x%04x\n", @@ -436,7 +420,6 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) tkey->dot11RSNAStatsTKIPICVErrors++; return -5; } - } /* Update real counters only after Michael MIC verification has @@ -453,7 +436,6 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) return keyidx; } - static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr, u8 *data, size_t data_len, u8 *mic) { @@ -506,12 +488,15 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr) break; } - hdr[12] = 0; /* priority */ + /* priority */ + hdr[12] = 0; - hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ + /* reserved */ + hdr[13] = 0; + hdr[14] = 0; + hdr[15] = 0; } - static int rtllib_michael_mic_add(struct 
sk_buff *skb, int hdr_len, void *priv) { struct rtllib_tkip_data *tkey = priv; @@ -533,13 +518,12 @@ static int rtllib_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv) tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07; pos = skb_put(skb, 8); if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr, - skb->data + hdr_len, skb->len - 8 - hdr_len, pos)) + skb->data + hdr_len, skb->len - 8 - hdr_len, pos)) return -1; return 0; } - static void rtllib_michael_mic_failure(struct net_device *dev, struct rtllib_hdr_4addr *hdr, int keyidx) @@ -609,7 +593,6 @@ static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx, return 0; } - static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv) { struct rtllib_tkip_data *tkey = priv; @@ -632,15 +615,15 @@ static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv) (seq[3] << 8) | seq[2]; tkey->rx_iv16 = (seq[1] << 8) | seq[0]; } - } else if (len == 0) + } else if (len == 0) { tkey->key_set = 0; - else + } else { return -1; + } return 0; } - static int rtllib_tkip_get_key(void *key, int len, u8 *seq, void *priv) { struct rtllib_tkip_data *tkey = priv; @@ -671,7 +654,6 @@ static int rtllib_tkip_get_key(void *key, int len, u8 *seq, void *priv) return TKIP_KEY_LEN; } - static void rtllib_tkip_print_stats(struct seq_file *m, void *priv) { struct rtllib_tkip_data *tkip = priv; @@ -713,13 +695,11 @@ static struct lib80211_crypto_ops rtllib_crypt_tkip = { .owner = THIS_MODULE, }; - static int __init rtllib_crypto_tkip_init(void) { return lib80211_register_crypto_ops(&rtllib_crypt_tkip); } - static void __exit rtllib_crypto_tkip_exit(void) { lib80211_unregister_crypto_ops(&rtllib_crypt_tkip); diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c index 7790271a6a40debada6abeb5a464609ee4889848..062285e4d939822a5e7b9a60ccf95ff188fc8d04 100644 --- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c +++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c @@ -27,7 +27,6 @@ struct prism2_wep_data { struct arc4_ctx tx_ctx_arc4; }; - static void *prism2_wep_init(int keyidx) { struct prism2_wep_data *priv; @@ -46,7 +45,6 @@ static void *prism2_wep_init(int keyidx) return priv; } - static void prism2_wep_deinit(void *priv) { kfree_sensitive(priv); @@ -120,7 +118,6 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) return 0; } - /* Perform WEP decryption on given struct buffer. Buffer includes whole WEP * part of the frame: IV (4 bytes), encrypted payload (including SNAP header), * ICV (4 bytes). len includes both IV and ICV. 
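The comment above pins down the buffer convention for prism2_wep_decrypt(): a 4-byte IV header in front, the encrypted payload (SNAP header plus data) in the middle, and a 4-byte ICV at the tail, with len covering both IV and ICV. A minimal sketch of that arithmetic, assuming the standard WEP framing (three RC4 IV bytes plus a key-index byte up front, a CRC-32 ICV at the end); the macro and function names below are illustrative, not the driver's:

#include <linux/types.h>

#define WEP_IV_LEN	4	/* 3 RC4 IV bytes + key-index byte */
#define WEP_ICV_LEN	4	/* CRC-32 of the plaintext, sent encrypted */

/*
 * Length of the encrypted payload (SNAP header + data) when "len"
 * counts both the IV header and the trailing ICV, per the comment
 * above.
 */
static inline int wep_payload_len(int len)
{
	return len - WEP_IV_LEN - WEP_ICV_LEN;
}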
@@ -180,7 +177,6 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) return 0; } - static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv) { struct prism2_wep_data *wep = priv; @@ -194,7 +190,6 @@ static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv) return 0; } - static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv) { struct prism2_wep_data *wep = priv; @@ -207,7 +202,6 @@ static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv) return wep->key_len; } - static void prism2_wep_print_stats(struct seq_file *m, void *priv) { struct prism2_wep_data *wep = priv; @@ -231,13 +225,11 @@ static struct lib80211_crypto_ops rtllib_crypt_wep = { .owner = THIS_MODULE, }; - static int __init rtllib_crypto_wep_init(void) { return lib80211_register_crypto_ops(&rtllib_crypt_wep); } - static void __exit rtllib_crypto_wep_exit(void) { lib80211_unregister_crypto_ops(&rtllib_crypt_wep); diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c index 41697ef55dbd2e09b58172ac20317863c754c11e..d6a4d6b4ec578a7df5474a76e7be979c81d6c46d 100644 --- a/drivers/staging/rtl8192e/rtllib_module.c +++ b/drivers/staging/rtl8192e/rtllib_module.c @@ -107,7 +107,7 @@ struct net_device *alloc_rtllib(int sizeof_priv) spin_lock_init(&ieee->lock); spin_lock_init(&ieee->wpax_suitlist_lock); spin_lock_init(&ieee->reorder_spinlock); - atomic_set(&(ieee->atm_swbw), 0); + atomic_set(&ieee->atm_swbw, 0); /* SAM FIXME */ lib80211_crypt_info_init(&ieee->crypt_info, "RTLLIB", &ieee->lock); @@ -125,8 +125,8 @@ struct net_device *alloc_rtllib(int sizeof_priv) if (err) goto free_crypt_info; - ieee->pHTInfo = kzalloc(sizeof(struct rt_hi_throughput), GFP_KERNEL); - if (!ieee->pHTInfo) + ieee->ht_info = kzalloc(sizeof(struct rt_hi_throughput), GFP_KERNEL); + if (!ieee->ht_info) goto free_softmac; HTUpdateDefaultSetting(ieee); @@ -160,7 +160,7 @@ void free_rtllib(struct net_device *dev) struct rtllib_device *ieee = (struct rtllib_device *) netdev_priv_rsl(dev); - kfree(ieee->pHTInfo); + kfree(ieee->ht_info); rtllib_softmac_free(ieee); lib80211_crypt_info_free(&ieee->crypt_info); diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index 46d75e925ee9b780e07fddf53f29c1c5b6e76a2e..669e74a67190d34d0fef3dd2efbf6cab8f3dd620 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -567,9 +567,9 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee, struct rtllib_rxb *prxb, struct rx_ts_record *pTS, u16 SeqNum) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; struct rx_reorder_entry *pReorderEntry = NULL; - u8 WinSize = pHTInfo->rx_reorder_win_size; + u8 WinSize = ht_info->rx_reorder_win_size; u16 WinEnd = 0; u8 index = 0; bool bMatchWinStart = false, bPktInBuf = false; @@ -591,7 +591,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee, netdev_dbg(ieee->dev, "Packet Drop! 
IndicateSeq: %d, NewSeq: %d\n", pTS->rx_indicate_seq, SeqNum); - pHTInfo->rx_reorder_drop_counter++; + ht_info->rx_reorder_drop_counter++; { int i; @@ -755,7 +755,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee, netdev_dbg(ieee->dev, "%s(): SET rx timeout timer\n", __func__); pTS->rx_timeout_indicate_seq = pTS->rx_indicate_seq; mod_timer(&pTS->rx_pkt_pending_timer, jiffies + - msecs_to_jiffies(pHTInfo->rx_reorder_pending_time)); + msecs_to_jiffies(ht_info->rx_reorder_pending_time)); } spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); } @@ -924,7 +924,7 @@ static int rtllib_rx_check_duplicate(struct rtllib_device *ieee, sc = le16_to_cpu(hdr->seq_ctl); frag = WLAN_GET_SEQ_FRAG(sc); - if (!ieee->pHTInfo->cur_rx_reorder_enable || + if (!ieee->ht_info->cur_rx_reorder_enable || !ieee->current_network.qos_data.active || !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)) { @@ -999,8 +999,8 @@ static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc, } /* Filter packets sent by an STA that will be forwarded by AP */ - if (ieee->IntelPromiscuousModeInfo.bPromiscuousOn && - ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame) { + if (ieee->intel_promiscuous_md_info.promiscuous_on && + ieee->intel_promiscuous_md_info.fltr_src_sta_frame) { if ((fc & RTLLIB_FCTL_TODS) && !(fc & RTLLIB_FCTL_FROMDS) && !ether_addr_equal(dst, ieee->current_network.bssid) && ether_addr_equal(bssid, ieee->current_network.bssid)) { @@ -1011,7 +1011,7 @@ static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc, /* Nullfunc frames may have PS-bit set, so they must be passed to * hostap_handle_sta_rx() before being dropped here. */ - if (!ieee->IntelPromiscuousModeInfo.bPromiscuousOn) { + if (!ieee->intel_promiscuous_md_info.promiscuous_on) { if (stype != RTLLIB_STYPE_DATA && stype != RTLLIB_STYPE_DATA_CFACK && stype != RTLLIB_STYPE_DATA_CFPOLL && @@ -1211,9 +1211,9 @@ static void rtllib_rx_check_leave_lps(struct rtllib_device *ieee, u8 unicast, if (unicast) { if (ieee->state == RTLLIB_LINKED) { - if (((ieee->LinkDetectInfo.NumRxUnicastOkInPeriod + - ieee->LinkDetectInfo.NumTxOkInPeriod) > 8) || - (ieee->LinkDetectInfo.NumRxUnicastOkInPeriod > 2)) { + if (((ieee->link_detect_info.NumRxUnicastOkInPeriod + + ieee->link_detect_info.NumTxOkInPeriod) > 8) || + (ieee->link_detect_info.NumRxUnicastOkInPeriod > 2)) { if (ieee->LeisurePSLeave) ieee->LeisurePSLeave(ieee->dev); } @@ -1317,7 +1317,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb, multicast = is_multicast_ether_addr(hdr->addr1); unicast = !multicast; if (unicast && !ether_addr_equal(dev->dev_addr, hdr->addr1)) { - if (ieee->bNetPromiscuousMode) + if (ieee->net_promiscuous_md) bToOtherSTA = true; else goto rx_dropped; @@ -1355,8 +1355,8 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb, /* Update statstics for AP roaming */ if (!bToOtherSTA) { - ieee->LinkDetectInfo.NumRecvDataInPeriod++; - ieee->LinkDetectInfo.NumRxOkInPeriod++; + ieee->link_detect_info.NumRecvDataInPeriod++; + ieee->link_detect_info.NumRxOkInPeriod++; } /* Data frame - extract src/dst addresses */ @@ -1437,12 +1437,12 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb, else nr_subframes = 1; if (unicast) - ieee->LinkDetectInfo.NumRxUnicastOkInPeriod += nr_subframes; + ieee->link_detect_info.NumRxUnicastOkInPeriod += nr_subframes; rtllib_rx_check_leave_lps(ieee, unicast, nr_subframes); } /* Indicate packets to upper layer or Rx Reorder */ - if 
(!ieee->pHTInfo->cur_rx_reorder_enable || pTS == NULL || bToOtherSTA) + if (!ieee->ht_info->cur_rx_reorder_enable || pTS == NULL || bToOtherSTA) rtllib_rx_indicate_pkt_legacy(ieee, rx_stats, rxb, dst, src); else RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum); @@ -1489,9 +1489,9 @@ static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb, hdrlen += 4; } - rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen); ieee->stats.rx_packets++; ieee->stats.rx_bytes += skb->len; + rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen); return 1; } @@ -1776,7 +1776,7 @@ static inline void rtllib_extract_country_ie( if (rtllib_act_scanning(ieee, false) && ieee->FirstIe_InScan) netdev_info(ieee->dev, - "Received beacon ContryIE, SSID: <%s>\n", + "Received beacon CountryIE, SSID: <%s>\n", network->ssid); dot11d_update_country(ieee, addr2, info_element->len, @@ -2620,7 +2620,7 @@ static inline void rtllib_process_probe_response( } if (is_beacon(frame_ctl)) { if (ieee->state >= RTLLIB_LINKED) - ieee->LinkDetectInfo.NumRecvBcnInPeriod++; + ieee->link_detect_info.NumRecvBcnInPeriod++; } } list_for_each_entry(target, &ieee->network_list, list) { diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c index 1a3ca3e57623a860849e6b5f62514323c88d870a..2552aa089700f82a875ec34937dfe521e7541cb8 100644 --- a/drivers/staging/rtl8192e/rtllib_softmac.c +++ b/drivers/staging/rtl8192e/rtllib_softmac.c @@ -148,8 +148,7 @@ static void init_mgmt_queue(struct rtllib_device *ieee) } -u8 -MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee) +u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee) { u16 i; u8 QueryRate = 0; @@ -177,10 +176,10 @@ MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee) static u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; u8 rate; - if (pHTInfo->iot_action & HT_IOT_ACT_MGNT_USE_CCK_6M) + if (ht_info->iot_action & HT_IOT_ACT_MGNT_USE_CCK_6M) rate = 0x0c; else rate = ieee->basic_rate & 0x7f; @@ -188,7 +187,7 @@ static u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee) if (rate == 0) { if (ieee->mode == IEEE_A || ieee->mode == IEEE_N_5G || - (ieee->mode == IEEE_N_24G && !pHTInfo->bCurSuppCCK)) + (ieee->mode == IEEE_N_24G && !ht_info->bCurSuppCCK)) rate = 0x0c; else rate = 0x02; @@ -221,8 +220,8 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee) tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee); tcb_desc->RATRIndex = 7; - tcb_desc->bTxDisableRateFallBack = 1; - tcb_desc->bTxUseDriverAssingedRate = 1; + tcb_desc->tx_dis_rate_fallback = 1; + tcb_desc->tx_use_drv_assinged_rate = 1; if (single) { if (ieee->queue_stop) { enqueue_mgmt(ieee, skb); @@ -299,8 +298,8 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb, tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee); tcb_desc->RATRIndex = 7; - tcb_desc->bTxDisableRateFallBack = 1; - tcb_desc->bTxUseDriverAssingedRate = 1; + tcb_desc->tx_dis_rate_fallback = 1; + tcb_desc->tx_use_drv_assinged_rate = 1; if (single) { if (type != RTLLIB_FTYPE_CTL) { header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4); @@ -446,7 +445,7 @@ void rtllib_EnableIntelPromiscuousMode(struct net_device *dev, ieee->SetHwRegHandler(dev, HW_VAR_CECHK_BSSID, (u8 *)&bFilterOutNonAssociatedBSSID); - ieee->bNetPromiscuousMode = true; + ieee->net_promiscuous_md = true; } EXPORT_SYMBOL(rtllib_EnableIntelPromiscuousMode); @@ -467,7 +466,7 @@ void 
rtllib_DisableIntelPromiscuousMode(struct net_device *dev, ieee->SetHwRegHandler(dev, HW_VAR_CECHK_BSSID, (u8 *)&bFilterOutNonAssociatedBSSID); - ieee->bNetPromiscuousMode = false; + ieee->net_promiscuous_md = false; } EXPORT_SYMBOL(rtllib_DisableIntelPromiscuousMode); @@ -830,7 +829,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 tmp_ht_cap_len = 0; u8 *tmp_ht_info_buf = NULL; u8 tmp_ht_info_len = 0; - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; u8 *tmp_generic_ie_buf = NULL; u8 tmp_generic_ie_len = 0; @@ -844,7 +843,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, if ((ieee->current_network.mode == IEEE_G) || (ieee->current_network.mode == IEEE_N_24G && - ieee->pHTInfo->bCurSuppCCK)) { + ieee->ht_info->bCurSuppCCK)) { erp_len = 3; erpinfo_content = 0; if (ieee->current_network.buseprotection) @@ -855,20 +854,20 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx]; encrypt = ieee->host_encrypt && crypt && crypt->ops && ((strcmp(crypt->ops->name, "R-WEP") == 0 || wpa_ie_len)); - if (ieee->pHTInfo->bCurrentHTSupport) { - tmp_ht_cap_buf = (u8 *)&(ieee->pHTInfo->SelfHTCap); - tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap); - tmp_ht_info_buf = (u8 *)&(ieee->pHTInfo->SelfHTInfo); - tmp_ht_info_len = sizeof(ieee->pHTInfo->SelfHTInfo); + if (ieee->ht_info->bCurrentHTSupport) { + tmp_ht_cap_buf = (u8 *)&(ieee->ht_info->SelfHTCap); + tmp_ht_cap_len = sizeof(ieee->ht_info->SelfHTCap); + tmp_ht_info_buf = (u8 *)&(ieee->ht_info->SelfHTInfo); + tmp_ht_info_len = sizeof(ieee->ht_info->SelfHTInfo); HTConstructCapabilityElement(ieee, tmp_ht_cap_buf, &tmp_ht_cap_len, encrypt, false); HTConstructInfoElement(ieee, tmp_ht_info_buf, &tmp_ht_info_len, encrypt); - if (pHTInfo->reg_rt2rt_aggregation) { - tmp_generic_ie_buf = ieee->pHTInfo->sz_rt2rt_agg_buf; + if (ht_info->reg_rt2rt_aggregation) { + tmp_generic_ie_buf = ieee->ht_info->sz_rt2rt_agg_buf; tmp_generic_ie_len = - sizeof(ieee->pHTInfo->sz_rt2rt_agg_buf); + sizeof(ieee->ht_info->sz_rt2rt_agg_buf); HTConstructRT2RTAggElement(ieee, tmp_generic_ie_buf, &tmp_generic_ie_len); } @@ -1180,19 +1179,19 @@ rtllib_association_req(struct rtllib_network *beacon, if ((ieee->rtllib_ap_sec_type && (ieee->rtllib_ap_sec_type(ieee) & SEC_ALG_TKIP)) || ieee->bForcedBgMode) { - ieee->pHTInfo->bEnableHT = 0; + ieee->ht_info->enable_ht = 0; ieee->mode = WIRELESS_MODE_G; } - if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) { - ht_cap_buf = (u8 *)&(ieee->pHTInfo->SelfHTCap); - ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap); + if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht) { + ht_cap_buf = (u8 *)&(ieee->ht_info->SelfHTCap); + ht_cap_len = sizeof(ieee->ht_info->SelfHTCap); HTConstructCapabilityElement(ieee, ht_cap_buf, &ht_cap_len, encrypt, true); - if (ieee->pHTInfo->current_rt2rt_aggregation) { - realtek_ie_buf = ieee->pHTInfo->sz_rt2rt_agg_buf; + if (ieee->ht_info->current_rt2rt_aggregation) { + realtek_ie_buf = ieee->ht_info->sz_rt2rt_agg_buf; realtek_ie_len = - sizeof(ieee->pHTInfo->sz_rt2rt_agg_buf); + sizeof(ieee->ht_info->sz_rt2rt_agg_buf); HTConstructRT2RTAggElement(ieee, realtek_ie_buf, &realtek_ie_len); } @@ -1325,8 +1324,8 @@ rtllib_association_req(struct rtllib_network *beacon, memcpy(tag, osCcxVerNum.Octet, osCcxVerNum.Length); tag += osCcxVerNum.Length; } - if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) { - if 
(ieee->pHTInfo->ePeerHTSpecVer != HT_SPEC_VER_EWC) { + if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht) { + if (ieee->ht_info->ePeerHTSpecVer != HT_SPEC_VER_EWC) { tag = skb_put(skb, ht_cap_len); *tag++ = MFIE_TYPE_HT_CAP; *tag++ = ht_cap_len - 2; @@ -1359,8 +1358,8 @@ rtllib_association_req(struct rtllib_network *beacon, rtllib_TURBO_Info(ieee, &tag); } - if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) { - if (ieee->pHTInfo->ePeerHTSpecVer == HT_SPEC_VER_EWC) { + if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht) { + if (ieee->ht_info->ePeerHTSpecVer == HT_SPEC_VER_EWC) { tag = skb_put(skb, ht_cap_len); *tag++ = MFIE_TYPE_GENERIC; *tag++ = ht_cap_len - 2; @@ -1368,7 +1367,7 @@ rtllib_association_req(struct rtllib_network *beacon, tag += ht_cap_len - 2; } - if (ieee->pHTInfo->current_rt2rt_aggregation) { + if (ieee->ht_info->current_rt2rt_aggregation) { tag = skb_put(skb, realtek_ie_len); *tag++ = MFIE_TYPE_GENERIC; *tag++ = realtek_ie_len - 2; @@ -1505,7 +1504,7 @@ static void rtllib_associate_complete_wq(void *data) container_of_work_rsl(data, struct rtllib_device, associate_complete_wq); - struct rt_pwr_save_ctrl *pPSC = &(ieee->PowerSaveControl); + struct rt_pwr_save_ctrl *psc = &ieee->pwr_save_ctrl; netdev_info(ieee->dev, "Associated successfully with %pM\n", ieee->current_network.bssid); @@ -1525,25 +1524,25 @@ static void rtllib_associate_complete_wq(void *data) ieee->SetWirelessMode(ieee->dev, IEEE_B); netdev_info(ieee->dev, "Using B rates:%d\n", ieee->rate); } - if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) { + if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht) { netdev_info(ieee->dev, "Successfully associated, ht enabled\n"); HTOnAssocRsp(ieee); } else { netdev_info(ieee->dev, "Successfully associated, ht not enabled(%d, %d)\n", - ieee->pHTInfo->bCurrentHTSupport, - ieee->pHTInfo->bEnableHT); - memset(ieee->dot11HTOperationalRateSet, 0, 16); + ieee->ht_info->bCurrentHTSupport, + ieee->ht_info->enable_ht); + memset(ieee->dot11ht_oper_rate_set, 0, 16); } - ieee->LinkDetectInfo.SlotNum = 2 * (1 + + ieee->link_detect_info.SlotNum = 2 * (1 + ieee->current_network.beacon_interval / 500); - if (ieee->LinkDetectInfo.NumRecvBcnInPeriod == 0 || - ieee->LinkDetectInfo.NumRecvDataInPeriod == 0) { - ieee->LinkDetectInfo.NumRecvBcnInPeriod = 1; - ieee->LinkDetectInfo.NumRecvDataInPeriod = 1; + if (ieee->link_detect_info.NumRecvBcnInPeriod == 0 || + ieee->link_detect_info.NumRecvDataInPeriod == 0) { + ieee->link_detect_info.NumRecvBcnInPeriod = 1; + ieee->link_detect_info.NumRecvDataInPeriod = 1; } - pPSC->LpsIdleCount = 0; + psc->LpsIdleCount = 0; ieee->link_change(ieee->dev); if (ieee->is_silent_reset) { @@ -1685,7 +1684,7 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee, ieee->current_network.ssid, ieee->current_network.channel, ieee->current_network.qos_data.supported, - ieee->pHTInfo->bEnableHT, + ieee->ht_info->enable_ht, ieee->current_network.bssht.bd_support_ht, ieee->current_network.mode, ieee->current_network.flags); @@ -1694,7 +1693,7 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee, !(ieee->softmac_features & IEEE_SOFTMAC_SCAN)) rtllib_stop_scan_syncro(ieee); - HTResetIOTSetting(ieee->pHTInfo); + HTResetIOTSetting(ieee->ht_info); ieee->wmm_acm = 0; if (ieee->iw_mode == IW_MODE_INFRA) { /* Join the network for the first time */ @@ -1704,7 +1703,7 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee, HTResetSelfAndSavePeerSetting(ieee, 
&(ieee->current_network)); else - ieee->pHTInfo->bCurrentHTSupport = + ieee->ht_info->bCurrentHTSupport = false; ieee->state = RTLLIB_ASSOCIATING; @@ -1729,7 +1728,7 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee, netdev_info(ieee->dev, "Using B rates\n"); } - memset(ieee->dot11HTOperationalRateSet, 0, 16); + memset(ieee->dot11ht_oper_rate_set, 0, 16); ieee->state = RTLLIB_LINKED; } } @@ -1894,7 +1893,7 @@ static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb, ((ieee->mode == IEEE_G) && (ieee->current_network.mode == IEEE_N_24G) && (ieee->AsocRetryCount++ < (RT_ASOC_RETRY_LIMIT-1)))) { - ieee->pHTInfo->iot_action |= HT_IOT_ACT_PURE_N_MODE; + ieee->ht_info->iot_action |= HT_IOT_ACT_PURE_N_MODE; } else { ieee->AsocRetryCount = 0; } @@ -1961,7 +1960,7 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time) { int timeout; u8 dtim; - struct rt_pwr_save_ctrl *pPSC = &(ieee->PowerSaveControl); + struct rt_pwr_save_ctrl *psc = &ieee->pwr_save_ctrl; if (ieee->LPSDelayCnt) { ieee->LPSDelayCnt--; @@ -1991,21 +1990,21 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time) if (time) { if (ieee->bAwakePktSent) { - pPSC->LPSAwakeIntvl = 1; + psc->LPSAwakeIntvl = 1; } else { u8 MaxPeriod = 1; - if (pPSC->LPSAwakeIntvl == 0) - pPSC->LPSAwakeIntvl = 1; - if (pPSC->RegMaxLPSAwakeIntvl == 0) + if (psc->LPSAwakeIntvl == 0) + psc->LPSAwakeIntvl = 1; + if (psc->reg_max_lps_awake_intvl == 0) MaxPeriod = 1; - else if (pPSC->RegMaxLPSAwakeIntvl == 0xFF) + else if (psc->reg_max_lps_awake_intvl == 0xFF) MaxPeriod = ieee->current_network.dtim_period; else - MaxPeriod = pPSC->RegMaxLPSAwakeIntvl; - pPSC->LPSAwakeIntvl = (pPSC->LPSAwakeIntvl >= + MaxPeriod = psc->reg_max_lps_awake_intvl; + psc->LPSAwakeIntvl = (psc->LPSAwakeIntvl >= MaxPeriod) ? 
MaxPeriod : - (pPSC->LPSAwakeIntvl + 1); + (psc->LPSAwakeIntvl + 1); } { u8 LPSAwakeIntvl_tmp = 0; @@ -2013,23 +2012,23 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time) u8 count = ieee->current_network.tim.tim_count; if (count == 0) { - if (pPSC->LPSAwakeIntvl > period) + if (psc->LPSAwakeIntvl > period) LPSAwakeIntvl_tmp = period + - (pPSC->LPSAwakeIntvl - + (psc->LPSAwakeIntvl - period) - - ((pPSC->LPSAwakeIntvl-period) % + ((psc->LPSAwakeIntvl-period) % period); else - LPSAwakeIntvl_tmp = pPSC->LPSAwakeIntvl; + LPSAwakeIntvl_tmp = psc->LPSAwakeIntvl; } else { - if (pPSC->LPSAwakeIntvl > + if (psc->LPSAwakeIntvl > ieee->current_network.tim.tim_count) LPSAwakeIntvl_tmp = count + - (pPSC->LPSAwakeIntvl - count) - - ((pPSC->LPSAwakeIntvl-count)%period); + (psc->LPSAwakeIntvl - count) - + ((psc->LPSAwakeIntvl-count)%period); else - LPSAwakeIntvl_tmp = pPSC->LPSAwakeIntvl; + LPSAwakeIntvl_tmp = psc->LPSAwakeIntvl; } *time = ieee->current_network.last_dtim_sta_time @@ -2101,7 +2100,7 @@ static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl) { if (ieee->sta_sleep == LPS_IS_WAKE) { if (nl) { - if (ieee->pHTInfo->iot_action & + if (ieee->ht_info->iot_action & HT_IOT_ACT_NULL_DATA_POWER_SAVING) { ieee->ack_tx_to_ieee = 1; rtllib_sta_ps_send_null_frame(ieee, 0); @@ -2117,7 +2116,7 @@ static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl) if (ieee->sta_sleep == LPS_IS_SLEEP) ieee->sta_wake_up(ieee->dev); if (nl) { - if (ieee->pHTInfo->iot_action & + if (ieee->ht_info->iot_action & HT_IOT_ACT_NULL_DATA_POWER_SAVING) { ieee->ack_tx_to_ieee = 1; rtllib_sta_ps_send_null_frame(ieee, 0); @@ -2152,7 +2151,7 @@ void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success) if ((ieee->sta_sleep == LPS_IS_WAKE) && !success) { spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2); - if (ieee->pHTInfo->iot_action & + if (ieee->ht_info->iot_action & HT_IOT_ACT_NULL_DATA_POWER_SAVING) rtllib_sta_ps_send_null_frame(ieee, 0); else @@ -2236,10 +2235,10 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb, kfree(network); return 1; } - memcpy(ieee->pHTInfo->PeerHTCapBuf, + memcpy(ieee->ht_info->PeerHTCapBuf, network->bssht.bd_ht_cap_buf, network->bssht.bd_ht_cap_len); - memcpy(ieee->pHTInfo->PeerHTInfoBuf, + memcpy(ieee->ht_info->PeerHTInfoBuf, network->bssht.bd_ht_info_buf, network->bssht.bd_ht_info_len); if (ieee->handle_assoc_response != NULL) @@ -2296,7 +2295,7 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb) if (ieee->open_wep || !challenge) { ieee->state = RTLLIB_ASSOCIATING_AUTHENTICATED; ieee->softmac_stats.rx_auth_rs_ok++; - if (!(ieee->pHTInfo->iot_action & HT_IOT_ACT_PURE_N_MODE)) { + if (!(ieee->ht_info->iot_action & HT_IOT_ACT_PURE_N_MODE)) { if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) { if (IsHTHalfNmodeAPs(ieee)) { bSupportNmode = true; @@ -2370,7 +2369,7 @@ rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb) ieee->state = RTLLIB_ASSOCIATING; ieee->softmac_stats.reassoc++; ieee->is_roaming = true; - ieee->LinkDetectInfo.bBusyTraffic = false; + ieee->link_detect_info.bBusyTraffic = false; rtllib_disassociate(ieee); RemovePeerTS(ieee, header->addr2); if (ieee->LedControlHandler != NULL) @@ -2670,7 +2669,7 @@ static void rtllib_start_ibss_wq(void *data) if ((ieee->mode == IEEE_N_24G) || (ieee->mode == IEEE_N_5G)) HTUseDefaultSetting(ieee); else - ieee->pHTInfo->bCurrentHTSupport = false; + ieee->ht_info->bCurrentHTSupport = false; ieee->SetHwRegHandler(ieee->dev, HW_VAR_MEDIA_STATUS, (u8 
*)(&ieee->state)); @@ -2964,13 +2963,13 @@ int rtllib_softmac_init(struct rtllib_device *ieee) if (!ieee->dot11d_info) return -ENOMEM; - ieee->LinkDetectInfo.SlotIndex = 0; - ieee->LinkDetectInfo.SlotNum = 2; - ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0; - ieee->LinkDetectInfo.NumRecvDataInPeriod = 0; - ieee->LinkDetectInfo.NumTxOkInPeriod = 0; - ieee->LinkDetectInfo.NumRxOkInPeriod = 0; - ieee->LinkDetectInfo.NumRxUnicastOkInPeriod = 0; + ieee->link_detect_info.SlotIndex = 0; + ieee->link_detect_info.SlotNum = 2; + ieee->link_detect_info.NumRecvBcnInPeriod = 0; + ieee->link_detect_info.NumRecvDataInPeriod = 0; + ieee->link_detect_info.NumTxOkInPeriod = 0; + ieee->link_detect_info.NumRxOkInPeriod = 0; + ieee->link_detect_info.NumRxUnicastOkInPeriod = 0; ieee->bIsAggregateFrame = false; ieee->assoc_id = 0; ieee->queue_stop = 0; @@ -2985,13 +2984,13 @@ int rtllib_softmac_init(struct rtllib_device *ieee) ieee->ps = RTLLIB_PS_DISABLED; ieee->sta_sleep = LPS_IS_WAKE; - ieee->Regdot11HTOperationalRateSet[0] = 0xff; - ieee->Regdot11HTOperationalRateSet[1] = 0xff; - ieee->Regdot11HTOperationalRateSet[4] = 0x01; + ieee->reg_dot11ht_oper_rate_set[0] = 0xff; + ieee->reg_dot11ht_oper_rate_set[1] = 0xff; + ieee->reg_dot11ht_oper_rate_set[4] = 0x01; - ieee->Regdot11TxHTOperationalRateSet[0] = 0xff; - ieee->Regdot11TxHTOperationalRateSet[1] = 0xff; - ieee->Regdot11TxHTOperationalRateSet[4] = 0x01; + ieee->reg_dot11tx_ht_oper_rate_set[0] = 0xff; + ieee->reg_dot11tx_ht_oper_rate_set[1] = 0xff; + ieee->reg_dot11tx_ht_oper_rate_set[4] = 0x01; ieee->FirstIe_InScan = false; ieee->actscanning = false; diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c index 1e5ad3b476ef74b20624ef50e03a409487ee8919..06f3d75dc10200ee0adcfa0450c16a32326c86f9 100644 --- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c +++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c @@ -359,11 +359,11 @@ void rtllib_wx_sync_scan_wq(void *data) if (ieee->ScanOperationBackupHandler) ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP); - if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT && - ieee->pHTInfo->bCurBW40MHz) { + if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht && + ieee->ht_info->bCurBW40MHz) { b40M = 1; - chan_offset = ieee->pHTInfo->CurSTAExtChnlOffset; - bandwidth = (enum ht_channel_width)ieee->pHTInfo->bCurBW40MHz; + chan_offset = ieee->ht_info->CurSTAExtChnlOffset; + bandwidth = (enum ht_channel_width)ieee->ht_info->bCurBW40MHz; ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT); } @@ -391,10 +391,10 @@ void rtllib_wx_sync_scan_wq(void *data) /* Notify AP that I wake up again */ rtllib_sta_ps_send_null_frame(ieee, 0); - if (ieee->LinkDetectInfo.NumRecvBcnInPeriod == 0 || - ieee->LinkDetectInfo.NumRecvDataInPeriod == 0) { - ieee->LinkDetectInfo.NumRecvBcnInPeriod = 1; - ieee->LinkDetectInfo.NumRecvDataInPeriod = 1; + if (ieee->link_detect_info.NumRecvBcnInPeriod == 0 || + ieee->link_detect_info.NumRecvDataInPeriod == 0) { + ieee->link_detect_info.NumRecvBcnInPeriod = 1; + ieee->link_detect_info.NumRecvDataInPeriod = 1; } if (ieee->data_hard_resume) @@ -564,9 +564,8 @@ int rtllib_wx_set_power(struct rtllib_device *ieee, ieee->ps = RTLLIB_PS_DISABLED; goto exit; } - if (wrqu->power.flags & IW_POWER_TIMEOUT) { + if (wrqu->power.flags & IW_POWER_TIMEOUT) ieee->ps_timeout = wrqu->power.value / 1000; - } if (wrqu->power.flags & IW_POWER_PERIOD) ieee->ps_period = wrqu->power.value / 1000; diff --git 
a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c index e307020580a0571755ed0cdeaca7958dfcec9851..9ab8ee46ef66898e654b4990ab0bd2ad2ff9ca6a 100644 --- a/drivers/staging/rtl8192e/rtllib_tx.c +++ b/drivers/staging/rtl8192e/rtllib_tx.c @@ -191,7 +191,6 @@ int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag, return 0; } - void rtllib_txb_free(struct rtllib_txb *txb) { if (unlikely(!txb)) @@ -267,14 +266,14 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee, struct sk_buff *skb, struct cb_desc *tcb_desc) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; struct tx_ts_record *pTxTs = NULL; struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data; if (rtllib_act_scanning(ieee, false)) return; - if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT) + if (!ht_info->bCurrentHTSupport || !ht_info->enable_ht) return; if (!IsQoSDataFrame(skb->data)) return; @@ -284,14 +283,14 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee, if (tcb_desc->bdhcp || ieee->CntAfterLink < 2) return; - if (pHTInfo->iot_action & HT_IOT_ACT_TX_NO_AGGREGATION) + if (ht_info->iot_action & HT_IOT_ACT_TX_NO_AGGREGATION) return; if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) return; - if (pHTInfo->bCurrentAMPDUEnable) { + if (ht_info->bCurrentAMPDUEnable) { if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1, - skb->priority, TX_DIR, true)) { + skb->priority, TX_DIR, true)) { netdev_info(ieee->dev, "%s: can't get TS\n", __func__); return; } @@ -307,26 +306,26 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee, goto FORCED_AGG_SETTING; } else if (!pTxTs->bUsingBa) { if (SN_LESS(pTxTs->TxAdmittedBARecord.ba_start_seq_ctrl.field.seq_num, - (pTxTs->TxCurSeq+1)%4096)) + (pTxTs->TxCurSeq + 1) % 4096)) pTxTs->bUsingBa = true; else goto FORCED_AGG_SETTING; } if (ieee->iw_mode == IW_MODE_INFRA) { tcb_desc->bAMPDUEnable = true; - tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor; - tcb_desc->ampdu_density = pHTInfo->current_mpdu_density; + tcb_desc->ampdu_factor = ht_info->CurrentAMPDUFactor; + tcb_desc->ampdu_density = ht_info->current_mpdu_density; } } FORCED_AGG_SETTING: - switch (pHTInfo->ForcedAMPDUMode) { + switch (ht_info->ForcedAMPDUMode) { case HT_AGG_AUTO: break; case HT_AGG_FORCE_ENABLE: tcb_desc->bAMPDUEnable = true; - tcb_desc->ampdu_density = pHTInfo->forced_mpdu_density; - tcb_desc->ampdu_factor = pHTInfo->forced_ampdu_factor; + tcb_desc->ampdu_density = ht_info->forced_mpdu_density; + tcb_desc->ampdu_factor = ht_info->forced_ampdu_factor; break; case HT_AGG_FORCE_DISABLE: @@ -351,32 +350,32 @@ static void rtllib_query_ShortPreambleMode(struct rtllib_device *ieee, static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee, struct cb_desc *tcb_desc) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; tcb_desc->bUseShortGI = false; - if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT) + if (!ht_info->bCurrentHTSupport || !ht_info->enable_ht) return; - if (pHTInfo->forced_short_gi) { + if (ht_info->forced_short_gi) { tcb_desc->bUseShortGI = true; return; } - if (pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI40MHz) + if (ht_info->bCurBW40MHz && ht_info->bCurShortGI40MHz) tcb_desc->bUseShortGI = true; - else if (!pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI20MHz) + else if (!ht_info->bCurBW40MHz && ht_info->bCurShortGI20MHz) tcb_desc->bUseShortGI = true; } static void 
rtllib_query_BandwidthMode(struct rtllib_device *ieee, struct cb_desc *tcb_desc) { - struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; + struct rt_hi_throughput *ht_info = ieee->ht_info; tcb_desc->bPacketBW = false; - if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT) + if (!ht_info->bCurrentHTSupport || !ht_info->enable_ht) return; if (tcb_desc->bMulticast || tcb_desc->bBroadcast) @@ -384,7 +383,7 @@ static void rtllib_query_BandwidthMode(struct rtllib_device *ieee, if ((tcb_desc->data_rate & 0x80) == 0) return; - if (pHTInfo->bCurBW40MHz && pHTInfo->cur_tx_bw40mhz && + if (ht_info->bCurBW40MHz && ht_info->cur_tx_bw40mhz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz) tcb_desc->bPacketBW = true; } @@ -393,7 +392,7 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee, struct cb_desc *tcb_desc, struct sk_buff *skb) { - struct rt_hi_throughput *pHTInfo; + struct rt_hi_throughput *ht_info; tcb_desc->bRTSSTBC = false; tcb_desc->bRTSUseShortGI = false; @@ -404,7 +403,7 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee, if (tcb_desc->bBroadcast || tcb_desc->bMulticast) return; - if (is_broadcast_ether_addr(skb->data+16)) + if (is_broadcast_ether_addr(skb->data + 16)) return; if (ieee->mode < IEEE_N_24G) { @@ -419,15 +418,15 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee, return; } - pHTInfo = ieee->pHTInfo; + ht_info = ieee->ht_info; while (true) { - if (pHTInfo->iot_action & HT_IOT_ACT_FORCED_CTS2SELF) { + if (ht_info->iot_action & HT_IOT_ACT_FORCED_CTS2SELF) { tcb_desc->bCTSEnable = true; tcb_desc->rts_rate = MGN_24M; tcb_desc->bRTSEnable = true; break; - } else if (pHTInfo->iot_action & (HT_IOT_ACT_FORCED_RTS | + } else if (ht_info->iot_action & (HT_IOT_ACT_FORCED_RTS | HT_IOT_ACT_PURE_N_MODE)) { tcb_desc->bRTSEnable = true; tcb_desc->rts_rate = MGN_24M; @@ -439,12 +438,12 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee, tcb_desc->rts_rate = MGN_24M; break; } - if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) { - u8 HTOpMode = pHTInfo->current_op_mode; + if (ht_info->bCurrentHTSupport && ht_info->enable_ht) { + u8 HTOpMode = ht_info->current_op_mode; - if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || - HTOpMode == 3)) || - (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) { + if ((ht_info->bCurBW40MHz && (HTOpMode == 2 || + HTOpMode == 3)) || + (!ht_info->bCurBW40MHz && HTOpMode == 3)) { tcb_desc->rts_rate = MGN_24M; tcb_desc->bRTSEnable = true; break; @@ -475,17 +474,16 @@ NO_PROTECTION: tcb_desc->bRTSBW = false; } - static void rtllib_txrate_selectmode(struct rtllib_device *ieee, struct cb_desc *tcb_desc) { - if (ieee->bTxDisableRateFallBack) - tcb_desc->bTxDisableRateFallBack = true; + if (ieee->tx_dis_rate_fallback) + tcb_desc->tx_dis_rate_fallback = true; - if (ieee->bTxUseDriverAssingedRate) - tcb_desc->bTxUseDriverAssingedRate = true; - if (!tcb_desc->bTxDisableRateFallBack || - !tcb_desc->bTxUseDriverAssingedRate) { + if (ieee->tx_use_drv_assinged_rate) + tcb_desc->tx_use_drv_assinged_rate = true; + if (!tcb_desc->tx_dis_rate_fallback || + !tcb_desc->tx_use_drv_assinged_rate) { if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) tcb_desc->RATRIndex = 0; @@ -503,10 +501,10 @@ static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb, struct tx_ts_record *pTS = NULL; if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst, - skb->priority, TX_DIR, true)) + skb->priority, TX_DIR, true)) return 0; seqnum = pTS->TxCurSeq; - pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096; + 
pTS->TxCurSeq = (pTS->TxCurSeq + 1) % 4096; return seqnum; } return 0; @@ -582,7 +580,6 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev) goto success; } - if (likely(ieee->raw_tx == 0)) { if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) { netdev_warn(ieee->dev, "skb too small (%d).\n", @@ -614,14 +611,14 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev) if (skb->len > 282) { if (ether_type == ETH_P_IP) { const struct iphdr *ip = (struct iphdr *) - ((u8 *)skb->data+14); + ((u8 *)skb->data + 14); if (ip->protocol == IPPROTO_UDP) { struct udphdr *udp; udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); if (((((u8 *)udp)[1] == 68) && - (((u8 *)udp)[3] == 67)) || + (((u8 *)udp)[3] == 67)) || ((((u8 *)udp)[1] == 67) && (((u8 *)udp)[3] == 68))) { bdhcp = true; @@ -715,11 +712,11 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev) /* in case we are a client verify acm is not set for this ac */ while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) { netdev_info(ieee->dev, "skb->priority = %x\n", - skb->priority); + skb->priority); if (wme_downgrade_ac(skb)) break; netdev_info(ieee->dev, "converted skb->priority = %x\n", - skb->priority); + skb->priority); } qos_ctl |= skb->priority; @@ -805,8 +802,8 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev) * MOREFRAGS bit to the frame control */ if (i != nr_frags - 1) { - frag_hdr->frame_ctl = cpu_to_le16( - fc | RTLLIB_FCTL_MOREFRAGS); + frag_hdr->frame_ctl = cpu_to_le16(fc | + RTLLIB_FCTL_MOREFRAGS); bytes = bytes_per_frag; } else { @@ -816,18 +813,18 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev) if ((qos_activated) && (!bIsMulticast)) { frag_hdr->seq_ctl = cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag, - header.addr1)); + header.addr1)); frag_hdr->seq_ctl = - cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl)<<4 | i); + cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl) << 4 | i); } else { frag_hdr->seq_ctl = - cpu_to_le16(ieee->seq_ctrl[0]<<4 | i); + cpu_to_le16(ieee->seq_ctrl[0] << 4 | i); } /* Put a SNAP header on the first fragment */ if (i == 0) { - rtllib_put_snap( - skb_put(skb_frag, SNAP_SIZE + - sizeof(u16)), ether_type); + rtllib_put_snap(skb_put(skb_frag, + SNAP_SIZE + + sizeof(u16)), ether_type); bytes -= SNAP_SIZE + sizeof(u16); } @@ -885,19 +882,18 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev) tcb_desc->priority = skb->priority; if (ether_type == ETH_P_PAE) { - if (ieee->pHTInfo->iot_action & + if (ieee->ht_info->iot_action & HT_IOT_ACT_WA_IOT_Broadcom) { tcb_desc->data_rate = MgntQuery_TxRateExcludeCCKRates(ieee); - tcb_desc->bTxDisableRateFallBack = false; + tcb_desc->tx_dis_rate_fallback = false; } else { tcb_desc->data_rate = ieee->basic_rate; - tcb_desc->bTxDisableRateFallBack = 1; + tcb_desc->tx_dis_rate_fallback = 1; } - tcb_desc->RATRIndex = 7; - tcb_desc->bTxUseDriverAssingedRate = 1; + tcb_desc->tx_use_drv_assinged_rate = 1; } else { if (is_multicast_ether_addr(header.addr1)) tcb_desc->bMulticast = 1; @@ -910,19 +906,18 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev) tcb_desc->data_rate = rtllib_current_rate(ieee); if (bdhcp) { - if (ieee->pHTInfo->iot_action & + if (ieee->ht_info->iot_action & HT_IOT_ACT_WA_IOT_Broadcom) { tcb_desc->data_rate = MgntQuery_TxRateExcludeCCKRates(ieee); - tcb_desc->bTxDisableRateFallBack = false; + tcb_desc->tx_dis_rate_fallback = false; } else { tcb_desc->data_rate = MGN_1M; - tcb_desc->bTxDisableRateFallBack = 1; 
+ tcb_desc->tx_dis_rate_fallback = 1; } - tcb_desc->RATRIndex = 7; + tcb_desc->tx_use_drv_assinged_rate = 1; tcb_desc->bdhcp = 1; } @@ -959,7 +954,6 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); stats->tx_errors++; return 1; - } netdev_tx_t rtllib_xmit(struct sk_buff *skb, struct net_device *dev) diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c index da2c41c9b92f1ff4e5850cffc2ba1b33bdd49cb1..217426ee2e92a5b61120a9da6d78e6722c56726a 100644 --- a/drivers/staging/rtl8192e/rtllib_wx.c +++ b/drivers/staging/rtl8192e/rtllib_wx.c @@ -217,7 +217,7 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee, p = custom; p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Last beacon: %lums ago", - (jiffies - network->last_scanned) / (HZ / 100)); + (100 * (jiffies - network->last_scanned)) / HZ); iwe.u.data.length = p - custom; if (iwe.u.data.length) start = iwe_stream_add_point_rsl(info, start, stop, @@ -258,8 +258,8 @@ int rtllib_wx_get_scan(struct rtllib_device *ieee, escape_essid(network->ssid, network->ssid_len), network->bssid, - (jiffies - network->last_scanned) / - (HZ / 100)); + (100 * (jiffies - network->last_scanned)) / + HZ); } spin_unlock_irqrestore(&ieee->lock, flags); diff --git a/drivers/staging/rtl8192u/TODO b/drivers/staging/rtl8192u/TODO new file mode 100644 index 0000000000000000000000000000000000000000..ab9d5d145b3bc13e2cf068397b0ee4d3b445e34d --- /dev/null +++ b/drivers/staging/rtl8192u/TODO @@ -0,0 +1,16 @@ +To-do list: + +* Correct the coding style according to Linux guidelines; please read the document + at https://www.kernel.org/doc/html/latest/process/coding-style.html. +* Remove unnecessary debugging/printing macros; for those that are still needed + use the proper kernel API (pr_debug(), dev_dbg(), netdev_dbg()). +* Remove dead code such as unused functions, variables, fields, etc. +* Use in-kernel API and remove unnecessary wrappers where possible. +* Fix bugs due to code that sleeps in atomic context. +* Remove the HAL layer and migrate its functionality into the relevant parts of + the driver. +* Switch to use LIB80211. +* Switch to use MAC80211. +* Switch to use CFG80211. +* Improve the error handling of various functions, particularly those that use + existing kernel APIs.
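On the debugging-macro item in the TODO above: pr_debug(), dev_dbg() and netdev_dbg() are wired into dynamic debug, so a converted call site emits nothing unless it is enabled at runtime (or the file is built with DEBUG), and no private verbosity-flag machinery is needed. A minimal sketch of such a conversion, where RT_TRACE() stands in for a hypothetical private macro; the function name is illustrative:

#include <linux/netdevice.h>

/*
 * Before (hypothetical private macro):
 *	RT_TRACE(COMP_RECV, "rx seq %u\n", seq);
 *
 * After: netdev_dbg() prefixes the message with the interface name
 * and is controllable per call site via CONFIG_DYNAMIC_DEBUG.
 */
static inline void trace_rx_seq(struct net_device *dev, u16 seq)
{
	netdev_dbg(dev, "rx seq %u\n", seq);
}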
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h index 9cd4b18967459db6e5be3d4dcabcef751219b0b7..c5c43d2fb93eb2ec9ab10a32d0e8e4a15c560408 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h @@ -223,73 +223,10 @@ struct cb_desc { #define MAX_IE_LEN 0xff // added for kernel conflict -#define ieee80211_crypt_deinit_entries ieee80211_crypt_deinit_entries_rsl -#define ieee80211_crypt_deinit_handler ieee80211_crypt_deinit_handler_rsl -#define ieee80211_crypt_delayed_deinit ieee80211_crypt_delayed_deinit_rsl -#define ieee80211_register_crypto_ops ieee80211_register_crypto_ops_rsl -#define ieee80211_unregister_crypto_ops ieee80211_unregister_crypto_ops_rsl -#define ieee80211_get_crypto_ops ieee80211_get_crypto_ops_rsl - -#define ieee80211_ccmp_null ieee80211_ccmp_null_rsl - -#define ieee80211_tkip_null ieee80211_tkip_null_rsl - -#define free_ieee80211 free_ieee80211_rsl -#define alloc_ieee80211 alloc_ieee80211_rsl - -#define ieee80211_rx ieee80211_rx_rsl -#define ieee80211_rx_mgt ieee80211_rx_mgt_rsl - -#define ieee80211_get_beacon ieee80211_get_beacon_rsl #define ieee80211_wake_queue ieee80211_wake_queue_rsl #define ieee80211_stop_queue ieee80211_stop_queue_rsl -#define ieee80211_reset_queue ieee80211_reset_queue_rsl -#define ieee80211_softmac_stop_protocol ieee80211_softmac_stop_protocol_rsl -#define ieee80211_softmac_start_protocol ieee80211_softmac_start_protocol_rsl -#define ieee80211_is_shortslot ieee80211_is_shortslot_rsl -#define ieee80211_is_54g ieee80211_is_54g_rsl -#define ieee80211_wpa_supplicant_ioctl ieee80211_wpa_supplicant_ioctl_rsl -#define ieee80211_ps_tx_ack ieee80211_ps_tx_ack_rsl -#define ieee80211_softmac_xmit ieee80211_softmac_xmit_rsl -#define ieee80211_stop_send_beacons ieee80211_stop_send_beacons_rsl #define notify_wx_assoc_event notify_wx_assoc_event_rsl #define SendDisassociation SendDisassociation_rsl -#define ieee80211_disassociate ieee80211_disassociate_rsl -#define ieee80211_start_send_beacons ieee80211_start_send_beacons_rsl -#define ieee80211_stop_scan ieee80211_stop_scan_rsl -#define ieee80211_send_probe_requests ieee80211_send_probe_requests_rsl -#define ieee80211_softmac_scan_syncro ieee80211_softmac_scan_syncro_rsl -#define ieee80211_start_scan_syncro ieee80211_start_scan_syncro_rsl - -#define ieee80211_wx_get_essid ieee80211_wx_get_essid_rsl -#define ieee80211_wx_set_essid ieee80211_wx_set_essid_rsl -#define ieee80211_wx_set_rate ieee80211_wx_set_rate_rsl -#define ieee80211_wx_get_rate ieee80211_wx_get_rate_rsl -#define ieee80211_wx_set_wap ieee80211_wx_set_wap_rsl -#define ieee80211_wx_get_wap ieee80211_wx_get_wap_rsl -#define ieee80211_wx_set_mode ieee80211_wx_set_mode_rsl -#define ieee80211_wx_get_mode ieee80211_wx_get_mode_rsl -#define ieee80211_wx_set_scan ieee80211_wx_set_scan_rsl -#define ieee80211_wx_get_freq ieee80211_wx_get_freq_rsl -#define ieee80211_wx_set_freq ieee80211_wx_set_freq_rsl -#define ieee80211_wx_set_rawtx ieee80211_wx_set_rawtx_rsl -#define ieee80211_wx_get_name ieee80211_wx_get_name_rsl -#define ieee80211_wx_set_power ieee80211_wx_set_power_rsl -#define ieee80211_wx_get_power ieee80211_wx_get_power_rsl -#define ieee80211_wlan_frequencies ieee80211_wlan_frequencies_rsl -#define ieee80211_wx_set_rts ieee80211_wx_set_rts_rsl -#define ieee80211_wx_get_rts ieee80211_wx_get_rts_rsl - -#define ieee80211_txb_free ieee80211_txb_free_rsl - -#define ieee80211_wx_set_gen_ie ieee80211_wx_set_gen_ie_rsl -#define 
ieee80211_wx_get_scan ieee80211_wx_get_scan_rsl -#define ieee80211_wx_set_encode ieee80211_wx_set_encode_rsl -#define ieee80211_wx_get_encode ieee80211_wx_get_encode_rsl -#define ieee80211_wx_set_mlme ieee80211_wx_set_mlme_rsl -#define ieee80211_wx_set_auth ieee80211_wx_set_auth_rsl -#define ieee80211_wx_set_encode_ext ieee80211_wx_set_encode_ext_rsl -#define ieee80211_wx_get_encode_ext ieee80211_wx_get_encode_ext_rsl struct ieee_param { @@ -2256,7 +2193,6 @@ void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success); void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee); /* ieee80211_crypt_ccmp&tkip&wep.c */ -void ieee80211_tkip_null(void); int ieee80211_crypto_init(void); void ieee80211_crypto_deinit(void); diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c index 7b120b8cb9824db8576966a54cad4b78c7f7238c..9bfd24ad46b692ca3b95a3491ee4560449939f1e 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c @@ -716,9 +716,3 @@ void ieee80211_crypto_tkip_exit(void) { ieee80211_unregister_crypto_ops(&ieee80211_crypt_tkip); } - -void ieee80211_tkip_null(void) -{ -// printk("============>%s()\n", __func__); - return; -} diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c index b94fe9b449b6cef8ff37564d65d67d721bd26628..3f93939bc4ee0fee39bb2c72dee7a7585f66b937 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c @@ -159,9 +159,6 @@ struct net_device *alloc_ieee80211(int sizeof_priv) ieee->last_packet_time[i] = 0; } -/* These function were added to load crypte module autoly */ - ieee80211_tkip_null(); - return dev; failed: diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c index b58e75932ecd5e28e302c7f955a68fafb4e4507b..5c73e3f8541aed19caa36e823956ed9557f57ff8 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c @@ -951,9 +951,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, #endif if (ieee->iw_mode == IW_MODE_MONITOR) { + unsigned int len = skb->len; + ieee80211_monitor_rx(ieee, skb, rx_stats); stats->rx_packets++; - stats->rx_bytes += skb->len; + stats->rx_bytes += len; return 1; } @@ -1806,7 +1808,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee, info_element->data[0] == 0x00 && info_element->data[1] == 0x13 && info_element->data[2] == 0x74)) { - netdev_dbg(ieee->dev, "========> athros AP is exist\n"); + netdev_dbg(ieee->dev, "========> Atheros AP exists\n"); network->atheros_cap_exist = true; } else network->atheros_cap_exist = false; diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c index 003e9720512408e97e994131f955eb6be074e066..a2f3645be0cc819cef0dea589d042091846a1f94 100644 --- a/drivers/staging/rtl8712/os_intfs.c +++ b/drivers/staging/rtl8712/os_intfs.c @@ -304,25 +304,42 @@ int r8712_init_drv_sw(struct _adapter *padapter) padapter->cmdpriv.padapter = padapter; ret = r8712_init_evt_priv(&padapter->evtpriv); if (ret) - return ret; + goto free_cmd; ret = r8712_init_mlme_priv(padapter); if (ret) - return ret; - _r8712_init_xmit_priv(&padapter->xmitpriv, padapter); - _r8712_init_recv_priv(&padapter->recvpriv, padapter); + goto free_evt; + ret = 
_r8712_init_xmit_priv(&padapter->xmitpriv, padapter); + if (ret) + goto free_mlme; + ret = _r8712_init_recv_priv(&padapter->recvpriv, padapter); + if (ret) + goto free_xmit; memset((unsigned char *)&padapter->securitypriv, 0, sizeof(struct security_priv)); timer_setup(&padapter->securitypriv.tkip_timer, r8712_use_tkipkey_handler, 0); ret = _r8712_init_sta_priv(&padapter->stapriv); if (ret) - return ret; + goto free_recv; padapter->stapriv.padapter = padapter; r8712_init_bcmc_stainfo(padapter); r8712_init_pwrctrl_priv(padapter); mp871xinit(padapter); init_default_value(padapter); r8712_InitSwLeds(padapter); + + return 0; + +free_recv: + _r8712_free_recv_priv(&padapter->recvpriv); +free_xmit: + _free_xmit_priv(&padapter->xmitpriv); +free_mlme: + r8712_free_mlme_priv(&padapter->mlmepriv); +free_evt: + r8712_free_evt_priv(&padapter->evtpriv); +free_cmd: + r8712_free_cmd_priv(&padapter->cmdpriv); return ret; } diff --git a/drivers/staging/rtl8712/recv_osdep.h b/drivers/staging/rtl8712/recv_osdep.h index d8c1fa74f54402b1807cc52b35a01a9aa5abfdd1..fbe3f28685064785dc1ecd9539b28d40737be22e 100644 --- a/drivers/staging/rtl8712/recv_osdep.h +++ b/drivers/staging/rtl8712/recv_osdep.h @@ -18,15 +18,15 @@ #include "drv_types.h" #include -void _r8712_init_recv_priv(struct recv_priv *precvpriv, - struct _adapter *padapter); +int _r8712_init_recv_priv(struct recv_priv *precvpriv, + struct _adapter *padapter); void _r8712_free_recv_priv(struct recv_priv *precvpriv); void r8712_recv_entry(union recv_frame *precv_frame); void r8712_recv_indicatepkt(struct _adapter *adapter, union recv_frame *precv_frame); void r8712_handle_tkip_mic_err(struct _adapter *padapter, u8 bgroup); -void r8712_init_recv_priv(struct recv_priv *precvpriv, - struct _adapter *padapter); +int r8712_init_recv_priv(struct recv_priv *precvpriv, + struct _adapter *padapter); void r8712_free_recv_priv(struct recv_priv *precvpriv); void r8712_os_recv_resource_alloc(struct _adapter *padapter, union recv_frame *precvframe); diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c index 7f1fdd058551126f3679d0e6c3919d4602fadd0d..7da014ab0723e0b6578364033ce277cca15e603b 100644 --- a/drivers/staging/rtl8712/rtl8712_recv.c +++ b/drivers/staging/rtl8712/rtl8712_recv.c @@ -30,8 +30,8 @@ static void recv_tasklet(struct tasklet_struct *t); -void r8712_init_recv_priv(struct recv_priv *precvpriv, - struct _adapter *padapter) +int r8712_init_recv_priv(struct recv_priv *precvpriv, + struct _adapter *padapter) { int i; struct recv_buf *precvbuf; @@ -44,7 +44,7 @@ void r8712_init_recv_priv(struct recv_priv *precvpriv, precvpriv->pallocated_recv_buf = kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4, GFP_ATOMIC); if (!precvpriv->pallocated_recv_buf) - return; + return -ENOMEM; precvpriv->precv_buf = precvpriv->pallocated_recv_buf + 4 - ((addr_t)(precvpriv->pallocated_recv_buf) & 3); precvbuf = (struct recv_buf *)precvpriv->precv_buf; @@ -75,6 +75,7 @@ void r8712_init_recv_priv(struct recv_priv *precvpriv, } pskb = NULL; } + return 0; } void r8712_free_recv_priv(struct recv_priv *precvpriv) diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c index 84a22eba7ebfd205f7e47a0bdfccb03346d1c734..4cb01f590673e31a2c8540dd10c5fda2f51d34cb 100644 --- a/drivers/staging/rtl8712/rtl8712_xmit.c +++ b/drivers/staging/rtl8712/rtl8712_xmit.c @@ -601,7 +601,7 @@ int r8712_xmitframe_complete(struct _adapter *padapter, #ifdef CONFIG_R8712_TX_AGGR struct xmit_frame *p2ndxmitframe = NULL; #else - int res = 
_SUCCESS, xcnt = 0; + int res = _SUCCESS; #endif phwxmits = pxmitpriv->hwxmits; @@ -673,7 +673,6 @@ int r8712_xmitframe_complete(struct _adapter *padapter, dump_xframe(padapter, pxmitframe); else r8712_free_xmitframe_ex(pxmitpriv, pxmitframe); - xcnt++; #endif } else { /* pxmitframe == NULL && p2ndxmitframe == NULL */ diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c index de9a568eaffaaa8342c4aefb21ad705d874d62e8..8a3566214af726ec347772a817b67de545f8a669 100644 --- a/drivers/staging/rtl8712/rtl871x_recv.c +++ b/drivers/staging/rtl8712/rtl871x_recv.c @@ -17,9 +17,7 @@ #define _RTL871X_RECV_C_ #include -#include #include -#include #include #include #include @@ -44,9 +42,10 @@ void _r8712_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv) _init_queue(&psta_recvpriv->defrag_q); } -void _r8712_init_recv_priv(struct recv_priv *precvpriv, - struct _adapter *padapter) +int _r8712_init_recv_priv(struct recv_priv *precvpriv, + struct _adapter *padapter) { + int ret; sint i; union recv_frame *precvframe; @@ -60,8 +59,7 @@ void _r8712_init_recv_priv(struct recv_priv *precvpriv, sizeof(union recv_frame) + RXFRAME_ALIGN_SZ, GFP_ATOMIC); if (!precvpriv->pallocated_frame_buf) - return; - kmemleak_not_leak(precvpriv->pallocated_frame_buf); + return -ENOMEM; precvpriv->precv_frame_buf = precvpriv->pallocated_frame_buf + RXFRAME_ALIGN_SZ - ((addr_t)(precvpriv->pallocated_frame_buf) & @@ -76,7 +74,11 @@ void _r8712_init_recv_priv(struct recv_priv *precvpriv, precvframe++; } precvpriv->rx_pending_cnt = 1; - r8712_init_recv_priv(precvpriv, padapter); + ret = r8712_init_recv_priv(precvpriv, padapter); + if (ret) + kfree(precvpriv->pallocated_frame_buf); + + return ret; } void _r8712_free_recv_priv(struct recv_priv *precvpriv) diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c index 06e727ce9cc294827f0a8fe0208f3cf1d09276c2..eb848f9bbf2c3604b6407096941d924dfd8d8e71 100644 --- a/drivers/staging/rtl8723bs/core/rtw_efuse.c +++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c @@ -282,7 +282,7 @@ u8 efuse_OneByteWrite(struct adapter *padapter, u16 addr, u8 data, bool bPseudoT { u8 tmpidx = 0; u8 bResult = false; - u32 efuseValue = 0; + u32 efuseValue; if (bPseudoTest) return Efuse_Write1ByteToFakeContent(addr, data); diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c index 3d8a64f694481b21357e7185334faf78180f2687..30e7457a9c3120682514499db57d9d429f970a84 100644 --- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c +++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c @@ -1063,18 +1063,18 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork) /* parsing HT_CAP_IE */ p = rtw_get_ie(pnetwork->network.ies + _FIXED_IE_LENGTH_, WLAN_EID_HT_CAPABILITY, &len, pnetwork->network.ie_length - _FIXED_IE_LENGTH_); if (p && len > 0) { - pht_cap = (struct ieee80211_ht_cap *)(p + 2); - pnetwork->bcn_info.ht_cap_info = le16_to_cpu(pht_cap->cap_info); + pht_cap = (struct ieee80211_ht_cap *)(p + 2); + pnetwork->bcn_info.ht_cap_info = le16_to_cpu(pht_cap->cap_info); } else { - pnetwork->bcn_info.ht_cap_info = 0; + pnetwork->bcn_info.ht_cap_info = 0; } /* parsing HT_INFO_IE */ p = rtw_get_ie(pnetwork->network.ies + _FIXED_IE_LENGTH_, WLAN_EID_HT_OPERATION, &len, pnetwork->network.ie_length - _FIXED_IE_LENGTH_); if (p && len > 0) { - pht_info = (struct HT_info_element *)(p + 2); - pnetwork->bcn_info.ht_info_infos_0 = pht_info->infos[0]; + pht_info = (struct HT_info_element *)(p + 2); + 
pnetwork->bcn_info.ht_info_infos_0 = pht_info->infos[0]; } else { - pnetwork->bcn_info.ht_info_infos_0 = 0; + pnetwork->bcn_info.ht_info_infos_0 = 0; } } diff --git a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c index 8c11daff2d59026553274efc67d074e117b0dd60..3b44f0dd5b0aee38c89ba1da65d7e7f8af4556aa 100644 --- a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c +++ b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c @@ -78,6 +78,7 @@ u8 rtw_do_join(struct adapter *padapter) goto exit; } else { int select_ret; + spin_unlock_bh(&(pmlmepriv->scanned_queue.lock)); select_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv); if (select_ret == _SUCCESS) { @@ -159,7 +160,7 @@ u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid) if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) == true) { if ((pmlmepriv->assoc_ssid.ssid_length == ssid->ssid_length) && (!memcmp(&pmlmepriv->assoc_ssid.ssid, ssid->ssid, ssid->ssid_length))) { - if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false)) { + if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false) { if (rtw_is_same_ibss(padapter, pnetwork) == false) { /* if in WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE, create bss or rejoin again */ rtw_disassoc_cmd(padapter, 0, true); @@ -311,7 +312,7 @@ u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter, if ((*pold_state == Ndis802_11Infrastructure) || (*pold_state == Ndis802_11IBSS)) { if (check_fwstate(pmlmepriv, _FW_LINKED) == true) rtw_indicate_disconnect(padapter); /* will clr Linked_state; before this function, we must have checked whether issue dis-assoc_cmd or not */ - } + } *pold_state = networktype; @@ -367,7 +368,7 @@ u8 rtw_set_802_11_disassociate(struct adapter *padapter) u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_ssid *pssid, int ssid_max_num) { - struct mlme_priv *pmlmepriv = &padapter->mlmepriv; + struct mlme_priv *pmlmepriv = &padapter->mlmepriv; u8 res = true; if (!padapter) { @@ -462,11 +463,11 @@ exit: } /* -* rtw_get_cur_max_rate - -* @adapter: pointer to struct adapter structure -* -* Return 0 or 100Kbps -*/ + * rtw_get_cur_max_rate - + * @adapter: pointer to struct adapter structure + * + * Return 0 or 100Kbps + */ u16 rtw_get_cur_max_rate(struct adapter *adapter) { int i = 0; diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c index 6498fd17e1d3e2ca28fd2b9e3934bd9e06482951..c6fd6cf741ef6a795710a295ee03e0eb3a32f954 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c @@ -389,7 +389,7 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst, u8 fea __le16 tmps, tmpd; if (rtw_bug_check(dst, src, &s_cap, &d_cap) == false) - return false; + return false; memcpy((u8 *)&tmps, rtw_get_capability_from_ie(src->ies), 2); memcpy((u8 *)&tmpd, rtw_get_capability_from_ie(dst->ies), 2); @@ -669,7 +669,7 @@ int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwor uint ie_len = 0; if ((desired_encmode == Ndis802_11EncryptionDisabled) && (privacy != 0)) - bselected = false; + bselected = false; if (psecuritypriv->ndisauthtype == Ndis802_11AuthModeWPA2PSK) { p = rtw_get_ie(pnetwork->network.ies + _BEACON_IE_OFFSET_, WLAN_EID_RSN, &ie_len, (pnetwork->network.ie_length - _BEACON_IE_OFFSET_)); @@ -795,7 +795,7 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf) pmlmepriv->to_join = false; s_ret = 
rtw_select_and_join_from_scanned_queue(pmlmepriv); if (s_ret == _SUCCESS) { - _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT); + _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT); } else if (s_ret == 2) {/* there is no need to wait for join */ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); rtw_indicate_connect(adapter); @@ -2010,8 +2010,8 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_ if (in_ie[i] == 0xDD && in_ie[i+2] == 0x00 && in_ie[i+3] == 0x50 && in_ie[i+4] == 0xF2 && in_ie[i+5] == 0x02 && i+5 < in_len) { /* WMM element ID and OUI */ for (j = i; j < i + 9; j++) { - out_ie[ielength] = in_ie[j]; - ielength++; + out_ie[ielength] = in_ie[j]; + ielength++; } out_ie[initial_out_len + 1] = 0x07; out_ie[initial_out_len + 6] = 0x00; @@ -2064,15 +2064,13 @@ static int rtw_append_pmkid(struct adapter *Adapter, int iEntry, u8 *ie, uint ie if (ie[13] <= 20) { /* The RSN IE didn't include the PMK ID, append the PMK information */ - ie[ie_len] = 1; - ie_len++; - ie[ie_len] = 0; /* PMKID count = 0x0100 */ - ie_len++; - memcpy(&ie[ie_len], &psecuritypriv->PMKIDList[iEntry].PMKID, 16); - - ie_len += 16; - ie[13] += 18;/* PMKID length = 2+16 */ - + ie[ie_len] = 1; + ie_len++; + ie[ie_len] = 0; /* PMKID count = 0x0100 */ + ie_len++; + memcpy(&ie[ie_len], &psecuritypriv->PMKIDList[iEntry].PMKID, 16); + ie_len += 16; + ie[13] += 18;/* PMKID length = 2+16 */ } return ie_len; } @@ -2091,9 +2089,9 @@ signed int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, u memcpy(out_ie, in_ie, 12); ielength = 12; if ((ndisauthmode == Ndis802_11AuthModeWPA) || (ndisauthmode == Ndis802_11AuthModeWPAPSK)) - authmode = WLAN_EID_VENDOR_SPECIFIC; + authmode = WLAN_EID_VENDOR_SPECIFIC; if ((ndisauthmode == Ndis802_11AuthModeWPA2) || (ndisauthmode == Ndis802_11AuthModeWPA2PSK)) - authmode = WLAN_EID_RSN; + authmode = WLAN_EID_RSN; if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) { memcpy(out_ie+ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len); diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c index 8e74b4f47b9473bcb2fd290b4584609fc391e63f..1148c9829890cf9b4de6a52037fefae8acae0876 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c @@ -5447,7 +5447,7 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf) u8 val8; if (is_client_associated_to_ap(padapter)) - issue_deauth_ex(padapter, pnetwork->mac_address, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms/100, 100); + issue_deauth_ex(padapter, pnetwork->mac_address, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms/100, 100); if (((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)) { /* Stop BCN */ diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c index 2825375bff94289351d5cf65fe9bce6b5b21f48b..7c7b6495965f5ae859c960d772cf4e66058fb9c7 100644 --- a/drivers/staging/rtl8723bs/core/rtw_recv.c +++ b/drivers/staging/rtl8723bs/core/rtw_recv.c @@ -161,7 +161,7 @@ int rtw_free_recvframe(union recv_frame *precvframe, struct __queue *pfree_recv_ if (padapter) { if (pfree_recv_queue == &precvpriv->free_recv_queue) - precvpriv->free_recvframe_cnt++; + precvpriv->free_recvframe_cnt++; } spin_unlock_bh(&pfree_recv_queue->lock); return _SUCCESS; @@ -691,8 +691,8 @@ static signed int sta2sta_data_frame(struct adapter *adapter, union recv_frame * if (bmcast) { /* For AP mode, if DA == MCAST, then BSSID 
should be also MCAST */ if (!IS_MCAST(pattrib->bssid)) { - ret = _FAIL; - goto exit; + ret = _FAIL; + goto exit; } } else { /* not mc-frame */ /* For AP mode, if DA is non-MCAST, then it must be BSSID, and bssid == BSSID */ diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c index beb11d89db1866c2dfbf18d3f35f765a8f2aaec1..c7de81f21bec0def85fd3fcd2cfc0b38fea183f4 100644 --- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c +++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c @@ -551,7 +551,7 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr) else if (pacl_list->mode == 2)/* deny unless in accept list */ res = match; else - res = true; + res = true; return res; } diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c index 18ba846c0b7b7203059320ac2ee7ac8869959244..ba39c8b1a9ae1fd6f81b1c40e21a73244c4aafe3 100644 --- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c +++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c @@ -986,15 +986,11 @@ void HT_caps_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE) pmlmeinfo->HT_caps.u.HT_cap[i] &= (pIE->data[i]); } else { /* modify from fw by Thomas 2010/11/17 */ - if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3) > (pIE->data[i] & 0x3)) - max_AMPDU_len = (pIE->data[i] & 0x3); - else - max_AMPDU_len = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3); + max_AMPDU_len = min(pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3, + pIE->data[i] & 0x3); - if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) > (pIE->data[i] & 0x1c)) - min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c); - else - min_MPDU_spacing = (pIE->data[i] & 0x1c); + min_MPDU_spacing = max(pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c, + pIE->data[i] & 0x1c); pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para = max_AMPDU_len | min_MPDU_spacing; } diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c index a52748f7b56eb8208405a48983a7b9930e4f5927..22e33b97800d1fe29e2c44f826cbfff34e8b6eb9 100644 --- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c +++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c @@ -244,8 +244,8 @@ void ODM_TxPwrTrackSetPwr_8723B( Final_CCK_Swing_Index = 0; setIqkMatrix_8723B(pDM_Odm, Final_OFDM_Swing_Index, RFPath, - pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][0], - pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][1]); + pDM_Odm->RFCalibrateInfo.iqk_matrix_regs_setting_value[ChannelMappedIndex][0], + pDM_Odm->RFCalibrateInfo.iqk_matrix_regs_setting_value[ChannelMappedIndex][1]); setCCKFilterCoefficient(pDM_Odm, Final_CCK_Swing_Index); @@ -257,8 +257,8 @@ void ODM_TxPwrTrackSetPwr_8723B( pDM_Odm->Remnant_OFDMSwingIdx[RFPath] = Final_OFDM_Swing_Index - PwrTrackingLimit_OFDM; setIqkMatrix_8723B(pDM_Odm, PwrTrackingLimit_OFDM, RFPath, - pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][0], - pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][1]); + pDM_Odm->RFCalibrateInfo.iqk_matrix_regs_setting_value[ChannelMappedIndex][0], + pDM_Odm->RFCalibrateInfo.iqk_matrix_regs_setting_value[ChannelMappedIndex][1]); pDM_Odm->Modify_TxAGC_Flag_PathA = true; PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, OFDM); @@ -267,16 +267,16 @@ void ODM_TxPwrTrackSetPwr_8723B( pDM_Odm->Remnant_OFDMSwingIdx[RFPath] = Final_OFDM_Swing_Index; 
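/*
 * Editor's note: the HT_caps_handler() hunk just above replaces two
 * open-coded compare-and-assign ladders with the kernel's min()/max()
 * helpers. A minimal sketch of the same transformation, with hypothetical
 * names (merge_ampdu_params and its parameters are stand-ins for
 * illustration, not symbols from this driver):
 */
#include <linux/minmax.h>
#include <linux/types.h>

static u8 merge_ampdu_params(u8 our_para, u8 peer_para)
{
	/* the smaller maximum A-MPDU length exponent wins (bits 1:0) */
	u8 max_ampdu_len = min(our_para & 0x3, peer_para & 0x3);
	/* the larger minimum MPDU start spacing wins (bits 4:2) */
	u8 min_mpdu_spacing = max(our_para & 0x1c, peer_para & 0x1c);

	return max_ampdu_len | min_mpdu_spacing;
}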
setIqkMatrix_8723B(pDM_Odm, 0, RFPath, - pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][0], - pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][1]); + pDM_Odm->RFCalibrateInfo.iqk_matrix_regs_setting_value[ChannelMappedIndex][0], + pDM_Odm->RFCalibrateInfo.iqk_matrix_regs_setting_value[ChannelMappedIndex][1]); pDM_Odm->Modify_TxAGC_Flag_PathA = true; PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, OFDM); PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, HT_MCS0_MCS7); } else { setIqkMatrix_8723B(pDM_Odm, Final_OFDM_Swing_Index, RFPath, - pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][0], - pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][1]); + pDM_Odm->RFCalibrateInfo.iqk_matrix_regs_setting_value[ChannelMappedIndex][0], + pDM_Odm->RFCalibrateInfo.iqk_matrix_regs_setting_value[ChannelMappedIndex][1]); if (pDM_Odm->Modify_TxAGC_Flag_PathA) { /* If TxAGC has changed, reset TxAGC again */ pDM_Odm->Remnant_OFDMSwingIdx[RFPath] = 0; @@ -1759,9 +1759,8 @@ void PHY_IQCalibrate_8723B( /* To Fix BSOD when final_candidate is 0xff */ /* by sherry 20120321 */ if (final_candidate < 4) { - for (i = 0; i < IQK_Matrix_REG_NUM; i++) - pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[0].Value[0][i] = result[final_candidate][i]; - pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[0].bIQKDone = true; + for (i = 0; i < IQK_MATRIX_REG_NUM; i++) + pDM_Odm->RFCalibrateInfo.iqk_matrix_regs_setting_value[0][i] = result[final_candidate][i]; } _PHY_SaveADDARegisters8723B(padapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup_recover, 9); diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h index fe9782d2d4fd23699569c3e6ae68fe9ace665a7f..f5c804a1b9d55776c72eb88bb68946b3b307fe96 100644 --- a/drivers/staging/rtl8723bs/hal/odm.h +++ b/drivers/staging/rtl8723bs/hal/odm.h @@ -193,8 +193,8 @@ struct odm_rate_adaptive { #define HP_THERMAL_NUM 8 #define AVG_THERMAL_NUM 8 -#define IQK_Matrix_REG_NUM 8 -#define IQK_Matrix_Settings_NUM 14 /* Channels_2_4G_NUM */ +#define IQK_MATRIX_REG_NUM 8 +#define IQK_MATRIX_SETTINGS_NUM 14 /* Channels_2_4G_NUM */ #define DM_Type_ByFW 0 #define DM_Type_ByDriver 1 @@ -479,12 +479,6 @@ enum odm_type_alna_e { /* tag_ODM_TYPE_ALNA_Definition */ TYPE_ALNA3 = BIT(3)|BIT(2)|BIT(1)|BIT(0) }; -struct iqk_matrix_regs_setting { /* _IQK_MATRIX_REGS_SETTING */ - bool bIQKDone; - s32 Value[3][IQK_Matrix_REG_NUM]; - bool bBWIqkResultSaved[3]; -}; - /* Remove PATHDIV_PARA struct to odm_PathDiv.h */ struct odm_rf_cal_t { /* ODM_RF_Calibration_Structure */ @@ -530,7 +524,7 @@ struct odm_rf_cal_t { /* ODM_RF_Calibration_Structure */ u8 ThermalValue_HP[HP_THERMAL_NUM]; u8 ThermalValue_HP_index; - struct iqk_matrix_regs_setting IQKMatrixRegSetting[IQK_Matrix_Settings_NUM]; + s32 iqk_matrix_regs_setting_value[IQK_MATRIX_SETTINGS_NUM][IQK_MATRIX_REG_NUM]; bool bNeedIQK; bool bIQKInProgress; u8 Delta_IQK; diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.c b/drivers/staging/rtl8723bs/hal/odm_DIG.c index 07edf74ccfe5224c0f9b2156b38c0147aaef7f3b..97a51546463a69ed536da25f1b503c88c55e3fec 100644 --- a/drivers/staging/rtl8723bs/hal/odm_DIG.c +++ b/drivers/staging/rtl8723bs/hal/odm_DIG.c @@ -598,10 +598,7 @@ void odm_DIGbyRSSI_LPS(void *pDM_VOID) /* Lower bound checking */ /* RSSI Lower bound check */ - if ((pDM_Odm->RSSI_Min-10) > DM_DIG_MIN_NIC) - RSSI_Lower = pDM_Odm->RSSI_Min-10; - else - RSSI_Lower = DM_DIG_MIN_NIC; + 
RSSI_Lower = max(pDM_Odm->RSSI_Min - 10, DM_DIG_MIN_NIC); /* Upper and Lower Bound checking */ if (CurrentIGI > DM_DIG_MAX_NIC) diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c index 6aeb169c6ebf0a0effdc474a0fcd1f3be8fbce84..54004f846cf0fd25b940c40b169c08d54eebe24d 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c @@ -1551,7 +1551,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel wps_ie = rtw_get_wps_ie(buf, ielen, NULL, &wps_ielen); if (wps_ie && wps_ielen > 0) { - padapter->securitypriv.wps_ie_len = wps_ielen < MAX_WPS_IE_LEN ? wps_ielen : MAX_WPS_IE_LEN; + padapter->securitypriv.wps_ie_len = min_t(uint, wps_ielen, MAX_WPS_IE_LEN); memcpy(padapter->securitypriv.wps_ie, wps_ie, padapter->securitypriv.wps_ie_len); set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS); } else { diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c index d1fafd530c80e61a391fe1b5c1f38e6f4be81516..4b7122add51aa07daf68e75c31efa566b7c028cd 100644 --- a/drivers/staging/rts5208/sd.c +++ b/drivers/staging/rts5208/sd.c @@ -4506,7 +4506,8 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (CHK_SD(sd_card)) { retval = reset_sd(chip); if (retval != STATUS_SUCCESS) { - sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST); + sd_card->sd_lock_status &= + ~(SD_UNLOCK_POW_ON | SD_SDR_RST); goto sd_execute_write_cmd_failed; } } diff --git a/drivers/staging/sm750fb/Kconfig b/drivers/staging/sm750fb/Kconfig index acb6c08d09dce7bfe2626ecd67a66c30fe4cf911..1461c89701c3ac5550289599556e9e3f3a089f15 100644 --- a/drivers/staging/sm750fb/Kconfig +++ b/drivers/staging/sm750fb/Kconfig @@ -9,7 +9,7 @@ config FB_SM750 select VIDEO_NOMODESET help Frame buffer driver for the Silicon Motion SM750 chip - with 2D accelearion and dual head support. + with 2D acceleration and dual head support. This driver is also available as a module. The module will be called sm750fb. 
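/*
 * Editor's note: the ioctl_cfg80211.c hunk earlier in this series rewrites
 * an open-coded ternary as min_t(uint, wps_ielen, MAX_WPS_IE_LEN). A
 * minimal sketch of why the typed variant is preferred when operand types
 * differ (cap_copy_len and MAX_COPY_LEN are hypothetical, not from this
 * driver):
 */
#include <linux/minmax.h>
#include <linux/types.h>

#define MAX_COPY_LEN 512	/* hypothetical buffer capacity */

static size_t cap_copy_len(size_t wanted)
{
	/*
	 * min_t() casts both operands to the named type before comparing,
	 * so mixing a size_t with an int constant does not trip the
	 * compile-time type-mismatch check that plain min() performs.
	 */
	return min_t(size_t, wanted, MAX_COPY_LEN);
}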
If you want to compile it as a module, say M diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c index fd456d1f7061193567f4534545fb301108ff9866..797ebe2a973a1c4224d30413db699a34747ec190 100644 --- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c +++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c @@ -87,21 +87,21 @@ static struct mmal_fmt formats[] = { .depth = 12, .mmal_component = COMP_CAMERA, .ybbp = 1, - .remove_padding = 1, + .remove_padding = true, }, { .fourcc = V4L2_PIX_FMT_YUYV, .mmal = MMAL_ENCODING_YUYV, .depth = 16, .mmal_component = COMP_CAMERA, .ybbp = 2, - .remove_padding = 0, + .remove_padding = false, }, { .fourcc = V4L2_PIX_FMT_RGB24, .mmal = MMAL_ENCODING_RGB24, .depth = 24, .mmal_component = COMP_CAMERA, .ybbp = 3, - .remove_padding = 0, + .remove_padding = false, }, { .fourcc = V4L2_PIX_FMT_JPEG, .flags = V4L2_FMT_FLAG_COMPRESSED, @@ -109,7 +109,7 @@ static struct mmal_fmt formats[] = { .depth = 8, .mmal_component = COMP_IMAGE_ENCODE, .ybbp = 0, - .remove_padding = 0, + .remove_padding = false, }, { .fourcc = V4L2_PIX_FMT_H264, .flags = V4L2_FMT_FLAG_COMPRESSED, @@ -117,7 +117,7 @@ static struct mmal_fmt formats[] = { .depth = 8, .mmal_component = COMP_VIDEO_ENCODE, .ybbp = 0, - .remove_padding = 0, + .remove_padding = false, }, { .fourcc = V4L2_PIX_FMT_MJPEG, .flags = V4L2_FMT_FLAG_COMPRESSED, @@ -125,63 +125,63 @@ static struct mmal_fmt formats[] = { .depth = 8, .mmal_component = COMP_VIDEO_ENCODE, .ybbp = 0, - .remove_padding = 0, + .remove_padding = false, }, { .fourcc = V4L2_PIX_FMT_YVYU, .mmal = MMAL_ENCODING_YVYU, .depth = 16, .mmal_component = COMP_CAMERA, .ybbp = 2, - .remove_padding = 0, + .remove_padding = false, }, { .fourcc = V4L2_PIX_FMT_VYUY, .mmal = MMAL_ENCODING_VYUY, .depth = 16, .mmal_component = COMP_CAMERA, .ybbp = 2, - .remove_padding = 0, + .remove_padding = false, }, { .fourcc = V4L2_PIX_FMT_UYVY, .mmal = MMAL_ENCODING_UYVY, .depth = 16, .mmal_component = COMP_CAMERA, .ybbp = 2, - .remove_padding = 0, + .remove_padding = false, }, { .fourcc = V4L2_PIX_FMT_NV12, .mmal = MMAL_ENCODING_NV12, .depth = 12, .mmal_component = COMP_CAMERA, .ybbp = 1, - .remove_padding = 1, + .remove_padding = true, }, { .fourcc = V4L2_PIX_FMT_BGR24, .mmal = MMAL_ENCODING_BGR24, .depth = 24, .mmal_component = COMP_CAMERA, .ybbp = 3, - .remove_padding = 0, + .remove_padding = false, }, { .fourcc = V4L2_PIX_FMT_YVU420, .mmal = MMAL_ENCODING_YV12, .depth = 12, .mmal_component = COMP_CAMERA, .ybbp = 1, - .remove_padding = 1, + .remove_padding = true, }, { .fourcc = V4L2_PIX_FMT_NV21, .mmal = MMAL_ENCODING_NV21, .depth = 12, .mmal_component = COMP_CAMERA, .ybbp = 1, - .remove_padding = 1, + .remove_padding = true, }, { .fourcc = V4L2_PIX_FMT_BGR32, .mmal = MMAL_ENCODING_BGRA, .depth = 32, .mmal_component = COMP_CAMERA, .ybbp = 4, - .remove_padding = 0, + .remove_padding = false, }, }; @@ -1147,7 +1147,7 @@ static int mmal_setup_components(struct bcm2835_mmal_dev *dev, struct vchiq_mmal_port *port = NULL, *camera_port = NULL; struct vchiq_mmal_component *encode_component = NULL; struct mmal_fmt *mfmt = get_format(f); - u32 remove_padding; + bool remove_padding; if (!mfmt) return -EINVAL; diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c index cb921c94996a193f4288680bd0167b48ceeae30c..294b184d4a495e774b29ddacce17a888828a049f 100644 --- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c 
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c @@ -863,9 +863,9 @@ static int port_info_get(struct vchiq_mmal_instance *instance, goto release_msg; if (rmsg->u.port_info_get_reply.port.is_enabled == 0) - port->enabled = 0; + port->enabled = false; else - port->enabled = 1; + port->enabled = true; /* copy the values out of the message */ port->handle = rmsg->u.port_info_get_reply.port_handle; @@ -1304,7 +1304,7 @@ static int port_disable(struct vchiq_mmal_instance *instance, if (!port->enabled) return 0; - port->enabled = 0; + port->enabled = false; ret = port_action_port(instance, port, MMAL_MSG_PORT_ACTION_TYPE_DISABLE); @@ -1359,7 +1359,7 @@ static int port_enable(struct vchiq_mmal_instance *instance, if (ret) goto done; - port->enabled = 1; + port->enabled = true; if (port->buffer_cb) { /* send buffer headers to videocore */ @@ -1531,7 +1531,7 @@ int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance, pr_err("failed disconnecting src port\n"); goto release_unlock; } - src->connected->enabled = 0; + src->connected->enabled = false; src->connected = NULL; } @@ -1648,7 +1648,7 @@ int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance, for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) { if (!instance->component[idx].in_use) { component = &instance->component[idx]; - component->in_use = 1; + component->in_use = true; break; } } @@ -1724,7 +1724,7 @@ release_component: destroy_component(instance, component); unlock: if (component) - component->in_use = 0; + component->in_use = false; mutex_unlock(&instance->vchiq_mutex); return ret; @@ -1747,7 +1747,7 @@ int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance, ret = destroy_component(instance, component); - component->in_use = 0; + component->in_use = false; mutex_unlock(&instance->vchiq_mutex); @@ -1799,7 +1799,7 @@ int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance, ret = disable_component(instance, component); if (ret == 0) - component->enabled = 0; + component->enabled = false; mutex_unlock(&instance->vchiq_mutex); diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h index 6006e29232b3a40e17ac16e37a878c86f6c92c31..09f030919d4e285e7e8d2485a2bef93400e846b4 100644 --- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h +++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h @@ -48,7 +48,7 @@ typedef void (*vchiq_mmal_buffer_cb)( int status, struct mmal_buffer *buffer); struct vchiq_mmal_port { - u32 enabled:1; + bool enabled; u32 handle; u32 type; /* port type, cached to use on port info set */ u32 index; /* port index, cached to use on port info set */ @@ -82,8 +82,8 @@ struct vchiq_mmal_port { }; struct vchiq_mmal_component { - u32 in_use:1; - u32 enabled:1; + bool in_use; + bool enabled; u32 handle; /* VideoCore handle for component */ u32 inputs; /* Number of input ports */ u32 outputs; /* Number of output ports */ diff --git a/drivers/staging/vme_user/vme.h b/drivers/staging/vme_user/vme.h index b204a9b4be1bdc2c68c8ae3cf26dce18bb3e0f18..98da8d039d60606a05652b068625dcb0259acd80 100644 --- a/drivers/staging/vme_user/vme.h +++ b/drivers/staging/vme_user/vme.h @@ -27,7 +27,6 @@ enum vme_resource_type { #define VME_A64_MAX 0x10000000000000000ULL #define VME_CRCSR_MAX 0x1000000ULL - /* VME Cycle Types */ #define VME_SCT 0x1 #define VME_BLT 0x2 @@ -185,6 +184,5 @@ int vme_bus_num(struct vme_dev *); int vme_register_driver(struct vme_driver *, unsigned int); void 
vme_unregister_driver(struct vme_driver *); - #endif /* _VME_H_ */ diff --git a/drivers/staging/vme_user/vme_fake.c b/drivers/staging/vme_user/vme_fake.c index dd646b0c531d411e5132a8fd50c3a9fae643ae33..f5d2c345978acfa73435d8cb723c193d75fb1df4 100644 --- a/drivers/staging/vme_user/vme_fake.c +++ b/drivers/staging/vme_user/vme_fake.c @@ -356,7 +356,6 @@ static int __fake_master_get(struct vme_master_resource *image, int *enabled, return 0; } - static int fake_master_get(struct vme_master_resource *image, int *enabled, unsigned long long *vme_base, unsigned long long *size, u32 *aspace, u32 *cycle, u32 *dwidth) @@ -373,7 +372,6 @@ static int fake_master_get(struct vme_master_resource *image, int *enabled, return retval; } - static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr, u32 aspace, u32 cycle) { @@ -1060,7 +1058,6 @@ static void fake_crcsr_exit(struct vme_bridge *fake_bridge) kfree(bridge->crcsr_kernel); } - static int __init fake_init(void) { int retval, i; @@ -1072,7 +1069,9 @@ static int __init fake_init(void) struct vme_lm_resource *lm; /* We need a fake parent device */ - vme_root = __root_device_register("vme", THIS_MODULE); + vme_root = root_device_register("vme"); + if (IS_ERR(vme_root)) + return PTR_ERR(vme_root); /* If we want to support more than one bridge at some point, we need to * dynamically allocate this so we get one per device. @@ -1238,7 +1237,6 @@ err_struct: } - static void __exit fake_exit(void) { struct list_head *pos = NULL; @@ -1294,7 +1292,6 @@ static void __exit fake_exit(void) root_device_unregister(vme_root); } - MODULE_PARM_DESC(geoid, "Set geographical addressing"); module_param(geoid, int, 0); diff --git a/drivers/staging/vme_user/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c index 020e0b3bce64b49d54d96250c31fc301085d1fe5..482049cfc664067bb6b7fc9a3204d0881ef2e833 100644 --- a/drivers/staging/vme_user/vme_tsi148.c +++ b/drivers/staging/vme_user/vme_tsi148.c @@ -34,7 +34,6 @@ static int tsi148_probe(struct pci_dev *, const struct pci_device_id *); static void tsi148_remove(struct pci_dev *); - /* Module parameter */ static bool err_chk; static int geoid; @@ -673,7 +672,6 @@ static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled, /* Need granularity before we set the size */ *size = (unsigned long long)((vme_bound - *vme_base) + granularity); - if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160) *cycle |= VME_2eSST160; if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267) @@ -1142,7 +1140,6 @@ static int __tsi148_master_get(struct vme_master_resource *image, int *enabled, return 0; } - static int tsi148_master_get(struct vme_master_resource *image, int *enabled, unsigned long long *vme_base, unsigned long long *size, u32 *aspace, u32 *cycle, u32 *dwidth) @@ -1244,7 +1241,6 @@ out: return retval; } - static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf, size_t count, loff_t offset) { @@ -1751,6 +1747,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list, return 0; err_dma: + list_del(&entry->list); err_dest: err_source: err_align: @@ -1999,7 +1996,6 @@ static int tsi148_lm_get(struct vme_lm_resource *lm, if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64) *aspace |= VME_A64; - if (lm_ctl & TSI148_LCSR_LMAT_SUPR) *cycle |= VME_SUPER; if (lm_ctl & TSI148_LCSR_LMAT_NPRIV) @@ -2550,7 +2546,6 @@ static void tsi148_remove(struct pci_dev *pdev) bridge = tsi148_bridge->driver_priv; - dev_dbg(&pdev->dev, "Driver is being unloaded.\n"); 
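/*
 * Editor's note: fake_init() above switches from __root_device_register()
 * to the public root_device_register() and finally checks the result.
 * Helpers of this kind report failure through ERR_PTR() rather than NULL,
 * so the caller must test with IS_ERR() and decode with PTR_ERR(). A
 * minimal sketch of the pattern (my_bus_init/my_root are hypothetical):
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>

static struct device *my_root;	/* hypothetical bus root */

static int __init my_bus_init(void)
{
	my_root = root_device_register("mybus");
	if (IS_ERR(my_root))
		return PTR_ERR(my_root);	/* propagate e.g. -ENOMEM */
	return 0;
}

static void __exit my_bus_exit(void)
{
	root_device_unregister(my_root);
}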
/* diff --git a/drivers/staging/vme_user/vme_tsi148.h b/drivers/staging/vme_user/vme_tsi148.h index 226fedc6f1671d930c14cc59105794329d26a089..b3cb4a089cc868e39500a932a4047be4652f0975 100644 --- a/drivers/staging/vme_user/vme_tsi148.h +++ b/drivers/staging/vme_user/vme_tsi148.h @@ -87,7 +87,6 @@ struct tsi148_dma_entry { * Control and Status Registers */ - /* * Command/Status Registers (CRG + $004) */ @@ -342,7 +341,6 @@ static const int TSI148_LCSR_VIACK[8] = { 0, TSI148_LCSR_VIACK1, #define TSI148_LCSR_IT7_ITOFL 0x3F4 #define TSI148_LCSR_IT7_ITAT 0x3F8 - #define TSI148_LCSR_IT0 0x300 #define TSI148_LCSR_IT1 0x320 #define TSI148_LCSR_IT2 0x340 @@ -464,7 +462,6 @@ static const int TSI148_LCSR_IT[8] = { TSI148_LCSR_IT0, TSI148_LCSR_IT1, #define TSI148_LCSR_DMA0 0x500 #define TSI148_LCSR_DMA1 0x580 - static const int TSI148_LCSR_DMA[TSI148_MAX_DMA] = { TSI148_LCSR_DMA0, TSI148_LCSR_DMA1 }; @@ -532,9 +529,6 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0, #define TSI148_CSRBSR 0xFF8 #define TSI148_CBAR 0xFFC - - - /* * TSI148 Register Bit Definitions */ @@ -828,7 +822,6 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0, #define TSI148_LCSR_VEAT_AM_M (0x3F<<8) /* Address Mode Mask */ #define TSI148_LCSR_VEAT_XAM_M (0xFF<<0) /* Master AMode Mask */ - /* * VMEbus PCI Error Diagnostics PCI/X Attributes Register CRG + $280 */ diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c index 5bdb5176772c015eb3a185f2337cb478cca61a3f..522d34ca9b0ff54921521ace43cfd0d76c280410 100644 --- a/drivers/staging/vt6655/rxtx.c +++ b/drivers/staging/vt6655/rxtx.c @@ -54,17 +54,17 @@ */ #define CRITICAL_PACKET_LEN 256 -static const unsigned short wTimeStampOff[2][MAX_RATE] = { +static const unsigned short time_stamp_off[2][MAX_RATE] = { {384, 288, 226, 209, 54, 43, 37, 31, 28, 25, 24, 23}, /* Long Preamble */ {384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23}, /* Short Preamble */ }; -static const unsigned short wFB_Opt0[2][5] = { +static const unsigned short fb_opt0[2][5] = { {RATE_12M, RATE_18M, RATE_24M, RATE_36M, RATE_48M}, /* fallback_rate0 */ {RATE_12M, RATE_12M, RATE_18M, RATE_24M, RATE_36M}, /* fallback_rate1 */ }; -static const unsigned short wFB_Opt1[2][5] = { +static const unsigned short fb_opt1[2][5] = { {RATE_12M, RATE_18M, RATE_24M, RATE_24M, RATE_36M}, /* fallback_rate0 */ {RATE_6M, RATE_6M, RATE_12M, RATE_12M, RATE_18M}, /* fallback_rate1 */ }; @@ -142,7 +142,7 @@ s_uFillDataHead( static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate) { - return cpu_to_le16(wTimeStampOff[priv->preamble_type % 2] + return cpu_to_le16(time_stamp_off[priv->preamble_type % 2] [rate % MAX_RATE]); } @@ -310,9 +310,9 @@ s_uGetDataDuration( wRate -= RATE_18M; if (byFBOption == AUTO_FB_0) - wRate = wFB_Opt0[FB_RATE0][wRate]; + wRate = fb_opt0[FB_RATE0][wRate]; else - wRate = wFB_Opt1[FB_RATE0][wRate]; + wRate = fb_opt1[FB_RATE0][wRate]; uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, len, wRate, bNeedAck); @@ -365,52 +365,52 @@ s_uGetRTSCTSDuration( case RTSDUR_BA_F0: /* RTSDuration_ba_f0 */ uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopCCKBasicRate); if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) - uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate - RATE_18M], bNeedAck); + uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt0[FB_RATE0][wRate - RATE_18M], bNeedAck); else if 
((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) - uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate - RATE_18M], bNeedAck); + uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt1[FB_RATE0][wRate - RATE_18M], bNeedAck); break; case RTSDUR_AA_F0: /* RTSDuration_aa_f0 */ uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopOFDMBasicRate); if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) - uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate - RATE_18M], bNeedAck); + uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt0[FB_RATE0][wRate - RATE_18M], bNeedAck); else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) - uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate - RATE_18M], bNeedAck); + uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt1[FB_RATE0][wRate - RATE_18M], bNeedAck); break; case RTSDUR_BA_F1: /* RTSDuration_ba_f1 */ uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopCCKBasicRate); if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) - uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate - RATE_18M], bNeedAck); + uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt0[FB_RATE1][wRate - RATE_18M], bNeedAck); else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) - uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate - RATE_18M], bNeedAck); + uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt1[FB_RATE1][wRate - RATE_18M], bNeedAck); break; case RTSDUR_AA_F1: /* RTSDuration_aa_f1 */ uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopOFDMBasicRate); if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) - uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate - RATE_18M], bNeedAck); + uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt0[FB_RATE1][wRate - RATE_18M], bNeedAck); else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) - uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate - RATE_18M], bNeedAck); + uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt1[FB_RATE1][wRate - RATE_18M], bNeedAck); break; case CTSDUR_BA_F0: /* CTSDuration_ba_f0 */ if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) - uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate - RATE_18M], bNeedAck); + uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt0[FB_RATE0][wRate - RATE_18M], bNeedAck); else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) - uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, 
cbFrameLength, wFB_Opt1[FB_RATE0][wRate - RATE_18M], bNeedAck); + uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt1[FB_RATE0][wRate - RATE_18M], bNeedAck); break; case CTSDUR_BA_F1: /* CTSDuration_ba_f1 */ if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) - uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate - RATE_18M], bNeedAck); + uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt0[FB_RATE1][wRate - RATE_18M], bNeedAck); else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) - uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate - RATE_18M], bNeedAck); + uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, fb_opt1[FB_RATE1][wRate - RATE_18M], bNeedAck); break; diff --git a/drivers/staging/wlan-ng/TODO b/drivers/staging/wlan-ng/TODO new file mode 100644 index 0000000000000000000000000000000000000000..ab9d5d145b3bc13e2cf068397b0ee4d3b445e34d --- /dev/null +++ b/drivers/staging/wlan-ng/TODO @@ -0,0 +1,16 @@ +To-do list: + +* Correct the coding style according to Linux guidelines; please read the document + at https://www.kernel.org/doc/html/latest/process/coding-style.html. +* Remove unnecessary debugging/printing macros; for those that are still needed + use the proper kernel API (pr_debug(), dev_dbg(), netdev_dbg()). +* Remove dead code such as unused functions, variables, fields, etc. +* Use in-kernel API and remove unnecessary wrappers where possible. +* Fix bugs due to code that sleeps in atomic context. +* Remove the HAL layer and migrate its functionality into the relevant parts of + the driver. +* Switch to use LIB80211. +* Switch to use MAC80211. +* Switch to use CFG80211. +* Improve the error handling of various functions, particularly those that use + existing kernel APIs. diff --git a/drivers/staging/wlan-ng/p80211mgmt.h b/drivers/staging/wlan-ng/p80211mgmt.h index 1ef30d3f3159e7da3456cb00cd337065101e5123..fc23fae5651b9ecef550cc89a65bb65fab1aca3e 100644 --- a/drivers/staging/wlan-ng/p80211mgmt.h +++ b/drivers/staging/wlan-ng/p80211mgmt.h @@ -217,305 +217,4 @@ #define WLAN_SET_MGMT_CAP_INFO_PBCC(n) ((n) << 6) #define WLAN_SET_MGMT_CAP_INFO_AGILITY(n) ((n) << 7) -/*-- Information Element Types --------------------*/ -/* prototype structure, all IEs start with these members */ - -struct wlan_ie { - u8 eid; - u8 len; -} __packed; - -/*-- Service Set Identity (SSID) -----------------*/ -struct wlan_ie_ssid { - u8 eid; - u8 len; - u8 ssid[1]; /* may be zero, ptrs may overlap */ -} __packed; - -/*-- Supported Rates -----------------------------*/ -struct wlan_ie_supp_rates { - u8 eid; - u8 len; - u8 rates[1]; /* had better be at LEAST one!

*/ -} __packed; - -/*-- FH Parameter Set ----------------------------*/ -struct wlan_ie_fh_parms { - u8 eid; - u8 len; - u16 dwell; - u8 hopset; - u8 hoppattern; - u8 hopindex; -} __packed; - -/*-- DS Parameter Set ----------------------------*/ -struct wlan_ie_ds_parms { - u8 eid; - u8 len; - u8 curr_ch; -} __packed; - -/*-- CF Parameter Set ----------------------------*/ - -struct wlan_ie_cf_parms { - u8 eid; - u8 len; - u8 cfp_cnt; - u8 cfp_period; - u16 cfp_maxdur; - u16 cfp_durremaining; -} __packed; - -/*-- TIM ------------------------------------------*/ -struct wlan_ie_tim { - u8 eid; - u8 len; - u8 dtim_cnt; - u8 dtim_period; - u8 bitmap_ctl; - u8 virt_bm[1]; -} __packed; - -/*-- IBSS Parameter Set ---------------------------*/ -struct wlan_ie_ibss_parms { - u8 eid; - u8 len; - u16 atim_win; -} __packed; - -/*-- Challenge Text ------------------------------*/ -struct wlan_ie_challenge { - u8 eid; - u8 len; - u8 challenge[1]; -} __packed; - -/*-------------------------------------------------*/ -/* Frame Types */ - -/* prototype structure, all mgmt frame types will start with these members */ -struct wlan_fr_mgmt { - u16 type; - u16 len; /* DOES NOT include CRC !!!! */ - u8 *buf; - struct p80211_hdr *hdr; - /* used for target specific data, skb in Linux */ - void *priv; - /*-- fixed fields -----------*/ - /*-- info elements ----------*/ -}; - -/*-- Beacon ---------------------------------------*/ -struct wlan_fr_beacon { - u16 type; - u16 len; - u8 *buf; - struct p80211_hdr *hdr; - /* used for target specific data, skb in Linux */ - void *priv; - /*-- fixed fields -----------*/ - u64 *ts; - u16 *bcn_int; - u16 *cap_info; - /*-- info elements ----------*/ - struct wlan_ie_ssid *ssid; - struct wlan_ie_supp_rates *supp_rates; - struct wlan_ie_fh_parms *fh_parms; - struct wlan_ie_ds_parms *ds_parms; - struct wlan_ie_cf_parms *cf_parms; - struct wlan_ie_ibss_parms *ibss_parms; - struct wlan_ie_tim *tim; - -}; - -/*-- IBSS ATIM ------------------------------------*/ -struct wlan_fr_ibssatim { - u16 type; - u16 len; - u8 *buf; - struct p80211_hdr *hdr; - /* used for target specific data, skb in Linux */ - void *priv; - - /*-- fixed fields -----------*/ - /*-- info elements ----------*/ - - /* this frame type has a null body */ - -}; - -/*-- Disassociation -------------------------------*/ -struct wlan_fr_disassoc { - u16 type; - u16 len; - u8 *buf; - struct p80211_hdr *hdr; - /* used for target specific data, skb in Linux */ - void *priv; - /*-- fixed fields -----------*/ - u16 *reason; - - /*-- info elements ----------*/ - -}; - -/*-- Association Request --------------------------*/ -struct wlan_fr_assocreq { - u16 type; - u16 len; - u8 *buf; - struct p80211_hdr *hdr; - /* used for target specific data, skb in Linux */ - void *priv; - /*-- fixed fields -----------*/ - u16 *cap_info; - u16 *listen_int; - /*-- info elements ----------*/ - struct wlan_ie_ssid *ssid; - struct wlan_ie_supp_rates *supp_rates; - -}; - -/*-- Association Response -------------------------*/ -struct wlan_fr_assocresp { - u16 type; - u16 len; - u8 *buf; - struct p80211_hdr *hdr; - /* used for target specific data, skb in Linux */ - void *priv; - /*-- fixed fields -----------*/ - u16 *cap_info; - u16 *status; - u16 *aid; - /*-- info elements ----------*/ - struct wlan_ie_supp_rates *supp_rates; - -}; - -/*-- Reassociation Request ------------------------*/ -struct wlan_fr_reassocreq { - u16 type; - u16 len; - u8 *buf; - struct p80211_hdr *hdr; - /* used for target specific data, skb in Linux */ - void *priv; - /*-- 
fixed fields -----------*/ - u16 *cap_info; - u16 *listen_int; - u8 *curr_ap; - /*-- info elements ----------*/ - struct wlan_ie_ssid *ssid; - struct wlan_ie_supp_rates *supp_rates; - -}; - -/*-- Reassociation Response -----------------------*/ -struct wlan_fr_reassocresp { - u16 type; - u16 len; - u8 *buf; - struct p80211_hdr *hdr; - /* used for target specific data, skb in Linux */ - void *priv; - /*-- fixed fields -----------*/ - u16 *cap_info; - u16 *status; - u16 *aid; - /*-- info elements ----------*/ - struct wlan_ie_supp_rates *supp_rates; - -}; - -/*-- Probe Request --------------------------------*/ -struct wlan_fr_probereq { - u16 type; - u16 len; - u8 *buf; - struct p80211_hdr *hdr; - /* used for target specific data, skb in Linux */ - void *priv; - /*-- fixed fields -----------*/ - /*-- info elements ----------*/ - struct wlan_ie_ssid *ssid; - struct wlan_ie_supp_rates *supp_rates; - -}; - -/*-- Probe Response -------------------------------*/ -struct wlan_fr_proberesp { - u16 type; - u16 len; - u8 *buf; - struct p80211_hdr *hdr; - /* used for target specific data, skb in Linux */ - void *priv; - /*-- fixed fields -----------*/ - u64 *ts; - u16 *bcn_int; - u16 *cap_info; - /*-- info elements ----------*/ - struct wlan_ie_ssid *ssid; - struct wlan_ie_supp_rates *supp_rates; - struct wlan_ie_fh_parms *fh_parms; - struct wlan_ie_ds_parms *ds_parms; - struct wlan_ie_cf_parms *cf_parms; - struct wlan_ie_ibss_parms *ibss_parms; -}; - -/*-- Authentication -------------------------------*/ -struct wlan_fr_authen { - u16 type; - u16 len; - u8 *buf; - struct p80211_hdr *hdr; - /* used for target specific data, skb in Linux */ - void *priv; - /*-- fixed fields -----------*/ - u16 *auth_alg; - u16 *auth_seq; - u16 *status; - /*-- info elements ----------*/ - struct wlan_ie_challenge *challenge; - -}; - -/*-- Deauthenication -----------------------------*/ -struct wlan_fr_deauthen { - u16 type; - u16 len; - u8 *buf; - struct p80211_hdr *hdr; - /* used for target specific data, skb in Linux */ - void *priv; - /*-- fixed fields -----------*/ - u16 *reason; - - /*-- info elements ----------*/ - -}; - -void wlan_mgmt_encode_beacon(struct wlan_fr_beacon *f); -void wlan_mgmt_decode_beacon(struct wlan_fr_beacon *f); -void wlan_mgmt_encode_disassoc(struct wlan_fr_disassoc *f); -void wlan_mgmt_decode_disassoc(struct wlan_fr_disassoc *f); -void wlan_mgmt_encode_assocreq(struct wlan_fr_assocreq *f); -void wlan_mgmt_decode_assocreq(struct wlan_fr_assocreq *f); -void wlan_mgmt_encode_assocresp(struct wlan_fr_assocresp *f); -void wlan_mgmt_decode_assocresp(struct wlan_fr_assocresp *f); -void wlan_mgmt_encode_reassocreq(struct wlan_fr_reassocreq *f); -void wlan_mgmt_decode_reassocreq(struct wlan_fr_reassocreq *f); -void wlan_mgmt_encode_reassocresp(struct wlan_fr_reassocresp *f); -void wlan_mgmt_decode_reassocresp(struct wlan_fr_reassocresp *f); -void wlan_mgmt_encode_probereq(struct wlan_fr_probereq *f); -void wlan_mgmt_decode_probereq(struct wlan_fr_probereq *f); -void wlan_mgmt_encode_proberesp(struct wlan_fr_proberesp *f); -void wlan_mgmt_decode_proberesp(struct wlan_fr_proberesp *f); -void wlan_mgmt_encode_authen(struct wlan_fr_authen *f); -void wlan_mgmt_decode_authen(struct wlan_fr_authen *f); -void wlan_mgmt_encode_deauthen(struct wlan_fr_deauthen *f); -void wlan_mgmt_decode_deauthen(struct wlan_fr_deauthen *f); - #endif /* _P80211MGMT_H */ diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c index 
e04fc666d218ea582297ecc1ff618e952b20a296..6bef419e8ad0c8a05c1a546693d9ed179fb85ea3 100644 --- a/drivers/staging/wlan-ng/p80211netdev.c +++ b/drivers/staging/wlan-ng/p80211netdev.c @@ -881,55 +881,42 @@ static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc) wlandev->rx.mgmt++; switch (fstype) { case WLAN_FSTYPE_ASSOCREQ: - /* printk("assocreq"); */ wlandev->rx.assocreq++; break; case WLAN_FSTYPE_ASSOCRESP: - /* printk("assocresp"); */ wlandev->rx.assocresp++; break; case WLAN_FSTYPE_REASSOCREQ: - /* printk("reassocreq"); */ wlandev->rx.reassocreq++; break; case WLAN_FSTYPE_REASSOCRESP: - /* printk("reassocresp"); */ wlandev->rx.reassocresp++; break; case WLAN_FSTYPE_PROBEREQ: - /* printk("probereq"); */ wlandev->rx.probereq++; break; case WLAN_FSTYPE_PROBERESP: - /* printk("proberesp"); */ wlandev->rx.proberesp++; break; case WLAN_FSTYPE_BEACON: - /* printk("beacon"); */ wlandev->rx.beacon++; break; case WLAN_FSTYPE_ATIM: - /* printk("atim"); */ wlandev->rx.atim++; break; case WLAN_FSTYPE_DISASSOC: - /* printk("disassoc"); */ wlandev->rx.disassoc++; break; case WLAN_FSTYPE_AUTHEN: - /* printk("authen"); */ wlandev->rx.authen++; break; case WLAN_FSTYPE_DEAUTHEN: - /* printk("deauthen"); */ wlandev->rx.deauthen++; break; default: - /* printk("unknown"); */ wlandev->rx.mgmt_unknown++; break; } - /* printk("\n"); */ drop = 2; break; @@ -943,35 +930,27 @@ static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc) wlandev->rx.ctl++; switch (fstype) { case WLAN_FSTYPE_PSPOLL: - /* printk("pspoll"); */ wlandev->rx.pspoll++; break; case WLAN_FSTYPE_RTS: - /* printk("rts"); */ wlandev->rx.rts++; break; case WLAN_FSTYPE_CTS: - /* printk("cts"); */ wlandev->rx.cts++; break; case WLAN_FSTYPE_ACK: - /* printk("ack"); */ wlandev->rx.ack++; break; case WLAN_FSTYPE_CFEND: - /* printk("cfend"); */ wlandev->rx.cfend++; break; case WLAN_FSTYPE_CFENDCFACK: - /* printk("cfendcfack"); */ wlandev->rx.cfendcfack++; break; default: - /* printk("unknown"); */ wlandev->rx.ctl_unknown++; break; } - /* printk("\n"); */ drop = 2; break; @@ -1007,7 +986,6 @@ static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc) wlandev->rx.cfack_cfpoll++; break; default: - /* printk("unknown"); */ wlandev->rx.data_unknown++; break; } diff --git a/drivers/staging/wlan-ng/p80211types.h b/drivers/staging/wlan-ng/p80211types.h index 6486612a8f31b39914b6952860f4d8e4c4c0a31d..b2ed969604133e59424d01b957ca4b120585b97a 100644 --- a/drivers/staging/wlan-ng/p80211types.h +++ b/drivers/staging/wlan-ng/p80211types.h @@ -231,12 +231,6 @@ struct p80211pstr32 { u8 data[MAXLEN_PSTR32]; } __packed; -/* MAC address array */ -struct p80211macarray { - u32 cnt; - u8 data[1][MAXLEN_PSTR6]; -} __packed; - /* prototype template */ struct p80211item { u32 did; diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index f2919319ad383092fe37192a5df8a8fc562f1b37..ff49c8f3fe241a2c84963ccd1947e7be43eeca25 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -1018,6 +1018,13 @@ static int iscsi_target_handle_csg_one(struct iscsit_conn *conn, struct iscsi_lo return 0; } +/* + * RETURN VALUE: + * + * 1 = Login successful + * -1 = Login failed + * 0 = More PDU exchanges required + */ static int iscsi_target_do_login(struct iscsit_conn *conn, struct iscsi_login *login) { int pdu_count = 0; @@ -1363,12 +1370,13 @@ int iscsi_target_start_negotiation( ret = -1; if (ret < 0) { - cancel_delayed_work_sync(&conn->login_work); 
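/*
 * Editor's note: the iscsi_target_start_negotiation() hunk in progress
 * here moves cancel_delayed_work_sync() out of the ret < 0 branch and into
 * the ret != 0 branch, so the delayed login work is reliably stopped on
 * every failure path before iscsi_target_nego_release() tears down the
 * state that work dereferences. A minimal sketch of the ordering rule,
 * reusing this driver's own symbols purely for illustration
 * (login_teardown is hypothetical):
 */
#include <linux/workqueue.h>

static void login_teardown(struct iscsit_conn *conn)
{
	/* stop, and wait out, any queued or running delayed work first... */
	cancel_delayed_work_sync(&conn->login_work);
	/* ...only then free the negotiation state the work may touch */
	iscsi_target_nego_release(conn);
}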
iscsi_target_restore_sock_callbacks(conn); iscsi_remove_failed_auth_entry(conn); } - if (ret != 0) + if (ret != 0) { + cancel_delayed_work_sync(&conn->login_work); iscsi_target_nego_release(conn); + } return ret; } diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 416514c5c7acd5acf92914d7d9348d988e0ebb0c..611b0424e305aaebf0438c47f6f596a355f49a51 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -12,6 +12,7 @@ * ****************************************************************************/ +#include #include #include #include @@ -547,6 +548,7 @@ DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity); DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment); DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data); DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc); #define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \ static ssize_t _name##_store(struct config_item *item, const char *page,\ @@ -577,7 +579,7 @@ static ssize_t _name##_store(struct config_item *item, const char *page, \ bool flag; \ int ret; \ \ - ret = strtobool(page, &flag); \ + ret = kstrtobool(page, &flag); \ if (ret < 0) \ return ret; \ da->_name = flag; \ @@ -637,7 +639,7 @@ static ssize_t emulate_model_alias_store(struct config_item *item, return -EINVAL; } - ret = strtobool(page, &flag); + ret = kstrtobool(page, &flag); if (ret < 0) return ret; @@ -659,7 +661,7 @@ static ssize_t emulate_write_cache_store(struct config_item *item, bool flag; int ret; - ret = strtobool(page, &flag); + ret = kstrtobool(page, &flag); if (ret < 0) return ret; @@ -711,7 +713,7 @@ static ssize_t emulate_tas_store(struct config_item *item, bool flag; int ret; - ret = strtobool(page, &flag); + ret = kstrtobool(page, &flag); if (ret < 0) return ret; @@ -736,7 +738,7 @@ static ssize_t emulate_tpu_store(struct config_item *item, bool flag; int ret; - ret = strtobool(page, &flag); + ret = kstrtobool(page, &flag); if (ret < 0) return ret; @@ -766,7 +768,7 @@ static ssize_t emulate_tpws_store(struct config_item *item, bool flag; int ret; - ret = strtobool(page, &flag); + ret = kstrtobool(page, &flag); if (ret < 0) return ret; @@ -865,7 +867,7 @@ static ssize_t pi_prot_format_store(struct config_item *item, bool flag; int ret; - ret = strtobool(page, &flag); + ret = kstrtobool(page, &flag); if (ret < 0) return ret; @@ -902,7 +904,7 @@ static ssize_t pi_prot_verify_store(struct config_item *item, bool flag; int ret; - ret = strtobool(page, &flag); + ret = kstrtobool(page, &flag); if (ret < 0) return ret; @@ -931,7 +933,7 @@ static ssize_t force_pr_aptpl_store(struct config_item *item, bool flag; int ret; - ret = strtobool(page, &flag); + ret = kstrtobool(page, &flag); if (ret < 0) return ret; if (da->da_dev->export_count) { @@ -953,7 +955,7 @@ static ssize_t emulate_rest_reord_store(struct config_item *item, bool flag; int ret; - ret = strtobool(page, &flag); + ret = kstrtobool(page, &flag); if (ret < 0) return ret; @@ -976,7 +978,7 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item, bool flag; int ret; - ret = strtobool(page, &flag); + ret = kstrtobool(page, &flag); if (ret < 0) return ret; @@ -1100,8 +1102,6 @@ static ssize_t block_size_store(struct config_item *item, } da->block_size = val; - if (da->max_bytes_per_io) - da->hw_max_sectors = da->max_bytes_per_io / val; pr_debug("dev[%p]: SE Device block_size changed to %u\n", da->da_dev, val); @@ -1125,7 +1125,7 @@ static ssize_t alua_support_store(struct config_item *item, bool flag, 
oldflag; int ret; - ret = strtobool(page, &flag); + ret = kstrtobool(page, &flag); if (ret < 0) return ret; @@ -1164,7 +1164,7 @@ static ssize_t pgr_support_store(struct config_item *item, bool flag, oldflag; int ret; - ret = strtobool(page, &flag); + ret = kstrtobool(page, &flag); if (ret < 0) return ret; @@ -1186,6 +1186,23 @@ static ssize_t pgr_support_store(struct config_item *item, return count; } +static ssize_t emulate_rsoc_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + bool flag; + int ret; + + ret = kstrtobool(page, &flag); + if (ret < 0) + return ret; + + da->emulate_rsoc = flag; + pr_debug("dev[%p]: SE Device REPORT_SUPPORTED_OPERATION_CODES_EMULATION flag: %d\n", + da->da_dev, flag); + return count; +} + CONFIGFS_ATTR(, emulate_model_alias); CONFIGFS_ATTR(, emulate_dpo); CONFIGFS_ATTR(, emulate_fua_write); @@ -1198,6 +1215,7 @@ CONFIGFS_ATTR(, emulate_tpws); CONFIGFS_ATTR(, emulate_caw); CONFIGFS_ATTR(, emulate_3pc); CONFIGFS_ATTR(, emulate_pr); +CONFIGFS_ATTR(, emulate_rsoc); CONFIGFS_ATTR(, pi_prot_type); CONFIGFS_ATTR_RO(, hw_pi_prot_type); CONFIGFS_ATTR(, pi_prot_format); @@ -1261,6 +1279,7 @@ struct configfs_attribute *sbc_attrib_attrs[] = { &attr_max_write_same_len, &attr_alua_support, &attr_pgr_support, + &attr_emulate_rsoc, NULL, }; EXPORT_SYMBOL(sbc_attrib_attrs); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index cb4f7cc02f8fa131414f3000ea523950f59eea50..f6e58410ec3f9ced3e2844b9035fa8d13d71225c 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -804,6 +804,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; dev->dev_attrib.emulate_pr = DA_EMULATE_PR; + dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC; dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT; dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL; diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 95a88f6224cd969f5ab3ae4d719669b9c5e4e3f4..67b18a67317a053720efc28a325d036be1d54be5 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -11,6 +11,7 @@ * ****************************************************************************/ +#include #include #include #include @@ -829,7 +830,7 @@ static ssize_t target_fabric_tpg_base_enable_store(struct config_item *item, int ret; bool op; - ret = strtobool(page, &op); + ret = kstrtobool(page, &op); if (ret) return ret; diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 7e81a53dbf3cacb2e48dd23f477f531b0306637b..fd584111da45c05645b935678481fdcc706e6030 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -193,7 +193,6 @@ static int fd_configure_device(struct se_device *dev) } dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; - dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES; dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size; dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index d9266cf558dcb54cc4a02c912ba8f3de92e1717f..cc838ffd129472ef14b8fa2e4f2a77129e974d3b 100644 --- a/drivers/target/target_core_iblock.c +++ 
b/drivers/target/target_core_iblock.c @@ -124,7 +124,9 @@ static int iblock_configure_device(struct se_device *dev) q = bdev_get_queue(bd); dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd); - dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); + dev->dev_attrib.hw_max_sectors = mult_frac(queue_max_hw_sectors(q), + SECTOR_SIZE, + dev->dev_attrib.hw_block_size); dev->dev_attrib.hw_queue_depth = q->nr_requests; /* diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 1e3216de1e04d6b03084769152c98c69f73cdef6..7536ca7976068a7837c3c5f55f8817909ffa761b 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -270,14 +270,6 @@ static inline unsigned long long transport_lba_64(unsigned char *cdb) return get_unaligned_be64(&cdb[2]); } -/* - * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs - */ -static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) -{ - return get_unaligned_be64(&cdb[12]); -} - static sense_reason_t sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops) { @@ -454,12 +446,22 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes sense_reason_t ret = TCM_NO_SENSE; int i; - /* - * Handle early failure in transport_generic_request_failure(), - * which will not have taken ->caw_sem yet.. - */ - if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg)) - return TCM_NO_SENSE; + if (!success) { + /* + * Handle early failure in transport_generic_request_failure(), + * which will not have taken ->caw_sem yet.. + */ + if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) + return TCM_NO_SENSE; + + /* + * The command has been stopped or aborted so + * we don't have to perform the write operation. + */ + WARN_ON(!(cmd->transport_state & + (CMD_T_ABORTED | CMD_T_STOP))); + goto out; + } /* * Handle special case for zero-length COMPARE_AND_WRITE */ diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 7cca3b15472b3a676046cf7cef541a1933c9ba10..fcc7b10a7ae3547e1ebc3a61cc735abae94c8ba9 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -227,7 +227,7 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) struct t10_alua_tg_pt_gp *tg_pt_gp; unsigned char *prod = &dev->t10_wwn.model[0]; u32 prod_len; - u32 unit_serial_len, off = 0; + u32 off = 0; u16 len = 0, id_len; off = 4; @@ -272,13 +272,9 @@ check_t10_vend_desc: prod_len += strlen(prod); prod_len++; /* For : */ - if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) { - unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]); - unit_serial_len++; /* For NULL Terminator */ - + if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) id_len += sprintf(&buf[off+12], "%s:%s", prod, &dev->t10_wwn.unit_serial[0]); - } buf[off] = 0x2; /* ASCII */ buf[off+1] = 0x1; /* T10 Vendor ID */ buf[off+2] = 0x0; @@ -519,6 +515,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) struct se_device *dev = cmd->se_dev; u32 mtl = 0; int have_tp = 0, opt, min; + u32 io_max_blocks; /* * Following spc3r22 section 6.5.3 Block Limits VPD page, when @@ -557,7 +554,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) / dev->dev_attrib.block_size; } - put_unaligned_be32(min_not_zero(mtl, dev->dev_attrib.hw_max_sectors), &buf[8]); + io_max_blocks = mult_frac(dev->dev_attrib.hw_max_sectors, + dev->dev_attrib.hw_block_size, + dev->dev_attrib.block_size); + put_unaligned_be32(min_not_zero(mtl, 
io_max_blocks), &buf[8]); /* * Set OPTIMAL TRANSFER LENGTH @@ -1314,6 +1314,922 @@ spc_emulate_testunitready(struct se_cmd *cmd) return 0; } +static void set_dpofua_usage_bits(u8 *usage_bits, struct se_device *dev) +{ + if (!target_check_fua(dev)) + usage_bits[1] &= ~0x18; + else + usage_bits[1] |= 0x18; +} + +static void set_dpofua_usage_bits32(u8 *usage_bits, struct se_device *dev) +{ + if (!target_check_fua(dev)) + usage_bits[10] &= ~0x18; + else + usage_bits[10] |= 0x18; +} + +static struct target_opcode_descriptor tcm_opcode_read6 = { + .support = SCSI_SUPPORT_FULL, + .opcode = READ_6, + .cdb_size = 6, + .usage_bits = {READ_6, 0x1f, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_read10 = { + .support = SCSI_SUPPORT_FULL, + .opcode = READ_10, + .cdb_size = 10, + .usage_bits = {READ_10, 0xf8, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_read12 = { + .support = SCSI_SUPPORT_FULL, + .opcode = READ_12, + .cdb_size = 12, + .usage_bits = {READ_12, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_read16 = { + .support = SCSI_SUPPORT_FULL, + .opcode = READ_16, + .cdb_size = 16, + .usage_bits = {READ_16, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_write6 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_6, + .cdb_size = 6, + .usage_bits = {WRITE_6, 0x1f, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_write10 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_10, + .cdb_size = 10, + .usage_bits = {WRITE_10, 0xf8, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_write_verify10 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_VERIFY, + .cdb_size = 10, + .usage_bits = {WRITE_VERIFY, 0xf0, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_write12 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_12, + .cdb_size = 12, + .usage_bits = {WRITE_12, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_write16 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_16, + .cdb_size = 16, + .usage_bits = {WRITE_16, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_write_verify16 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_VERIFY_16, + .cdb_size = 16, + .usage_bits = {WRITE_VERIFY_16, 0xf0, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static bool tcm_is_ws_enabled(struct se_cmd *cmd) +{ + 
struct sbc_ops *ops = cmd->protocol_data; + struct se_device *dev = cmd->se_dev; + + return (dev->dev_attrib.emulate_tpws && !!ops->execute_unmap) || + !!ops->execute_write_same; +} + +static struct target_opcode_descriptor tcm_opcode_write_same32 = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = VARIABLE_LENGTH_CMD, + .service_action = WRITE_SAME_32, + .cdb_size = 32, + .usage_bits = {VARIABLE_LENGTH_CMD, SCSI_CONTROL_MASK, 0x00, 0x00, + 0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0x18, + 0x00, WRITE_SAME_32, 0xe8, 0x00, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff}, + .enabled = tcm_is_ws_enabled, + .update_usage_bits = set_dpofua_usage_bits32, +}; + +static bool tcm_is_caw_enabled(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + return dev->dev_attrib.emulate_caw; +} + +static struct target_opcode_descriptor tcm_opcode_compare_write = { + .support = SCSI_SUPPORT_FULL, + .opcode = COMPARE_AND_WRITE, + .cdb_size = 16, + .usage_bits = {COMPARE_AND_WRITE, 0x18, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x00, 0x00, + 0x00, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + .enabled = tcm_is_caw_enabled, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_read_capacity = { + .support = SCSI_SUPPORT_FULL, + .opcode = READ_CAPACITY, + .cdb_size = 10, + .usage_bits = {READ_CAPACITY, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, 0x00, + 0x01, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_read_capacity16 = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = SERVICE_ACTION_IN_16, + .service_action = SAI_READ_CAPACITY_16, + .cdb_size = 16, + .usage_bits = {SERVICE_ACTION_IN_16, SAI_READ_CAPACITY_16, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, +}; + +static bool tcm_is_rep_ref_enabled(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + spin_lock(&dev->t10_alua.lba_map_lock); + if (list_empty(&dev->t10_alua.lba_map_list)) { + spin_unlock(&dev->t10_alua.lba_map_lock); + return false; + } + spin_unlock(&dev->t10_alua.lba_map_lock); + return true; + +} + +static struct target_opcode_descriptor tcm_opcode_read_report_refferals = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = SERVICE_ACTION_IN_16, + .service_action = SAI_REPORT_REFERRALS, + .cdb_size = 16, + .usage_bits = {SERVICE_ACTION_IN_16, SAI_REPORT_REFERRALS, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, + .enabled = tcm_is_rep_ref_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_sync_cache = { + .support = SCSI_SUPPORT_FULL, + .opcode = SYNCHRONIZE_CACHE, + .cdb_size = 10, + .usage_bits = {SYNCHRONIZE_CACHE, 0x02, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_sync_cache16 = { + .support = SCSI_SUPPORT_FULL, + .opcode = SYNCHRONIZE_CACHE_16, + .cdb_size = 16, + .usage_bits = {SYNCHRONIZE_CACHE_16, 0x02, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, +}; + +static bool tcm_is_unmap_enabled(struct se_cmd *cmd) +{ + struct sbc_ops *ops = cmd->protocol_data; + struct se_device *dev = cmd->se_dev; + + return ops->execute_unmap && dev->dev_attrib.emulate_tpu; +} + +static struct 
target_opcode_descriptor tcm_opcode_unmap = { + .support = SCSI_SUPPORT_FULL, + .opcode = UNMAP, + .cdb_size = 10, + .usage_bits = {UNMAP, 0x00, 0x00, 0x00, + 0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_unmap_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_write_same = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_SAME, + .cdb_size = 10, + .usage_bits = {WRITE_SAME, 0xe8, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_ws_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_write_same16 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_SAME_16, + .cdb_size = 16, + .usage_bits = {WRITE_SAME_16, 0xe8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + .enabled = tcm_is_ws_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_verify = { + .support = SCSI_SUPPORT_FULL, + .opcode = VERIFY, + .cdb_size = 10, + .usage_bits = {VERIFY, 0x00, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_verify16 = { + .support = SCSI_SUPPORT_FULL, + .opcode = VERIFY_16, + .cdb_size = 16, + .usage_bits = {VERIFY_16, 0x00, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_start_stop = { + .support = SCSI_SUPPORT_FULL, + .opcode = START_STOP, + .cdb_size = 6, + .usage_bits = {START_STOP, 0x01, 0x00, 0x00, + 0x01, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_mode_select = { + .support = SCSI_SUPPORT_FULL, + .opcode = MODE_SELECT, + .cdb_size = 6, + .usage_bits = {MODE_SELECT, 0x10, 0x00, 0x00, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_mode_select10 = { + .support = SCSI_SUPPORT_FULL, + .opcode = MODE_SELECT_10, + .cdb_size = 10, + .usage_bits = {MODE_SELECT_10, 0x10, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_mode_sense = { + .support = SCSI_SUPPORT_FULL, + .opcode = MODE_SENSE, + .cdb_size = 6, + .usage_bits = {MODE_SENSE, 0x08, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_mode_sense10 = { + .support = SCSI_SUPPORT_FULL, + .opcode = MODE_SENSE_10, + .cdb_size = 10, + .usage_bits = {MODE_SENSE_10, 0x18, 0xff, 0xff, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_pri_read_keys = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_IN, + .service_action = PRI_READ_KEYS, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_KEYS, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_IN, + .service_action = PRI_READ_RESERVATION, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_RESERVATION, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static bool tcm_is_pr_enabled(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + return dev->dev_attrib.emulate_pr; +} + +static struct target_opcode_descriptor tcm_opcode_pri_read_caps = { + .support 
= SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_IN, + .service_action = PRI_REPORT_CAPABILITIES, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_IN, PRI_REPORT_CAPABILITIES, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pri_read_full_status = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_IN, + .service_action = PRI_READ_FULL_STATUS, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_FULL_STATUS, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_register = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_REGISTER, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_reserve = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_RESERVE, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RESERVE, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_release = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_RELEASE, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RELEASE, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_clear = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_CLEAR, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_CLEAR, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_preempt = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_PREEMPT, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_PREEMPT_AND_ABORT, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT_AND_ABORT, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_REGISTER_AND_IGNORE_EXISTING_KEY, + .cdb_size = 10, + .usage_bits = { + PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_IGNORE_EXISTING_KEY, + 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_register_move = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + 
.service_action = PRO_REGISTER_AND_MOVE, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_MOVE, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static bool tcm_is_scsi2_reservations_enabled(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + return dev->dev_attrib.emulate_pr; +} + +static struct target_opcode_descriptor tcm_opcode_release = { + .support = SCSI_SUPPORT_FULL, + .opcode = RELEASE, + .cdb_size = 6, + .usage_bits = {RELEASE, 0x00, 0x00, 0x00, + 0x00, SCSI_CONTROL_MASK}, + .enabled = tcm_is_scsi2_reservations_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_release10 = { + .support = SCSI_SUPPORT_FULL, + .opcode = RELEASE_10, + .cdb_size = 10, + .usage_bits = {RELEASE_10, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_scsi2_reservations_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_reserve = { + .support = SCSI_SUPPORT_FULL, + .opcode = RESERVE, + .cdb_size = 6, + .usage_bits = {RESERVE, 0x00, 0x00, 0x00, + 0x00, SCSI_CONTROL_MASK}, + .enabled = tcm_is_scsi2_reservations_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_reserve10 = { + .support = SCSI_SUPPORT_FULL, + .opcode = RESERVE_10, + .cdb_size = 10, + .usage_bits = {RESERVE_10, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_scsi2_reservations_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_request_sense = { + .support = SCSI_SUPPORT_FULL, + .opcode = REQUEST_SENSE, + .cdb_size = 6, + .usage_bits = {REQUEST_SENSE, 0x00, 0x00, 0x00, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_inquiry = { + .support = SCSI_SUPPORT_FULL, + .opcode = INQUIRY, + .cdb_size = 6, + .usage_bits = {INQUIRY, 0x01, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static bool tcm_is_3pc_enabled(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + return dev->dev_attrib.emulate_3pc; +} + +static struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = EXTENDED_COPY, + .cdb_size = 16, + .usage_bits = {EXTENDED_COPY, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, + .enabled = tcm_is_3pc_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = RECEIVE_COPY_RESULTS, + .service_action = RCR_SA_OPERATING_PARAMETERS, + .cdb_size = 16, + .usage_bits = {RECEIVE_COPY_RESULTS, RCR_SA_OPERATING_PARAMETERS, + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, + .enabled = tcm_is_3pc_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_report_luns = { + .support = SCSI_SUPPORT_FULL, + .opcode = REPORT_LUNS, + .cdb_size = 12, + .usage_bits = {REPORT_LUNS, 0x00, 0xff, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_test_unit_ready = { + .support = SCSI_SUPPORT_FULL, + .opcode = TEST_UNIT_READY, + .cdb_size = 6, + .usage_bits = {TEST_UNIT_READY, 0x00, 0x00, 0x00, + 0x00, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_report_target_pgs = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = MAINTENANCE_IN, + .service_action = 
MI_REPORT_TARGET_PGS, + .cdb_size = 12, + .usage_bits = {MAINTENANCE_IN, 0xE0 | MI_REPORT_TARGET_PGS, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, +}; + + +static bool spc_rsoc_enabled(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + return dev->dev_attrib.emulate_rsoc; +} + +static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = MAINTENANCE_IN, + .service_action = MI_REPORT_SUPPORTED_OPERATION_CODES, + .cdb_size = 12, + .usage_bits = {MAINTENANCE_IN, MI_REPORT_SUPPORTED_OPERATION_CODES, + 0x87, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, + .enabled = spc_rsoc_enabled, +}; + +static bool tcm_is_set_tpg_enabled(struct se_cmd *cmd) +{ + struct t10_alua_tg_pt_gp *l_tg_pt_gp; + struct se_lun *l_lun = cmd->se_lun; + + rcu_read_lock(); + l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp); + if (!l_tg_pt_gp) { + rcu_read_unlock(); + return false; + } + if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) { + rcu_read_unlock(); + return false; + } + rcu_read_unlock(); + + return true; +} + +static struct target_opcode_descriptor tcm_opcode_set_tpg = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = MAINTENANCE_OUT, + .service_action = MO_SET_TARGET_PGS, + .cdb_size = 12, + .usage_bits = {MAINTENANCE_OUT, MO_SET_TARGET_PGS, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, + .enabled = tcm_is_set_tpg_enabled, +}; + +static struct target_opcode_descriptor *tcm_supported_opcodes[] = { + &tcm_opcode_read6, + &tcm_opcode_read10, + &tcm_opcode_read12, + &tcm_opcode_read16, + &tcm_opcode_write6, + &tcm_opcode_write10, + &tcm_opcode_write_verify10, + &tcm_opcode_write12, + &tcm_opcode_write16, + &tcm_opcode_write_verify16, + &tcm_opcode_write_same32, + &tcm_opcode_compare_write, + &tcm_opcode_read_capacity, + &tcm_opcode_read_capacity16, + &tcm_opcode_read_report_refferals, + &tcm_opcode_sync_cache, + &tcm_opcode_sync_cache16, + &tcm_opcode_unmap, + &tcm_opcode_write_same, + &tcm_opcode_write_same16, + &tcm_opcode_verify, + &tcm_opcode_verify16, + &tcm_opcode_start_stop, + &tcm_opcode_mode_select, + &tcm_opcode_mode_select10, + &tcm_opcode_mode_sense, + &tcm_opcode_mode_sense10, + &tcm_opcode_pri_read_keys, + &tcm_opcode_pri_read_resrv, + &tcm_opcode_pri_read_caps, + &tcm_opcode_pri_read_full_status, + &tcm_opcode_pro_register, + &tcm_opcode_pro_reserve, + &tcm_opcode_pro_release, + &tcm_opcode_pro_clear, + &tcm_opcode_pro_preempt, + &tcm_opcode_pro_preempt_abort, + &tcm_opcode_pro_reg_ign_exist, + &tcm_opcode_pro_register_move, + &tcm_opcode_release, + &tcm_opcode_release10, + &tcm_opcode_reserve, + &tcm_opcode_reserve10, + &tcm_opcode_request_sense, + &tcm_opcode_inquiry, + &tcm_opcode_extended_copy_lid1, + &tcm_opcode_rcv_copy_res_op_params, + &tcm_opcode_report_luns, + &tcm_opcode_test_unit_ready, + &tcm_opcode_report_target_pgs, + &tcm_opcode_report_supp_opcodes, + &tcm_opcode_set_tpg, +}; + +static int +spc_rsoc_encode_command_timeouts_descriptor(unsigned char *buf, u8 ctdp, + struct target_opcode_descriptor *descr) +{ + if (!ctdp) + return 0; + + put_unaligned_be16(0xa, buf); + buf[3] = descr->specific_timeout; + put_unaligned_be32(descr->nominal_timeout, &buf[4]); + put_unaligned_be32(descr->recommended_timeout, &buf[8]); + + return 12; +} + +static int +spc_rsoc_encode_command_descriptor(unsigned char *buf, u8 ctdp, + struct target_opcode_descriptor *descr) +{ + int 
td_size = 0; + + buf[0] = descr->opcode; + + put_unaligned_be16(descr->service_action, &buf[2]); + + buf[5] = (ctdp << 1) | descr->serv_action_valid; + put_unaligned_be16(descr->cdb_size, &buf[6]); + + td_size = spc_rsoc_encode_command_timeouts_descriptor(&buf[8], ctdp, + descr); + + return 8 + td_size; +} + +static int +spc_rsoc_encode_one_command_descriptor(unsigned char *buf, u8 ctdp, + struct target_opcode_descriptor *descr, + struct se_device *dev) +{ + int td_size = 0; + + if (!descr) { + buf[1] = (ctdp << 7) | SCSI_SUPPORT_NOT_SUPPORTED; + return 2; + } + + buf[1] = (ctdp << 7) | SCSI_SUPPORT_FULL; + put_unaligned_be16(descr->cdb_size, &buf[2]); + memcpy(&buf[4], descr->usage_bits, descr->cdb_size); + if (descr->update_usage_bits) + descr->update_usage_bits(&buf[4], dev); + + td_size = spc_rsoc_encode_command_timeouts_descriptor( + &buf[4 + descr->cdb_size], ctdp, descr); + + return 4 + descr->cdb_size + td_size; +} + +static sense_reason_t +spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode) +{ + struct target_opcode_descriptor *descr; + struct se_session *sess = cmd->se_sess; + unsigned char *cdb = cmd->t_task_cdb; + u8 opts = cdb[2] & 0x3; + u8 requested_opcode; + u16 requested_sa; + int i; + + requested_opcode = cdb[3]; + requested_sa = ((u16)cdb[4]) << 8 | cdb[5]; + *opcode = NULL; + + if (opts > 3) { + pr_debug("TARGET_CORE[%s]: Invalid REPORT SUPPORTED OPERATION CODES" + " with unsupported REPORTING OPTIONS %#x for 0x%08llx from %s\n", + cmd->se_tfo->fabric_name, opts, + cmd->se_lun->unpacked_lun, + sess->se_node_acl->initiatorname); + return TCM_INVALID_CDB_FIELD; + } + + for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) { + descr = tcm_supported_opcodes[i]; + if (descr->opcode != requested_opcode) + continue; + + switch (opts) { + case 0x1: + /* + * If the REQUESTED OPERATION CODE field specifies an + * operation code for which the device server implements + * service actions, then the device server shall + * terminate the command with CHECK CONDITION status, + * with the sense key set to ILLEGAL REQUEST, and the + * additional sense code set to INVALID FIELD IN CDB + */ + if (descr->serv_action_valid) + return TCM_INVALID_CDB_FIELD; + + if (!descr->enabled || descr->enabled(cmd)) + *opcode = descr; + break; + case 0x2: + /* + * If the REQUESTED OPERATION CODE field specifies an + * operation code for which the device server does not + * implement service actions, then the device server + * shall terminate the command with CHECK CONDITION + * status, with the sense key set to ILLEGAL REQUEST, + * and the additional sense code set to INVALID FIELD IN CDB. + */ + if (descr->serv_action_valid && + descr->service_action == requested_sa) { + if (!descr->enabled || descr->enabled(cmd)) + *opcode = descr; + } else if (!descr->serv_action_valid) + return TCM_INVALID_CDB_FIELD; + break; + case 0x3: + /* + * The command support data for the operation code and + * service action as specified in the REQUESTED OPERATION + * CODE field and REQUESTED SERVICE ACTION field shall + * be returned in the one_command parameter data format. 
+ */ + if (descr->service_action == requested_sa) + if (!descr->enabled || descr->enabled(cmd)) + *opcode = descr; + break; + } + } + + return 0; +} + +static sense_reason_t +spc_emulate_report_supp_op_codes(struct se_cmd *cmd) +{ + int descr_num = ARRAY_SIZE(tcm_supported_opcodes); + struct target_opcode_descriptor *descr = NULL; + unsigned char *cdb = cmd->t_task_cdb; + u8 rctd = (cdb[2] >> 7) & 0x1; + unsigned char *buf = NULL; + int response_length = 0; + u8 opts = cdb[2] & 0x3; + unsigned char *rbuf; + sense_reason_t ret = 0; + int i; + + if (!cmd->se_dev->dev_attrib.emulate_rsoc) + return TCM_UNSUPPORTED_SCSI_OPCODE; + + rbuf = transport_kmap_data_sg(cmd); + if (cmd->data_length && !rbuf) { + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + goto out; + } + + if (opts == 0) + response_length = 4 + (8 + rctd * 12) * descr_num; + else { + ret = spc_rsoc_get_descr(cmd, &descr); + if (ret) + goto out; + + if (descr) + response_length = 4 + descr->cdb_size + rctd * 12; + else + response_length = 2; + } + + buf = kzalloc(response_length, GFP_KERNEL); + if (!buf) { + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + goto out; + } + response_length = 0; + + if (opts == 0) { + response_length += 4; + + for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) { + descr = tcm_supported_opcodes[i]; + if (descr->enabled && !descr->enabled(cmd)) + continue; + + response_length += spc_rsoc_encode_command_descriptor( + &buf[response_length], rctd, descr); + } + put_unaligned_be32(response_length - 3, buf); + } else { + response_length = spc_rsoc_encode_one_command_descriptor( + &buf[response_length], rctd, descr, + cmd->se_dev); + } + + memcpy(rbuf, buf, min_t(u32, response_length, cmd->data_length)); +out: + kfree(buf); + transport_kunmap_data_sg(cmd); + + if (!ret) + target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, response_length); + return ret; +} + sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) { @@ -1439,6 +2355,10 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) cmd->execute_cmd = target_emulate_report_target_port_groups; } + if ((cdb[1] & 0x1f) == + MI_REPORT_SUPPORTED_OPERATION_CODES) + cmd->execute_cmd = + spc_emulate_report_supp_op_codes; *size = get_unaligned_be32(&cdb[6]); } else { /* diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 8713cda0c2fb5652ca4a48719432c579c6243122..49eaee022ef1d5488ee148d956c4787445adf751 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -582,11 +582,11 @@ static int target_xcopy_read_source( struct xcopy_op *xop, struct se_device *src_dev, sector_t src_lba, - u32 src_sectors) + u32 src_bytes) { struct xcopy_pt_cmd xpt_cmd; struct se_cmd *se_cmd = &xpt_cmd.se_cmd; - u32 length = (src_sectors * src_dev->dev_attrib.block_size); + u32 transfer_length_block = src_bytes / src_dev->dev_attrib.block_size; int rc; unsigned char cdb[16]; bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP); @@ -597,11 +597,11 @@ static int target_xcopy_read_source( memset(&cdb[0], 0, 16); cdb[0] = READ_16; put_unaligned_be64(src_lba, &cdb[2]); - put_unaligned_be32(src_sectors, &cdb[10]); - pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n", - (unsigned long long)src_lba, src_sectors, length); + put_unaligned_be32(transfer_length_block, &cdb[10]); + pr_debug("XCOPY: Built READ_16: LBA: %llu Blocks: %u Length: %u\n", + (unsigned long long)src_lba, transfer_length_block, src_bytes); - __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length, + 
__target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, src_bytes, DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0); rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0], @@ -627,11 +627,11 @@ static int target_xcopy_write_destination( struct xcopy_op *xop, struct se_device *dst_dev, sector_t dst_lba, - u32 dst_sectors) + u32 dst_bytes) { struct xcopy_pt_cmd xpt_cmd; struct se_cmd *se_cmd = &xpt_cmd.se_cmd; - u32 length = (dst_sectors * dst_dev->dev_attrib.block_size); + u32 transfer_length_block = dst_bytes / dst_dev->dev_attrib.block_size; int rc; unsigned char cdb[16]; bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP); @@ -642,11 +642,11 @@ static int target_xcopy_write_destination( memset(&cdb[0], 0, 16); cdb[0] = WRITE_16; put_unaligned_be64(dst_lba, &cdb[2]); - put_unaligned_be32(dst_sectors, &cdb[10]); - pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n", - (unsigned long long)dst_lba, dst_sectors, length); + put_unaligned_be32(transfer_length_block, &cdb[10]); + pr_debug("XCOPY: Built WRITE_16: LBA: %llu Blocks: %u Length: %u\n", + (unsigned long long)dst_lba, transfer_length_block, dst_bytes); - __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length, + __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, dst_bytes, DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0); rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0], @@ -670,9 +670,10 @@ static void target_xcopy_do_work(struct work_struct *work) struct se_cmd *ec_cmd = xop->xop_se_cmd; struct se_device *src_dev, *dst_dev; sector_t src_lba, dst_lba, end_lba; - unsigned int max_sectors; + unsigned long long max_bytes, max_bytes_src, max_bytes_dst, max_blocks; int rc = 0; - unsigned short nolb, max_nolb, copied_nolb = 0; + unsigned short nolb; + unsigned int copied_bytes = 0; sense_reason_t sense_rc; sense_rc = target_parse_xcopy_cmd(xop); @@ -691,23 +692,31 @@ static void target_xcopy_do_work(struct work_struct *work) nolb = xop->nolb; end_lba = src_lba + nolb; /* - * Break up XCOPY I/O into hw_max_sectors sized I/O based on the - * smallest max_sectors between src_dev + dev_dev, or + * Break up XCOPY I/O into hw_max_sectors * hw_block_size sized + * I/O based on the smallest max_bytes between src_dev + dst_dev */ - max_sectors = min(src_dev->dev_attrib.hw_max_sectors, - dst_dev->dev_attrib.hw_max_sectors); - max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS); + max_bytes_src = (unsigned long long) src_dev->dev_attrib.hw_max_sectors * + src_dev->dev_attrib.hw_block_size; + max_bytes_dst = (unsigned long long) dst_dev->dev_attrib.hw_max_sectors * + dst_dev->dev_attrib.hw_block_size; - max_nolb = min_t(u16, max_sectors, ((u16)(~0U))); + max_bytes = min_t(u64, max_bytes_src, max_bytes_dst); + max_bytes = min_t(u64, max_bytes, XCOPY_MAX_BYTES); - pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n", - nolb, max_nolb, (unsigned long long)end_lba); - pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n", + /* + * Using shift instead of the division because otherwise GCC + * generates __udivdi3 that is missing on i386 + */ + max_blocks = max_bytes >> ilog2(src_dev->dev_attrib.block_size); + + pr_debug("%s: nolb: %u, max_blocks: %llu end_lba: %llu\n", __func__, + nolb, max_blocks, (unsigned long long)end_lba); + pr_debug("%s: Starting src_lba: %llu, dst_lba: %llu\n", __func__, (unsigned long long)src_lba, (unsigned long long)dst_lba); - while (src_lba < end_lba) { - unsigned short cur_nolb = min(nolb, max_nolb); - u32 cur_bytes = 
cur_nolb * src_dev->dev_attrib.block_size; + while (nolb) { + u32 cur_bytes = min_t(u64, max_bytes, nolb * src_dev->dev_attrib.block_size); + unsigned short cur_nolb = cur_bytes / src_dev->dev_attrib.block_size; if (cur_bytes != xop->xop_data_bytes) { /* @@ -724,43 +733,43 @@ static void target_xcopy_do_work(struct work_struct *work) xop->xop_data_bytes = cur_bytes; } - pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu," - " cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb); + pr_debug("%s: Calling read src_dev: %p src_lba: %llu, cur_nolb: %hu\n", + __func__, src_dev, (unsigned long long)src_lba, cur_nolb); - rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb); + rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_bytes); if (rc < 0) goto out; - src_lba += cur_nolb; - pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n", + src_lba += cur_bytes / src_dev->dev_attrib.block_size; + pr_debug("%s: Incremented READ src_lba to %llu\n", __func__, (unsigned long long)src_lba); - pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu," - " cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb); + pr_debug("%s: Calling write dst_dev: %p dst_lba: %llu, cur_nolb: %u\n", + __func__, dst_dev, (unsigned long long)dst_lba, cur_nolb); rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev, - dst_lba, cur_nolb); + dst_lba, cur_bytes); if (rc < 0) goto out; - dst_lba += cur_nolb; - pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n", + dst_lba += cur_bytes / dst_dev->dev_attrib.block_size; + pr_debug("%s: Incremented WRITE dst_lba to %llu\n", __func__, (unsigned long long)dst_lba); - copied_nolb += cur_nolb; - nolb -= cur_nolb; + copied_bytes += cur_bytes; + nolb -= cur_bytes / src_dev->dev_attrib.block_size; } xcopy_pt_undepend_remotedev(xop); target_free_sgl(xop->xop_data_sg, xop->xop_data_nents); kfree(xop); - pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n", + pr_debug("%s: Final src_lba: %llu, dst_lba: %llu\n", __func__, (unsigned long long)src_lba, (unsigned long long)dst_lba); - pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n", - copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size); + pr_debug("%s: Blocks copied: %u, Bytes Copied: %u\n", __func__, + copied_bytes / dst_dev->dev_attrib.block_size, copied_bytes); - pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n"); + pr_debug("%s: Setting X-COPY GOOD status -> sending response\n", __func__); target_complete_cmd(ec_cmd, SAM_STAT_GOOD); return; @@ -776,8 +785,8 @@ out: err_free: kfree(xop); - pr_warn_ratelimited("target_xcopy_do_work: rc: %d, sense: %u, XCOPY operation failed\n", - rc, sense_rc); + pr_warn_ratelimited("%s: rc: %d, sense: %u, XCOPY operation failed\n", + __func__, rc, sense_rc); target_complete_cmd_with_sense(ec_cmd, SAM_STAT_CHECK_CONDITION, sense_rc); } @@ -1009,8 +1018,14 @@ sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd) { unsigned char *cdb = &se_cmd->t_task_cdb[0]; int sa = (cdb[1] & 0x1f), list_id = cdb[2]; + struct se_device *dev = se_cmd->se_dev; sense_reason_t rc = TCM_NO_SENSE; + if (!dev->dev_attrib.emulate_3pc) { + pr_debug("Third-party copy operations explicitly disabled\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:" " 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length); diff --git a/drivers/target/target_core_xcopy.h 
b/drivers/target/target_core_xcopy.h index e5f20005179a86d1414ddbd87939f142b959400c..0aad7dc658955e1e0960263d24e7ee8795601b72 100644 --- a/drivers/target/target_core_xcopy.h +++ b/drivers/target/target_core_xcopy.h @@ -5,7 +5,7 @@ #define XCOPY_TARGET_DESC_LEN 32 #define XCOPY_SEGMENT_DESC_LEN 28 #define XCOPY_NAA_IEEE_REGEX_LEN 16 -#define XCOPY_MAX_SECTORS 4096 +#define XCOPY_MAX_BYTES 16777216 /* 16 MB */ /* * SPC4r37 6.4.6.1 diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c index 2d1aeaba38a889b0a62025ca7b089aa430f40cbf..d5d4eae16771b358346c55b9c17d13f052cbaf0a 100644 --- a/drivers/thermal/gov_power_allocator.c +++ b/drivers/thermal/gov_power_allocator.c @@ -8,7 +8,6 @@ #define pr_fmt(fmt) "Power allocator: " fmt -#include #include #include diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c index e2c2673025a7ac45754574d2d824fa84fed1c37d..d247b48696cb63dc53373ddc27c397b67777d8ff 100644 --- a/drivers/thermal/imx8mm_thermal.c +++ b/drivers/thermal/imx8mm_thermal.c @@ -10,21 +10,40 @@ #include #include #include +#include <linux/nvmem-consumer.h> #include #include #include +#include <linux/slab.h> #include #include "thermal_core.h" +#include "thermal_hwmon.h" #define TER 0x0 /* TMU enable */ #define TPS 0x4 #define TRITSR 0x20 /* TMU immediate temp */ +/* TMU calibration data registers */ +#define TASR 0x28 +#define TASR_BUF_SLOPE_MASK GENMASK(19, 16) +#define TASR_BUF_VREF_MASK GENMASK(4, 0) /* TMU_V1 */ +#define TASR_BUF_VERF_SEL_MASK GENMASK(1, 0) /* TMU_V2 */ +#define TCALIV(n) (0x30 + ((n) * 4)) +#define TCALIV_EN BIT(31) +#define TCALIV_HR_MASK GENMASK(23, 16) /* TMU_V1 */ +#define TCALIV_RT_MASK GENMASK(7, 0) /* TMU_V1 */ +#define TCALIV_SNSR105C_MASK GENMASK(27, 16) /* TMU_V2 */ +#define TCALIV_SNSR25C_MASK GENMASK(11, 0) /* TMU_V2 */ +#define TRIM 0x3c +#define TRIM_BJT_CUR_MASK GENMASK(23, 20) +#define TRIM_BGR_MASK GENMASK(31, 28) +#define TRIM_VLSB_MASK GENMASK(15, 12) +#define TRIM_EN_CH BIT(7) #define TER_ADC_PD BIT(30) #define TER_EN BIT(31) -#define TRITSR_TEMP0_VAL_MASK 0xff -#define TRITSR_TEMP1_VAL_MASK 0xff0000 +#define TRITSR_TEMP0_VAL_MASK GENMASK(7, 0) +#define TRITSR_TEMP1_VAL_MASK GENMASK(23, 16) #define PROBE_SEL_ALL GENMASK(31, 30) @@ -32,6 +51,25 @@ #define SIGN_BIT BIT(7) #define TEMP_VAL_MASK GENMASK(6, 0) +/* TMU OCOTP calibration data bitfields */ +#define ANA0_EN BIT(25) +#define ANA0_BUF_VREF_MASK GENMASK(24, 20) +#define ANA0_BUF_SLOPE_MASK GENMASK(19, 16) +#define ANA0_HR_MASK GENMASK(15, 8) +#define ANA0_RT_MASK GENMASK(7, 0) +#define TRIM2_VLSB_MASK GENMASK(23, 20) +#define TRIM2_BGR_MASK GENMASK(19, 16) +#define TRIM2_BJT_CUR_MASK GENMASK(15, 12) +#define TRIM2_BUF_SLOP_SEL_MASK GENMASK(11, 8) +#define TRIM2_BUF_VERF_SEL_MASK GENMASK(7, 6) +#define TRIM3_TCA25_0_LSB_MASK GENMASK(31, 28) +#define TRIM3_TCA40_0_MASK GENMASK(27, 16) +#define TRIM4_TCA40_1_MASK GENMASK(31, 20) +#define TRIM4_TCA105_0_MASK GENMASK(19, 8) +#define TRIM4_TCA25_0_MSB_MASK GENMASK(7, 0) +#define TRIM5_TCA105_1_MASK GENMASK(23, 12) +#define TRIM5_TCA25_1_MASK GENMASK(11, 0) + #define VER1_TEMP_LOW_LIMIT 10000 #define VER2_TEMP_LOW_LIMIT -40000 #define VER2_TEMP_HIGH_LIMIT 125000 @@ -65,8 +103,14 @@ static int imx8mm_tmu_get_temp(void *data, int *temp) u32 val; val = readl_relaxed(tmu->base + TRITSR) & TRITSR_TEMP0_VAL_MASK; + + /* + * Do not validate against the V bit (bit 31) due to errata + * ERR051272: TMU: Bit 31 of registers TMU_TSCR/TMU_TRITSR/TMU_TRATSR invalid + */ + *temp = val * 1000; - if (*temp < VER1_TEMP_LOW_LIMIT) + if (*temp < 
VER1_TEMP_LOW_LIMIT || *temp > VER2_TEMP_HIGH_LIMIT) return -EAGAIN; return 0; @@ -128,6 +172,129 @@ static void imx8mm_tmu_probe_sel_all(struct imx8mm_tmu *tmu) writel_relaxed(val, tmu->base + TPS); } +static int imx8mm_tmu_probe_set_calib_v1(struct platform_device *pdev, + struct imx8mm_tmu *tmu) +{ + struct device *dev = &pdev->dev; + u32 ana0; + int ret; + + ret = nvmem_cell_read_u32(&pdev->dev, "calib", &ana0); + if (ret) { + dev_warn(dev, "Failed to read OCOTP nvmem cell (%d).\n", ret); + return ret; + } + + writel(FIELD_PREP(TASR_BUF_VREF_MASK, + FIELD_GET(ANA0_BUF_VREF_MASK, ana0)) | + FIELD_PREP(TASR_BUF_SLOPE_MASK, + FIELD_GET(ANA0_BUF_SLOPE_MASK, ana0)), + tmu->base + TASR); + + writel(FIELD_PREP(TCALIV_RT_MASK, FIELD_GET(ANA0_RT_MASK, ana0)) | + FIELD_PREP(TCALIV_HR_MASK, FIELD_GET(ANA0_HR_MASK, ana0)) | + ((ana0 & ANA0_EN) ? TCALIV_EN : 0), + tmu->base + TCALIV(0)); + + return 0; +} + +static int imx8mm_tmu_probe_set_calib_v2(struct platform_device *pdev, + struct imx8mm_tmu *tmu) +{ + struct device *dev = &pdev->dev; + struct nvmem_cell *cell; + u32 trim[4] = { 0 }; + size_t len; + void *buf; + + cell = nvmem_cell_get(dev, "calib"); + if (IS_ERR(cell)) + return PTR_ERR(cell); + + buf = nvmem_cell_read(cell, &len); + nvmem_cell_put(cell); + + if (IS_ERR(buf)) + return PTR_ERR(buf); + + memcpy(trim, buf, min(len, sizeof(trim))); + kfree(buf); + + if (len != 16) { + dev_err(dev, + "OCOTP nvmem cell length is %zu, must be 16.\n", len); + return -EINVAL; + } + + /* Blank sample hardware */ + if (!trim[0] && !trim[1] && !trim[2] && !trim[3]) { + /* Use a default 25C binary codes */ + writel(FIELD_PREP(TCALIV_SNSR25C_MASK, 0x63c), + tmu->base + TCALIV(0)); + writel(FIELD_PREP(TCALIV_SNSR25C_MASK, 0x63c), + tmu->base + TCALIV(1)); + return 0; + } + + writel(FIELD_PREP(TASR_BUF_VERF_SEL_MASK, + FIELD_GET(TRIM2_BUF_VERF_SEL_MASK, trim[0])) | + FIELD_PREP(TASR_BUF_SLOPE_MASK, + FIELD_GET(TRIM2_BUF_SLOP_SEL_MASK, trim[0])), + tmu->base + TASR); + + writel(FIELD_PREP(TRIM_BJT_CUR_MASK, + FIELD_GET(TRIM2_BJT_CUR_MASK, trim[0])) | + FIELD_PREP(TRIM_BGR_MASK, FIELD_GET(TRIM2_BGR_MASK, trim[0])) | + FIELD_PREP(TRIM_VLSB_MASK, FIELD_GET(TRIM2_VLSB_MASK, trim[0])) | + TRIM_EN_CH, + tmu->base + TRIM); + + writel(FIELD_PREP(TCALIV_SNSR25C_MASK, + FIELD_GET(TRIM3_TCA25_0_LSB_MASK, trim[1]) | + (FIELD_GET(TRIM4_TCA25_0_MSB_MASK, trim[2]) << 4)) | + FIELD_PREP(TCALIV_SNSR105C_MASK, + FIELD_GET(TRIM4_TCA105_0_MASK, trim[2])), + tmu->base + TCALIV(0)); + + writel(FIELD_PREP(TCALIV_SNSR25C_MASK, + FIELD_GET(TRIM5_TCA25_1_MASK, trim[3])) | + FIELD_PREP(TCALIV_SNSR105C_MASK, + FIELD_GET(TRIM5_TCA105_1_MASK, trim[3])), + tmu->base + TCALIV(1)); + + writel(FIELD_PREP(TCALIV_SNSR25C_MASK, + FIELD_GET(TRIM3_TCA40_0_MASK, trim[1])) | + FIELD_PREP(TCALIV_SNSR105C_MASK, + FIELD_GET(TRIM4_TCA40_1_MASK, trim[2])), + tmu->base + TCALIV(2)); + + return 0; +} + +static int imx8mm_tmu_probe_set_calib(struct platform_device *pdev, + struct imx8mm_tmu *tmu) +{ + struct device *dev = &pdev->dev; + + /* + * Lack of calibration data OCOTP reference is not considered + * fatal to retain compatibility with old DTs. It is however + * strongly recommended to update such old DTs to get correct + * temperature compensation values for each SoC. + */ + if (!of_find_property(pdev->dev.of_node, "nvmem-cells", NULL)) { + dev_warn(dev, + "No OCOTP nvmem reference found, SoC-specific calibration not loaded. 
Please update your DT.\n"); + return 0; + } + + if (tmu->socdata->version == TMU_VER1) + return imx8mm_tmu_probe_set_calib_v1(pdev, tmu); + + return imx8mm_tmu_probe_set_calib_v2(pdev, tmu); +} + static int imx8mm_tmu_probe(struct platform_device *pdev) { const struct thermal_soc_data *data; @@ -176,10 +343,17 @@ static int imx8mm_tmu_probe(struct platform_device *pdev) goto disable_clk; } tmu->sensors[i].hw_id = i; + + if (devm_thermal_add_hwmon_sysfs(tmu->sensors[i].tzd)) + dev_warn(&pdev->dev, "failed to add hwmon sysfs attributes\n"); } platform_set_drvdata(pdev, tmu); + ret = imx8mm_tmu_probe_set_calib(pdev, tmu); + if (ret) + goto disable_clk; + /* enable all the probes for V2 TMU */ if (tmu->socdata->version == TMU_VER2) imx8mm_tmu_probe_sel_all(tmu); diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c index 5d92b70a5d53a3f1be716f6adcb16aff3323a618..4df925e3a80bd72951844e697319df9bb03d0512 100644 --- a/drivers/thermal/imx_sc_thermal.c +++ b/drivers/thermal/imx_sc_thermal.c @@ -127,11 +127,6 @@ static int imx_sc_thermal_probe(struct platform_device *pdev) return 0; } -static int imx_sc_thermal_remove(struct platform_device *pdev) -{ - return 0; -} - static int imx_sc_sensors[] = { IMX_SC_R_SYSTEM, IMX_SC_R_PMIC_0, -1 }; static const struct of_device_id imx_sc_thermal_table[] = { @@ -142,7 +137,6 @@ MODULE_DEVICE_TABLE(of, imx_sc_thermal_table); static struct platform_driver imx_sc_thermal_driver = { .probe = imx_sc_thermal_probe, - .remove = imx_sc_thermal_remove, .driver = { .name = "imx-sc-thermal", .of_match_table = imx_sc_thermal_table, diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c index 4bb7fddaa14378273dd34b3f6e1c12ba32a02a07..2e22bb82b7389e740c70dc66de24351372861bf0 100644 --- a/drivers/thermal/intel/therm_throt.c +++ b/drivers/thermal/intel/therm_throt.c @@ -194,7 +194,7 @@ static const struct attribute_group thermal_attr_group = { #define THERM_STATUS_PROCHOT_LOG BIT(1) #define THERM_STATUS_CLEAR_CORE_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11) | BIT(13) | BIT(15)) -#define THERM_STATUS_CLEAR_PKG_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11) | BIT(26)) +#define THERM_STATUS_CLEAR_PKG_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11)) /* * Clear the bits in package thermal status register for bit = 1 @@ -211,6 +211,9 @@ void thermal_clear_package_intr_status(int level, u64 bit_mask) } else { msr = MSR_IA32_PACKAGE_THERM_STATUS; msr_val = THERM_STATUS_CLEAR_PKG_MASK; + if (boot_cpu_has(X86_FEATURE_HFI)) + msr_val |= BIT(26); + } msr_val &= ~bit_mask; diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c index 16b6bcf1bf4fa171b9e7a0aa413a943f0cef4394..031ea1091909a78549dfb2191b9a739eef3f7248 100644 --- a/drivers/thermal/k3_j72xx_bandgap.c +++ b/drivers/thermal/k3_j72xx_bandgap.c @@ -177,7 +177,6 @@ struct k3_j72xx_bandgap { struct device *dev; void __iomem *base; void __iomem *cfg2_base; - void __iomem *fuse_base; struct k3_thermal_data *ts_data[K3_VTM_MAX_NUM_TS]; }; @@ -249,14 +248,7 @@ static inline int k3_bgp_read_temp(struct k3_thermal_data *devdata, /* Get temperature callback function for thermal zone */ static int k3_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { - struct k3_thermal_data *data = tz->devdata; - int ret = 0; - - ret = k3_bgp_read_temp(data, temp); - if (ret) - return ret; - - return ret; + return k3_bgp_read_temp(tz->devdata, temp); } static const struct thermal_zone_device_ops k3_of_thermal_ops = { @@ 
-283,7 +275,7 @@ static int k3_j72xx_bandgap_temp_to_adc_code(int temp) } static void get_efuse_values(int id, struct k3_thermal_data *data, int *err, - struct k3_j72xx_bandgap *bgp) + void __iomem *fuse_base) { int i, tmp, pow; int ct_offsets[5][K3_VTM_CORRECTION_TEMP_CNT] = { @@ -305,16 +297,16 @@ static void get_efuse_values(int id, struct k3_thermal_data *data, int *err, /* Extract the offset value using bit-mask */ if (ct_offsets[id][i] == -1 && i == 1) { /* 25C offset Case of Sensor 2 split between 2 regs */ - tmp = (readl(bgp->fuse_base + 0x8) & 0xE0000000) >> (29); - tmp |= ((readl(bgp->fuse_base + 0xC) & 0x1F) << 3); + tmp = (readl(fuse_base + 0x8) & 0xE0000000) >> (29); + tmp |= ((readl(fuse_base + 0xC) & 0x1F) << 3); pow = tmp & 0x80; } else if (ct_offsets[id][i] == -1 && i == 2) { /* 125C Case of Sensor 3 split between 2 regs */ - tmp = (readl(bgp->fuse_base + 0x4) & 0xF8000000) >> (27); - tmp |= ((readl(bgp->fuse_base + 0x8) & 0xF) << 5); + tmp = (readl(fuse_base + 0x4) & 0xF8000000) >> (27); + tmp |= ((readl(fuse_base + 0x8) & 0xF) << 5); pow = tmp & 0x100; } else { - tmp = readl(bgp->fuse_base + ct_offsets[id][i]); + tmp = readl(fuse_base + ct_offsets[id][i]); tmp &= ct_bm[id][i]; tmp = tmp >> __ffs(ct_bm[id][i]); @@ -347,7 +339,7 @@ static void print_look_up_table(struct device *dev, int *ref_table) } struct k3_j72xx_bandgap_data { - unsigned int has_errata_i2128; + const bool has_errata_i2128; }; static int k3_j72xx_bandgap_probe(struct platform_device *pdev) @@ -358,11 +350,12 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct k3_j72xx_bandgap *bgp; struct k3_thermal_data *data; - int workaround_needed = 0; + bool workaround_needed = false; const struct k3_j72xx_bandgap_data *driver_data; struct thermal_zone_device *ti_thermal; int *ref_table; struct err_values err_vals; + void __iomem *fuse_base; const s64 golden_factors[] = { -490019999999999936, @@ -393,15 +386,32 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev) if (IS_ERR(bgp->cfg2_base)) return PTR_ERR(bgp->cfg2_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - bgp->fuse_base = devm_ioremap_resource(dev, res); - if (IS_ERR(bgp->fuse_base)) - return PTR_ERR(bgp->fuse_base); - driver_data = of_device_get_match_data(dev); if (driver_data) workaround_needed = driver_data->has_errata_i2128; + /* + * Some of TI's J721E SoCs require a software trimming procedure + * for the temperature monitors to function properly. To determine + * if this particular SoC is NOT affected, both bits in the + * WKUP_SPARE_FUSE0[31:30] will be set (0xC0000000) indicating + * when software trimming should NOT be applied. + * + * https://www.ti.com/lit/er/sprz455c/sprz455c.pdf + */ + if (workaround_needed) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + fuse_base = devm_ioremap_resource(dev, res); + if (IS_ERR(fuse_base)) + return PTR_ERR(fuse_base); + + if ((readl(fuse_base) & 0xc0000000) == 0xc0000000) + workaround_needed = false; + } + + dev_dbg(bgp->dev, "Work around %sneeded\n", + workaround_needed ? "" : "not "); + pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) { @@ -434,13 +444,6 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev) goto err_free_ref_table; } - /* Workaround not needed if bit30/bit31 is set even for J721e */ - if (workaround_needed && (readl(bgp->fuse_base + 0x0) & 0xc0000000) == 0xc0000000) - workaround_needed = false; - - dev_dbg(bgp->dev, "Work around %sneeded\n", - workaround_needed ? 
"not " : ""); - if (!workaround_needed) init_table(5, ref_table, golden_factors); else @@ -459,7 +462,7 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev) err_vals.refs[1] = PLUS30CREF; err_vals.refs[2] = PLUS125CREF; err_vals.refs[3] = PLUS150CREF; - get_efuse_values(id, &data[id], err_vals.errs, bgp); + get_efuse_values(id, &data[id], err_vals.errs, fuse_base); } if (id == 0 && workaround_needed) @@ -529,11 +532,11 @@ static int k3_j72xx_bandgap_remove(struct platform_device *pdev) } static const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j721e_data = { - .has_errata_i2128 = 1, + .has_errata_i2128 = true, }; static const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j7200_data = { - .has_errata_i2128 = 0, + .has_errata_i2128 = false, }; static const struct of_device_id of_k3_j72xx_bandgap_match[] = { diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c index d3d9b9fa49e8191d7bcd8a4a506ea82f02db2685..4122a51e987414bf3c12bce3c83fc1b95c33b35d 100644 --- a/drivers/thermal/qcom/lmh.c +++ b/drivers/thermal/qcom/lmh.c @@ -45,7 +45,7 @@ static irqreturn_t lmh_handle_irq(int hw_irq, void *data) if (irq) generic_handle_irq(irq); - return 0; + return IRQ_HANDLED; } static void lmh_enable_interrupt(struct irq_data *d) diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c index 1b2c43eab27d12b00235e27c09b0f2745ff62107..ff47fc9ac9c5e872021f687bb567e8b767c9f448 100644 --- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c +++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c @@ -678,7 +678,7 @@ static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm) &adc_tm5_thermal_ops); if (IS_ERR(tzd)) { if (PTR_ERR(tzd) == -ENODEV) { - dev_warn(adc_tm->dev, "thermal sensor on channel %d is not used\n", + dev_dbg(adc_tm->dev, "thermal sensor on channel %d is not used\n", adc_tm->channels[i].channel); continue; } @@ -1030,10 +1030,8 @@ static int adc_tm5_probe(struct platform_device *pdev) return irq; ret = adc_tm5_get_dt_data(adc_tm, node); - if (ret) { - dev_err(dev, "get dt data failed: %d\n", ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "get dt data failed\n"); ret = adc_tm->data->init(adc_tm); if (ret) { diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c index be785ab37e53d5f877212980b166605e7afba62e..ad84978109e6f7a4e53751b286e1ca9af6ebf916 100644 --- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c @@ -252,7 +252,8 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip, disable_s2_shutdown = true; else dev_warn(chip->dev, - "No ADC is configured and critical temperature is above the maximum stage 2 threshold of 140 C! Configuring stage 2 shutdown at 140 C.\n"); + "No ADC is configured and critical temperature %d mC is above the maximum stage 2 threshold of %ld mC! 
Configuring stage 2 shutdown at %ld mC.\n", + temp, stage2_threshold_max, stage2_threshold_max); } skip: diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c index 67c1748cdf734c9d7c39a44c118037def6427800..4585904fb38002bb25c385be4cedc9e860745480 100644 --- a/drivers/thermal/qcom/tsens-8960.c +++ b/drivers/thermal/qcom/tsens-8960.c @@ -269,9 +269,12 @@ static const struct tsens_ops ops_8960 = { static struct tsens_features tsens_8960_feat = { .ver_major = VER_0, .crit_int = 0, + .combo_int = 0, .adc = 1, .srot_split = 0, .max_sensors = 11, + .trip_min_temp = -40000, + .trip_max_temp = 120000, }; struct tsens_plat_data data_8960 = { diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c index 327f37202c69fff1b853dda82d396c730107529a..04d012e4f7288221be16cbad8ba411633b9b41ff 100644 --- a/drivers/thermal/qcom/tsens-v0_1.c +++ b/drivers/thermal/qcom/tsens-v0_1.c @@ -539,9 +539,12 @@ static int calibrate_9607(struct tsens_priv *priv) static struct tsens_features tsens_v0_1_feat = { .ver_major = VER_0_1, .crit_int = 0, + .combo_int = 0, .adc = 1, .srot_split = 1, .max_sensors = 11, + .trip_min_temp = -40000, + .trip_max_temp = 120000, }; static const struct reg_field tsens_v0_1_regfields[MAX_REGFIELDS] = { diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c index 573e261ccca74e7adbfd233e02a27adad610ce65..1d7f8a80bd13a22446ca03e6e1df1cc3f8edc86f 100644 --- a/drivers/thermal/qcom/tsens-v1.c +++ b/drivers/thermal/qcom/tsens-v1.c @@ -302,9 +302,12 @@ static int calibrate_8976(struct tsens_priv *priv) static struct tsens_features tsens_v1_feat = { .ver_major = VER_1_X, .crit_int = 0, + .combo_int = 0, .adc = 1, .srot_split = 1, .max_sensors = 11, + .trip_min_temp = -40000, + .trip_max_temp = 120000, }; static const struct reg_field tsens_v1_regfields[MAX_REGFIELDS] = { diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c index b293ed32174b5530dad894ad427dbe40a525ac64..29a61d2d6ca312c715a086e848ae0978aaf7dbd2 100644 --- a/drivers/thermal/qcom/tsens-v2.c +++ b/drivers/thermal/qcom/tsens-v2.c @@ -31,9 +31,23 @@ static struct tsens_features tsens_v2_feat = { .ver_major = VER_2_X, .crit_int = 1, + .combo_int = 0, .adc = 0, .srot_split = 1, .max_sensors = 16, + .trip_min_temp = -40000, + .trip_max_temp = 120000, +}; + +static struct tsens_features ipq8074_feat = { + .ver_major = VER_2_X, + .crit_int = 1, + .combo_int = 1, + .adc = 0, + .srot_split = 1, + .max_sensors = 16, + .trip_min_temp = 0, + .trip_max_temp = 204000, }; static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = { @@ -101,6 +115,12 @@ struct tsens_plat_data data_tsens_v2 = { .fields = tsens_v2_regfields, }; +struct tsens_plat_data data_ipq8074 = { + .ops = &ops_generic_v2, + .feat = &ipq8074_feat, + .fields = tsens_v2_regfields, +}; + /* Kept around for backward compatibility with old msm8996.dtsi */ struct tsens_plat_data data_8996 = { .num_sensors = 13, diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index b1b10005fb286e81833f0d52df9e58d7289c3b7d..b5b136ff323f9a38fe58f335c5433acb97868a13 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -532,6 +532,27 @@ static irqreturn_t tsens_irq_thread(int irq, void *data) return IRQ_HANDLED; } +/** + * tsens_combined_irq_thread() - Threaded interrupt handler for combined interrupts + * @irq: irq number + * @data: tsens controller private data + * + * Handle the combined interrupt as if it were 2 separate interrupts, so call 
the + * critical handler first and then the up/low one. + * + * Return: IRQ_HANDLED + */ +static irqreturn_t tsens_combined_irq_thread(int irq, void *data) +{ + irqreturn_t ret; + + ret = tsens_critical_irq_thread(irq, data); + if (ret != IRQ_HANDLED) + return ret; + + return tsens_irq_thread(irq, data); +} + static int tsens_set_trips(struct thermal_zone_device *tz, int low, int high) { struct tsens_sensor *s = tz->devdata; @@ -552,8 +573,8 @@ static int tsens_set_trips(struct thermal_zone_device *tz, int low, int high) dev_dbg(dev, "[%u] %s: proposed thresholds: (%d:%d)\n", hw_id, __func__, low, high); - cl_high = clamp_val(high, -40000, 120000); - cl_low = clamp_val(low, -40000, 120000); + cl_high = clamp_val(high, priv->feat->trip_min_temp, priv->feat->trip_max_temp); + cl_low = clamp_val(low, priv->feat->trip_min_temp, priv->feat->trip_max_temp); high_val = tsens_mC_to_hw(s, cl_high); low_val = tsens_mC_to_hw(s, cl_low); @@ -692,7 +713,7 @@ static int dbg_version_show(struct seq_file *s, void *data) return ret; seq_printf(s, "%d.%d.%d\n", maj_ver, min_ver, step_ver); } else { - seq_puts(s, "0.1.0\n"); + seq_printf(s, "0.%d.0\n", priv->feat->ver_major); } return 0; @@ -704,21 +725,14 @@ DEFINE_SHOW_ATTRIBUTE(dbg_sensors); static void tsens_debug_init(struct platform_device *pdev) { struct tsens_priv *priv = platform_get_drvdata(pdev); - struct dentry *root, *file; - root = debugfs_lookup("tsens", NULL); - if (!root) + priv->debug_root = debugfs_lookup("tsens", NULL); + if (!priv->debug_root) priv->debug_root = debugfs_create_dir("tsens", NULL); - else - priv->debug_root = root; - - file = debugfs_lookup("version", priv->debug_root); - if (!file) - debugfs_create_file("version", 0444, priv->debug_root, - pdev, &dbg_version_fops); /* A directory for each instance of the TSENS IP */ priv->debug = debugfs_create_dir(dev_name(&pdev->dev), priv->debug_root); + debugfs_create_file("version", 0444, priv->debug, pdev, &dbg_version_fops); debugfs_create_file("sensors", 0444, priv->debug, pdev, &dbg_sensors_fops); } #else @@ -918,8 +932,6 @@ int __init init_common(struct tsens_priv *priv) if (tsens_version(priv) >= VER_0_1) tsens_enable_irq(priv); - tsens_debug_init(op); - err_put_device: put_device(&op->dev); return ret; @@ -959,6 +971,9 @@ static const struct of_device_id tsens_table[] = { { .compatible = "qcom,ipq8064-tsens", .data = &data_8960, + }, { + .compatible = "qcom,ipq8074-tsens", + .data = &data_ipq8074, }, { .compatible = "qcom,mdm9607-tsens", .data = &data_9607, @@ -1071,13 +1086,18 @@ static int tsens_register(struct tsens_priv *priv) tsens_mC_to_hw(priv->sensor, 0)); } - ret = tsens_register_irq(priv, "uplow", tsens_irq_thread); - if (ret < 0) - return ret; + if (priv->feat->combo_int) { + ret = tsens_register_irq(priv, "combined", + tsens_combined_irq_thread); + } else { + ret = tsens_register_irq(priv, "uplow", tsens_irq_thread); + if (ret < 0) + return ret; - if (priv->feat->crit_int) - ret = tsens_register_irq(priv, "critical", - tsens_critical_irq_thread); + if (priv->feat->crit_int) + ret = tsens_register_irq(priv, "critical", + tsens_critical_irq_thread); + } return ret; } @@ -1153,7 +1173,11 @@ static int tsens_probe(struct platform_device *pdev) } } - return tsens_register(priv); + ret = tsens_register(priv); + if (!ret) + tsens_debug_init(pdev); + + return ret; } static int tsens_remove(struct platform_device *pdev) diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h index 
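The tsens changes above move the trip-point limits and the interrupt topology into struct tsens_features, so each IP version carries its own bounds. A hypothetical SoC entry modeled on ipq8074_feat from the diff:

/* Hypothetical entry modeled on ipq8074_feat above. */
static struct tsens_features example_feat = {
        .ver_major     = VER_2_X,
        .crit_int      = 1,
        .combo_int     = 1,      /* one IRQ for up/low and critical */
        .adc           = 0,
        .srot_split    = 1,
        .max_sensors   = 16,
        .trip_min_temp = 0,      /* millidegrees Celsius */
        .trip_max_temp = 204000,
};

With combo_int set, tsens_register() requests the single "combined" IRQ and routes it through tsens_combined_irq_thread(); tsens_set_trips() then clamps requested thresholds to trip_min_temp/trip_max_temp instead of the previously hard-coded -40000..120000 mC.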
ba05c82333565bfc6d610931b297aee02cbea5e0..899af128855f74dd376d5685aefde76013b8246a 100644 --- a/drivers/thermal/qcom/tsens.h +++ b/drivers/thermal/qcom/tsens.h @@ -493,19 +493,25 @@ enum regfield_ids { * struct tsens_features - Features supported by the IP * @ver_major: Major number of IP version * @crit_int: does the IP support critical interrupts? + * @combo_int: does the IP use one IRQ for up, low and critical thresholds? * @adc: do the sensors only output adc code (instead of temperature)? * @srot_split: does the IP neatly splits the register space into SROT and TM, * with SROT only being available to secure boot firmware? * @has_watchdog: does this IP support watchdog functionality? * @max_sensors: maximum sensors supported by this version of the IP + * @trip_min_temp: minimum trip temperature supported by this version of the IP + * @trip_max_temp: maximum trip temperature supported by this version of the IP */ struct tsens_features { unsigned int ver_major; unsigned int crit_int:1; + unsigned int combo_int:1; unsigned int adc:1; unsigned int srot_split:1; unsigned int has_watchdog:1; unsigned int max_sensors; + int trip_min_temp; + int trip_max_temp; }; /** @@ -591,6 +597,6 @@ extern struct tsens_plat_data data_8916, data_8939, data_8974, data_9607; extern struct tsens_plat_data data_tsens_v1, data_8976; /* TSENS v2 targets */ -extern struct tsens_plat_data data_8996, data_tsens_v2; +extern struct tsens_plat_data data_8996, data_ipq8074, data_tsens_v2; #endif /* __QCOM_TSENS_H__ */ diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c index 78feb802a87d2b1a344d548091afd8da39e149fa..e7834ccc7976bd33b36834063b0e8a796d87fa08 100644 --- a/drivers/thermal/st/stm_thermal.c +++ b/drivers/thermal/st/stm_thermal.c @@ -488,7 +488,6 @@ MODULE_DEVICE_TABLE(of, stm_thermal_of_match); static int stm_thermal_probe(struct platform_device *pdev) { struct stm_thermal_sensor *sensor; - struct resource *res; void __iomem *base; int ret; @@ -506,8 +505,7 @@ static int stm_thermal_probe(struct platform_device *pdev) sensor->dev = &pdev->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index d4b6335ace15faa1954691b05138efe5f9b7557d..aacba30bc10c19e5cea2abc5a793c200a95c02f0 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -604,13 +604,15 @@ struct thermal_zone_device *thermal_of_zone_register(struct device_node *sensor, if (IS_ERR(np)) { if (PTR_ERR(np) != -ENODEV) pr_err("Failed to find thermal zone for %pOFn id=%d\n", sensor, id); - return ERR_CAST(np); + ret = PTR_ERR(np); + goto out_kfree_of_ops; } trips = thermal_of_trips_init(np, &ntrips); if (IS_ERR(trips)) { pr_err("Failed to find trip points for %pOFn id=%d\n", sensor, id); - return ERR_CAST(trips); + ret = PTR_ERR(trips); + goto out_kfree_of_ops; } ret = thermal_of_monitor_init(np, &delay, &pdelay); @@ -659,6 +661,8 @@ out_kfree_tzp: kfree(tzp); out_kfree_trips: kfree(trips); +out_kfree_of_ops: + kfree(of_ops); return ERR_PTR(ret); } diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index 67050a1a5b07f8f165e8a53b077b236c96985e78..a1c9a153018320788f4b39855e1f038aa23e46ae 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c @@ -878,7 +878,7 @@ static 
struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev) */ static const struct soc_device_attribute soc_no_cpu_notifier[] = { { .machine = "OMAP4430" }, - { /* sentinel */ }, + { /* sentinel */ } }; /*** Device driver call backs ***/ diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c index 7a8adf5ad5a09d66032b32b06bea154e8a745368..317e4f5fdb9792c214a5b07cd1c167853688aeb4 100644 --- a/drivers/thunderbolt/acpi.c +++ b/drivers/thunderbolt/acpi.c @@ -15,24 +15,20 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data, void **return_value) { struct acpi_device *adev = acpi_fetch_acpi_dev(handle); - struct fwnode_reference_args args; struct fwnode_handle *fwnode; struct tb_nhi *nhi = data; struct pci_dev *pdev; struct device *dev; - int ret; if (!adev) return AE_OK; - fwnode = acpi_fwnode_handle(adev); - ret = fwnode_property_get_reference_args(fwnode, "usb4-host-interface", - NULL, 0, 0, &args); - if (ret) + fwnode = fwnode_find_reference(acpi_fwnode_handle(adev), "usb4-host-interface", 0); + if (IS_ERR(fwnode)) return AE_OK; /* It needs to reference this NHI */ - if (dev_fwnode(&nhi->pdev->dev) != args.fwnode) + if (dev_fwnode(&nhi->pdev->dev) != fwnode) goto out_put; /* @@ -100,7 +96,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data, } out_put: - fwnode_handle_put(args.fwnode); + fwnode_handle_put(fwnode); return AE_OK; } diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 60da5c23ccaf437c568066620121219f470aa69b..363d712aa364347c25e361f8b79bca470be47a8a 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -8,12 +8,13 @@ #include #include +#include #include #include #include #include #include -#include +#include #include "tb.h" @@ -644,7 +645,7 @@ static int __tb_port_enable(struct tb_port *port, bool enable) if (ret) return ret; - tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis"); + tb_port_dbg(port, "lane %s\n", str_enabled_disabled(enable)); return 0; } diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index 86319dca0f8ca90585879dbd6e7b3f4c489f85d0..3c38b0cb8f74eb9ebe905cf4b8d91f2e6606bb01 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -361,6 +361,8 @@ struct tb_regs_port_header { #define PORT_CS_18_BE BIT(8) #define PORT_CS_18_TCM BIT(9) #define PORT_CS_18_CPS BIT(10) +#define PORT_CS_18_WOCS BIT(16) +#define PORT_CS_18_WODS BIT(17) #define PORT_CS_18_WOU4S BIT(18) #define PORT_CS_19 0x13 #define PORT_CS_19_PC BIT(3) diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index f986854aa207ade99a87afa2e205269d15176e0d..2ed50fcbcca7a91fc85ea6789d5430c079898735 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -155,6 +155,8 @@ static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode, static void usb4_switch_check_wakes(struct tb_switch *sw) { + bool wakeup_usb4 = false; + struct usb4_port *usb4; struct tb_port *port; bool wakeup = false; u32 val; @@ -173,20 +175,31 @@ static void usb4_switch_check_wakes(struct tb_switch *sw) wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS); } - /* Check for any connected downstream ports for USB4 wake */ + /* + * Check for any downstream ports for USB4 wake, + * connection wake and disconnection wake. 
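The thunderbolt/acpi.c hunk above replaces an open-coded fwnode_property_get_reference_args() call with fwnode_find_reference(). A minimal sketch of the consumer side; the "usb4-host-interface" property name comes from the diff, everything else is hypothetical.

#include <linux/property.h>

static bool example_references_dev(struct fwnode_handle *consumer,
                                   struct device *target)
{
        struct fwnode_handle *ref;
        bool match;

        ref = fwnode_find_reference(consumer, "usb4-host-interface", 0);
        if (IS_ERR(ref))        /* errors come back as ERR_PTR(), not NULL */
                return false;

        match = dev_fwnode(target) == ref;
        fwnode_handle_put(ref); /* the reference is counted; drop it */
        return match;
}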
+ */ tb_switch_for_each_port(sw, port) { - if (!tb_port_has_remote(port)) + if (!port->cap_usb4) continue; if (tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_18, 1)) break; - tb_port_dbg(port, "USB4 wake: %s\n", - (val & PORT_CS_18_WOU4S) ? "yes" : "no"); + tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n", + (val & PORT_CS_18_WOU4S) ? "yes" : "no", + (val & PORT_CS_18_WOCS) ? "yes" : "no", + (val & PORT_CS_18_WODS) ? "yes" : "no"); + + wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS | + PORT_CS_18_WODS); + + usb4 = port->usb4; + if (device_may_wakeup(&usb4->dev) && wakeup_usb4) + pm_wakeup_event(&usb4->dev, 0); - if (val & PORT_CS_18_WOU4S) - wakeup = true; + wakeup |= wakeup_usb4; } if (wakeup) @@ -366,6 +379,7 @@ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw) */ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags) { + struct usb4_port *usb4; struct tb_port *port; u64 route = tb_route(sw); u32 val; @@ -395,10 +409,13 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags) val |= PORT_CS_19_WOU4; } else { bool configured = val & PORT_CS_19_PC; + usb4 = port->usb4; - if ((flags & TB_WAKE_ON_CONNECT) && !configured) + if (((flags & TB_WAKE_ON_CONNECT) | + device_may_wakeup(&usb4->dev)) && !configured) val |= PORT_CS_19_WOC; - if ((flags & TB_WAKE_ON_DISCONNECT) && configured) + if (((flags & TB_WAKE_ON_DISCONNECT) | + device_may_wakeup(&usb4->dev)) && configured) val |= PORT_CS_19_WOD; if ((flags & TB_WAKE_ON_USB4) && configured) val |= PORT_CS_19_WOU4; diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c index 1a30c0a2328655cc8669ccbc062e05f6152f8c5d..e355bfd6343ff0d0616d1917fbcef9acf123023d 100644 --- a/drivers/thunderbolt/usb4_port.c +++ b/drivers/thunderbolt/usb4_port.c @@ -284,6 +284,9 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port) } } + if (!tb_is_upstream_port(port)) + device_set_wakeup_capable(&usb4->dev, true); + pm_runtime_no_callbacks(&usb4->dev); pm_runtime_set_active(&usb4->dev); pm_runtime_enable(&usb4->dev); diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index f00b2f62d8e3c92841ba8bb6276d583724e7c31d..cfa83486c9dad6892f9b8b0c3ddf14c434ac9ae5 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -341,7 +342,6 @@ static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route, memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid)); memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid)); - len = 0; data_len = 0; do { @@ -1344,7 +1344,7 @@ static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd) tb_port_update_credits(port); tb_xdomain_update_link_attributes(xd); - dev_dbg(&xd->dev, "lane bonding %sabled\n", width == 2 ? "en" : "dis"); + dev_dbg(&xd->dev, "lane bonding %s\n", str_enabled_disabled(width == 2)); return 0; } diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index cc30ff93e2e472f685fbb0525e7919b6ba3d717a..d35fc068da74e39831ba8d77547a6a9aa37f79ac 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig @@ -149,6 +149,25 @@ config LEGACY_PTY_COUNT When not in use, each legacy PTY occupies 12 bytes on 32-bit architectures and 24 bytes on 64-bit architectures. +config LEGACY_TIOCSTI + bool "Allow legacy TIOCSTI usage" + default y + help + Historically the kernel has allowed TIOCSTI, which will push + characters into a controlling TTY. 
This continues to be used + as a malicious privilege escalation mechanism, and provides no + meaningful real-world utility any more. Its use is considered + a dangerous legacy operation, and can be disabled on most + systems. + + Say 'Y here only if you have confirmed that your system's + userspace depends on this functionality to continue operating + normally. + + This functionality can be changed at runtime with the + dev.tty.legacy_tiocsti sysctl. This configuration option sets + the default value of the sysctl. + config LDISC_AUTOLOAD bool "Automatically load TTY Line Disciplines" default y diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c index 19d32cb6af84e30101b5bb54ee20b4faa4b230a8..8595483f4697751fb1ffcb203d3ad3b9711988d5 100644 --- a/drivers/tty/ehv_bytechan.c +++ b/drivers/tty/ehv_bytechan.c @@ -118,7 +118,7 @@ static int find_console_handle(void) return 0; stdout_irq = irq_of_parse_and_map(np, 0); - if (stdout_irq == NO_IRQ) { + if (!stdout_irq) { pr_err("ehv-bc: no 'interrupts' property in %pOF node\n", np); return 0; } @@ -696,7 +696,7 @@ static int ehv_bc_tty_probe(struct platform_device *pdev) bc->rx_irq = irq_of_parse_and_map(np, 0); bc->tx_irq = irq_of_parse_and_map(np, 1); - if ((bc->rx_irq == NO_IRQ) || (bc->tx_irq == NO_IRQ)) { + if (!bc->rx_irq || !bc->tx_irq) { dev_err(&pdev->dev, "no 'interrupts' property in %pOFn node\n", np); ret = -ENODEV; diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c index e8b8c645482b0ef7ffafe46dfa9ea22635db849e..184d325abeeda4bc4ef8939565d1ea0e4094ebc9 100644 --- a/drivers/tty/hvc/hvc_rtas.c +++ b/drivers/tty/hvc/hvc_rtas.c @@ -26,7 +26,7 @@ #include "hvc_console.h" #define hvc_rtas_cookie 0x67781e15 -struct hvc_struct *hvc_rtas_dev; +static struct hvc_struct *hvc_rtas_dev; static int rtascons_put_char_token = RTAS_UNKNOWN_SERVICE; static int rtascons_get_char_token = RTAS_UNKNOWN_SERVICE; diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index b6e0cc4571eacc403d9ac56b58340b18791fd841..daf12132deb175a459f2c8830a42572285d12c11 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -38,8 +38,10 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -75,7 +77,12 @@ module_param(debug, int, 0600); #define T1 10 /* 100mS */ #define T2 34 /* 333mS */ +#define T3 10 /* 10s */ #define N2 3 /* Retry 3 times */ +#define K 2 /* outstanding I frames */ + +#define MAX_T3 255 /* In seconds. */ +#define MAX_WINDOW_SIZE 7 /* Limit of K in error recovery mode. 
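The ehv_bytechan hunks above drop the NO_IRQ comparisons: irq_of_parse_and_map() returns 0 when no mapping exists, so a plain boolean test is the portable check. A minimal sketch with hypothetical names:

#include <linux/of_irq.h>

static int example_map_irq(struct device_node *np)
{
        unsigned int irq = irq_of_parse_and_map(np, 0);

        if (!irq)               /* 0 means "no IRQ"; NO_IRQ is obsolete */
                return -ENODEV;

        return irq;
}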
*/ /* Use long timers for testing at low speed with debug on */ #ifdef DEBUG_TIMING @@ -89,6 +96,7 @@ module_param(debug, int, 0600); */ #define MAX_MRU 1500 #define MAX_MTU 1500 +#define MIN_MTU (PROT_OVERHEAD + 1) /* SOF, ADDR, CTRL, LEN1, LEN2, ..., FCS, EOF */ #define PROT_OVERHEAD 7 #define GSM_NET_TX_TIMEOUT (HZ*10) @@ -120,6 +128,7 @@ struct gsm_msg { enum gsm_dlci_state { DLCI_CLOSED, + DLCI_CONFIGURE, /* Sending PN (for adaption > 1) */ DLCI_OPENING, /* Sending SABM not seen UA */ DLCI_OPEN, /* SABM/UA complete */ DLCI_CLOSING, /* Sending DISC not seen UA/DM */ @@ -159,7 +168,12 @@ struct gsm_dlci { int prev_adaption; u32 modem_rx; /* Our incoming virtual modem lines */ u32 modem_tx; /* Our outgoing modem lines */ + unsigned int mtu; bool dead; /* Refuse re-open */ + /* Configuration */ + u8 prio; /* Priority */ + u8 ftype; /* Frame type */ + u8 k; /* Window size */ /* Flow control */ bool throttled; /* Private copy of throttle state */ bool constipated; /* Throttle status for outgoing */ @@ -172,6 +186,32 @@ struct gsm_dlci { struct net_device *net; /* network interface, if created */ }; +/* + * Parameter bits used for parameter negotiation according to 3GPP 27.010 + * chapter 5.4.6.3.1. + */ + +struct gsm_dlci_param_bits { + u8 d_bits; + u8 i_cl_bits; + u8 p_bits; + u8 t_bits; + __le16 n_bits; + u8 na_bits; + u8 k_bits; +}; + +static_assert(sizeof(struct gsm_dlci_param_bits) == 8); + +#define PN_D_FIELD_DLCI GENMASK(5, 0) +#define PN_I_CL_FIELD_FTYPE GENMASK(3, 0) +#define PN_I_CL_FIELD_ADAPTION GENMASK(7, 4) +#define PN_P_FIELD_PRIO GENMASK(5, 0) +#define PN_T_FIELD_T1 GENMASK(7, 0) +#define PN_N_FIELD_N1 GENMASK(15, 0) +#define PN_NA_FIELD_N2 GENMASK(7, 0) +#define PN_K_FIELD_K GENMASK(2, 0) + /* Total number of supported devices */ #define GSM_TTY_MINORS 256 @@ -282,7 +322,9 @@ struct gsm_mux { int adaption; /* 1 or 2 supported */ u8 ftype; /* UI or UIH */ int t1, t2; /* Timers in 1/100th of a sec */ + unsigned int t3; /* Power wake-up timer in seconds. */ int n2; /* Retry count */ + u8 k; /* Window size */ /* Statistics (not currently exposed) */ unsigned long bad_fcs; @@ -397,6 +439,7 @@ static const u8 gsm_fcs8[256] = { #define INIT_FCS 0xFF #define GOOD_FCS 0xCF +static void gsm_dlci_close(struct gsm_dlci *dlci); static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len); static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk); static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len, @@ -519,6 +562,57 @@ static void gsm_hex_dump_bytes(const char *fname, const u8 *data, kfree(prefix); } +/** + * gsm_encode_params - encode DLCI parameters + * @dlci: DLCI to encode from + * @params: buffer to fill with the encoded parameters + * + * Encodes the parameters according to GSM 07.10 section 5.4.6.3.1 + * table 3. + */ +static int gsm_encode_params(const struct gsm_dlci *dlci, + struct gsm_dlci_param_bits *params) +{ + const struct gsm_mux *gsm = dlci->gsm; + unsigned int i, cl; + + switch (dlci->ftype) { + case UIH: + i = 0; /* UIH */ + break; + case UI: + i = 1; /* UI */ + break; + default: + pr_debug("unsupported frame type %d\n", dlci->ftype); + return -EINVAL; + } + + switch (dlci->adaption) { + case 1: /* Unstructured */ + cl = 0; /* convergence layer type 1 */ + break; + case 2: /* Unstructured with modem bits. 
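struct gsm_dlci_param_bits above packs the 3GPP 27.010 parameter-negotiation octets with GENMASK() masks and FIELD_PREP()/FIELD_GET(). A small self-contained example of that bitfield idiom, independent of n_gsm; the EXAMPLE_* names are invented for illustration.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_FIELD_DLCI      GENMASK(5, 0)   /* bits 5..0 */
#define EXAMPLE_FIELD_CR        BIT(6)          /* bit 6 */

static u8 example_pack(u8 dlci, bool cr)
{
        /* FIELD_PREP() shifts the value into the mask's bit position. */
        return FIELD_PREP(EXAMPLE_FIELD_DLCI, dlci) |
               FIELD_PREP(EXAMPLE_FIELD_CR, cr);
}

static u8 example_unpack_dlci(u8 octet)
{
        /* FIELD_GET() masks the field out and shifts it back down. */
        return FIELD_GET(EXAMPLE_FIELD_DLCI, octet);
}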
*/ + cl = 1; /* convergence layer type 2 */ + break; + default: + pr_debug("unsupported adaption %d\n", dlci->adaption); + return -EINVAL; + } + + params->d_bits = FIELD_PREP(PN_D_FIELD_DLCI, dlci->addr); + /* UIH, convergence layer type 1 */ + params->i_cl_bits = FIELD_PREP(PN_I_CL_FIELD_FTYPE, i) | + FIELD_PREP(PN_I_CL_FIELD_ADAPTION, cl); + params->p_bits = FIELD_PREP(PN_P_FIELD_PRIO, dlci->prio); + params->t_bits = FIELD_PREP(PN_T_FIELD_T1, gsm->t1); + params->n_bits = cpu_to_le16(FIELD_PREP(PN_N_FIELD_N1, dlci->mtu)); + params->na_bits = FIELD_PREP(PN_NA_FIELD_N2, gsm->n2); + params->k_bits = FIELD_PREP(PN_K_FIELD_K, dlci->k); + + return 0; +} + /** * gsm_register_devices - register all tty devices for a given mux index * @@ -1076,12 +1170,12 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci) return 0; /* MTU/MRU count only the data bits but watch adaption mode */ - if ((len + h) > gsm->mtu) - len = gsm->mtu - h; + if ((len + h) > dlci->mtu) + len = dlci->mtu - h; size = len + h; - msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype); + msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype); if (!msg) return -ENOMEM; dp = msg->data; @@ -1145,19 +1239,19 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, len = dlci->skb->len + overhead; /* MTU/MRU count only the data bits */ - if (len > gsm->mtu) { + if (len > dlci->mtu) { if (dlci->adaption == 3) { /* Over long frame, bin it */ dev_kfree_skb_any(dlci->skb); dlci->skb = NULL; return 0; } - len = gsm->mtu; + len = dlci->mtu; } else last = 1; size = len + overhead; - msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype); + msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype); if (msg == NULL) { skb_queue_tail(&dlci->skb_list, dlci->skb); dlci->skb = NULL; @@ -1214,7 +1308,7 @@ static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci, return -EINVAL; } - msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype); + msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype); if (!msg) { pr_err("%s: gsm_data_alloc error", __func__); return -ENOMEM; @@ -1287,7 +1381,7 @@ static int gsm_dlci_data_sweep(struct gsm_mux *gsm) } if (!sent) break; - }; + } return ret; } @@ -1340,8 +1434,9 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci) static int gsm_control_command(struct gsm_mux *gsm, int cmd, const u8 *data, int dlen) { - struct gsm_msg *msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype); + struct gsm_msg *msg; + msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->dlci[0]->ftype); if (msg == NULL) return -ENOMEM; @@ -1367,7 +1462,8 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data, int dlen) { struct gsm_msg *msg; - msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype); + + msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->dlci[0]->ftype); if (msg == NULL) return; msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */ @@ -1437,6 +1533,116 @@ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci, dlci->modem_rx = mlines; } +/** + * gsm_process_negotiation - process received parameters + * @gsm: GSM channel + * @addr: DLCI address + * @cr: command/response + * @params: encoded parameters from the parameter negotiation message + * + * Used when the response for our parameter negotiation command was + * received. 
+ */ +static int gsm_process_negotiation(struct gsm_mux *gsm, unsigned int addr, + unsigned int cr, + const struct gsm_dlci_param_bits *params) +{ + struct gsm_dlci *dlci = gsm->dlci[addr]; + unsigned int ftype, i, adaption, prio, n1, k; + + i = FIELD_GET(PN_I_CL_FIELD_FTYPE, params->i_cl_bits); + adaption = FIELD_GET(PN_I_CL_FIELD_ADAPTION, params->i_cl_bits) + 1; + prio = FIELD_GET(PN_P_FIELD_PRIO, params->p_bits); + n1 = FIELD_GET(PN_N_FIELD_N1, get_unaligned_le16(¶ms->n_bits)); + k = FIELD_GET(PN_K_FIELD_K, params->k_bits); + + if (n1 < MIN_MTU) { + if (debug & DBG_ERRORS) + pr_info("%s N1 out of range in PN\n", __func__); + return -EINVAL; + } + + switch (i) { + case 0x00: + ftype = UIH; + break; + case 0x01: + ftype = UI; + break; + case 0x02: /* I frames are not supported */ + if (debug & DBG_ERRORS) + pr_info("%s unsupported I frame request in PN\n", + __func__); + return -EINVAL; + default: + if (debug & DBG_ERRORS) + pr_info("%s i out of range in PN\n", __func__); + return -EINVAL; + } + + if (!cr && gsm->initiator) { + if (adaption != dlci->adaption) { + if (debug & DBG_ERRORS) + pr_info("%s invalid adaption %d in PN\n", + __func__, adaption); + return -EINVAL; + } + if (prio != dlci->prio) { + if (debug & DBG_ERRORS) + pr_info("%s invalid priority %d in PN", + __func__, prio); + return -EINVAL; + } + if (n1 > gsm->mru || n1 > dlci->mtu) { + /* We requested a frame size but the other party wants + * to send larger frames. The standard allows only a + * smaller response value than requested (5.4.6.3.1). + */ + if (debug & DBG_ERRORS) + pr_info("%s invalid N1 %d in PN\n", __func__, + n1); + return -EINVAL; + } + dlci->mtu = n1; + if (ftype != dlci->ftype) { + if (debug & DBG_ERRORS) + pr_info("%s invalid i %d in PN\n", __func__, i); + return -EINVAL; + } + if (ftype != UI && ftype != UIH && k > dlci->k) { + if (debug & DBG_ERRORS) + pr_info("%s invalid k %d in PN\n", __func__, k); + return -EINVAL; + } + dlci->k = k; + } else if (cr && !gsm->initiator) { + /* Only convergence layer type 1 and 2 are supported. */ + if (adaption != 1 && adaption != 2) { + if (debug & DBG_ERRORS) + pr_info("%s invalid adaption %d in PN\n", + __func__, adaption); + return -EINVAL; + } + dlci->adaption = adaption; + if (n1 > gsm->mru) { + /* Propose a smaller value */ + dlci->mtu = gsm->mru; + } else if (n1 > MAX_MTU) { + /* Propose a smaller value */ + dlci->mtu = MAX_MTU; + } else { + dlci->mtu = n1; + } + dlci->prio = prio; + dlci->ftype = ftype; + dlci->k = k; + } else { + return -EINVAL; + } + + return 0; +} + /** * gsm_control_modem - modem status received * @gsm: GSM channel @@ -1490,6 +1696,65 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) gsm_control_reply(gsm, CMD_MSC, data, clen); } +/** + * gsm_control_negotiation - parameter negotiation received + * @gsm: GSM channel + * @cr: command/response flag + * @data: data following command + * @dlen: data length + * + * We have received a parameter negotiation message. This is used by + * the GSM mux protocol to configure protocol parameters for a new DLCI. + */ +static void gsm_control_negotiation(struct gsm_mux *gsm, unsigned int cr, + const u8 *data, unsigned int dlen) +{ + unsigned int addr; + struct gsm_dlci_param_bits pn_reply; + struct gsm_dlci *dlci; + struct gsm_dlci_param_bits *params; + + if (dlen < sizeof(struct gsm_dlci_param_bits)) + return; + + /* Invalid DLCI? 
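gsm_process_negotiation() above enforces the 27.010 rule (5.4.6.3.1) that a parameter-negotiation response may only propose a frame size equal to or smaller than the request. A condensed sketch of the responder-side clamping; the function name is hypothetical, the limits mirror the n_gsm constants defined earlier in this file.

/* Responder side: never grow N1 beyond what we can receive. */
static unsigned int example_clamp_n1(unsigned int requested_n1,
                                     unsigned int our_mru)
{
        if (requested_n1 > our_mru)
                return our_mru;         /* propose a smaller value */
        if (requested_n1 > MAX_MTU)
                return MAX_MTU;         /* cap at the protocol maximum */
        return requested_n1;
}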
*/ + params = (struct gsm_dlci_param_bits *)data; + addr = FIELD_GET(PN_D_FIELD_DLCI, params->d_bits); + if (addr == 0 || addr >= NUM_DLCI || !gsm->dlci[addr]) + return; + dlci = gsm->dlci[addr]; + + /* Too late for parameter negotiation? */ + if ((!cr && dlci->state == DLCI_OPENING) || dlci->state == DLCI_OPEN) + return; + + /* Process the received parameters */ + if (gsm_process_negotiation(gsm, addr, cr, params) != 0) { + /* Negotiation failed. Close the link. */ + if (debug & DBG_ERRORS) + pr_info("%s PN failed\n", __func__); + gsm_dlci_close(dlci); + return; + } + + if (cr) { + /* Reply command with accepted parameters. */ + if (gsm_encode_params(dlci, &pn_reply) == 0) + gsm_control_reply(gsm, CMD_PN, (const u8 *)&pn_reply, + sizeof(pn_reply)); + else if (debug & DBG_ERRORS) + pr_info("%s PN invalid\n", __func__); + } else if (dlci->state == DLCI_CONFIGURE) { + /* Proceed with link setup by sending SABM before UA */ + dlci->state = DLCI_OPENING; + gsm_command(gsm, dlci->addr, SABM|PF); + mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); + } else { + if (debug & DBG_ERRORS) + pr_info("%s PN in invalid state\n", __func__); + } +} + /** * gsm_control_rls - remote line status * @gsm: GSM channel @@ -1599,8 +1864,12 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, /* Modem wishes to enter power saving state */ gsm_control_reply(gsm, CMD_PSC, NULL, 0); break; + /* Optional commands */ + case CMD_PN: + /* Modem sends a parameter negotiation command */ + gsm_control_negotiation(gsm, 1, data, clen); + break; /* Optional unsupported commands */ - case CMD_PN: /* Parameter negotiation */ case CMD_RPN: /* Remote port negotiation */ case CMD_SNC: /* Service negotiation command */ default: @@ -1633,8 +1902,8 @@ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command, spin_lock_irqsave(&gsm->control_lock, flags); ctrl = gsm->pending_cmd; - /* Does the reply match our command */ command |= 1; + /* Does the reply match our command */ if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) { /* Our command was replied to, kill the retry timer */ del_timer(&gsm->t2_timer); @@ -1644,6 +1913,9 @@ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command, ctrl->error = -EOPNOTSUPP; ctrl->done = 1; wake_up(&gsm->event); + /* Or did we receive the PN response to our PN command */ + } else if (command == CMD_PN) { + gsm_control_negotiation(gsm, 0, data, clen); } spin_unlock_irqrestore(&gsm->control_lock, flags); } @@ -1821,6 +2093,32 @@ static void gsm_dlci_open(struct gsm_dlci *dlci) wake_up(&dlci->gsm->event); } +/** + * gsm_dlci_negotiate - start parameter negotiation + * @dlci: DLCI to open + * + * Starts the parameter negotiation for the new DLCI. This needs to be done + * before the DLCI initialized the channel via SABM. + */ +static int gsm_dlci_negotiate(struct gsm_dlci *dlci) +{ + struct gsm_mux *gsm = dlci->gsm; + struct gsm_dlci_param_bits params; + int ret; + + ret = gsm_encode_params(dlci, ¶ms); + if (ret != 0) + return ret; + + /* We cannot asynchronous wait for the command response with + * gsm_command() and gsm_control_wait() at this point. 
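The hunks above thread a new DLCI_CONFIGURE state between CLOSED and OPENING so that parameter negotiation can complete before SABM is sent. A condensed view of the open path, error and retry legs omitted:

/*
 * DLCI_CLOSED/CLOSING --non-default params--> DLCI_CONFIGURE (send PN)
 * DLCI_CONFIGURE --PN response accepted--> DLCI_OPENING (send SABM|PF)
 * DLCI_CLOSED/CLOSING --defaults fine--> DLCI_OPENING (send SABM|PF)
 * DLCI_OPENING --UA received--> DLCI_OPEN
 *
 * T1 retries apply in both CONFIGURE and OPENING; exhausting them in
 * CONFIGURE closes the link to avoid a half-open DLCI.
 */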
+ */ + ret = gsm_control_command(gsm, CMD_PN, (const u8 *)¶ms, + sizeof(params)); + + return ret; +} + /** * gsm_dlci_t1 - T1 timer expiry * @t: timer contained in the DLCI that opened @@ -1842,6 +2140,14 @@ static void gsm_dlci_t1(struct timer_list *t) struct gsm_mux *gsm = dlci->gsm; switch (dlci->state) { + case DLCI_CONFIGURE: + if (dlci->retries && gsm_dlci_negotiate(dlci) == 0) { + dlci->retries--; + mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); + } else { + gsm_dlci_begin_close(dlci); /* prevent half open link */ + } + break; case DLCI_OPENING: if (dlci->retries) { dlci->retries--; @@ -1880,17 +2186,46 @@ static void gsm_dlci_t1(struct timer_list *t) * to the modem which should then reply with a UA or ADM, at which point * we will move into open state. Opening is done asynchronously with retry * running off timers and the responses. + * Parameter negotiation is performed before SABM if required. */ static void gsm_dlci_begin_open(struct gsm_dlci *dlci) { - struct gsm_mux *gsm = dlci->gsm; - if (dlci->state == DLCI_OPEN || dlci->state == DLCI_OPENING) + struct gsm_mux *gsm = dlci ? dlci->gsm : NULL; + bool need_pn = false; + + if (!gsm) return; - dlci->retries = gsm->n2; - dlci->state = DLCI_OPENING; - gsm_command(dlci->gsm, dlci->addr, SABM|PF); - mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); + + if (dlci->addr != 0) { + if (gsm->adaption != 1 || gsm->adaption != dlci->adaption) + need_pn = true; + if (dlci->prio != (roundup(dlci->addr + 1, 8) - 1)) + need_pn = true; + if (gsm->ftype != dlci->ftype) + need_pn = true; + } + + switch (dlci->state) { + case DLCI_CLOSED: + case DLCI_CLOSING: + dlci->retries = gsm->n2; + if (!need_pn) { + dlci->state = DLCI_OPENING; + gsm_command(gsm, dlci->addr, SABM|PF); + } else { + /* Configure DLCI before setup */ + dlci->state = DLCI_CONFIGURE; + if (gsm_dlci_negotiate(dlci) != 0) { + gsm_dlci_close(dlci); + return; + } + } + mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); + break; + default: + break; + } } /** @@ -2078,6 +2413,13 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr) dlci->gsm = gsm; dlci->addr = addr; dlci->adaption = gsm->adaption; + dlci->mtu = gsm->mtu; + if (addr == 0) + dlci->prio = 0; + else + dlci->prio = roundup(addr + 1, 8) - 1; + dlci->ftype = gsm->ftype; + dlci->k = gsm->k; dlci->state = DLCI_CLOSED; if (addr) { dlci->data = gsm_dlci_data; @@ -2652,7 +2994,9 @@ static struct gsm_mux *gsm_alloc_mux(void) gsm->t1 = T1; gsm->t2 = T2; + gsm->t3 = T3; gsm->n2 = N2; + gsm->k = K; gsm->ftype = UIH; gsm->adaption = 1; gsm->encoding = GSM_ADV_OPT; @@ -2692,7 +3036,7 @@ static void gsm_copy_config_values(struct gsm_mux *gsm, c->initiator = gsm->initiator; c->t1 = gsm->t1; c->t2 = gsm->t2; - c->t3 = 0; /* Not supported */ + c->t3 = gsm->t3; c->n2 = gsm->n2; if (gsm->ftype == UIH) c->i = 1; @@ -2701,7 +3045,7 @@ static void gsm_copy_config_values(struct gsm_mux *gsm, pr_debug("Ftype %d i %d\n", gsm->ftype, c->i); c->mru = gsm->mru; c->mtu = gsm->mtu; - c->k = 0; + c->k = gsm->k; } static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) @@ -2714,7 +3058,11 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) if ((c->adaption != 1 && c->adaption != 2) || c->k) return -EOPNOTSUPP; /* Check the MRU/MTU range looks sane */ - if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8) + if (c->mru < MIN_MTU || c->mtu < MIN_MTU) + return -EINVAL; + if (c->mru > MAX_MRU || c->mtu > MAX_MTU) + return -EINVAL; + if (c->t3 > MAX_T3) return -EINVAL; if (c->n2 > 255) return 
-EINVAL; @@ -2722,6 +3070,8 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) return -EINVAL; if (c->initiator > 1) return -EINVAL; + if (c->k > MAX_WINDOW_SIZE) + return -EINVAL; if (c->i == 0 || c->i > 2) /* UIH and UI only */ return -EINVAL; /* @@ -2769,6 +3119,10 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) gsm->t1 = c->t1; if (c->t2) gsm->t2 = c->t2; + if (c->t3) + gsm->t3 = c->t3; + if (c->k) + gsm->k = c->k; /* * FIXME: We need to separate activation/deactivation from adding @@ -3299,9 +3653,9 @@ static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc) pr_err("alloc_netdev failed\n"); return -ENOMEM; } - net->mtu = dlci->gsm->mtu; - net->min_mtu = 8; - net->max_mtu = dlci->gsm->mtu; + net->mtu = dlci->mtu; + net->min_mtu = MIN_MTU; + net->max_mtu = dlci->mtu; mux_net = netdev_priv(net); mux_net->dlci = dlci; kref_init(&mux_net->ref); diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 597019690ae62b088faffdae4d2e7be0165dcae0..c8f56c9b1a1c8064477fc57fe32da60b169a612a 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -2130,7 +2130,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, ssize_t retval = 0; long timeout; bool packet; - size_t tail; + size_t old_tail; /* * Is this a continuation of a read started earler? @@ -2193,7 +2193,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, } packet = tty->ctrl.packet; - tail = ldata->read_tail; + old_tail = ldata->read_tail; add_wait_queue(&tty->read_wait, &wait); while (nr) { @@ -2282,7 +2282,7 @@ more_to_be_read: if (time) timeout = time; } - if (tail != ldata->read_tail) + if (old_tail != ldata->read_tail) n_tty_kick_worker(tty); up_read(&tty->termios_rwsem); diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c index c7d34823f7155ae253332f6d2051879c3e87c731..185462fd959c6d7bf9bb2d1fba8caa9e2535a3a9 100644 --- a/drivers/tty/serial/21285.c +++ b/drivers/tty/serial/21285.c @@ -154,35 +154,13 @@ static irqreturn_t serial21285_rx_chars(int irq, void *dev_id) static irqreturn_t serial21285_tx_chars(int irq, void *dev_id) { struct uart_port *port = dev_id; - struct circ_buf *xmit = &port->state->xmit; - int count = 256; - - if (port->x_char) { - *CSR_UARTDR = port->x_char; - port->icount.tx++; - port->x_char = 0; - goto out; - } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { - serial21285_stop_tx(port); - goto out; - } - - do { - *CSR_UARTDR = xmit->buf[xmit->tail]; - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - if (uart_circ_empty(xmit)) - break; - } while (--count > 0 && !(*CSR_UARTFLG & 0x20)); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); + u8 ch; - if (uart_circ_empty(xmit)) - serial21285_stop_tx(port); + uart_port_tx_limited(port, ch, 256, + !(*CSR_UARTFLG & 0x20), + *CSR_UARTDR = ch, + ({})); - out: return IRQ_HANDLED; } diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c index fa8ccf204d860223e4c5339791295e43d9575e9b..ed5a94747692057de234b130470578896dcb6e98 100644 --- a/drivers/tty/serial/8250/8250_bcm7271.c +++ b/drivers/tty/serial/8250/8250_bcm7271.c @@ -425,9 +425,7 @@ static int brcmuart_tx_dma(struct uart_8250_port *p) priv->dma.tx_err = 0; memcpy(priv->tx_buf, &xmit->buf[xmit->tail], tx_size); - xmit->tail += tx_size; - xmit->tail &= UART_XMIT_SIZE - 1; - p->port.icount.tx += tx_size; + uart_xmit_advance(&p->port, tx_size); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 
uart_write_wakeup(&p->port); @@ -1212,9 +1210,17 @@ static struct platform_driver brcmuart_platform_driver = { static int __init brcmuart_init(void) { + int ret; + brcmuart_debugfs_root = debugfs_create_dir( brcmuart_platform_driver.driver.name, NULL); - return platform_driver_register(&brcmuart_platform_driver); + ret = platform_driver_register(&brcmuart_platform_driver); + if (ret) { + debugfs_remove_recursive(brcmuart_debugfs_root); + return ret; + } + + return 0; } module_init(brcmuart_init); diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 74568292186fba2dd3adbb91c4ebba801d4b5389..ab63c308be0a2f288e9bca062e4d2a48e783bbe9 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #ifdef CONFIG_SPARC @@ -1175,8 +1176,8 @@ static int __init serial8250_init(void) serial8250_isa_init_ports(); - pr_info("Serial: 8250/16550 driver, %d ports, IRQ sharing %sabled\n", - nr_uarts, share_irqs ? "en" : "dis"); + pr_info("Serial: 8250/16550 driver, %d ports, IRQ sharing %s\n", + nr_uarts, str_enabled_disabled(share_irqs)); #ifdef CONFIG_SPARC ret = sunserial_register_minors(&serial8250_reg, UART_NR); diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index b85c82616e8cbb8d7853dfd5c0fef202299803b8..37d6af2ec4272e35724f03902d9e32900a6da8bd 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -38,9 +38,8 @@ static void __dma_tx_complete(void *param) spin_unlock_irqrestore(&p->port.lock, flags); } -static void __dma_rx_complete(void *param) +static void __dma_rx_complete(struct uart_8250_port *p) { - struct uart_8250_port *p = param; struct uart_8250_dma *dma = p->dma; struct tty_port *tty_port = &p->port.state->port; struct dma_tx_state state; @@ -57,6 +56,20 @@ static void __dma_rx_complete(void *param) tty_flip_buffer_push(tty_port); } +static void dma_rx_complete(void *param) +{ + struct uart_8250_port *p = param; + struct uart_8250_dma *dma = p->dma; + unsigned long flags; + + __dma_rx_complete(p); + + spin_lock_irqsave(&p->port.lock, flags); + if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR)) + p->dma->rx_dma(p); + spin_unlock_irqrestore(&p->port.lock, flags); +} + int serial8250_tx_dma(struct uart_8250_port *p) { struct uart_8250_dma *dma = p->dma; @@ -130,7 +143,7 @@ int serial8250_rx_dma(struct uart_8250_port *p) return -EBUSY; dma->rx_running = 1; - desc->callback = __dma_rx_complete; + desc->callback = dma_rx_complete; desc->callback_param = p; dma->rx_cookie = dmaengine_submit(desc); diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c index 2b2f5d8d24b918eebf89062ffde1eb683fd9ff65..617b8ce60d6b5cc06ed37b7f386d8a3891f5e1c4 100644 --- a/drivers/tty/serial/8250/8250_ingenic.c +++ b/drivers/tty/serial/8250/8250_ingenic.c @@ -87,7 +87,7 @@ static void __init ingenic_early_console_setup_clock(struct earlycon_device *dev dev->port.uartclk = be32_to_cpup(prop); } -static int __init ingenic_early_console_setup(struct earlycon_device *dev, +static int __init ingenic_earlycon_setup_tail(struct earlycon_device *dev, const char *opt) { struct uart_port *port = &dev->port; @@ -103,8 +103,6 @@ static int __init ingenic_early_console_setup(struct earlycon_device *dev, uart_parse_options(opt, &baud, &parity, &bits, &flow); } - ingenic_early_console_setup_clock(dev); - if (dev->baud) baud = dev->baud; divisor = 
DIV_ROUND_CLOSEST(port->uartclk, 16 * baud); @@ -129,9 +127,36 @@ static int __init ingenic_early_console_setup(struct earlycon_device *dev, return 0; } +static int __init ingenic_early_console_setup(struct earlycon_device *dev, + const char *opt) +{ + ingenic_early_console_setup_clock(dev); + + return ingenic_earlycon_setup_tail(dev, opt); +} + +static int __init jz4750_early_console_setup(struct earlycon_device *dev, + const char *opt) +{ + /* + * JZ4750/55/60 have an optional /2 divider between the EXT + * oscillator and some peripherals including UART, which will + * be enabled if using a 24 MHz oscillator, and disabled when + * using a 12 MHz oscillator. + */ + ingenic_early_console_setup_clock(dev); + if (dev->port.uartclk >= 16000000) + dev->port.uartclk /= 2; + + return ingenic_earlycon_setup_tail(dev, opt); +} + OF_EARLYCON_DECLARE(jz4740_uart, "ingenic,jz4740-uart", ingenic_early_console_setup); +OF_EARLYCON_DECLARE(jz4750_uart, "ingenic,jz4750-uart", + jz4750_early_console_setup); + OF_EARLYCON_DECLARE(jz4770_uart, "ingenic,jz4770-uart", ingenic_early_console_setup); @@ -328,6 +353,7 @@ static const struct ingenic_uart_config x1000_uart_config = { static const struct of_device_id of_match[] = { { .compatible = "ingenic,jz4740-uart", .data = &jz4740_uart_config }, + { .compatible = "ingenic,jz4750-uart", .data = &jz4760_uart_config }, { .compatible = "ingenic,jz4760-uart", .data = &jz4760_uart_config }, { .compatible = "ingenic,jz4770-uart", .data = &jz4760_uart_config }, { .compatible = "ingenic,jz4775-uart", .data = &jz4760_uart_config }, diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 3f33014022f0e9f5774e44fc7df291cd1912c26e..734f092ef839abaadd3e0ff8763d25f348252fb2 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -44,6 +44,7 @@ #define UART_HAS_EFR2 BIT(4) #define UART_HAS_RHR_IT_DIS BIT(5) #define UART_RX_TIMEOUT_QUIRK BIT(6) +#define UART_HAS_NATIVE_RS485 BIT(7) #define OMAP_UART_FCR_RX_TRIG 6 #define OMAP_UART_FCR_TX_TRIG 4 @@ -101,6 +102,11 @@ #define UART_OMAP_IER2 0x1B #define UART_OMAP_IER2_RHR_IT_DIS BIT(2) +/* Mode Definition Register 3 */ +#define UART_OMAP_MDR3 0x20 +#define UART_OMAP_MDR3_DIR_POL BIT(3) +#define UART_OMAP_MDR3_DIR_EN BIT(4) + /* Enhanced features register 2 */ #define UART_OMAP_EFR2 0x23 #define UART_OMAP_EFR2_TIMEOUT_BEHAVE BIT(6) @@ -112,6 +118,7 @@ struct omap8250_priv { int line; u8 habit; u8 mdr1; + u8 mdr3; u8 efr; u8 scr; u8 wer; @@ -346,7 +353,10 @@ static void omap8250_restore_regs(struct uart_8250_port *up) __omap8250_set_mctrl(&up->port, up->port.mctrl); - if (up->port.rs485.flags & SER_RS485_ENABLED) + serial_out(up, UART_OMAP_MDR3, priv->mdr3); + + if (up->port.rs485.flags & SER_RS485_ENABLED && + up->port.rs485_config == serial8250_em485_config) serial8250_em485_stop_tx(up); } @@ -794,6 +804,74 @@ static void omap_8250_unthrottle(struct uart_port *port) pm_runtime_put_autosuspend(port->dev); } +static int omap8250_rs485_config(struct uart_port *port, + struct ktermios *termios, + struct serial_rs485 *rs485) +{ + struct omap8250_priv *priv = port->private_data; + struct uart_8250_port *up = up_to_u8250p(port); + u32 fixed_delay_rts_before_send = 0; + u32 fixed_delay_rts_after_send = 0; + unsigned int baud; + + /* + * There is a fixed delay of 3 bit clock cycles after the TX shift + * register is going empty to allow time for the stop bit to transition + * through the transceiver before direction is changed to receive. 
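A worked example of the fixed-delay arithmetic in omap8250_rs485_config(), assuming 115200 baud in 16x divisor mode (the baud rate is an assumption, the formulas are from the function):

/*
 * Worked example, assuming 115200 baud in 16x divisor mode:
 *
 *   fixed_delay_rts_after_send  = 3 * 1000 / 115200 = 0 ms  (~26 us)
 *   fixed_delay_rts_before_send = 1 * 1000 / 115200 = 0 ms  (~8.7 us)
 *
 * struct serial_rs485 expresses delays in milliseconds, so at common
 * baud rates the fixed hardware delays truncate to 0 and any nonzero
 * requested delay selects the software-emulation fallback instead.
 */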
+ * + * Additionally there appears to be a 1 bit clock delay between writing + * to the THR register and transmission of the start bit, per page 8783 + * of the AM65 TRM: https://www.ti.com/lit/ug/spruid7e/spruid7e.pdf + */ + if (priv->quot) { + if (priv->mdr1 == UART_OMAP_MDR1_16X_MODE) + baud = port->uartclk / (16 * priv->quot); + else + baud = port->uartclk / (13 * priv->quot); + + fixed_delay_rts_after_send = 3 * MSEC_PER_SEC / baud; + fixed_delay_rts_before_send = 1 * MSEC_PER_SEC / baud; + } + + /* + * Fall back to RS485 software emulation if the UART is missing + * hardware support, if the device tree specifies an mctrl_gpio + * (indicates that RTS is unavailable due to a pinmux conflict) + * or if the requested delays exceed the fixed hardware delays. + */ + if (!(priv->habit & UART_HAS_NATIVE_RS485) || + mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS) || + rs485->delay_rts_after_send > fixed_delay_rts_after_send || + rs485->delay_rts_before_send > fixed_delay_rts_before_send) { + priv->mdr3 &= ~UART_OMAP_MDR3_DIR_EN; + serial_out(up, UART_OMAP_MDR3, priv->mdr3); + + port->rs485_config = serial8250_em485_config; + return serial8250_em485_config(port, termios, rs485); + } + + rs485->delay_rts_after_send = fixed_delay_rts_after_send; + rs485->delay_rts_before_send = fixed_delay_rts_before_send; + + if (rs485->flags & SER_RS485_ENABLED) + priv->mdr3 |= UART_OMAP_MDR3_DIR_EN; + else + priv->mdr3 &= ~UART_OMAP_MDR3_DIR_EN; + + /* + * Retain same polarity semantics as RS485 software emulation, + * i.e. SER_RS485_RTS_ON_SEND means driving RTS low on send. + */ + if (rs485->flags & SER_RS485_RTS_ON_SEND) + priv->mdr3 &= ~UART_OMAP_MDR3_DIR_POL; + else + priv->mdr3 |= UART_OMAP_MDR3_DIR_POL; + + serial_out(up, UART_OMAP_MDR3, priv->mdr3); + + return 0; +} + #ifdef CONFIG_SERIAL_8250_DMA static int omap_8250_rx_dma(struct uart_8250_port *p); @@ -1243,7 +1321,7 @@ static struct omap8250_dma_params am33xx_dma = { static struct omap8250_platdata am654_platdata = { .dma_params = &am654_dma, .habit = UART_HAS_EFR2 | UART_HAS_RHR_IT_DIS | - UART_RX_TIMEOUT_QUIRK, + UART_RX_TIMEOUT_QUIRK | UART_HAS_NATIVE_RS485, }; static struct omap8250_platdata am33xx_platdata = { @@ -1336,7 +1414,8 @@ static int omap8250_probe(struct platform_device *pdev) up.port.shutdown = omap_8250_shutdown; up.port.throttle = omap_8250_throttle; up.port.unthrottle = omap_8250_unthrottle; - up.port.rs485_config = serial8250_em485_config; + up.port.rs485_config = omap8250_rs485_config; + /* same rs485_supported for software emulation and native RS485 */ up.port.rs485_supported = serial8250_em485_supported; up.rs485_start_tx = serial8250_em485_start_tx; up.rs485_stop_tx = serial8250_em485_stop_tx; diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 388172289627ad5c03fd376660e1b7090dac1256..beba8f38b3dcb40d22de1edb9a83316b1a7ae3e0 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1842,8 +1842,7 @@ void serial8250_tx_chars(struct uart_8250_port *up) */ serial_in(up, UART_SCR); } - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); if (uart_circ_empty(xmit)) break; if ((up->capabilities & UART_CAP_HFIFO) && diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 434f83168546cfeafd0d4735045c9d7b8b2d93ab..c55b947f3cdbb7a789dec40a380c0c9d9a6c64fc 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -958,6 +958,7 @@ config SERIAL_OMAP_CONSOLE config 
SERIAL_SIFIVE tristate "SiFive UART support" depends on OF + default SOC_SIFIVE || SOC_CANAAN select SERIAL_CORE help Select this option if you are building a kernel for a device that @@ -967,6 +968,7 @@ config SERIAL_SIFIVE config SERIAL_SIFIVE_CONSOLE bool "Console on SiFive UART" depends on SERIAL_SIFIVE=y + default SOC_SIFIVE || SOC_CANAAN select SERIAL_CORE_CONSOLE select SERIAL_EARLYCON help diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c index c2d154d78e545904a3a6ac9c2d5ede39dda9be6e..9f843d1cee40e9ffddd77dd87c635cef5c28c30f 100644 --- a/drivers/tty/serial/altera_jtaguart.c +++ b/drivers/tty/serial/altera_jtaguart.c @@ -50,15 +50,6 @@ #define ALTERA_JTAGUART_CONTROL_AC_MSK 0x00000400 #define ALTERA_JTAGUART_CONTROL_WSPACE_MSK 0xFFFF0000 -/* - * Local per-uart structure. - */ -struct altera_jtaguart { - struct uart_port port; - unsigned int sigs; /* Local copy of line sigs */ - unsigned long imr; /* Local IMR mirror */ -}; - static unsigned int altera_jtaguart_tx_space(struct uart_port *port, u32 *ctlp) { u32 ctl = readl(port->membase + ALTERA_JTAGUART_CONTROL_REG); @@ -85,29 +76,23 @@ static void altera_jtaguart_set_mctrl(struct uart_port *port, unsigned int sigs) static void altera_jtaguart_start_tx(struct uart_port *port) { - struct altera_jtaguart *pp = - container_of(port, struct altera_jtaguart, port); - - pp->imr |= ALTERA_JTAGUART_CONTROL_WE_MSK; - writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); + port->read_status_mask |= ALTERA_JTAGUART_CONTROL_WE_MSK; + writel(port->read_status_mask, + port->membase + ALTERA_JTAGUART_CONTROL_REG); } static void altera_jtaguart_stop_tx(struct uart_port *port) { - struct altera_jtaguart *pp = - container_of(port, struct altera_jtaguart, port); - - pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK; - writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); + port->read_status_mask &= ~ALTERA_JTAGUART_CONTROL_WE_MSK; + writel(port->read_status_mask, + port->membase + ALTERA_JTAGUART_CONTROL_REG); } static void altera_jtaguart_stop_rx(struct uart_port *port) { - struct altera_jtaguart *pp = - container_of(port, struct altera_jtaguart, port); - - pp->imr &= ~ALTERA_JTAGUART_CONTROL_RE_MSK; - writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); + port->read_status_mask &= ~ALTERA_JTAGUART_CONTROL_RE_MSK; + writel(port->read_status_mask, + port->membase + ALTERA_JTAGUART_CONTROL_REG); } static void altera_jtaguart_break_ctl(struct uart_port *port, int break_state) @@ -123,78 +108,51 @@ static void altera_jtaguart_set_termios(struct uart_port *port, tty_termios_copy_hw(termios, old); } -static void altera_jtaguart_rx_chars(struct altera_jtaguart *pp) +static void altera_jtaguart_rx_chars(struct uart_port *port) { - struct uart_port *port = &pp->port; - unsigned char ch, flag; + unsigned char ch; unsigned long status; while ((status = readl(port->membase + ALTERA_JTAGUART_DATA_REG)) & ALTERA_JTAGUART_DATA_RVALID_MSK) { ch = status & ALTERA_JTAGUART_DATA_DATA_MSK; - flag = TTY_NORMAL; port->icount.rx++; if (uart_handle_sysrq_char(port, ch)) continue; - uart_insert_char(port, 0, 0, ch, flag); + uart_insert_char(port, 0, 0, ch, TTY_NORMAL); } tty_flip_buffer_push(&port->state->port); } -static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp) +static void altera_jtaguart_tx_chars(struct uart_port *port) { - struct uart_port *port = &pp->port; - struct circ_buf *xmit = &port->state->xmit; - unsigned int pending, count; - - if (port->x_char) { - /* Send special char - probably flow 
control */ - writel(port->x_char, port->membase + ALTERA_JTAGUART_DATA_REG); - port->x_char = 0; - port->icount.tx++; - return; - } + unsigned int count; + u8 ch; - pending = uart_circ_chars_pending(xmit); - if (pending > 0) { - count = altera_jtaguart_tx_space(port, NULL); - if (count > pending) - count = pending; - if (count > 0) { - pending -= count; - while (count--) { - writel(xmit->buf[xmit->tail], - port->membase + ALTERA_JTAGUART_DATA_REG); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - } - if (pending < WAKEUP_CHARS) - uart_write_wakeup(port); - } - } + count = altera_jtaguart_tx_space(port, NULL); - if (pending == 0) - altera_jtaguart_stop_tx(port); + uart_port_tx_limited(port, ch, count, + true, + writel(ch, port->membase + ALTERA_JTAGUART_DATA_REG), + ({})); } static irqreturn_t altera_jtaguart_interrupt(int irq, void *data) { struct uart_port *port = data; - struct altera_jtaguart *pp = - container_of(port, struct altera_jtaguart, port); unsigned int isr; isr = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) >> - ALTERA_JTAGUART_CONTROL_RI_OFF) & pp->imr; + ALTERA_JTAGUART_CONTROL_RI_OFF) & port->read_status_mask; spin_lock(&port->lock); if (isr & ALTERA_JTAGUART_CONTROL_RE_MSK) - altera_jtaguart_rx_chars(pp); + altera_jtaguart_rx_chars(port); if (isr & ALTERA_JTAGUART_CONTROL_WE_MSK) - altera_jtaguart_tx_chars(pp); + altera_jtaguart_tx_chars(port); spin_unlock(&port->lock); @@ -211,8 +169,6 @@ static void altera_jtaguart_config_port(struct uart_port *port, int flags) static int altera_jtaguart_startup(struct uart_port *port) { - struct altera_jtaguart *pp = - container_of(port, struct altera_jtaguart, port); unsigned long flags; int ret; @@ -227,8 +183,9 @@ static int altera_jtaguart_startup(struct uart_port *port) spin_lock_irqsave(&port->lock, flags); /* Enable RX interrupts now */ - pp->imr = ALTERA_JTAGUART_CONTROL_RE_MSK; - writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); + port->read_status_mask = ALTERA_JTAGUART_CONTROL_RE_MSK; + writel(port->read_status_mask, + port->membase + ALTERA_JTAGUART_CONTROL_REG); spin_unlock_irqrestore(&port->lock, flags); @@ -237,15 +194,14 @@ static int altera_jtaguart_startup(struct uart_port *port) static void altera_jtaguart_shutdown(struct uart_port *port) { - struct altera_jtaguart *pp = - container_of(port, struct altera_jtaguart, port); unsigned long flags; spin_lock_irqsave(&port->lock, flags); /* Disable all interrupts now */ - pp->imr = 0; - writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); + port->read_status_mask = 0; + writel(port->read_status_mask, + port->membase + ALTERA_JTAGUART_CONTROL_REG); spin_unlock_irqrestore(&port->lock, flags); @@ -298,7 +254,7 @@ static const struct uart_ops altera_jtaguart_ops = { }; #define ALTERA_JTAGUART_MAXPORTS 1 -static struct altera_jtaguart altera_jtaguart_ports[ALTERA_JTAGUART_MAXPORTS]; +static struct uart_port altera_jtaguart_ports[ALTERA_JTAGUART_MAXPORTS]; #if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE) @@ -341,7 +297,7 @@ static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c static void altera_jtaguart_console_write(struct console *co, const char *s, unsigned int count) { - struct uart_port *port = &(altera_jtaguart_ports + co->index)->port; + struct uart_port *port = &altera_jtaguart_ports[co->index]; uart_console_write(port, s, count, altera_jtaguart_console_putc); } @@ -353,7 +309,7 @@ static int __init altera_jtaguart_console_setup(struct console *co, if (co->index < 0 || co->index >= 
ALTERA_JTAGUART_MAXPORTS) return -EINVAL; - port = &altera_jtaguart_ports[co->index].port; + port = &altera_jtaguart_ports[co->index]; if (port->membase == NULL) return -ENODEV; return 0; @@ -433,7 +389,7 @@ static int altera_jtaguart_probe(struct platform_device *pdev) if (i >= ALTERA_JTAGUART_MAXPORTS) return -EINVAL; - port = &altera_jtaguart_ports[i].port; + port = &altera_jtaguart_ports[i]; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res_mem) @@ -477,7 +433,7 @@ static int altera_jtaguart_remove(struct platform_device *pdev) if (i == -1) i = 0; - port = &altera_jtaguart_ports[i].port; + port = &altera_jtaguart_ports[i]; uart_remove_one_port(&altera_jtaguart_driver, port); iounmap(port->membase); diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c index 82f2790de28d198e5611f571cfadd2610505bcf0..9ce3d24af53600d6f458dd979969f0d69fe69ca4 100644 --- a/drivers/tty/serial/altera_uart.c +++ b/drivers/tty/serial/altera_uart.c @@ -247,47 +247,29 @@ static void altera_uart_rx_chars(struct uart_port *port) static void altera_uart_tx_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; - - if (port->x_char) { - /* Send special char - probably flow control */ - altera_uart_writel(port, port->x_char, ALTERA_UART_TXDATA_REG); - port->x_char = 0; - port->icount.tx++; - return; - } - - while (altera_uart_readl(port, ALTERA_UART_STATUS_REG) & - ALTERA_UART_STATUS_TRDY_MSK) { - if (xmit->head == xmit->tail) - break; - altera_uart_writel(port, xmit->buf[xmit->tail], - ALTERA_UART_TXDATA_REG); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); + u8 ch; - if (uart_circ_empty(xmit)) - altera_uart_stop_tx(port); + uart_port_tx(port, ch, + altera_uart_readl(port, ALTERA_UART_STATUS_REG) & + ALTERA_UART_STATUS_TRDY_MSK, + altera_uart_writel(port, ch, ALTERA_UART_TXDATA_REG)); } static irqreturn_t altera_uart_interrupt(int irq, void *data) { struct uart_port *port = data; struct altera_uart *pp = container_of(port, struct altera_uart, port); + unsigned long flags; unsigned int isr; isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr; - spin_lock(&port->lock); + spin_lock_irqsave(&port->lock, flags); if (isr & ALTERA_UART_STATUS_RRDY_MSK) altera_uart_rx_chars(port); if (isr & ALTERA_UART_STATUS_TRDY_MSK) altera_uart_tx_chars(port); - spin_unlock(&port->lock); + spin_unlock_irqrestore(&port->lock, flags); return IRQ_RETVAL(isr); } diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c index af27fb8ec145161cb190e69c012d4da5264ac78d..a98fae2ca422d2eb05e85be85390ab5cef50cddc 100644 --- a/drivers/tty/serial/amba-pl010.c +++ b/drivers/tty/serial/amba-pl010.c @@ -164,34 +164,12 @@ static void pl010_rx_chars(struct uart_port *port) static void pl010_tx_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; - int count; + u8 ch; - if (port->x_char) { - writel(port->x_char, port->membase + UART01x_DR); - port->icount.tx++; - port->x_char = 0; - return; - } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { - pl010_stop_tx(port); - return; - } - - count = port->fifosize >> 1; - do { - writel(xmit->buf[xmit->tail], port->membase + UART01x_DR); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - if (uart_circ_empty(xmit)) - break; - } while (--count > 0); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); - - if 
(uart_circ_empty(xmit)) - pl010_stop_tx(port); + uart_port_tx_limited(port, ch, port->fifosize >> 1, + true, + writel(ch, port->membase + UART01x_DR), + ({})); } static void pl010_modem_status(struct uart_amba_port *uap) diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 5cdced39eafdb92758d5b4f79a123e2d76bf5f6b..d75c39f4622b7b6ae53f1f9bc053dd0c5476083b 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -677,8 +677,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap) * Now we know that DMA will fire, so advance the ring buffer * with the stuff we just dispatched. */ - xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); - uap->port.icount.tx += count; + uart_xmit_advance(&uap->port, count); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&uap->port); @@ -1045,6 +1044,9 @@ static void pl011_dma_rx_callback(void *data) */ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) { + if (!uap->using_rx_dma) + return; + /* FIXME. Just disable the DMA enable */ uap->dmacr &= ~UART011_RXDMAE; pl011_write(uap->dmacr, uap, REG_DMACR); @@ -1828,8 +1830,17 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap) static void pl011_unthrottle_rx(struct uart_port *port) { struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); + unsigned long flags; - pl011_enable_interrupts(uap); + spin_lock_irqsave(&uap->port.lock, flags); + + uap->im = UART011_RTIM; + if (!pl011_dma_rx_running(uap)) + uap->im |= UART011_RXIM; + + pl011_write(uap->im, uap, REG_IMSC); + + spin_unlock_irqrestore(&uap->port.lock, flags); } static int pl011_startup(struct uart_port *port) diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c index 450f4edfda0f00a1fb36910d26b2ad5202489015..915ee4b0d594944eecbf1615aa4ecd8330f10e23 100644 --- a/drivers/tty/serial/apbuart.c +++ b/drivers/tty/serial/apbuart.c @@ -122,36 +122,12 @@ static void apbuart_rx_chars(struct uart_port *port) static void apbuart_tx_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; - int count; - - if (port->x_char) { - UART_PUT_CHAR(port, port->x_char); - port->icount.tx++; - port->x_char = 0; - return; - } - - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { - apbuart_stop_tx(port); - return; - } - - /* amba: fill FIFO */ - count = port->fifosize >> 1; - do { - UART_PUT_CHAR(port, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - if (uart_circ_empty(xmit)) - break; - } while (--count > 0); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); + u8 ch; - if (uart_circ_empty(xmit)) - apbuart_stop_tx(port); + uart_port_tx_limited(port, ch, port->fifosize >> 1, + true, + UART_PUT_CHAR(port, ch), + ({})); } static irqreturn_t apbuart_int(int irq, void *dev_id) diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c index 925484a42c821e8ea70acbefe443b164f5001d4e..4c3d04c6826a94ff5062e7eab48738e9fd9f3074 100644 --- a/drivers/tty/serial/ar933x_uart.c +++ b/drivers/tty/serial/ar933x_uart.c @@ -425,8 +425,7 @@ static void ar933x_uart_tx_chars(struct ar933x_uart_port *up) ar933x_uart_putc(up, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - up->port.icount.tx++; + uart_xmit_advance(&up->port, 1); } while (--count > 0); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c index 
2a65ea2660e10c5d903a19a46511b15d45c4514c..748e8b1cf4f7f3e0cebc39e03dfba8b9a1fcd0f7 100644 --- a/drivers/tty/serial/arc_uart.c +++ b/drivers/tty/serial/arc_uart.c @@ -166,8 +166,7 @@ static void arc_serial_tx_chars(struct uart_port *port) sent = 1; } else if (!uart_circ_empty(xmit)) { ch = xmit->buf[xmit->tail]; - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); while (!(UART_GET_STATUS(port) & TXEMPTY)) cpu_relax(); UART_SET_DATA(port, ch); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index bd07f79a2df91b2263df5d0f1f17cbb1343e304f..f1c06e12efa0bb7e720c004fffcc51394e2fd885 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -552,19 +552,23 @@ static u_int atmel_get_mctrl(struct uart_port *port) static void atmel_stop_tx(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + bool is_pdc = atmel_use_pdc_tx(port); + bool is_dma = is_pdc || atmel_use_dma_tx(port); - if (atmel_use_pdc_tx(port)) { + if (is_pdc) { /* disable PDC transmit */ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); } - /* - * Disable the transmitter. - * This is mandatory when DMA is used, otherwise the DMA buffer - * is fully transmitted. - */ - atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS); - atmel_port->tx_stopped = true; + if (is_dma) { + /* + * Disable the transmitter. + * This is mandatory when DMA is used, otherwise the DMA buffer + * is fully transmitted. + */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS); + atmel_port->tx_stopped = true; + } /* Disable interrupts */ atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); @@ -572,7 +576,6 @@ static void atmel_stop_tx(struct uart_port *port) if (atmel_uart_is_half_duplex(port)) if (!atomic_read(&atmel_port->tasklet_shutdown)) atmel_start_rx(port); - } /* @@ -581,27 +584,31 @@ static void atmel_stop_tx(struct uart_port *port) static void atmel_start_tx(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + bool is_pdc = atmel_use_pdc_tx(port); + bool is_dma = is_pdc || atmel_use_dma_tx(port); - if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR) + if (is_pdc && (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN)) /* The transmitter is already running. 
Yes, we really need this.*/ return; - if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) - if (atmel_uart_is_half_duplex(port)) - atmel_stop_rx(port); + if (is_dma && atmel_uart_is_half_duplex(port)) + atmel_stop_rx(port); - if (atmel_use_pdc_tx(port)) + if (is_pdc) { /* re-enable PDC transmit */ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); + } /* Enable interrupts */ atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); - /* re-enable the transmitter */ - atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); - atmel_port->tx_stopped = false; + if (is_dma) { + /* re-enable the transmitter */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); + atmel_port->tx_stopped = false; + } } /* @@ -824,30 +831,14 @@ static void atmel_rx_chars(struct uart_port *port) */ static void atmel_tx_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + bool pending; + u8 ch; - if (port->x_char && - (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) { - atmel_uart_write_char(port, port->x_char); - port->icount.tx++; - port->x_char = 0; - } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) - return; - - while (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY) { - atmel_uart_write_char(port, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - if (uart_circ_empty(xmit)) - break; - } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); - - if (!uart_circ_empty(xmit)) { + pending = uart_port_tx(port, ch, + atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY, + atmel_uart_write_char(port, ch)); + if (pending) { /* we still have characters to transmit, so we should continue * transmitting them when TX is ready, regardless of * mode or duplexity @@ -875,10 +866,7 @@ static void atmel_complete_tx_dma(void *arg) if (chan) dmaengine_terminate_all(chan); - xmit->tail += atmel_port->tx_len; - xmit->tail &= UART_XMIT_SIZE - 1; - - port->icount.tx += atmel_port->tx_len; + uart_xmit_advance(port, atmel_port->tx_len); spin_lock_irq(&atmel_port->lock_tx); async_tx_ack(atmel_port->desc_tx); @@ -1471,11 +1459,7 @@ static void atmel_tx_pdc(struct uart_port *port) /* nothing left to transmit? 
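[Editorial note] The conversions above replace each driver's open-coded transmit loop with the new serial core helpers uart_port_tx()/uart_port_tx_limited() plus the uart_xmit_advance() accessor. As a reading aid, here is a plain-C paraphrase of the behaviour the call sites rely on. It is a sketch based on how the helpers are used in this series, not a copy of the <linux/serial_core.h> implementation: the real helpers are macros whose tx_ready/put_char arguments are expressions evaluated in place, shown here as function pointers for readability; uart_port_tx() behaves the same without the count cap.

static inline void xmit_advance_sketch(struct uart_port *port,
				       unsigned int chars)
{
	struct circ_buf *xmit = &port->state->xmit;

	/* consume @chars from the circular buffer and account them */
	xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
	port->icount.tx += chars;
}

static unsigned int port_tx_limited_sketch(struct uart_port *port,
		unsigned int count,			/* free FIFO slots */
		bool (*tx_ready)(struct uart_port *),	/* may we write one? */
		void (*put_char)(struct uart_port *, u8))
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int pending;

	while (count-- && tx_ready(port)) {
		/* the high-priority char (e.g. XON/XOFF) always goes first */
		if (port->x_char) {
			put_char(port, port->x_char);
			port->x_char = 0;
			port->icount.tx++;
			continue;
		}
		if (uart_circ_empty(xmit) || uart_tx_stopped(port))
			break;
		put_char(port, xmit->buf[xmit->tail]);
		xmit_advance_sketch(port, 1);
	}

	pending = uart_circ_chars_pending(xmit);
	if (pending < WAKEUP_CHARS) {
		uart_write_wakeup(port);
		if (!pending)
			port->ops->stop_tx(port);	/* buffer drained */
	}
	return pending;
}

The returned pending count is what lets converted drivers keep their surrounding logic: mcf (further below) uses it to drop RS485 RTS once the buffer drains, and mxs-auart and mpc52xx gate their TX interrupt on it.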
*/ if (atmel_uart_readl(port, ATMEL_PDC_TCR)) return; - - xmit->tail += pdc->ofs; - xmit->tail &= UART_XMIT_SIZE - 1; - - port->icount.tx += pdc->ofs; + uart_xmit_advance(port, pdc->ofs); pdc->ofs = 0; /* more to transmit - setup next transfer */ diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c index 5d9737c2d1f2c8ec0f2ef1bd73061ad9aea623e4..62bc7244dc67f4c9afe08a786b6a9c47df33fe69 100644 --- a/drivers/tty/serial/bcm63xx_uart.c +++ b/drivers/tty/serial/bcm63xx_uart.c @@ -303,53 +303,24 @@ static void bcm_uart_do_rx(struct uart_port *port) */ static void bcm_uart_do_tx(struct uart_port *port) { - struct circ_buf *xmit; - unsigned int val, max_count; - - if (port->x_char) { - bcm_uart_writel(port, port->x_char, UART_FIFO_REG); - port->icount.tx++; - port->x_char = 0; - return; - } - - if (uart_tx_stopped(port)) { - bcm_uart_stop_tx(port); - return; - } - - xmit = &port->state->xmit; - if (uart_circ_empty(xmit)) - goto txq_empty; + unsigned int val; + bool pending; + u8 ch; val = bcm_uart_readl(port, UART_MCTL_REG); val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT; - max_count = port->fifosize - val; - - while (max_count--) { - unsigned int c; - c = xmit->buf[xmit->tail]; - bcm_uart_writel(port, c, UART_FIFO_REG); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - if (uart_circ_empty(xmit)) - break; - } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); - - if (uart_circ_empty(xmit)) - goto txq_empty; - return; + pending = uart_port_tx_limited(port, ch, port->fifosize - val, + true, + bcm_uart_writel(port, ch, UART_FIFO_REG), + ({})); + if (pending) + return; -txq_empty: /* nothing to send, disable transmit interrupt */ val = bcm_uart_readl(port, UART_IR_REG); val &= ~UART_TX_INT_MASK; bcm_uart_writel(port, val, UART_IR_REG); - return; } /* diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c index 404b43a5ae33fc94935a892f073c145470c25926..e190dce58f4628c3b2226155226b884948549b92 100644 --- a/drivers/tty/serial/clps711x.c +++ b/drivers/tty/serial/clps711x.c @@ -166,8 +166,7 @@ static irqreturn_t uart_clps711x_int_tx(int irq, void *dev_id) u32 sysflg = 0; writew(xmit->buf[xmit->tail], port->membase + UARTDR_OFFSET); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg); if (sysflg & SYSFLG_UTXFF) diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c index b4369ed45ae2d101db021e5adca4710d62ba2b51..5565f302cb2126907436a093f912a1e010efb5dc 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c @@ -684,8 +684,7 @@ static int cpm_uart_tx_pump(struct uart_port *port) p = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo); while (count < pinfo->tx_fifosize) { *p++ = xmit->buf[xmit->tail]; - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); count++; if (xmit->head == xmit->tail) break; diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c index 0c0a62346f23195b546f7707f8fab518ba2cb51d..ed197705f7eea028ef1f18ba276b9ed399f048f9 100644 --- a/drivers/tty/serial/digicolor-usart.c +++ b/drivers/tty/serial/digicolor-usart.c @@ -202,8 +202,7 @@ static void digicolor_uart_tx(struct uart_port *port) while (!uart_circ_empty(xmit)) { writeb(xmit->buf[xmit->tail], port->membase + UA_EMI_REC); - 
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); if (digicolor_uart_tx_full(port)) break; diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c index 829b452daee9d4b813342484e5c7c8548a953c4e..6b7ed7f2f3cac64fbfd3e87705bd54ea9ae2d525 100644 --- a/drivers/tty/serial/dz.c +++ b/drivers/tty/serial/dz.c @@ -279,9 +279,8 @@ static inline void dz_transmit_chars(struct dz_mux *mux) * so we go one char at a time) :-< */ tmp = xmit->buf[xmit->tail]; - xmit->tail = (xmit->tail + 1) & (DZ_XMIT_SIZE - 1); dz_out(dport, DZ_TDR, tmp); - dport->port.icount.tx++; + uart_xmit_advance(&dport->port, 1); if (uart_circ_chars_pending(xmit) < DZ_WAKEUP_CHARS) uart_write_wakeup(&dport->port); diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c index 84e8153e54200f388520344b9ef55e8435af4cf6..6fc21b6684e6fe879bc075813af4af061d35008c 100644 --- a/drivers/tty/serial/fsl_linflexuart.c +++ b/drivers/tty/serial/fsl_linflexuart.c @@ -178,8 +178,7 @@ static inline void linflex_transmit_buffer(struct uart_port *sport) while (!uart_circ_empty(xmit)) { linflex_put_char(sport, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - sport->icount.tx++; + uart_xmit_advance(sport, 1); } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 888e01fbd9c5f82dff7b5bf0a841dea5d88c12fb..5e69fb73f570f68d21ae99b765da5f614d1d9cc5 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include #include @@ -232,6 +234,7 @@ /* Rx DMA timeout in ms, which is used to calculate Rx ring buffer size */ #define DMA_RX_TIMEOUT (10) +#define UART_AUTOSUSPEND_TIMEOUT 3000 #define DRIVER_NAME "fsl-lpuart" #define DEV_NAME "ttyLP" @@ -509,9 +512,7 @@ static void lpuart_dma_tx_complete(void *arg) dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); - xmit->tail = (xmit->tail + sport->dma_tx_bytes) & (UART_XMIT_SIZE - 1); - - sport->port.icount.tx += sport->dma_tx_bytes; + uart_xmit_advance(&sport->port, sport->dma_tx_bytes); sport->dma_tx_in_progress = false; spin_unlock_irqrestore(&sport->port.lock, flags); @@ -582,7 +583,7 @@ static void lpuart_flush_buffer(struct uart_port *port) sport->dma_tx_nents, DMA_TO_DEVICE); sport->dma_tx_in_progress = false; } - dmaengine_terminate_all(chan); + dmaengine_terminate_async(chan); } if (lpuart_is_32(sport)) { @@ -716,32 +717,12 @@ static int lpuart32_poll_get_char(struct uart_port *port) static inline void lpuart_transmit_buffer(struct lpuart_port *sport) { - struct circ_buf *xmit = &sport->port.state->xmit; - - if (sport->port.x_char) { - writeb(sport->port.x_char, sport->port.membase + UARTDR); - sport->port.icount.tx++; - sport->port.x_char = 0; - return; - } - - if (lpuart_stopped_or_empty(&sport->port)) { - lpuart_stop_tx(&sport->port); - return; - } - - while (!uart_circ_empty(xmit) && - (readb(sport->port.membase + UARTTCFIFO) < sport->txfifo_size)) { - writeb(xmit->buf[xmit->tail], sport->port.membase + UARTDR); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - sport->port.icount.tx++; - } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(&sport->port); + struct uart_port *port = &sport->port; + u8 ch; - if (uart_circ_empty(xmit)) - lpuart_stop_tx(&sport->port); + uart_port_tx(port, ch, + readb(port->membase + UARTTCFIFO) < sport->txfifo_size, + 
writeb(ch, port->membase + UARTDR)); } static inline void lpuart32_transmit_buffer(struct lpuart_port *sport) @@ -766,8 +747,7 @@ static inline void lpuart32_transmit_buffer(struct lpuart_port *sport) txcnt &= UARTWATER_COUNT_MASK; while (!uart_circ_empty(xmit) && (txcnt < sport->txfifo_size)) { lpuart32_write(&sport->port, xmit->buf[xmit->tail], UARTDATA); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - sport->port.icount.tx++; + uart_xmit_advance(&sport->port, 1); txcnt = lpuart32_read(&sport->port, UARTWATER); txcnt = txcnt >> UARTWATER_TXCNT_OFF; txcnt &= UARTWATER_COUNT_MASK; @@ -815,6 +795,20 @@ static void lpuart32_start_tx(struct uart_port *port) } } +static void +lpuart_uart_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) +{ + switch (state) { + case UART_PM_STATE_OFF: + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); + break; + default: + pm_runtime_get_sync(port->dev); + break; + } +} + /* return TIOCSER_TEMT when transmitter is not busy */ static unsigned int lpuart_tx_empty(struct uart_port *port) { @@ -1333,7 +1327,7 @@ static void lpuart_dma_rx_free(struct uart_port *port) struct lpuart_port, port); struct dma_chan *chan = sport->dma_rx_chan; - dmaengine_terminate_all(chan); + dmaengine_terminate_sync(chan); dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE); kfree(sport->rx_ring.buf); sport->rx_ring.tail = 0; @@ -1650,10 +1644,23 @@ err: sport->lpuart_dma_rx_use = false; } +static void lpuart_hw_setup(struct lpuart_port *sport) +{ + unsigned long flags; + + spin_lock_irqsave(&sport->port.lock, flags); + + lpuart_setup_watermark_enable(sport); + + lpuart_rx_dma_startup(sport); + lpuart_tx_dma_startup(sport); + + spin_unlock_irqrestore(&sport->port.lock, flags); +} + static int lpuart_startup(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned long flags; unsigned char temp; /* determine FIFO size and enable FIFO mode */ @@ -1667,15 +1674,7 @@ static int lpuart_startup(struct uart_port *port) UARTPFIFO_FIFOSIZE_MASK); lpuart_request_dma(sport); - - spin_lock_irqsave(&sport->port.lock, flags); - - lpuart_setup_watermark_enable(sport); - - lpuart_rx_dma_startup(sport); - lpuart_tx_dma_startup(sport); - - spin_unlock_irqrestore(&sport->port.lock, flags); + lpuart_hw_setup(sport); return 0; } @@ -1698,10 +1697,25 @@ static void lpuart32_configure(struct lpuart_port *sport) lpuart32_write(&sport->port, temp, UARTCTRL); } +static void lpuart32_hw_setup(struct lpuart_port *sport) +{ + unsigned long flags; + + spin_lock_irqsave(&sport->port.lock, flags); + + lpuart32_setup_watermark_enable(sport); + + lpuart_rx_dma_startup(sport); + lpuart_tx_dma_startup(sport); + + lpuart32_configure(sport); + + spin_unlock_irqrestore(&sport->port.lock, flags); +} + static int lpuart32_startup(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned long flags; unsigned long temp; /* determine FIFO size */ @@ -1726,17 +1740,8 @@ static int lpuart32_startup(struct uart_port *port) } lpuart_request_dma(sport); + lpuart32_hw_setup(sport); - spin_lock_irqsave(&sport->port.lock, flags); - - lpuart32_setup_watermark_enable(sport); - - lpuart_rx_dma_startup(sport); - lpuart_tx_dma_startup(sport); - - lpuart32_configure(sport); - - spin_unlock_irqrestore(&sport->port.lock, flags); return 0; } @@ -1752,7 +1757,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport) if 
(wait_event_interruptible_timeout(sport->dma_wait, !sport->dma_tx_in_progress, msecs_to_jiffies(300)) <= 0) { sport->dma_tx_in_progress = false; - dmaengine_terminate_all(sport->dma_tx_chan); + dmaengine_terminate_sync(sport->dma_tx_chan); } sport->lpuart_dma_tx_use = false; } @@ -2240,6 +2245,7 @@ static const struct uart_ops lpuart_pops = { .startup = lpuart_startup, .shutdown = lpuart_shutdown, .set_termios = lpuart_set_termios, + .pm = lpuart_uart_pm, .type = lpuart_type, .request_port = lpuart_request_port, .release_port = lpuart_release_port, @@ -2264,6 +2270,7 @@ static const struct uart_ops lpuart32_pops = { .startup = lpuart32_startup, .shutdown = lpuart32_shutdown, .set_termios = lpuart32_set_termios, + .pm = lpuart_uart_pm, .type = lpuart_type, .request_port = lpuart_request_port, .release_port = lpuart_release_port, @@ -2744,6 +2751,11 @@ static int lpuart_probe(struct platform_device *pdev) handler = lpuart_int; } + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + ret = lpuart_global_reset(sport); if (ret) goto failed_reset; @@ -2768,6 +2780,9 @@ failed_irq_request: failed_attach_port: failed_get_rs485: failed_reset: + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); lpuart_disable_clks(sport); return ret; } @@ -2786,100 +2801,241 @@ static int lpuart_remove(struct platform_device *pdev) if (sport->dma_rx_chan) dma_release_channel(sport->dma_rx_chan); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); return 0; } -static int __maybe_unused lpuart_suspend(struct device *dev) +static int lpuart_runtime_suspend(struct device *dev) { - struct lpuart_port *sport = dev_get_drvdata(dev); - unsigned long temp; - bool irq_wake; + struct platform_device *pdev = to_platform_device(dev); + struct lpuart_port *sport = platform_get_drvdata(pdev); - if (lpuart_is_32(sport)) { - /* disable Rx/Tx and interrupts */ - temp = lpuart32_read(&sport->port, UARTCTRL); - temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE); - lpuart32_write(&sport->port, temp, UARTCTRL); - } else { - /* disable Rx/Tx and interrupts */ - temp = readb(sport->port.membase + UARTCR2); - temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE); - writeb(temp, sport->port.membase + UARTCR2); - } + lpuart_disable_clks(sport); - uart_suspend_port(&lpuart_reg, &sport->port); + return 0; +}; - /* uart_suspend_port() might set wakeup flag */ - irq_wake = irqd_is_wakeup_set(irq_get_irq_data(sport->port.irq)); +static int lpuart_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct lpuart_port *sport = platform_get_drvdata(pdev); - if (sport->lpuart_dma_rx_use) { - /* - * EDMA driver during suspend will forcefully release any - * non-idle DMA channels. If port wakeup is enabled or if port - * is console port or 'no_console_suspend' is set the Rx DMA - * cannot resume as expected, hence gracefully release the - * Rx DMA path before suspend and start Rx DMA path on resume. 
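[Editorial note] On the dmaengine_terminate_all() → dmaengine_terminate_async()/dmaengine_terminate_sync() switches in this lpuart hunk: as I read the dmaengine API, terminate_all() is the legacy name for the asynchronous variant. terminate_async() may be called from atomic context (lpuart_flush_buffer() runs under the port lock) but does not wait for in-flight completion callbacks; terminate_sync() additionally synchronizes against them and may sleep, so it belongs in the process-context teardown paths seen here. A minimal sketch of the idiom, with hypothetical example_* names:

/* atomic context, e.g. under the port spinlock: stop, don't wait */
static void example_flush(struct dma_chan *chan)
{
	dmaengine_terminate_async(chan);
}

/* process context: stop and wait until completion callbacks have
 * finished, so buffers they might touch can be freed safely after */
static void example_teardown(struct dma_chan *chan)
{
	dmaengine_terminate_sync(chan);
}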
- */ - if (irq_wake) { - del_timer_sync(&sport->lpuart_timer); - lpuart_dma_rx_free(&sport->port); - } + return lpuart_enable_clks(sport); +}; - /* Disable Rx DMA to use UART port as wakeup source */ - if (lpuart_is_32(sport)) { - temp = lpuart32_read(&sport->port, UARTBAUD); - lpuart32_write(&sport->port, temp & ~UARTBAUD_RDMAE, - UARTBAUD); +static void serial_lpuart_enable_wakeup(struct lpuart_port *sport, bool on) +{ + unsigned int val, baud; + + if (lpuart_is_32(sport)) { + val = lpuart32_read(&sport->port, UARTCTRL); + baud = lpuart32_read(&sport->port, UARTBAUD); + if (on) { + /* set rx_watermark to 0 in wakeup source mode */ + lpuart32_write(&sport->port, 0, UARTWATER); + val |= UARTCTRL_RIE; + /* clear RXEDGIF flag before enable RXEDGIE interrupt */ + lpuart32_write(&sport->port, UARTSTAT_RXEDGIF, UARTSTAT); + baud |= UARTBAUD_RXEDGIE; } else { - writeb(readb(sport->port.membase + UARTCR5) & - ~UARTCR5_RDMAS, sport->port.membase + UARTCR5); + val &= ~UARTCTRL_RIE; + baud &= ~UARTBAUD_RXEDGIE; } + lpuart32_write(&sport->port, val, UARTCTRL); + lpuart32_write(&sport->port, baud, UARTBAUD); + } else { + val = readb(sport->port.membase + UARTCR2); + if (on) + val |= UARTCR2_RIE; + else + val &= ~UARTCR2_RIE; + writeb(val, sport->port.membase + UARTCR2); } +} - if (sport->lpuart_dma_tx_use) { - sport->dma_tx_in_progress = false; - dmaengine_terminate_all(sport->dma_tx_chan); +static bool lpuart_uport_is_active(struct lpuart_port *sport) +{ + struct tty_port *port = &sport->port.state->port; + struct tty_struct *tty; + struct device *tty_dev; + int may_wake = 0; + + tty = tty_port_tty_get(port); + if (tty) { + tty_dev = tty->dev; + may_wake = device_may_wakeup(tty_dev); + tty_kref_put(tty); } - if (sport->port.suspended && !irq_wake) - lpuart_disable_clks(sport); + if ((tty_port_initialized(port) && may_wake) || + (!console_suspend_enabled && uart_console(&sport->port))) + return true; - return 0; + return false; } -static int __maybe_unused lpuart_resume(struct device *dev) +static int lpuart_suspend_noirq(struct device *dev) { struct lpuart_port *sport = dev_get_drvdata(dev); bool irq_wake = irqd_is_wakeup_set(irq_get_irq_data(sport->port.irq)); - if (sport->port.suspended && !irq_wake) - lpuart_enable_clks(sport); + if (lpuart_uport_is_active(sport)) + serial_lpuart_enable_wakeup(sport, !!irq_wake); - if (lpuart_is_32(sport)) - lpuart32_setup_watermark_enable(sport); - else - lpuart_setup_watermark_enable(sport); + pinctrl_pm_select_sleep_state(dev); - if (sport->lpuart_dma_rx_use) { - if (irq_wake) { - if (!lpuart_start_rx_dma(sport)) - rx_dma_timer_init(sport); - else - sport->lpuart_dma_rx_use = false; + return 0; +} + +static int lpuart_resume_noirq(struct device *dev) +{ + struct lpuart_port *sport = dev_get_drvdata(dev); + unsigned int val; + + pinctrl_pm_select_default_state(dev); + + if (lpuart_uport_is_active(sport)) { + serial_lpuart_enable_wakeup(sport, false); + + /* clear the wakeup flags */ + if (lpuart_is_32(sport)) { + val = lpuart32_read(&sport->port, UARTSTAT); + lpuart32_write(&sport->port, val, UARTSTAT); } } - lpuart_tx_dma_startup(sport); + return 0; +} - if (lpuart_is_32(sport)) - lpuart32_configure(sport); +static int lpuart_suspend(struct device *dev) +{ + struct lpuart_port *sport = dev_get_drvdata(dev); + unsigned long temp, flags; + uart_suspend_port(&lpuart_reg, &sport->port); + + if (lpuart_uport_is_active(sport)) { + spin_lock_irqsave(&sport->port.lock, flags); + if (lpuart_is_32(sport)) { + /* disable Rx/Tx and interrupts */ + temp = 
lpuart32_read(&sport->port, UARTCTRL); + temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE); + lpuart32_write(&sport->port, temp, UARTCTRL); + } else { + /* disable Rx/Tx and interrupts */ + temp = readb(sport->port.membase + UARTCR2); + temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE); + writeb(temp, sport->port.membase + UARTCR2); + } + spin_unlock_irqrestore(&sport->port.lock, flags); + + if (sport->lpuart_dma_rx_use) { + /* + * EDMA driver during suspend will forcefully release any + * non-idle DMA channels. If port wakeup is enabled or if port + * is console port or 'no_console_suspend' is set the Rx DMA + * cannot resume as expected, hence gracefully release the + * Rx DMA path before suspend and start Rx DMA path on resume. + */ + del_timer_sync(&sport->lpuart_timer); + lpuart_dma_rx_free(&sport->port); + + /* Disable Rx DMA to use UART port as wakeup source */ + spin_lock_irqsave(&sport->port.lock, flags); + if (lpuart_is_32(sport)) { + temp = lpuart32_read(&sport->port, UARTBAUD); + lpuart32_write(&sport->port, temp & ~UARTBAUD_RDMAE, + UARTBAUD); + } else { + writeb(readb(sport->port.membase + UARTCR5) & + ~UARTCR5_RDMAS, sport->port.membase + UARTCR5); + } + spin_unlock_irqrestore(&sport->port.lock, flags); + } + + if (sport->lpuart_dma_tx_use) { + spin_lock_irqsave(&sport->port.lock, flags); + if (lpuart_is_32(sport)) { + temp = lpuart32_read(&sport->port, UARTBAUD); + temp &= ~UARTBAUD_TDMAE; + lpuart32_write(&sport->port, temp, UARTBAUD); + } else { + temp = readb(sport->port.membase + UARTCR5); + temp &= ~UARTCR5_TDMAS; + writeb(temp, sport->port.membase + UARTCR5); + } + spin_unlock_irqrestore(&sport->port.lock, flags); + sport->dma_tx_in_progress = false; + dmaengine_terminate_sync(sport->dma_tx_chan); + } + } else if (pm_runtime_active(sport->port.dev)) { + lpuart_disable_clks(sport); + pm_runtime_disable(sport->port.dev); + pm_runtime_set_suspended(sport->port.dev); + } + + return 0; +} + +static void lpuart_console_fixup(struct lpuart_port *sport) +{ + struct tty_port *port = &sport->port.state->port; + struct uart_port *uport = &sport->port; + struct ktermios termios; + + /* On i.MX7ULP, entering VLLS mode powers off the lpuart module and + * all registers are lost, no matter whether the port is a wakeup + * source. For the console port, the baud rate setting is lost and + * messy logs are printed when the console is enabled as a wakeup + * source. To avoid this, either do not enable the uart port as a + * wakeup source in VLLS mode, or restore the console settings here.
+ */ + if (is_imx7ulp_lpuart(sport) && lpuart_uport_is_active(sport) && + console_suspend_enabled && uart_console(&sport->port)) { + + mutex_lock(&port->mutex); + memset(&termios, 0, sizeof(struct ktermios)); + termios.c_cflag = uport->cons->cflag; + if (port->tty && termios.c_cflag == 0) + termios = port->tty->termios; + uport->ops->set_termios(uport, &termios, NULL); + mutex_unlock(&port->mutex); + } +} + +static int lpuart_resume(struct device *dev) +{ + struct lpuart_port *sport = dev_get_drvdata(dev); + int ret; + + if (lpuart_uport_is_active(sport)) { + if (lpuart_is_32(sport)) + lpuart32_hw_setup(sport); + else + lpuart_hw_setup(sport); + } else if (pm_runtime_active(sport->port.dev)) { + ret = lpuart_enable_clks(sport); + if (ret) + return ret; + pm_runtime_set_active(sport->port.dev); + pm_runtime_enable(sport->port.dev); + } + + lpuart_console_fixup(sport); uart_resume_port(&lpuart_reg, &sport->port); return 0; } -static SIMPLE_DEV_PM_OPS(lpuart_pm_ops, lpuart_suspend, lpuart_resume); +static const struct dev_pm_ops lpuart_pm_ops = { + RUNTIME_PM_OPS(lpuart_runtime_suspend, + lpuart_runtime_resume, NULL) + NOIRQ_SYSTEM_SLEEP_PM_OPS(lpuart_suspend_noirq, + lpuart_resume_noirq) + SYSTEM_SLEEP_PM_OPS(lpuart_suspend, lpuart_resume) +}; static struct platform_driver lpuart_driver = { .probe = lpuart_probe, @@ -2887,7 +3043,7 @@ static struct platform_driver lpuart_driver = { .driver = { .name = "fsl-lpuart", .of_match_table = lpuart_dt_ids, - .pm = &lpuart_pm_ops, + .pm = pm_ptr(&lpuart_pm_ops), }, }; diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index aadda66405b47d49f34366c9502f29dcbc8983c3..757825edb0cd9694b622186d9afbf1b2f50d934c 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -489,7 +489,7 @@ static void imx_uart_stop_tx(struct uart_port *port) static void imx_uart_stop_rx(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; - u32 ucr1, ucr2, ucr4; + u32 ucr1, ucr2, ucr4, uts; ucr1 = imx_uart_readl(sport, UCR1); ucr2 = imx_uart_readl(sport, UCR2); @@ -505,7 +505,18 @@ static void imx_uart_stop_rx(struct uart_port *port) imx_uart_writel(sport, ucr1, UCR1); imx_uart_writel(sport, ucr4, UCR4); - ucr2 &= ~UCR2_RXEN; + /* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */ + if (port->rs485.flags & SER_RS485_ENABLED && + port->rs485.flags & SER_RS485_RTS_ON_SEND && + sport->have_rtscts && !sport->have_rtsgpio) { + uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)); + uts |= UTS_LOOP; + imx_uart_writel(sport, uts, imx_uart_uts_reg(sport)); + ucr2 |= UCR2_RXEN; + } else { + ucr2 &= ~UCR2_RXEN; + } + imx_uart_writel(sport, ucr2, UCR2); } @@ -563,8 +574,7 @@ static inline void imx_uart_transmit_buffer(struct imx_port *sport) /* send xmit->buf[xmit->tail] * out the port here */ imx_uart_writel(sport, xmit->buf[xmit->tail], URTX0); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - sport->port.icount.tx++; + uart_xmit_advance(&sport->port, 1); } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) @@ -590,9 +600,7 @@ static void imx_uart_dma_tx_callback(void *data) ucr1 &= ~UCR1_TXDMAEN; imx_uart_writel(sport, ucr1, UCR1); - /* update the stat */ - xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1); - sport->port.icount.tx += sport->tx_bytes; + uart_xmit_advance(&sport->port, sport->tx_bytes); dev_dbg(sport->port.dev, "we finish the TX DMA.\n"); @@ -1393,7 +1401,7 @@ static int imx_uart_startup(struct uart_port *port) int retval, i; unsigned long flags; int dma_is_inited = 0; - u32 ucr1, ucr2, 
ucr3, ucr4; + u32 ucr1, ucr2, ucr3, ucr4, uts; retval = clk_prepare_enable(sport->clk_per); if (retval) @@ -1498,6 +1506,11 @@ static int imx_uart_startup(struct uart_port *port) imx_uart_writel(sport, ucr2, UCR2); } + /* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */ + uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)); + uts &= ~UTS_LOOP; + imx_uart_writel(sport, uts, imx_uart_uts_reg(sport)); + spin_unlock_irqrestore(&sport->port.lock, flags); return 0; @@ -1507,7 +1520,7 @@ static void imx_uart_shutdown(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; unsigned long flags; - u32 ucr1, ucr2, ucr4; + u32 ucr1, ucr2, ucr4, uts; if (sport->dma_is_enabled) { dmaengine_terminate_sync(sport->dma_chan_tx); @@ -1551,7 +1564,18 @@ static void imx_uart_shutdown(struct uart_port *port) spin_lock_irqsave(&sport->port.lock, flags); ucr1 = imx_uart_readl(sport, UCR1); - ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN | UCR1_RXDMAEN | UCR1_ATDMAEN); + ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_RXDMAEN | UCR1_ATDMAEN); + /* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */ + if (port->rs485.flags & SER_RS485_ENABLED && + port->rs485.flags & SER_RS485_RTS_ON_SEND && + sport->have_rtscts && !sport->have_rtsgpio) { + uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)); + uts |= UTS_LOOP; + imx_uart_writel(sport, uts, imx_uart_uts_reg(sport)); + ucr1 |= UCR1_UARTEN; + } else { + ucr1 &= ~UCR1_UARTEN; + } imx_uart_writel(sport, ucr1, UCR1); ucr4 = imx_uart_readl(sport, UCR4); @@ -2213,7 +2237,7 @@ static int imx_uart_probe(struct platform_device *pdev) void __iomem *base; u32 dma_buf_conf[2]; int ret = 0; - u32 ucr1; + u32 ucr1, ucr2, uts; struct resource *res; int txirq, rxirq, rtsirq; @@ -2350,6 +2374,31 @@ static int imx_uart_probe(struct platform_device *pdev) ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN); imx_uart_writel(sport, ucr1, UCR1); + /* + * In case RS485 is enabled without GPIO RTS control, the UART IP + * is used to control CTS signal. Keep both the UART and Receiver + * enabled, otherwise the UART IP pulls CTS signal always HIGH no + * matter how the UCR2 CTSC and CTS bits are set. To prevent any + * data from being fed into the RX FIFO, enable loopback mode in + * UTS register, which disconnects the RX path from external RXD + * pin and connects it to the Transceiver, which is disabled, so + * no data can be fed to the RX FIFO that way. 
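[Editorial note] The RS485/UTS_LOOP recipe described in the comment above is open-coded three times in this patch (imx_uart_stop_rx(), imx_uart_shutdown() and imx_uart_probe()), always as the same read-modify-write of the UTS register. A hypothetical helper, not part of the patch, that would capture the shared step:

/* Hypothetical consolidation: park the RX path in loopback so its only
 * data source is the (disabled) transmitter and it stays silent, while
 * the UART and receiver remain enabled for RS485 CTS control. */
static void imx_uart_set_loopback(struct imx_port *sport, bool enable)
{
	u32 uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));

	if (enable)
		uts |= UTS_LOOP;
	else
		uts &= ~UTS_LOOP;
	imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
}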
+ */ + if (sport->port.rs485.flags & SER_RS485_ENABLED && + sport->have_rtscts && !sport->have_rtsgpio) { + uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)); + uts |= UTS_LOOP; + imx_uart_writel(sport, uts, imx_uart_uts_reg(sport)); + + ucr1 = imx_uart_readl(sport, UCR1); + ucr1 |= UCR1_UARTEN; + imx_uart_writel(sport, ucr1, UCR1); + + ucr2 = imx_uart_readl(sport, UCR2); + ucr2 |= UCR2_RXEN; + imx_uart_writel(sport, ucr2, UCR2); + } + if (!imx_uart_is_imx1(sport) && sport->dte_mode) { /* * The DCEDTE bit changes the direction of DSR, DCD, DTR and RI diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c index dd0a8915ce4ff1a25a6663d9be29fec00465176d..b1f27e1681358b6ff192c34b60a6bd73a6cdd750 100644 --- a/drivers/tty/serial/ip22zilog.c +++ b/drivers/tty/serial/ip22zilog.c @@ -409,8 +409,7 @@ static void ip22zilog_transmit_chars(struct uart_ip22zilog_port *up, ZSDELAY(); ZS_WSYNC(channel); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - up->port.icount.tx++; + uart_xmit_advance(&up->port, 1); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); @@ -609,8 +608,7 @@ static void ip22zilog_start_tx(struct uart_port *port) ZSDELAY(); ZS_WSYNC(channel); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c index c892f3c7d1abc1b6f2ee7e2755e09f01db20de60..a58e9277dfad8752b10af8ca5bd8ad7383984531 100644 --- a/drivers/tty/serial/lantiq.c +++ b/drivers/tty/serial/lantiq.c @@ -95,7 +95,6 @@ #define ASCFSTAT_TXFFLMASK 0x3F00 #define ASCFSTAT_TXFREEMASK 0x3F000000 -static void lqasc_tx_chars(struct uart_port *port); static struct ltq_uart_port *lqasc_port[MAXPORTS]; static struct uart_driver lqasc_reg; @@ -151,9 +150,12 @@ lqasc_start_tx(struct uart_port *port) { unsigned long flags; struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + u8 ch; spin_lock_irqsave(<q_port->lock, flags); - lqasc_tx_chars(port); + uart_port_tx(port, ch, + lqasc_tx_ready(port), + writeb(ch, port->membase + LTQ_ASC_TBUF)); spin_unlock_irqrestore(<q_port->lock, flags); return; } @@ -226,36 +228,6 @@ lqasc_rx_chars(struct uart_port *port) return 0; } -static void -lqasc_tx_chars(struct uart_port *port) -{ - struct circ_buf *xmit = &port->state->xmit; - if (uart_tx_stopped(port)) { - lqasc_stop_tx(port); - return; - } - - while (lqasc_tx_ready(port)) { - if (port->x_char) { - writeb(port->x_char, port->membase + LTQ_ASC_TBUF); - port->icount.tx++; - port->x_char = 0; - continue; - } - - if (uart_circ_empty(xmit)) - break; - - writeb(port->state->xmit.buf[port->state->xmit.tail], - port->membase + LTQ_ASC_TBUF); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); -} - static irqreturn_t lqasc_tx_int(int irq, void *_port) { diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c index 4c0604325ee910746f79a8c1807d15fb9867cd82..062812fe1b09253c76d9b3bce59f652b376f6806 100644 --- a/drivers/tty/serial/liteuart.c +++ b/drivers/tty/serial/liteuart.c @@ -136,8 +136,7 @@ static void liteuart_start_tx(struct uart_port *port) } else if (!uart_circ_empty(xmit)) { while (xmit->head != xmit->tail) { ch = xmit->buf[xmit->tail]; - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); liteuart_putchar(port, 
ch); } } diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c index ed47f476833831bc7eb5cd57f23cea22ac04a523..b38fe4728c26490dcd1e4124add07894ec5b8645 100644 --- a/drivers/tty/serial/lpc32xx_hs.c +++ b/drivers/tty/serial/lpc32xx_hs.c @@ -276,8 +276,6 @@ static void __serial_lpc32xx_rx(struct uart_port *port) tty_flip_buffer_push(tport); } -static void serial_lpc32xx_stop_tx(struct uart_port *port); - static bool serial_lpc32xx_tx_ready(struct uart_port *port) { u32 level = readl(LPC32XX_HSUART_LEVEL(port->membase)); @@ -287,34 +285,11 @@ static bool serial_lpc32xx_tx_ready(struct uart_port *port) static void __serial_lpc32xx_tx(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; - - if (port->x_char) { - writel((u32)port->x_char, LPC32XX_HSUART_FIFO(port->membase)); - port->icount.tx++; - port->x_char = 0; - return; - } - - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) - goto exit_tx; - - /* Transfer data */ - while (serial_lpc32xx_tx_ready(port)) { - writel((u32) xmit->buf[xmit->tail], - LPC32XX_HSUART_FIFO(port->membase)); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - if (uart_circ_empty(xmit)) - break; - } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); + u8 ch; -exit_tx: - if (uart_circ_empty(xmit)) - serial_lpc32xx_stop_tx(port); + uart_port_tx(port, ch, + serial_lpc32xx_tx_ready(port), + writel(ch, LPC32XX_HSUART_FIFO(port->membase))); } static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id) diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c index c69602f356fdc81aff6c5c65fcea514f2c1c7a1c..bb74f23251fe43973a77409dddf1c949178272ed 100644 --- a/drivers/tty/serial/max3100.c +++ b/drivers/tty/serial/max3100.c @@ -292,9 +292,7 @@ static void max3100_work(struct work_struct *w) } else if (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)) { tx = xmit->buf[xmit->tail]; - xmit->tail = (xmit->tail + 1) & - (UART_XMIT_SIZE - 1); - s->port.icount.tx++; + uart_xmit_advance(&s->port, 1); } if (tx != 0xffff) { max3100_calc_parity(s, &tx); diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index fbf6e2b3161c5a30287b8454bb1c057349f42516..4eb24e3407f803970bf0aa85cdf6c49a0438091f 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -787,10 +787,7 @@ static void max310x_handle_tx(struct uart_port *port) } else { max310x_batch_write(port, xmit->buf + xmit->tail, to_send); } - - /* Add data to send */ - port->icount.tx += to_send; - xmit->tail = (xmit->tail + to_send) & (UART_XMIT_SIZE - 1); + uart_xmit_advance(port, to_send); } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c index b1cd9a76dd93bc4d49c8d6b1d10d3f8ba4168dc1..3239babe12a4edb954fe0736280d6dfc4d6fddb6 100644 --- a/drivers/tty/serial/mcf.c +++ b/drivers/tty/serial/mcf.c @@ -327,34 +327,16 @@ static void mcf_rx_chars(struct mcf_uart *pp) static void mcf_tx_chars(struct mcf_uart *pp) { struct uart_port *port = &pp->port; - struct circ_buf *xmit = &port->state->xmit; - - if (port->x_char) { - /* Send special char - probably flow control */ - writeb(port->x_char, port->membase + MCFUART_UTB); - port->x_char = 0; - port->icount.tx++; - return; - } - - while (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY) { - if (uart_circ_empty(xmit)) - break; - writeb(xmit->buf[xmit->tail], port->membase + MCFUART_UTB); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE -1); - 
port->icount.tx++; - } + bool pending; + u8 ch; - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); + pending = uart_port_tx(port, ch, + readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY, + writeb(ch, port->membase + MCFUART_UTB)); - if (uart_circ_empty(xmit)) { - mcf_stop_tx(port); - /* Disable TX to negate RTS automatically */ - if (port->rs485.flags & SER_RS485_ENABLED) - writeb(MCFUART_UCR_TXDISABLE, - port->membase + MCFUART_UCR); - } + /* Disable TX to negate RTS automatically */ + if (!pending && (port->rs485.flags & SER_RS485_ENABLED)) + writeb(MCFUART_UCR_TXDISABLE, port->membase + MCFUART_UCR); } /****************************************************************************/ diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c index 3690f5cf0f4342334fe601f80e550a571eb06ccb..d2502aaa3e8c7c49fdc87857d56e36ad8d27c496 100644 --- a/drivers/tty/serial/men_z135_uart.c +++ b/drivers/tty/serial/men_z135_uart.c @@ -352,11 +352,8 @@ static void men_z135_handle_tx(struct men_z135_port *uart) n = min(n, s); memcpy_toio(port->membase + MEN_Z135_TX_RAM, &xmit->buf[xmit->tail], n); - xmit->tail = (xmit->tail + n) & (UART_XMIT_SIZE - 1); - iowrite32(n & 0x3ff, port->membase + MEN_Z135_TX_CTRL); - - port->icount.tx += n; + uart_xmit_advance(port, n); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c index 056243c12836c307c6268a9a62eead9130513c81..74110017988a8b65aa16f6afafec297c5faf7377 100644 --- a/drivers/tty/serial/meson_uart.c +++ b/drivers/tty/serial/meson_uart.c @@ -162,8 +162,7 @@ static void meson_uart_start_tx(struct uart_port *port) ch = xmit->buf[xmit->tail]; writel(ch, port->membase + AML_UART_WFIFO); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); } if (!uart_circ_empty(xmit)) { diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c index c15e0d84dc7e32fd35b0f8f2dc14f6ce602a4e48..44988a2941b8d12d7a5684ca7f35d119577235b3 100644 --- a/drivers/tty/serial/milbeaut_usio.c +++ b/drivers/tty/serial/milbeaut_usio.c @@ -98,8 +98,7 @@ static void mlb_usio_tx_chars(struct uart_port *port) do { writew(xmit->buf[xmit->tail], port->membase + MLB_USIO_REG_DR); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); if (uart_circ_empty(xmit)) break; diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c index 73362d4bc45d3cd5daee86cbafdea1a516f0584d..384ca195e3d5d24eb0d8fc332d28981e2c2ea730 100644 --- a/drivers/tty/serial/mpc52xx_uart.c +++ b/drivers/tty/serial/mpc52xx_uart.c @@ -1428,42 +1428,11 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port) static inline bool mpc52xx_uart_int_tx_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; - - /* Process out of band chars */ - if (port->x_char) { - psc_ops->write_char(port, port->x_char); - port->icount.tx++; - port->x_char = 0; - return true; - } - - /* Nothing to do ? 
*/ - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { - mpc52xx_uart_stop_tx(port); - return false; - } - - /* Send chars */ - while (psc_ops->raw_tx_rdy(port)) { - psc_ops->write_char(port, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - if (uart_circ_empty(xmit)) - break; - } - - /* Wake up */ - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); - - /* Maybe we're done after all */ - if (uart_circ_empty(xmit)) { - mpc52xx_uart_stop_tx(port); - return false; - } + u8 ch; - return true; + return uart_port_tx(port, ch, + psc_ops->raw_tx_rdy(port), + psc_ops->write_char(port, ch)); } static irqreturn_t diff --git a/drivers/tty/serial/mps2-uart.c b/drivers/tty/serial/mps2-uart.c index 2e3e6cf1681784c484d6a8653dbda5f9670ca122..860d161fa59484f85fddefcbd821c08d9029d1e9 100644 --- a/drivers/tty/serial/mps2-uart.c +++ b/drivers/tty/serial/mps2-uart.c @@ -129,29 +129,11 @@ static void mps2_uart_stop_tx(struct uart_port *port) static void mps2_uart_tx_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; - - while (!(mps2_uart_read8(port, UARTn_STATE) & UARTn_STATE_TX_FULL)) { - if (port->x_char) { - mps2_uart_write8(port, port->x_char, UARTn_DATA); - port->x_char = 0; - port->icount.tx++; - continue; - } - - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) - break; - - mps2_uart_write8(port, xmit->buf[xmit->tail], UARTn_DATA); - xmit->tail = (xmit->tail + 1) % UART_XMIT_SIZE; - port->icount.tx++; - } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); + u8 ch; - if (uart_circ_empty(xmit)) - mps2_uart_stop_tx(port); + uart_port_tx(port, ch, + mps2_uart_tx_empty(port), + mps2_uart_write8(port, ch, UARTn_DATA)); } static void mps2_uart_start_tx(struct uart_port *port) diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 7dd19a2815794e9f3fb9264600843ac082cf6666..843798e630844cfaa931a833244153fcab1b51cc 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -464,12 +464,9 @@ static void msm_complete_tx_dma(void *args) } count = dma->count - state.residue; - port->icount.tx += count; + uart_xmit_advance(port, count); dma->count = 0; - xmit->tail += count; - xmit->tail &= UART_XMIT_SIZE - 1; - /* Restore "Tx FIFO below watermark" interrupt */ msm_port->imr |= MSM_UART_IMR_TXLEV; msm_write(port, msm_port->imr, MSM_UART_IMR); @@ -819,7 +816,7 @@ static void msm_handle_rx(struct uart_port *port) port->icount.rx++; } - /* Mask conditions we're ignorning. */ + /* Mask conditions we're ignoring. */ sr &= port->read_status_mask; if (sr & MSM_UART_SR_RX_BREAK) @@ -866,13 +863,11 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count) else num_chars = 1; - for (i = 0; i < num_chars; i++) { + for (i = 0; i < num_chars; i++) buf[i] = xmit->buf[xmit->tail + i]; - port->icount.tx++; - } iowrite32_rep(tf, buf, 1); - xmit->tail = (xmit->tail + num_chars) & (UART_XMIT_SIZE - 1); + uart_xmit_advance(port, num_chars); tf_pointer += num_chars; } diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c index ed0e763f622a880c448da1b323c05eeb507e10c7..85ce1e9af44aee4b2929c09e45c8004bae9fc41c 100644 --- a/drivers/tty/serial/mux.c +++ b/drivers/tty/serial/mux.c @@ -171,6 +171,13 @@ static void mux_break_ctl(struct uart_port *port, int break_state) { } +static void mux_tx_done(struct uart_port *port) +{ + /* FIXME js: really needs to wait? 
*/ + while (UART_GET_FIFO_CNT(port)) + udelay(1); +} + /** * mux_write - Write chars to the mux fifo. * @port: Ptr to the uart_port. @@ -180,39 +187,13 @@ static void mux_break_ctl(struct uart_port *port, int break_state) */ static void mux_write(struct uart_port *port) { - int count; - struct circ_buf *xmit = &port->state->xmit; - - if(port->x_char) { - UART_PUT_CHAR(port, port->x_char); - port->icount.tx++; - port->x_char = 0; - return; - } - - if(uart_circ_empty(xmit) || uart_tx_stopped(port)) { - mux_stop_tx(port); - return; - } - - count = (port->fifosize) - UART_GET_FIFO_CNT(port); - do { - UART_PUT_CHAR(port, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - if(uart_circ_empty(xmit)) - break; - - } while(--count > 0); - - while(UART_GET_FIFO_CNT(port)) - udelay(1); - - if(uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); + u8 ch; - if (uart_circ_empty(xmit)) - mux_stop_tx(port); + uart_port_tx_limited(port, ch, + port->fifosize - UART_GET_FIFO_CNT(port), + true, + UART_PUT_CHAR(port, ch), + mux_tx_done(port)); } /** diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index ba16e1da6bd3e74cf2a4996a297d5313ef7acc8d..31f739c7a08bf78f05912c097b2c67b8d9509580 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c @@ -223,8 +223,7 @@ static void mvebu_uart_start_tx(struct uart_port *port) if (IS_EXTENDED(port) && !uart_circ_empty(xmit)) { writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port)); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); } ctl = readl(port->membase + UART_INTR(port)); @@ -335,40 +334,12 @@ ignore_char: static void mvebu_uart_tx_chars(struct uart_port *port, unsigned int status) { - struct circ_buf *xmit = &port->state->xmit; - unsigned int count; - unsigned int st; - - if (port->x_char) { - writel(port->x_char, port->membase + UART_TSH(port)); - port->icount.tx++; - port->x_char = 0; - return; - } - - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { - mvebu_uart_stop_tx(port); - return; - } - - for (count = 0; count < port->fifosize; count++) { - writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port)); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - - if (uart_circ_empty(xmit)) - break; - - st = readl(port->membase + UART_STAT); - if (st & STAT_TX_FIFO_FUL) - break; - } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); + u8 ch; - if (uart_circ_empty(xmit)) - mvebu_uart_stop_tx(port); + uart_port_tx_limited(port, ch, port->fifosize, + !(readl(port->membase + UART_STAT) & STAT_TX_FIFO_FUL), + writel(ch, port->membase + UART_TSH(port)), + ({})); } static irqreturn_t mvebu_uart_isr(int irq, void *dev_id) diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index d21a4f3ef2fe614de096df2ac694ee102a1878fc..ef6e7bb6105cad2b0463eabef2060ae4043fe9cc 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -569,6 +569,8 @@ static int mxs_auart_dma_tx(struct mxs_auart_port *s, int size) static void mxs_auart_tx_chars(struct mxs_auart_port *s) { struct circ_buf *xmit = &s->port.state->xmit; + bool pending; + u8 ch; if (auart_dma_enabled(s)) { u32 i = 0; @@ -603,31 +605,13 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s) return; } - - while (!(mxs_read(s, REG_STAT) & AUART_STAT_TXFF)) { - if (s->port.x_char) { - s->port.icount.tx++; - 
mxs_write(s->port.x_char, s, REG_DATA); - s->port.x_char = 0; - continue; - } - if (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)) { - s->port.icount.tx++; - mxs_write(xmit->buf[xmit->tail], s, REG_DATA); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - } else - break; - } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(&s->port); - - if (uart_circ_empty(&(s->port.state->xmit))) - mxs_clr(AUART_INTR_TXIEN, s, REG_INTR); - else + pending = uart_port_tx(&s->port, ch, + !(mxs_read(s, REG_STAT) & AUART_STAT_TXFF), + mxs_write(ch, s, REG_DATA)); + if (pending) mxs_set(AUART_INTR_TXIEN, s, REG_INTR); - - if (uart_tx_stopped(&s->port)) - mxs_auart_stop_tx(&s->port); + else + mxs_clr(AUART_INTR_TXIEN, s, REG_INTR); } static void mxs_auart_rx_char(struct mxs_auart_port *s) diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 7d0d2718ef5953962917617395e630621f5044e3..82d35dbbfa6cbb471d476efee3beb15cb6c0acba 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -347,34 +347,12 @@ static void serial_omap_put_char(struct uart_omap_port *up, unsigned char ch) static void transmit_chars(struct uart_omap_port *up, unsigned int lsr) { - struct circ_buf *xmit = &up->port.state->xmit; - int count; + u8 ch; - if (up->port.x_char) { - serial_omap_put_char(up, up->port.x_char); - up->port.icount.tx++; - up->port.x_char = 0; - return; - } - if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { - serial_omap_stop_tx(&up->port); - return; - } - count = up->port.fifosize / 4; - do { - serial_omap_put_char(up, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - up->port.icount.tx++; - - if (uart_circ_empty(xmit)) - break; - } while (--count > 0); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(&up->port); - - if (uart_circ_empty(xmit)) - serial_omap_stop_tx(&up->port); + uart_port_tx_limited(&up->port, ch, up->port.fifosize / 4, + true, + serial_omap_put_char(up, ch), + ({})); } static inline void serial_omap_enable_ier_thri(struct uart_omap_port *up) diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c index fde39cc1145db53caf5f562abe0db63c037935c6..e99970a9437ff2cf7d6c3245c3ec46259fe0d89d 100644 --- a/drivers/tty/serial/owl-uart.c +++ b/drivers/tty/serial/owl-uart.c @@ -181,35 +181,11 @@ static void owl_uart_start_tx(struct uart_port *port) static void owl_uart_send_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; - unsigned int ch; - - if (port->x_char) { - while (!(owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU)) - cpu_relax(); - owl_uart_write(port, port->x_char, OWL_UART_TXDAT); - port->icount.tx++; - port->x_char = 0; - } - - if (uart_tx_stopped(port)) - return; - - while (!(owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU)) { - if (uart_circ_empty(xmit)) - break; + u8 ch; - ch = xmit->buf[xmit->tail]; - owl_uart_write(port, ch, OWL_UART_TXDAT); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); - - if (uart_circ_empty(xmit)) - owl_uart_stop_tx(port); + uart_port_tx(port, ch, + !(owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU), + owl_uart_write(port, ch, OWL_UART_TXDAT)); } static void owl_uart_receive_chars(struct uart_port *port) diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 
c59ce788657999dbd1931b5e8aef8f3e7ddd118e..3d54a43768cd2497180920ce1103f34d1ad3cd14 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -694,6 +694,7 @@ static void pch_request_dma(struct uart_port *port) if (!chan) { dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Tx)\n", __func__); + pci_dev_put(dma_dev); return; } priv->chan_tx = chan; @@ -710,6 +711,7 @@ static void pch_request_dma(struct uart_port *port) __func__); dma_release_channel(priv->chan_tx); priv->chan_tx = NULL; + pci_dev_put(dma_dev); return; } @@ -717,6 +719,8 @@ static void pch_request_dma(struct uart_port *port) priv->rx_buf_virt = dma_alloc_coherent(port->dev, port->fifosize, &priv->rx_buf_dma, GFP_KERNEL); priv->chan_rx = chan; + + pci_dev_put(dma_dev); } static void pch_dma_rx_complete(void *arg) @@ -738,15 +742,12 @@ static void pch_dma_tx_complete(void *arg) { struct eg20t_port *priv = arg; struct uart_port *port = &priv->port; - struct circ_buf *xmit = &port->state->xmit; struct scatterlist *sg = priv->sg_tx_p; int i; - for (i = 0; i < priv->nent; i++, sg++) { - xmit->tail += sg_dma_len(sg); - port->icount.tx += sg_dma_len(sg); - } - xmit->tail &= UART_XMIT_SIZE - 1; + for (i = 0; i < priv->nent; i++, sg++) + uart_xmit_advance(port, sg_dma_len(sg)); + async_tx_ack(priv->desc_tx); dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE); priv->tx_dma_use = 0; @@ -843,8 +844,7 @@ static unsigned int handle_tx(struct eg20t_port *priv) while (!uart_tx_stopped(port) && !uart_circ_empty(xmit) && fifo_size) { iowrite8(xmit->buf[xmit->tail], priv->membase + PCH_UART_THR); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); fifo_size--; tx_empty = 0; } diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c index c38754d593ca7597700b5d5c13e6e078dc837f80..ba3435263c1f77d6f672c54f7d3bd38bec09929f 100644 --- a/drivers/tty/serial/pic32_uart.c +++ b/drivers/tty/serial/pic32_uart.c @@ -376,8 +376,7 @@ static void pic32_uart_do_tx(struct uart_port *port) pic32_uart_writel(sport, PIC32_UART_TX, c); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); if (uart_circ_empty(xmit)) break; if (--max_count == 0) diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c index fe2e4ec423f7948e5fd005c97b0d94e62a580b3e..13668ffdb1e7d781557b5a2b6472c9f6aba9e784 100644 --- a/drivers/tty/serial/pmac_zilog.c +++ b/drivers/tty/serial/pmac_zilog.c @@ -410,8 +410,7 @@ static void pmz_transmit_chars(struct uart_pmac_port *uap) write_zsdata(uap, xmit->buf[xmit->tail]); zssync(uap); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - uap->port.icount.tx++; + uart_xmit_advance(&uap->port, 1); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&uap->port); @@ -627,8 +626,7 @@ static void pmz_start_tx(struct uart_port *port) return; write_zsdata(uap, xmit->buf[xmit->tail]); zssync(uap); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&uap->port); diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c index 2d25231fad847c849f0e44b76f495eb602b5bf4d..444fa4b654aca487a0383727eff632f360567a6b 100644 --- a/drivers/tty/serial/pxa.c +++ b/drivers/tty/serial/pxa.c @@ -174,35 +174,12 @@ static inline void receive_chars(struct uart_pxa_port *up, int *status) static void transmit_chars(struct uart_pxa_port *up) { - struct 
circ_buf *xmit = &up->port.state->xmit; - int count; + u8 ch; - if (up->port.x_char) { - serial_out(up, UART_TX, up->port.x_char); - up->port.icount.tx++; - up->port.x_char = 0; - return; - } - if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { - serial_pxa_stop_tx(&up->port); - return; - } - - count = up->port.fifosize / 2; - do { - serial_out(up, UART_TX, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - up->port.icount.tx++; - if (uart_circ_empty(xmit)) - break; - } while (--count > 0); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(&up->port); - - - if (uart_circ_empty(xmit)) - serial_pxa_stop_tx(&up->port); + uart_port_tx_limited(&up->port, ch, up->port.fifosize / 2, + true, + serial_out(up, UART_TX, ch), + ({})); } static void serial_pxa_start_tx(struct uart_port *port) diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 83b66b73303afde411c03690a2303a7395888dc3..b487823f0e61724ba8cdb38ff11f8e5c28bfbf6a 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -924,6 +924,7 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport) false, true, true); geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2); geni_se_select_mode(&port->se, GENI_SE_FIFO); + qcom_geni_serial_start_rx(uport); port->setup = true; return 0; @@ -1547,9 +1548,43 @@ static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev) return ret; } +static int qcom_geni_serial_sys_hib_resume(struct device *dev) +{ + int ret = 0; + struct uart_port *uport; + struct qcom_geni_private_data *private_data; + struct qcom_geni_serial_port *port = dev_get_drvdata(dev); + + uport = &port->uport; + private_data = uport->private_data; + + if (uart_console(uport)) { + geni_icc_set_tag(&port->se, 0x7); + geni_icc_set_bw(&port->se); + ret = uart_resume_port(private_data->drv, uport); + /* + * For the hibernation use case, clients of the + * console UART won't call port setup during restore, + * hence call port setup for the console UART here. + */ + qcom_geni_serial_port_setup(uport); + } else { + /* + * Peripheral register settings are lost during hibernation. + * Update setup flag such that port setup happens again + * during next session. Clients of HS-UART will close and + * open the port during hibernation. 
+ */ + port->setup = false; + } + return ret; +} + static const struct dev_pm_ops qcom_geni_serial_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(qcom_geni_serial_sys_suspend, qcom_geni_serial_sys_resume) + .restore = qcom_geni_serial_sys_hib_resume, + .thaw = qcom_geni_serial_sys_hib_resume, }; static const struct of_device_id qcom_geni_serial_match_table[] = { diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c index 0e387e2144fa25937ddfde1d26cf8fb47a68c5ea..be5c842b5ba951d126f78883ab0e8196ae1025cf 100644 --- a/drivers/tty/serial/rda-uart.c +++ b/drivers/tty/serial/rda-uart.c @@ -353,8 +353,7 @@ static void rda_uart_send_chars(struct uart_port *port) ch = xmit->buf[xmit->tail]; rda_uart_write(port, ch, RDA_UART_RXTX_BUFFER); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c index b81afb06f1f40ae8d6cd9fd86a6b657bdfc460cf..749b873a5d99b9e0293cbce141108836e3ee3dca 100644 --- a/drivers/tty/serial/rp2.c +++ b/drivers/tty/serial/rp2.c @@ -427,32 +427,13 @@ static void rp2_rx_chars(struct rp2_uart_port *up) static void rp2_tx_chars(struct rp2_uart_port *up) { - u16 max_tx = FIFO_SIZE - readw(up->base + RP2_TX_FIFO_COUNT); - struct circ_buf *xmit = &up->port.state->xmit; + u8 ch; - if (uart_tx_stopped(&up->port)) { - rp2_uart_stop_tx(&up->port); - return; - } - - for (; max_tx != 0; max_tx--) { - if (up->port.x_char) { - writeb(up->port.x_char, up->base + RP2_DATA_BYTE); - up->port.x_char = 0; - up->port.icount.tx++; - continue; - } - if (uart_circ_empty(xmit)) { - rp2_uart_stop_tx(&up->port); - break; - } - writeb(xmit->buf[xmit->tail], up->base + RP2_DATA_BYTE); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - up->port.icount.tx++; - } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(&up->port); + uart_port_tx_limited(&up->port, ch, + FIFO_SIZE - readw(up->base + RP2_TX_FIFO_COUNT), + true, + writeb(ch, up->base + RP2_DATA_BYTE), + ({})); } static void rp2_ch_interrupt(struct rp2_uart_port *up) diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c index dd9e3253cab4487d7774528ed87f3a1a35de634e..55107bbc00ceb2ea9870ee7d7062689a5c719982 100644 --- a/drivers/tty/serial/sa1100.c +++ b/drivers/tty/serial/sa1100.c @@ -228,14 +228,7 @@ sa1100_rx_chars(struct sa1100_port *sport) static void sa1100_tx_chars(struct sa1100_port *sport) { - struct circ_buf *xmit = &sport->port.state->xmit; - - if (sport->port.x_char) { - UART_PUT_CHAR(sport, sport->port.x_char); - sport->port.icount.tx++; - sport->port.x_char = 0; - return; - } + u8 ch; /* * Check the modem control lines before @@ -243,28 +236,9 @@ static void sa1100_tx_chars(struct sa1100_port *sport) */ sa1100_mctrl_check(sport); - if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) { - sa1100_stop_tx(&sport->port); - return; - } - - /* - * Tried using FIFO (not checking TNF) for fifo fill: - * still had the '4 bytes repeated' problem. 
- */ - while (UART_GET_UTSR1(sport) & UTSR1_TNF) { - UART_PUT_CHAR(sport, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - sport->port.icount.tx++; - if (uart_circ_empty(xmit)) - break; - } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(&sport->port); - - if (uart_circ_empty(xmit)) - sa1100_stop_tx(&sport->port); + uart_port_tx(&sport->port, ch, + UART_GET_UTSR1(sport) & UTSR1_TNF, + UART_PUT_CHAR(sport, ch)); } static irqreturn_t sa1100_int(int irq, void *dev_id) diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c index 9c252c9ca95ad98d4051733276eb972c4f89ff30..0fce856434dafd8012fc16ca43fddb2b7644dfc8 100644 --- a/drivers/tty/serial/samsung_tty.c +++ b/drivers/tty/serial/samsung_tty.c @@ -288,7 +288,6 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port) { struct s3c24xx_uart_port *ourport = to_ourport(port); struct s3c24xx_uart_dma *dma = ourport->dma; - struct circ_buf *xmit = &port->state->xmit; struct dma_tx_state state; int count; @@ -316,8 +315,7 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port) DMA_TO_DEVICE); async_tx_ack(dma->tx_desc); count = dma->tx_bytes_requested - state.residue; - xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); - port->icount.tx += count; + uart_xmit_advance(port, count); } ourport->tx_enabled = 0; @@ -351,8 +349,7 @@ static void s3c24xx_serial_tx_dma_complete(void *args) spin_lock_irqsave(&port->lock, flags); - xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); - port->icount.tx += count; + uart_xmit_advance(port, count); ourport->tx_in_progress = 0; if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) @@ -916,8 +913,7 @@ static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport) break; wr_reg(port, S3C2410_UTXH, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); count--; } diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c index c5d2b6cdcb4a1f44cfa0d0c64dae17a0d3551b10..de56f383964e68c749dbab7c41b586bd89ba2e3c 100644 --- a/drivers/tty/serial/sb1250-duart.c +++ b/drivers/tty/serial/sb1250-duart.c @@ -399,8 +399,7 @@ static void sbd_transmit_chars(struct sbd_port *sport) /* Send char. */ if (!stop_tx) { write_sbdchn(sport, R_DUART_TX_HOLD, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - sport->port.icount.tx++; + uart_xmit_advance(&sport->port, 1); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&sport->port); diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 524921360ca786bea6c0298f3b319f7170b28d76..39f92eb1e6989bb190b6442dbbebbd357a501c65 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -686,13 +686,10 @@ static void sc16is7xx_handle_tx(struct uart_port *port) } to_send = (to_send > txlen) ? 
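Several drivers in this series (pxa, rp2, sa1100 and more below) convert to the new uart_port_tx()/uart_port_tx_limited() helpers instead. These are macros in <linux/serial_core.h>, so tx_ready, put_char and tx_done are caller-supplied expressions evaluated in place; written out as a plain function with callbacks purely for illustration, the control flow they generate is roughly:

	/*
	 * Simplified model of uart_port_tx_limited(); the real helper is a
	 * macro, so the two callbacks below stand in for the expressions a
	 * driver passes (e.g. a FIFO-space test and a register write).
	 */
	static unsigned int uart_port_tx_limited_model(struct uart_port *port,
			unsigned int count,
			bool (*tx_ready)(struct uart_port *port),
			void (*put_char)(struct uart_port *port, u8 ch))
	{
		struct circ_buf *xmit = &port->state->xmit;
		unsigned int pending;

		for (; count && tx_ready(port); count--) {
			if (port->x_char) {
				put_char(port, port->x_char);
				port->x_char = 0;
				port->icount.tx++;
				continue;
			}
			if (uart_circ_empty(xmit) || uart_tx_stopped(port))
				break;
			put_char(port, xmit->buf[xmit->tail]);
			uart_xmit_advance(port, 1);
		}

		pending = uart_circ_chars_pending(xmit);
		if (pending < WAKEUP_CHARS) {
			uart_write_wakeup(port);
			if (!pending)
				port->ops->stop_tx(port); /* plus the driver's tx_done hook */
		}
		return pending;
	}

uart_port_tx() is the unbounded variant: it loops for as long as tx_ready holds rather than up to a count, which is why sa1100 (and vt8500 later in the series) passes its FIFO-not-full test there.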
txlen : to_send; - /* Add data to send */ - port->icount.tx += to_send; - /* Convert to linear buffer */ for (i = 0; i < to_send; ++i) { s->buf[i] = xmit->buf[xmit->tail]; - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + uart_xmit_advance(port, 1); } sc16is7xx_fifo_write(port, to_send); diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c index dd98509f52e5a866e175f87de8b1723d0b1eafb2..7df6878226340d263528e06eea0b925f79abaf4e 100644 --- a/drivers/tty/serial/sccnxp.c +++ b/drivers/tty/serial/sccnxp.c @@ -468,8 +468,7 @@ static void sccnxp_handle_tx(struct uart_port *port) break; sccnxp_port_write(port, SCCNXP_THR_REG, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index b7170cb9a544f7321f9731ca5bb673b588f1f766..e5b9773db5e36eb32c125615d2b350aa04da5baa 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -496,8 +496,7 @@ static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes) break; } tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - tup->uport.icount.tx++; + uart_xmit_advance(&tup->uport, 1); } } @@ -619,8 +618,9 @@ static void tegra_uart_stop_tx(struct uart_port *u) if (tup->tx_in_progress != TEGRA_UART_TX_DMA) return; - dmaengine_terminate_all(tup->tx_dma_chan); + dmaengine_pause(tup->tx_dma_chan); dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state); + dmaengine_terminate_all(tup->tx_dma_chan); count = tup->tx_bytes_requested - state.residue; async_tx_ack(tup->tx_dma_desc); uart_xmit_advance(&tup->uport, count); @@ -763,8 +763,9 @@ static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup) return; } - dmaengine_terminate_all(tup->rx_dma_chan); + dmaengine_pause(tup->rx_dma_chan); dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state); + dmaengine_terminate_all(tup->rx_dma_chan); tegra_uart_rx_buffer_push(tup, state.residue); tup->rx_dma_active = false; diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c index e12f1dc18c38bc7627cf18a74920a889222ae0e9..eab387b01e36ff8ff57070a9d5809e2974b2140f 100644 --- a/drivers/tty/serial/serial_txx9.c +++ b/drivers/tty/serial/serial_txx9.c @@ -321,34 +321,12 @@ receive_chars(struct uart_port *up, unsigned int *status) static inline void transmit_chars(struct uart_port *up) { - struct circ_buf *xmit = &up->state->xmit; - int count; + u8 ch; - if (up->x_char) { - sio_out(up, TXX9_SITFIFO, up->x_char); - up->icount.tx++; - up->x_char = 0; - return; - } - if (uart_circ_empty(xmit) || uart_tx_stopped(up)) { - serial_txx9_stop_tx(up); - return; - } - - count = TXX9_SIO_TX_FIFO; - do { - sio_out(up, TXX9_SITFIFO, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - up->icount.tx++; - if (uart_circ_empty(xmit)) - break; - } while (--count > 0); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(up); - - if (uart_circ_empty(xmit)) - serial_txx9_stop_tx(up); + uart_port_tx_limited(up, ch, TXX9_SIO_TX_FIFO, + true, + sio_out(up, TXX9_SITFIFO, ch), + ({})); } static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 76452fe2af867425fe48290e0a7cdd7dfe061f7b..7bd0807209299bba3f6c7af65a8d7f492d71e7de 100644 --- 
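A note on the serial-tegra hunks above: the teardown order changes from terminate-then-query to pause, query, then terminate. dmaengine_terminate_all() may free the in-flight descriptor, after which the residue that dmaengine_tx_status() reports for its cookie is no longer meaningful, so the byte count derived from it could be garbage. Pausing first freezes the transfer so a stable residue can be read:

	/* read a stable residue before terminate frees the descriptor */
	dmaengine_pause(chan);
	dmaengine_tx_status(chan, cookie, &state);
	dmaengine_terminate_all(chan);
	count = bytes_requested - state.residue;

Here chan, cookie and bytes_requested stand for the driver's own bookkeeping (tx_dma_chan/tx_cookie/tx_bytes_requested and the rx equivalents in this driver).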
a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1181,10 +1181,7 @@ static void sci_dma_tx_complete(void *arg) spin_lock_irqsave(&port->lock, flags); - xmit->tail += s->tx_dma_len; - xmit->tail &= UART_XMIT_SIZE - 1; - - port->icount.tx += s->tx_dma_len; + uart_xmit_advance(port, s->tx_dma_len); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c index 7fb6760b5c37ad87a1740fb77ead0715afc8210b..1f565a216e748c940db0132c23cccd824a0f7bcb 100644 --- a/drivers/tty/serial/sifive.c +++ b/drivers/tty/serial/sifive.c @@ -288,33 +288,12 @@ static void __ssp_transmit_char(struct sifive_serial_port *ssp, int ch) */ static void __ssp_transmit_chars(struct sifive_serial_port *ssp) { - struct circ_buf *xmit = &ssp->port.state->xmit; - int count; - - if (ssp->port.x_char) { - __ssp_transmit_char(ssp, ssp->port.x_char); - ssp->port.icount.tx++; - ssp->port.x_char = 0; - return; - } - if (uart_circ_empty(xmit) || uart_tx_stopped(&ssp->port)) { - sifive_serial_stop_tx(&ssp->port); - return; - } - count = SIFIVE_TX_FIFO_DEPTH; - do { - __ssp_transmit_char(ssp, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - ssp->port.icount.tx++; - if (uart_circ_empty(xmit)) - break; - } while (--count > 0); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(&ssp->port); + u8 ch; - if (uart_circ_empty(xmit)) - sifive_serial_stop_tx(&ssp->port); + uart_port_tx_limited(&ssp->port, ch, SIFIVE_TX_FIFO_DEPTH, + true, + __ssp_transmit_char(ssp, ch), + ({})); } /** diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c index 342a87967631575f1d857201328e76c9b8fdad55..492a3bdab5ba280c5367533944b8be60a7106bbc 100644 --- a/drivers/tty/serial/sprd_serial.c +++ b/drivers/tty/serial/sprd_serial.c @@ -206,7 +206,6 @@ static void sprd_stop_tx_dma(struct uart_port *port) { struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, port); - struct circ_buf *xmit = &port->state->xmit; struct dma_tx_state state; u32 trans_len; @@ -215,8 +214,7 @@ static void sprd_stop_tx_dma(struct uart_port *port) dmaengine_tx_status(sp->tx_dma.chn, sp->tx_dma.cookie, &state); if (state.residue) { trans_len = state.residue - sp->tx_dma.phys_addr; - xmit->tail = (xmit->tail + trans_len) & (UART_XMIT_SIZE - 1); - port->icount.tx += trans_len; + uart_xmit_advance(port, trans_len); dma_unmap_single(port->dev, sp->tx_dma.phys_addr, sp->tx_dma.trans_len, DMA_TO_DEVICE); } @@ -253,8 +251,7 @@ static void sprd_complete_tx_dma(void *data) dma_unmap_single(port->dev, sp->tx_dma.phys_addr, sp->tx_dma.trans_len, DMA_TO_DEVICE); - xmit->tail = (xmit->tail + sp->tx_dma.trans_len) & (UART_XMIT_SIZE - 1); - port->icount.tx += sp->tx_dma.trans_len; + uart_xmit_advance(port, sp->tx_dma.trans_len); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); @@ -626,35 +623,12 @@ static inline void sprd_rx(struct uart_port *port) static inline void sprd_tx(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; - int count; - - if (port->x_char) { - serial_out(port, SPRD_TXD, port->x_char); - port->icount.tx++; - port->x_char = 0; - return; - } - - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { - sprd_stop_tx(port); - return; - } - - count = THLD_TX_EMPTY; - do { - serial_out(port, SPRD_TXD, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - if (uart_circ_empty(xmit)) - break; - } while 
(--count > 0); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); + u8 ch; - if (uart_circ_empty(xmit)) - sprd_stop_tx(port); + uart_port_tx_limited(port, ch, THLD_TX_EMPTY, + true, + serial_out(port, SPRD_TXD, ch), + ({})); } /* this handles the interrupt from one port */ diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c index fcecea689a0ddd3e52bcebf887b7c6c027c515e8..5215e6910f6869f0eb3d4dafd1ad87e5aea37fa5 100644 --- a/drivers/tty/serial/st-asc.c +++ b/drivers/tty/serial/st-asc.c @@ -237,50 +237,12 @@ static inline unsigned asc_hw_txroom(struct uart_port *port) */ static void asc_transmit_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; - int txroom; - unsigned char c; - - txroom = asc_hw_txroom(port); - - if ((txroom != 0) && port->x_char) { - c = port->x_char; - port->x_char = 0; - asc_out(port, ASC_TXBUF, c); - port->icount.tx++; - txroom = asc_hw_txroom(port); - } - - if (uart_tx_stopped(port)) { - /* - * We should try and stop the hardware here, but I - * don't think the ASC has any way to do that. - */ - asc_disable_tx_interrupts(port); - return; - } - - if (uart_circ_empty(xmit)) { - asc_disable_tx_interrupts(port); - return; - } - - if (txroom == 0) - return; - - do { - c = xmit->buf[xmit->tail]; - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - asc_out(port, ASC_TXBUF, c); - port->icount.tx++; - txroom--; - } while ((txroom > 0) && (!uart_circ_empty(xmit))); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); + u8 ch; - if (uart_circ_empty(xmit)) - asc_disable_tx_interrupts(port); + uart_port_tx_limited(port, ch, asc_hw_txroom(port), + true, + asc_out(port, ASC_TXBUF, ch), + ({})); } static void asc_receive_chars(struct uart_port *port) diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index dfdbcf092facc2a5afe79194e4ab4ef2038da4e5..a1490033aa164b77ae884e34749fab32d28fe511 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -596,8 +596,7 @@ static void stm32_usart_transmit_chars_pio(struct uart_port *port) if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE)) break; writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); } /* rely on TXE irq (mask or unmask) for sending remaining data */ @@ -673,8 +672,8 @@ static void stm32_usart_transmit_chars_dma(struct uart_port *port) stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT); - xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); - port->icount.tx += count; + uart_xmit_advance(port, count); + return; fallback_err: @@ -1681,22 +1680,10 @@ static int stm32_usart_serial_probe(struct platform_device *pdev) if (!stm32port->info) return -EINVAL; - ret = stm32_usart_init_port(stm32port, pdev); - if (ret) - return ret; - - if (stm32port->wakeup_src) { - device_set_wakeup_capable(&pdev->dev, true); - ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq); - if (ret) - goto err_deinit_port; - } - stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx"); - if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto err_wakeirq; - } + if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) + return -EPROBE_DEFER; + /* Fall back in interrupt mode for any non-deferral error */ if (IS_ERR(stm32port->rx_ch)) stm32port->rx_ch = NULL; @@ -1710,6 +1697,17 @@ static int stm32_usart_serial_probe(struct platform_device *pdev) if 
(IS_ERR(stm32port->tx_ch)) stm32port->tx_ch = NULL; + ret = stm32_usart_init_port(stm32port, pdev); + if (ret) + goto err_dma_tx; + + if (stm32port->wakeup_src) { + device_set_wakeup_capable(&pdev->dev, true); + ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq); + if (ret) + goto err_deinit_port; + } + if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) { /* Fall back in interrupt mode */ dma_release_channel(stm32port->rx_ch); @@ -1746,19 +1744,11 @@ err_port: pm_runtime_set_suspended(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); - if (stm32port->tx_ch) { + if (stm32port->tx_ch) stm32_usart_of_dma_tx_remove(stm32port, pdev); - dma_release_channel(stm32port->tx_ch); - } - if (stm32port->rx_ch) stm32_usart_of_dma_rx_remove(stm32port, pdev); -err_dma_rx: - if (stm32port->rx_ch) - dma_release_channel(stm32port->rx_ch); - -err_wakeirq: if (stm32port->wakeup_src) dev_pm_clear_wake_irq(&pdev->dev); @@ -1768,6 +1758,14 @@ err_deinit_port: stm32_usart_deinit_port(stm32port); +err_dma_tx: + if (stm32port->tx_ch) + dma_release_channel(stm32port->tx_ch); + +err_dma_rx: + if (stm32port->rx_ch) + dma_release_channel(stm32port->rx_ch); + return ret; } diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index 1938ba5e98c0eee0bed60379b59c83ea76fce577..16c746a632587ec2f08484be24ef42fc81eaeeac 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c @@ -47,8 +47,7 @@ static void transmit_chars_putchar(struct uart_port *port, struct circ_buf *xmit if (status != HV_EOK) break; - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); } } @@ -63,8 +62,7 @@ static void transmit_chars_write(struct uart_port *port, struct circ_buf *xmit) status = sun4v_con_write(ra, len, &sent); if (status != HV_EOK) break; - xmit->tail = (xmit->tail + sent) & (UART_XMIT_SIZE - 1); - port->icount.tx += sent; + uart_xmit_advance(port, sent); } } diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c index 7afe61a0e72e42e53089f831aa3b5e765c4a9b43..727942c43c454e2e71181d72cd66c800c8c80e72 100644 --- a/drivers/tty/serial/sunplus-uart.c +++ b/drivers/tty/serial/sunplus-uart.c @@ -216,9 +216,7 @@ static void transmit_chars(struct uart_port *port) do { sp_uart_put_char(port, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) % UART_XMIT_SIZE; - port->icount.tx++; - + uart_xmit_advance(port, 1); if (uart_circ_empty(xmit)) break; } while (sunplus_tx_buf_not_full(port)); diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c index 99608b2a2b74f2b0482d4b235f927f9a8affbf78..48b39fdb0397a8b98ac0c67972de7dea46ac9288 100644 --- a/drivers/tty/serial/sunsab.c +++ b/drivers/tty/serial/sunsab.c @@ -266,8 +266,7 @@ static void transmit_chars(struct uart_sunsab_port *up, for (i = 0; i < up->port.fifosize; i++) { writeb(xmit->buf[xmit->tail], &up->regs->w.xfifo[i]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - up->port.icount.tx++; + uart_xmit_advance(&up->port, 1); if (uart_circ_empty(xmit)) break; } @@ -453,8 +452,7 @@ static void sunsab_start_tx(struct uart_port *port) for (i = 0; i < up->port.fifosize; i++) { writeb(xmit->buf[xmit->tail], &up->regs->w.xfifo[i]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - up->port.icount.tx++; + uart_xmit_advance(&up->port, 1); if (uart_circ_empty(xmit)) break; } @@ -1133,7 +1131,13 @@ static int __init sunsab_init(void) } } - return platform_driver_register(&sab_driver); + err = platform_driver_register(&sab_driver); + if (err) { 
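The stm32 probe reordering above is easiest to review as a resource-ordering exercise: the DMA channels are now requested first (so -EPROBE_DEFER fires before any side effects), then the port is initialized, then the wake IRQ is armed, and the error labels unwind in exactly the reverse order, each releasing only what was acquired before the failing step. The tail of the new unwind chain:

	err_deinit_port:
		stm32_usart_deinit_port(stm32port);

	err_dma_tx:
		if (stm32port->tx_ch)
			dma_release_channel(stm32port->tx_ch);

	err_dma_rx:
		if (stm32port->rx_ch)
			dma_release_channel(stm32port->rx_ch);

		return ret;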
+ kfree(sunsab_ports); + sunsab_ports = NULL; + } + + return err; } static void __exit sunsab_exit(void) diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index 9ea7e567540d60131abce88902a5c2442ce8edc1..fed052a0b9310005a57f0d99cc80ab73303f4c9d 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c @@ -417,8 +417,7 @@ static void transmit_chars(struct uart_sunsu_port *up) count = up->port.fifosize; do { serial_out(up, UART_TX, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - up->port.icount.tx++; + uart_xmit_advance(&up->port, 1); if (uart_circ_empty(xmit)) break; } while (--count > 0); diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c index 87425290687d5b79c6eca4db66d69b0e1e6df9d7..ccb809216e941d9283cf247d2942a195a2a502bf 100644 --- a/drivers/tty/serial/sunzilog.c +++ b/drivers/tty/serial/sunzilog.c @@ -508,8 +508,7 @@ static void sunzilog_transmit_chars(struct uart_sunzilog_port *up, ZSDELAY(); ZS_WSYNC(channel); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - up->port.icount.tx++; + uart_xmit_advance(&up->port, 1); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); @@ -709,8 +708,7 @@ static void sunzilog_start_tx(struct uart_port *port) ZSDELAY(); ZS_WSYNC(channel); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c index bb19ed012def74097e87e29193d701fb7af28983..0859394a78cdfd6ee08e15c3636d928572803938 100644 --- a/drivers/tty/serial/timbuart.c +++ b/drivers/tty/serial/timbuart.c @@ -101,8 +101,7 @@ static void timbuart_tx_chars(struct uart_port *port) !uart_circ_empty(xmit)) { iowrite8(xmit->buf[xmit->tail], port->membase + TIMBUART_TXFIFO); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); } dev_dbg(port->dev, diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c index eca41ac5477cb8b8a1722c3a643c1720f2ef4b56..94584e54ebbed7e78650da57baf75d473eb00540 100644 --- a/drivers/tty/serial/uartlite.c +++ b/drivers/tty/serial/uartlite.c @@ -203,8 +203,7 @@ static int ulite_transmit(struct uart_port *port, int stat) return 0; uart_out32(xmit->buf[xmit->tail], ULITE_TX, port); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1); - port->icount.tx++; + uart_xmit_advance(port, 1); /* wake up */ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c index 82cf14dd3d433f83a5b07c66b076622614cc4f68..b09b6496ee3ebe881d1d8603d26148ef82baf782 100644 --- a/drivers/tty/serial/ucc_uart.c +++ b/drivers/tty/serial/ucc_uart.c @@ -372,8 +372,7 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port) p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port); while (count < qe_port->tx_fifosize) { *p++ = xmit->buf[xmit->tail]; - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; + uart_xmit_advance(port, 1); count++; if (xmit->head == xmit->tail) break; diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c index 10fbdb09965f3f63aadccb2ad376728e793600f1..cc9157df732f4ddbfe2953765c25cd08f3a25b51 100644 --- a/drivers/tty/serial/vt8500_serial.c +++ b/drivers/tty/serial/vt8500_serial.c @@ -168,7 +168,7 @@ static void handle_rx(struct uart_port *port) c = readw(port->membase + 
VT8500_RXFIFO) & 0x3ff; - /* Mask conditions we're ignorning. */ + /* Mask conditions we're ignoring. */ c &= ~port->read_status_mask; if (c & FER) { @@ -196,33 +196,11 @@ static unsigned int vt8500_tx_empty(struct uart_port *port) static void handle_tx(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; + u8 ch; - if (port->x_char) { - writeb(port->x_char, port->membase + VT8500_TXFIFO); - port->icount.tx++; - port->x_char = 0; - } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { - vt8500_stop_tx(port); - return; - } - - while (vt8500_tx_empty(port)) { - if (uart_circ_empty(xmit)) - break; - - writeb(xmit->buf[xmit->tail], port->membase + VT8500_TXFIFO); - - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); - - if (uart_circ_empty(xmit)) - vt8500_stop_tx(port); + uart_port_tx(port, ch, + vt8500_tx_empty(port), + writeb(ch, port->membase + VT8500_TXFIFO)); } static void vt8500_start_tx(struct uart_port *port) diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 0cbd1892c53b7012fbce8db4fec7723ee9ccfd42..8e521c69a959889af89a863d762397781322dffe 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -326,9 +326,7 @@ static void cdns_uart_handle_tx(void *dev_id) !(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)) { writel(xmit->buf[xmit->tail], port->membase + CDNS_UART_FIFO); - - port->icount.tx++; - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + uart_xmit_advance(port, 1); numbytes--; } diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c index 688db7d8b7488bdb91886ebfb3e5ca6c95065c00..730c648e32ff353af577961ab8213daf1369ce2e 100644 --- a/drivers/tty/serial/zs.c +++ b/drivers/tty/serial/zs.c @@ -623,8 +623,7 @@ static void zs_raw_transmit_chars(struct zs_port *zport) /* Send char. 
*/ write_zsdata(zport, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - zport->port.icount.tx++; + uart_xmit_advance(&zport->port, 1); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&zport->port); diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c index 25e9befdda3a8f07ff4b13bf4e315bc132bc5f79..72b76cdde534d313c5a261079b5b176e02366d79 100644 --- a/drivers/tty/synclink_gt.c +++ b/drivers/tty/synclink_gt.c @@ -1433,16 +1433,8 @@ static int hdlcdev_open(struct net_device *dev) int rc; unsigned long flags; - if (!try_module_get(THIS_MODULE)) - return -EBUSY; - DBGINFO(("%s hdlcdev_open\n", dev->name)); - /* generic HDLC layer open processing */ - rc = hdlc_open(dev); - if (rc) - return rc; - /* arbitrate between network and tty opens */ spin_lock_irqsave(&info->netlock, flags); if (info->port.count != 0 || info->netcount != 0) { @@ -1461,6 +1453,16 @@ static int hdlcdev_open(struct net_device *dev) return rc; } + /* generic HDLC layer open processing */ + rc = hdlc_open(dev); + if (rc) { + shutdown(info); + spin_lock_irqsave(&info->netlock, flags); + info->netcount = 0; + spin_unlock_irqrestore(&info->netlock, flags); + return rc; + } + /* assert RTS and DTR, apply hardware settings */ info->signals |= SerialSignal_RTS | SerialSignal_DTR; program_hw(info); @@ -1506,7 +1508,6 @@ static int hdlcdev_close(struct net_device *dev) info->netcount=0; spin_unlock_irqrestore(&info->netlock, flags); - module_put(THIS_MODULE); return 0; } diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h index 1c08c9b67b16ca3f9990a8fd8b2cdefa619b3786..f45cd683c02ea86d18ff1a2d39c6296acf3437b8 100644 --- a/drivers/tty/tty.h +++ b/drivers/tty/tty.h @@ -93,7 +93,7 @@ void tty_ldisc_release(struct tty_struct *tty); int __must_check tty_ldisc_init(struct tty_struct *tty); void tty_ldisc_deinit(struct tty_struct *tty); -void tty_sysctl_init(void); +extern int tty_ldisc_autoload; /* tty_audit.c */ #ifdef CONFIG_AUDIT diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index 5e287dedce0185e5221e0d6b8a71adad32ca39b7..2df86ed9057426e545e7524f0ab2830d6ba74baa 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -21,7 +21,7 @@ #include "tty.h" #define MIN_TTYB_SIZE 256 -#define TTYB_ALIGN_MASK 255 +#define TTYB_ALIGN_MASK 0xff /* * Byte threshold to limit memory consumption for flip buffers. @@ -37,7 +37,7 @@ * logic this must match. */ -#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF) +#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~TTYB_ALIGN_MASK) /** * tty_buffer_lock_exclusive - gain exclusive access to buffer @@ -107,7 +107,7 @@ static void tty_buffer_reset(struct tty_buffer *p, size_t size) p->commit = 0; p->lookahead = 0; p->read = 0; - p->flags = 0; + p->flags = true; } /** @@ -249,7 +249,7 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld) * __tty_buffer_request_room - grow tty buffer if needed * @port: tty port * @size: size desired - * @flags: buffer flags if new buffer allocated (default = 0) + * @flags: buffer has to store flags along character data * * Make at least @size bytes of linear space available for the tty buffer. * @@ -260,19 +260,19 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld) * Returns: the size we managed to find. 
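The synclink_gt reordering above fixes an error-path asymmetry: previously hdlc_open() ran before the tty-vs-netdev arbitration, so a busy port returned -EBUSY with the HDLC layer left open. Doing the cheap arbitration first leaves a single acquisition to unwind when hdlc_open() fails, and the unwind is now explicit:

	rc = hdlc_open(dev);
	if (rc) {
		shutdown(info);
		spin_lock_irqsave(&info->netlock, flags);
		info->netcount = 0;
		spin_unlock_irqrestore(&info->netlock, flags);
		return rc;
	}

The try_module_get()/module_put() pair disappears with it; the diff treats holding an extra self-reference across a net_device open/close as unnecessary.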
*/ static int __tty_buffer_request_room(struct tty_port *port, size_t size, - int flags) + bool flags) { struct tty_bufhead *buf = &port->buf; struct tty_buffer *b, *n; int left, change; b = buf->tail; - if (b->flags & TTYB_NORMAL) + if (!b->flags) left = 2 * b->size - b->used; else left = b->size - b->used; - change = (b->flags & TTYB_NORMAL) && (~flags & TTYB_NORMAL); + change = !b->flags && flags; if (change || left < size) { /* This is the slow path - looking for new buffers to use */ n = tty_buffer_alloc(port, size); @@ -300,7 +300,7 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size, int tty_buffer_request_room(struct tty_port *port, size_t size) { - return __tty_buffer_request_room(port, size, 0); + return __tty_buffer_request_room(port, size, true); } EXPORT_SYMBOL_GPL(tty_buffer_request_room); @@ -320,17 +320,17 @@ int tty_insert_flip_string_fixed_flag(struct tty_port *port, const unsigned char *chars, char flag, size_t size) { int copied = 0; + bool flags = flag != TTY_NORMAL; do { int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE); - int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0; int space = __tty_buffer_request_room(port, goal, flags); struct tty_buffer *tb = port->buf.tail; if (unlikely(space == 0)) break; memcpy(char_buf_ptr(tb, tb->used), chars, space); - if (~tb->flags & TTYB_NORMAL) + if (tb->flags) memset(flag_buf_ptr(tb, tb->used), flag, space); tb->used += space; copied += space; @@ -393,13 +393,13 @@ EXPORT_SYMBOL(tty_insert_flip_string_flags); int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag) { struct tty_buffer *tb; - int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0; + bool flags = flag != TTY_NORMAL; if (!__tty_buffer_request_room(port, 1, flags)) return 0; tb = port->buf.tail; - if (~tb->flags & TTYB_NORMAL) + if (tb->flags) *flag_buf_ptr(tb, tb->used) = flag; *char_buf_ptr(tb, tb->used++) = ch; @@ -424,13 +424,13 @@ EXPORT_SYMBOL(__tty_insert_flip_char); int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars, size_t size) { - int space = __tty_buffer_request_room(port, size, TTYB_NORMAL); + int space = __tty_buffer_request_room(port, size, false); if (likely(space)) { struct tty_buffer *tb = port->buf.tail; *chars = char_buf_ptr(tb, tb->used); - if (~tb->flags & TTYB_NORMAL) + if (tb->flags) memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space); tb->used += space; } @@ -492,7 +492,7 @@ static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head) unsigned char *p, *f = NULL; p = char_buf_ptr(head, head->lookahead); - if (~head->flags & TTYB_NORMAL) + if (head->flags) f = flag_buf_ptr(head, head->lookahead); port->client_ops->lookahead_buf(port, p, f, count); @@ -509,7 +509,7 @@ receive_buf(struct tty_port *port, struct tty_buffer *head, int count) const char *f = NULL; int n; - if (~head->flags & TTYB_NORMAL) + if (head->flags) f = flag_buf_ptr(head, head->read); n = port->client_ops->receive_buf(port, p, f, count); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index cafdff5757160c876d4e9cb78015f0df2be843d0..3149114bf130e8d30c55a3e9789824eb57bf78f3 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2255,6 +2255,7 @@ static int tty_fasync(int fd, struct file *filp, int on) return retval; } +static bool tty_legacy_tiocsti __read_mostly = IS_ENABLED(CONFIG_LEGACY_TIOCSTI); /** * tiocsti - fake input character * @tty: tty to fake input into @@ -2273,6 +2274,9 @@ static int tiocsti(struct tty_struct *tty, char __user *p) char ch, mbz = 0; struct 
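One polarity subtlety in the tty_buffer conversion above: the old int flags field carried TTYB_NORMAL, which meant "no per-character flag bytes", while the new bool flags means "flag bytes are present". Every test therefore inverts:

	/* before: bitmask, TTYB_NORMAL set => data only, no flag array */
	if (~head->flags & TTYB_NORMAL)
		f = flag_buf_ptr(head, head->read);

	/* after: bool, true => a flag array follows the character data */
	if (head->flags)
		f = flag_buf_ptr(head, head->read);

Consistently, tty_buffer_request_room() now asks for a flagged buffer (true) where it used to pass 0, and tty_prepare_flip_string() passes false where it used to pass TTYB_NORMAL.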
tty_ldisc *ld; + if (!tty_legacy_tiocsti) + return -EIO; + if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(ch, p)) @@ -3494,7 +3498,7 @@ void tty_default_fops(struct file_operations *fops) *fops = tty_fops; } -static char *tty_devnode(struct device *dev, umode_t *mode) +static char *tty_devnode(const struct device *dev, umode_t *mode) { if (!mode) return NULL; @@ -3588,13 +3592,51 @@ void console_sysfs_notify(void) sysfs_notify(&consdev->kobj, NULL, "active"); } +static struct ctl_table tty_table[] = { + { + .procname = "legacy_tiocsti", + .data = &tty_legacy_tiocsti, + .maxlen = sizeof(tty_legacy_tiocsti), + .mode = 0644, + .proc_handler = proc_dobool, + }, + { + .procname = "ldisc_autoload", + .data = &tty_ldisc_autoload, + .maxlen = sizeof(tty_ldisc_autoload), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + { } +}; + +static struct ctl_table tty_dir_table[] = { + { + .procname = "tty", + .mode = 0555, + .child = tty_table, + }, + { } +}; + +static struct ctl_table tty_root_table[] = { + { + .procname = "dev", + .mode = 0555, + .child = tty_dir_table, + }, + { } +}; + /* * Ok, now we can initialize the rest of the tty devices and can count * on memory allocations, interrupts etc.. */ int __init tty_init(void) { - tty_sysctl_init(); + register_sysctl_table(tty_root_table); cdev_init(&tty_cdev, &tty_fops); if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) || register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0) @@ -3616,4 +3658,3 @@ int __init tty_init(void) #endif return 0; } - diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 776d8a62f77cc8374ebea10901c7b988a8d71d02..e758f44729e7e54d00ea79461adeefab59f1efee 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -117,7 +117,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops) raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags); } -static int tty_ldisc_autoload = IS_BUILTIN(CONFIG_LDISC_AUTOLOAD); +int tty_ldisc_autoload = IS_BUILTIN(CONFIG_LDISC_AUTOLOAD); /** * tty_ldisc_get - take a reference to an ldisc @@ -817,39 +817,3 @@ void tty_ldisc_deinit(struct tty_struct *tty) tty_ldisc_put(tty->ldisc); tty->ldisc = NULL; } - -static struct ctl_table tty_table[] = { - { - .procname = "ldisc_autoload", - .data = &tty_ldisc_autoload, - .maxlen = sizeof(tty_ldisc_autoload), - .mode = 0644, - .proc_handler = proc_dointvec, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE, - }, - { } -}; - -static struct ctl_table tty_dir_table[] = { - { - .procname = "tty", - .mode = 0555, - .child = tty_table, - }, - { } -}; - -static struct ctl_table tty_root_table[] = { - { - .procname = "dev", - .mode = 0555, - .child = tty_dir_table, - }, - { } -}; - -void tty_sysctl_init(void) -{ - register_sysctl_table(tty_root_table); -} diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c index 53aea56d1de135bf39ef2d013c3fe52b85cbe9d1..883f0e44b54e76dac719a0373259b65fc8edeccb 100644 --- a/drivers/ufs/core/ufs-sysfs.c +++ b/drivers/ufs/core/ufs-sysfs.c @@ -1234,8 +1234,7 @@ static ssize_t _pname##_show(struct device *dev, \ struct scsi_device *sdev = to_scsi_device(dev); \ struct ufs_hba *hba = shost_priv(sdev->host); \ u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \ - if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, \ - _duname##_DESC_PARAM##_puname)) \ + if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \ return -EINVAL; \ return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \ lun, 
_duname##_DESC_PARAM##_puname, buf, _size); \ @@ -1286,9 +1285,27 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = { NULL, }; +static umode_t ufs_unit_descriptor_is_visible(struct kobject *kobj, struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct scsi_device *sdev = to_scsi_device(dev); + u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); + umode_t mode = attr->mode; + + if (lun == UFS_UPIU_BOOT_WLUN || lun == UFS_UPIU_UFS_DEVICE_WLUN) + /* Boot and device WLUN have no unit descriptors */ + mode = 0; + if (lun == UFS_UPIU_RPMB_WLUN && attr == &dev_attr_wb_buf_alloc_units.attr) + mode = 0; + + return mode; +} + + const struct attribute_group ufs_sysfs_unit_descriptor_group = { .name = "unit_descriptor", .attrs = ufs_sysfs_unit_descriptor, + .is_visible = ufs_unit_descriptor_is_visible, }; static ssize_t dyn_cap_needed_attribute_show(struct device *dev, diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index f68ca33f6ac78c7ce2fdb656ed51421f7841e102..a9e8e1f5afe7aa8def71d1648904f55ef66d2a35 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -293,16 +293,12 @@ static inline int ufshcd_rpm_put(struct ufs_hba *hba) * @lun: LU number to check * @return: true if the lun has a matching unit descriptor, false otherwise */ -static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, - u8 lun, u8 param_offset) +static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8 lun) { if (!dev_info || !dev_info->max_lu_supported) { pr_err("Max General LU supported by UFS isn't initialized\n"); return false; } - /* WB is available only for the logical unit from 0 to 7 */ - if (param_offset == UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS) - return lun < UFS_UPIU_MAX_WB_LUN_ID; return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported); } diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index d2b11d5b91ce456dd80324e8c390ba2ff324aeeb..e18c9f4463ec592a70b95fa7e788e33286a86caf 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -486,6 +486,9 @@ static void ufshcd_print_evt_hist(struct ufs_hba *hba) ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail"); ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR, "suspend_fail"); + ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail"); + ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR, + "wlun suspend_fail"); ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset"); ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset"); ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort"); @@ -2013,7 +2016,6 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba) destroy_workqueue(hba->clk_gating.clk_gating_workq); } -/* Must be called with host lock acquired */ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) { bool queue_resume_work = false; @@ -3606,7 +3608,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, * Unit descriptors are only available for general purpose LUs (LUN id * from 0 to 7) and RPMB Well known LU. 
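ufs_unit_descriptor_is_visible() above uses the stock sysfs mechanism: when an attribute_group carries an .is_visible callback, it is invoked once per attribute at group registration, and the returned mode (0 to hide, usually attr->mode to keep) is applied per device. A minimal skeleton of the pattern, with a hypothetical predicate standing in for the WLUN tests:

	static umode_t my_group_is_visible(struct kobject *kobj,
					   struct attribute *attr, int n)
	{
		struct device *dev = kobj_to_dev(kobj);

		if (!my_device_supports(dev, attr))	/* hypothetical check */
			return 0;			/* attribute is hidden */

		return attr->mode;			/* exposed unchanged */
	}

This is what lets the ufshcd-priv change drop the per-parameter LUN check: attributes that could never be read on those WLUNs are no longer created there in the first place.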
*/ - if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset)) + if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) return -EOPNOTSUPP; return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, @@ -4478,7 +4480,7 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba) QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL); if (err) { dev_err(hba->dev, - "%s setting fDeviceInit flag failed with error %d\n", + "%s: setting fDeviceInit flag failed with error %d\n", __func__, err); goto out; } @@ -4495,11 +4497,11 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba) if (err) { dev_err(hba->dev, - "%s reading fDeviceInit flag failed with error %d\n", + "%s: reading fDeviceInit flag failed with error %d\n", __func__, err); } else if (flag_res) { dev_err(hba->dev, - "%s fDeviceInit was not cleared by the device\n", + "%s: fDeviceInit was not cleared by the device\n", __func__); err = -EBUSY; } @@ -4666,14 +4668,18 @@ int ufshcd_hba_enable(struct ufs_hba *hba) /* enable UIC related interrupts */ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); ret = ufshcd_dme_reset(hba); - if (!ret) { - ret = ufshcd_dme_enable(hba); - if (!ret) - ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); - if (ret) - dev_err(hba->dev, - "Host controller enable failed with non-hce\n"); + if (ret) { + dev_err(hba->dev, "DME_RESET failed\n"); + return ret; + } + + ret = ufshcd_dme_enable(hba); + if (ret) { + dev_err(hba->dev, "Enabling DME failed\n"); + return ret; } + + ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); } else { ret = ufshcd_hba_execute_hce(hba); } @@ -4859,100 +4865,6 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) return err; } -/** - * ufshcd_set_queue_depth - set lun queue depth - * @sdev: pointer to SCSI device - * - * Read bLUQueueDepth value and activate scsi tagged command - * queueing. For WLUN, queue depth is set to 1. For best-effort - * cases (bLUQueueDepth = 0) the queue depth is set to a maximum - * value that host can queue. - */ -static void ufshcd_set_queue_depth(struct scsi_device *sdev) -{ - int ret = 0; - u8 lun_qdepth; - struct ufs_hba *hba; - - hba = shost_priv(sdev->host); - - lun_qdepth = hba->nutrs; - ret = ufshcd_read_unit_desc_param(hba, - ufshcd_scsi_to_upiu_lun(sdev->lun), - UNIT_DESC_PARAM_LU_Q_DEPTH, - &lun_qdepth, - sizeof(lun_qdepth)); - - /* Some WLUN doesn't support unit descriptor */ - if (ret == -EOPNOTSUPP) - lun_qdepth = 1; - else if (!lun_qdepth) - /* eventually, we can figure out the real queue depth */ - lun_qdepth = hba->nutrs; - else - lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); - - dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", - __func__, lun_qdepth); - scsi_change_queue_depth(sdev, lun_qdepth); -} - -/* - * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR - * @hba: per-adapter instance - * @lun: UFS device lun id - * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info - * - * Returns 0 in case of success and b_lu_write_protect status would be returned - * @b_lu_write_protect parameter. - * Returns -ENOTSUPP if reading b_lu_write_protect is not supported. - * Returns -EINVAL in case of invalid parameters passed to this function. - */ -static int ufshcd_get_lu_wp(struct ufs_hba *hba, - u8 lun, - u8 *b_lu_write_protect) -{ - int ret; - - if (!b_lu_write_protect) - ret = -EINVAL; - /* - * According to UFS device spec, RPMB LU can't be write - * protected so skip reading bLUWriteProtect parameter for - * it. For other W-LUs, UNIT DESCRIPTOR is not available. 
- */ - else if (lun >= hba->dev_info.max_lu_supported) - ret = -ENOTSUPP; - else - ret = ufshcd_read_unit_desc_param(hba, - lun, - UNIT_DESC_PARAM_LU_WR_PROTECT, - b_lu_write_protect, - sizeof(*b_lu_write_protect)); - return ret; -} - -/** - * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect - * status - * @hba: per-adapter instance - * @sdev: pointer to SCSI device - * - */ -static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, - const struct scsi_device *sdev) -{ - if (hba->dev_info.f_power_on_wp_en && - !hba->dev_info.is_lu_power_on_wp) { - u8 b_lu_write_protect; - - if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), - &b_lu_write_protect) && - (b_lu_write_protect == UFS_LU_POWER_ON_WP)) - hba->dev_info.is_lu_power_on_wp = true; - } -} - /** * ufshcd_setup_links - associate link b/w device wlun and other luns * @sdev: pointer to SCSI device @@ -4990,6 +4902,58 @@ static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev) } } +/** + * ufshcd_lu_init - Initialize the relevant parameters of the LU + * @hba: per-adapter instance + * @sdev: pointer to SCSI device + */ +static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev) +{ + int len = hba->desc_size[QUERY_DESC_IDN_UNIT]; + u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); + u8 lun_qdepth = hba->nutrs; + u8 *desc_buf; + int ret; + + desc_buf = kzalloc(len, GFP_KERNEL); + if (!desc_buf) + goto set_qdepth; + + ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len); + if (ret < 0) { + if (ret == -EOPNOTSUPP) + /* If LU doesn't support unit descriptor, its queue depth is set to 1 */ + lun_qdepth = 1; + kfree(desc_buf); + goto set_qdepth; + } + + if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) { + /* + * In per-LU queueing architecture, bLUQueueDepth will not be 0, then we will + * use the smaller between UFSHCI CAP.NUTRS and UFS LU bLUQueueDepth + */ + lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs); + } + /* + * According to UFS device specification, the write protection mode is only supported by + * normal LU, not supported by WLUN. + */ + if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported && + !hba->dev_info.is_lu_power_on_wp && + desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP) + hba->dev_info.is_lu_power_on_wp = true; + + kfree(desc_buf); +set_qdepth: + /* + * For WLUNs that don't support unit descriptor, queue depth is set to 1. For LUs whose + * bLUQueueDepth == 0, the queue depth is set to a maximum value that host can queue. + */ + dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth); + scsi_change_queue_depth(sdev, lun_qdepth); +} + /** * ufshcd_slave_alloc - handle initial SCSI device configurations * @sdev: pointer to SCSI device @@ -5017,9 +4981,7 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) /* WRITE_SAME command is not supported */ sdev->no_write_same = 1; - ufshcd_set_queue_depth(sdev); - - ufshcd_get_lu_power_on_wp_status(hba, sdev); + ufshcd_lu_init(hba, sdev); ufshcd_setup_links(hba, sdev); @@ -5382,6 +5344,26 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, } } +/* Any value that is not an existing queue number is fine for this constant. 
*/ +enum { + UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1 +}; + +static void ufshcd_clear_polled(struct ufs_hba *hba, + unsigned long *completed_reqs) +{ + int tag; + + for_each_set_bit(tag, completed_reqs, hba->nutrs) { + struct scsi_cmnd *cmd = hba->lrb[tag].cmd; + + if (!cmd) + continue; + if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED) + __clear_bit(tag, completed_reqs); + } +} + /* * Returns > 0 if one or more commands have been completed or 0 if no * requests have been completed. @@ -5398,13 +5380,17 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num) WARN_ONCE(completed_reqs & ~hba->outstanding_reqs, "completed: %#lx; outstanding: %#lx\n", completed_reqs, hba->outstanding_reqs); + if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) { + /* Do not complete polled requests from interrupt context. */ + ufshcd_clear_polled(hba, &completed_reqs); + } hba->outstanding_reqs &= ~completed_reqs; spin_unlock_irqrestore(&hba->outstanding_lock, flags); if (completed_reqs) __ufshcd_transfer_req_compl(hba, completed_reqs); - return completed_reqs; + return completed_reqs != 0; } /** @@ -5435,7 +5421,7 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we * do not want polling to trigger spurious interrupt complaints. */ - ufshcd_poll(hba->host, 0); + ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT); return IRQ_HANDLED; } @@ -6199,6 +6185,38 @@ static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba) return false; } +static bool ufshcd_abort_all(struct ufs_hba *hba) +{ + bool needs_reset = false; + int tag, ret; + + /* Clear pending transfer requests */ + for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { + ret = ufshcd_try_to_abort_task(hba, tag); + dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag, + hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1, + ret ? "failed" : "succeeded"); + if (ret) { + needs_reset = true; + goto out; + } + } + + /* Clear pending task management requests */ + for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { + if (ufshcd_clear_tm_cmd(hba, tag)) { + needs_reset = true; + goto out; + } + } + +out: + /* Complete the requests that are cleared by s/w */ + ufshcd_complete_requests(hba); + + return needs_reset; +} + /** * ufshcd_err_handler - handle UFS errors that require s/w attention * @work: pointer to work structure @@ -6210,10 +6228,7 @@ static void ufshcd_err_handler(struct work_struct *work) unsigned long flags; bool needs_restore; bool needs_reset; - bool err_xfer; - bool err_tm; int pmc_err; - int tag; hba = container_of(work, struct ufs_hba, eh_work); @@ -6242,8 +6257,6 @@ static void ufshcd_err_handler(struct work_struct *work) again: needs_restore = false; needs_reset = false; - err_xfer = false; - err_tm = false; if (hba->ufshcd_state != UFSHCD_STATE_ERROR) hba->ufshcd_state = UFSHCD_STATE_RESET; @@ -6312,34 +6325,13 @@ again: hba->silence_err_logs = true; /* release lock as clear command might sleep */ spin_unlock_irqrestore(hba->host->host_lock, flags); - /* Clear pending transfer requests */ - for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { - if (ufshcd_try_to_abort_task(hba, tag)) { - err_xfer = true; - goto lock_skip_pending_xfer_clear; - } - dev_err(hba->dev, "Aborted tag %d / CDB %#02x\n", tag, - hba->lrb[tag].cmd ? 
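The UFSHCD_POLL_FROM_INTERRUPT_CONTEXT sentinel above is worth dwelling on. Requests submitted with REQ_POLLED are completed by the block layer's poll loop (blk_mq_poll()) on the submitting CPU, not by the IRQ handler, so completing one from hard-IRQ context can race with the poller. When ufshcd_poll() is invoked from the interrupt path it therefore filters those tags out first; the filter itself is just:

	if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
		__clear_bit(tag, completed_reqs);

The filtered tags remain in outstanding_reqs and are reaped by a later blk_mq_poll() pass. Any value that can never be a real queue number works for the sentinel, hence -1.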
hba->lrb[tag].cmd->cmnd[0] : -1); - } - - /* Clear pending task management requests */ - for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { - if (ufshcd_clear_tm_cmd(hba, tag)) { - err_tm = true; - goto lock_skip_pending_xfer_clear; - } - } -lock_skip_pending_xfer_clear: - /* Complete the requests that are cleared by s/w */ - ufshcd_complete_requests(hba); + needs_reset = ufshcd_abort_all(hba); spin_lock_irqsave(hba->host->host_lock, flags); hba->silence_err_logs = false; - if (err_xfer || err_tm) { - needs_reset = true; + if (needs_reset) goto do_reset; - } /* * After all reqs and tasks are cleared from doorbell, @@ -8293,6 +8285,28 @@ out: } } +static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd) +{ + struct ufs_hba *hba = shost_priv(scmd->device->host); + + if (!hba->system_suspending) { + /* Activate the error handler in the SCSI core. */ + return SCSI_EH_NOT_HANDLED; + } + + /* + * If we get here we know that no TMFs are outstanding and also that + * the only pending command is a START STOP UNIT command. Handle the + * timeout of that command directly to prevent a deadlock between + * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler(). + */ + ufshcd_link_recovery(hba); + dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n", + __func__, hba->outstanding_tasks); + + return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE; +} + static const struct attribute_group *ufshcd_driver_groups[] = { &ufs_sysfs_unit_descriptor_group, &ufs_sysfs_lun_attributes_group, @@ -8327,6 +8341,7 @@ static struct scsi_host_template ufshcd_driver_template = { .eh_abort_handler = ufshcd_abort, .eh_device_reset_handler = ufshcd_eh_device_reset_handler, .eh_host_reset_handler = ufshcd_eh_host_reset_handler, + .eh_timed_out = ufshcd_eh_timed_out, .this_id = -1, .sg_tablesize = SG_ALL, .cmd_per_lun = UFSHCD_CMD_PER_LUN, @@ -8730,6 +8745,40 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) } } +static int ufshcd_execute_start_stop(struct scsi_device *sdev, + enum ufs_dev_pwr_mode pwr_mode, + struct scsi_sense_hdr *sshdr) +{ + unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 }; + struct request *req; + struct scsi_cmnd *scmd; + int ret; + + req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, + BLK_MQ_REQ_PM); + if (IS_ERR(req)) + return PTR_ERR(req); + + scmd = blk_mq_rq_to_pdu(req); + scmd->cmd_len = COMMAND_SIZE(cdb[0]); + memcpy(scmd->cmnd, cdb, scmd->cmd_len); + scmd->allowed = 0/*retries*/; + scmd->flags |= SCMD_FAIL_IF_RECOVERING; + req->timeout = 1 * HZ; + req->rq_flags |= RQF_PM | RQF_QUIET; + + blk_execute_rq(req, /*at_head=*/true); + + if (sshdr) + scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len, + sshdr); + ret = scmd->result; + + blk_mq_free_request(req); + + return ret; +} + /** * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device * power mode @@ -8742,25 +8791,17 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, enum ufs_dev_pwr_mode pwr_mode) { - unsigned char cmd[6] = { START_STOP }; struct scsi_sense_hdr sshdr; struct scsi_device *sdp; unsigned long flags; int ret, retries; - unsigned long deadline; - int32_t remaining; spin_lock_irqsave(hba->host->host_lock, flags); sdp = hba->ufs_device_wlun; - if (sdp) { + if (sdp && scsi_device_online(sdp)) ret = scsi_device_get(sdp); - if (!ret && !scsi_device_online(sdp)) { - ret = -ENODEV; - scsi_device_put(sdp); - } - } else { + else ret = -ENODEV; - } spin_unlock_irqrestore(hba->host->host_lock, 
flags); if (ret) @@ -8774,24 +8815,18 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, */ hba->host->eh_noresume = 1; - cmd[4] = pwr_mode << 4; - /* * Current function would be generally called from the power management * callbacks hence set the RQF_PM flag so that it doesn't resume the * already suspended childs. */ - deadline = jiffies + 10 * HZ; for (retries = 3; retries > 0; --retries) { - ret = -ETIMEDOUT; - remaining = deadline - jiffies; - if (remaining <= 0) - break; - ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, - remaining / HZ, 0, 0, RQF_PM, NULL); - if (!scsi_status_is_check_condition(ret) || - !scsi_sense_valid(&sshdr) || - sshdr.sense_key != UNIT_ATTENTION) + ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr); + /* + * scsi_execute() only returns a negative value if the request + * queue is dying. + */ + if (ret <= 0) break; } if (ret) { @@ -8803,10 +8838,9 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, scsi_print_sense_hdr(sdp, NULL, &sshdr); ret = -EIO; } - } - - if (!ret) + } else { hba->curr_dev_pwr_mode = pwr_mode; + } scsi_device_put(sdp); hba->host->eh_noresume = 0; @@ -8815,7 +8849,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, static int ufshcd_link_state_transition(struct ufs_hba *hba, enum uic_link_state req_link_state, - int check_for_bkops) + bool check_for_bkops) { int ret = 0; @@ -8966,7 +9000,7 @@ static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) { int ret = 0; - int check_for_bkops; + bool check_for_bkops; enum ufs_pm_level pm_lvl; enum ufs_dev_pwr_mode req_dev_pwr_mode; enum uic_link_state req_link_state; @@ -9259,6 +9293,7 @@ static int ufshcd_wl_suspend(struct device *dev) hba = shost_priv(sdev->host); down(&hba->host_sem); + hba->system_suspending = true; if (pm_runtime_suspended(dev)) goto out; @@ -9300,6 +9335,7 @@ out: hba->curr_dev_pwr_mode, hba->uic_link_state); if (!ret) hba->is_sys_suspended = false; + hba->system_suspending = false; up(&hba->host_sem); return ret; } diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c index b7f412d0f3019ffdd2d9143fd8dbe1f615ec9917..994f4ac9df5a51bada1444877eee7c9983db2ca8 100644 --- a/drivers/ufs/core/ufshpb.c +++ b/drivers/ufs/core/ufshpb.c @@ -233,11 +233,6 @@ next_srgn: rgn = hpb->rgn_tbl + rgn_idx; srgn = rgn->srgn_tbl + srgn_idx; - if (likely(!srgn->is_last)) - bitmap_len = hpb->entries_per_srgn; - else - bitmap_len = hpb->last_srgn_entries; - if (!ufshpb_is_valid_srgn(rgn, srgn)) return true; @@ -253,6 +248,11 @@ next_srgn: return true; } + if (likely(!srgn->is_last)) + bitmap_len = hpb->entries_per_srgn; + else + bitmap_len = hpb->last_srgn_entries; + if ((srgn_offset + cnt) > bitmap_len) bit_len = bitmap_len - srgn_offset; else @@ -2289,7 +2289,7 @@ static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba) /* wait for the device to complete HPB reset query */ for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) { dev_dbg(hba->dev, - "%s start flag reset polling %d times\n", + "%s: start flag reset polling %d times\n", __func__, try); /* Poll fHpbReset flag to be cleared */ @@ -2298,7 +2298,7 @@ static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba) if (err) { dev_err(hba->dev, - "%s reading fHpbReset flag failed with error %d\n", + "%s: reading fHpbReset flag failed with error %d\n", __func__, err); return flag_res; } @@ -2310,7 +2310,7 @@ static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba) } if (flag_res) { dev_err(hba->dev, - "%s 
fHpbReset was not cleared by the device\n", + "%s: fHpbReset was not cleared by the device\n", __func__); } out: diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c index 7309f3f87eacf1982d60fb8721aad2573bddb694..21d9b047539fbf1eac38884d3ef9fdcba4c41adb 100644 --- a/drivers/ufs/host/ufs-mediatek.c +++ b/drivers/ufs/host/ufs-mediatek.c @@ -109,7 +109,7 @@ static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return (host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO); + return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO); } static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) @@ -441,8 +441,6 @@ static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on) if (ufs_mtk_is_va09_supported(hba)) { ufs_mtk_va09_pwr_ctrl(res, 0); ret = regulator_disable(host->reg_va09); - if (ret < 0) - goto out; } } out: @@ -1097,7 +1095,7 @@ static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba) } } -static int ufs_mtk_post_link(struct ufs_hba *hba) +static void ufs_mtk_post_link(struct ufs_hba *hba) { /* enable unipro clock gating feature */ ufs_mtk_cfg_unipro_cg(hba, true); @@ -1108,8 +1106,6 @@ static int ufs_mtk_post_link(struct ufs_hba *hba) FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3); ufs_mtk_setup_clk_gating(hba); - - return 0; } static int ufs_mtk_link_startup_notify(struct ufs_hba *hba, @@ -1122,7 +1118,7 @@ static int ufs_mtk_link_startup_notify(struct ufs_hba *hba, ret = ufs_mtk_pre_link(hba); break; case POST_CHANGE: - ret = ufs_mtk_post_link(hba); + ufs_mtk_post_link(hba); break; default: ret = -EINVAL; @@ -1274,9 +1270,8 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, struct arm_smccc_res res; if (status == PRE_CHANGE) { - if (!ufshcd_is_auto_hibern8_supported(hba)) - return 0; - ufs_mtk_auto_hibern8_disable(hba); + if (ufshcd_is_auto_hibern8_supported(hba)) + ufs_mtk_auto_hibern8_disable(hba); return 0; } diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c index 1106f3376404779bbe5598ee40022814f5b9c861..5313307c2754a8c66ee013687acd7e4bf893aed0 100644 --- a/drivers/uio/uio_dmem_genirq.c +++ b/drivers/uio/uio_dmem_genirq.c @@ -41,6 +41,11 @@ struct uio_dmem_genirq_platdata { unsigned int refcnt; }; +/* Bits in uio_dmem_genirq_platdata.flags */ +enum { + UIO_IRQ_DISABLED = 0, +}; + static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode) { struct uio_dmem_genirq_platdata *priv = info->priv; @@ -110,8 +115,10 @@ static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info) * remember the state so we can allow user space to enable it later. */ - if (!test_and_set_bit(0, &priv->flags)) + spin_lock(&priv->lock); + if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags)) disable_irq_nosync(irq); + spin_unlock(&priv->lock); return IRQ_HANDLED; } @@ -125,20 +132,19 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on) * in the interrupt controller, but keep track of the * state to prevent per-irq depth damage. * - * Serialize this operation to support multiple tasks. + * Serialize this operation to support multiple tasks and concurrency + * with the IRQ handler on SMP systems. */ spin_lock_irqsave(&priv->lock, flags); if (irq_on) { - if (test_and_clear_bit(0, &priv->flags)) + if (__test_and_clear_bit(UIO_IRQ_DISABLED, &priv->flags)) enable_irq(dev_info->irq); - spin_unlock_irqrestore(&priv->lock, flags); } else { - if (!test_and_set_bit(0, &priv->flags)) { - spin_unlock_irqrestore(&priv->lock, flags); - disable_irq(dev_info->irq); - } + if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags)) + disable_irq_nosync(dev_info->irq); } + spin_unlock_irqrestore(&priv->lock, flags); return 0; }
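The uio_dmem_genirq hunk above is worth a closer look: every access to the flags word now happens under priv->lock, which lets the driver use the cheaper non-atomic __test_and_set_bit()/__test_and_clear_bit(), and it switches to disable_irq_nosync() so no sleeping call is ever made while the lock is held. A minimal sketch of the resulting pattern; struct demo and demo_mask() are illustrative names, not part of the patch:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo {
	spinlock_t lock;	/* serializes the handler and irqcontrol */
	unsigned long flags;	/* bit 0: IRQ currently disabled */
	int irq;
};

static void demo_mask(struct demo *d)
{
	unsigned long irqflags;

	spin_lock_irqsave(&d->lock, irqflags);
	/* Non-atomic bitop is fine: all users hold the same lock */
	if (!__test_and_set_bit(0, &d->flags))
		disable_irq_nosync(d->irq);	/* never sleeps */
	spin_unlock_irqrestore(&d->lock, irqflags);
}

disable_irq(), by contrast, waits for a running handler to finish and may sleep, which is why the patched irqcontrol path cannot call it with the spinlock held.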
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c index 7d8eb9dc20681938354c4e320a7600f6cb06dbb2..82dda799f327d8ba7cd13a08e6b1669a9f6d4b43 100644 --- a/drivers/uio/uio_fsl_elbc_gpcm.c +++ b/drivers/uio/uio_fsl_elbc_gpcm.c @@ -390,13 +390,13 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev) info->priv = priv; info->name = uio_name; info->version = "0.0.1"; - if (irq != NO_IRQ) { + if (irq) { if (priv->irq_handler) { info->irq = irq; info->irq_flags = IRQF_SHARED; info->handler = priv->irq_handler; } else { - irq = NO_IRQ; + irq = 0; dev_warn(priv->dev, "ignoring irq, no handler\n"); } } @@ -417,7 +417,7 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev) dev_info(priv->dev, "eLBC/GPCM device (%s) at 0x%llx, bank %d, irq=%d\n", priv->name, (unsigned long long)res.start, priv->bank, - irq != NO_IRQ ? irq : -1); + irq ? : -1); return 0; out_err2: diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 578a439e71b579f218b3df18f8f3a0c1f14a416a..a871a988829dbf0e91e3e1532163cf504b145810 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -111,8 +111,12 @@ source "drivers/usb/usbip/Kconfig" endif +comment "USB dual-mode controller drivers" + source "drivers/usb/cdns3/Kconfig" +source "drivers/usb/fotg210/Kconfig" + source "drivers/usb/mtu3/Kconfig" source "drivers/usb/musb/Kconfig" diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index 643edf5fe18c618ed9898bf70a5009e22fa6ab8b..a81e6ef293af27723c73b3b44571fd5b2998f1f8 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -17,6 +17,8 @@ obj-$(CONFIG_USB_CDNS_SUPPORT) += cdns3/ obj-$(CONFIG_USB_CDNS3) += cdns3/ obj-$(CONFIG_USB_CDNSP_PCI) += cdns3/ +obj-$(CONFIG_USB_FOTG210) += fotg210/ + obj-$(CONFIG_USB_MON) += mon/ obj-$(CONFIG_USB_MTU3) += mtu3/ diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c index fe8a114c586cc784e347a2291b4f3c4369da65c6..efd54ed918b9701e6f765408f298391219ffca3e 100644 --- a/drivers/usb/cdns3/cdnsp-pci.c +++ b/drivers/usb/cdns3/cdnsp-pci.c @@ -192,14 +192,12 @@ static void cdnsp_pci_remove(struct pci_dev *pdev) if (pci_dev_run_wake(pdev)) pm_runtime_get_noresume(&pdev->dev); - if (!pci_is_enabled(func)) { + if (pci_is_enabled(func)) { + cdns_remove(cdnsp); + } else { kfree(cdnsp); - goto pci_put; } - cdns_remove(cdnsp); - -pci_put: pci_dev_put(func); } diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index 2f29431f612e04f7a755fc6f2e32b9b8f01b4730..b23e543b3a3d5ba22871cf17bf4a8f2e43ced3ac 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -2006,10 +2006,11 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) { - u32 field, length_field, remainder; + u32 field, length_field, zlp = 0; struct cdnsp_ep *pep = preq->pep; struct cdnsp_ring *ep_ring; int num_trbs; + u32 maxp; int ret; ep_ring = cdnsp_request_to_transfer_ring(pdev, preq); @@
-2019,26 +2020,33 @@ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) /* 1 TRB for data, 1 for status */ num_trbs = (pdev->three_stage_setup) ? 2 : 1; + maxp = usb_endpoint_maxp(pep->endpoint.desc); + + if (preq->request.zero && preq->request.length && + (preq->request.length % maxp == 0)) { + num_trbs++; + zlp = 1; + } + ret = cdnsp_prepare_transfer(pdev, preq, num_trbs); if (ret) return ret; /* If there's data, queue data TRBs */ - if (pdev->ep0_expect_in) - field = TRB_TYPE(TRB_DATA) | TRB_IOC; - else - field = TRB_ISP | TRB_TYPE(TRB_DATA) | TRB_IOC; - if (preq->request.length > 0) { - remainder = cdnsp_td_remainder(pdev, 0, preq->request.length, - preq->request.length, preq, 1, 0); + field = TRB_TYPE(TRB_DATA); - length_field = TRB_LEN(preq->request.length) | - TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); + if (zlp) + field |= TRB_CHAIN; + else + field |= TRB_IOC | (pdev->ep0_expect_in ? 0 : TRB_ISP); if (pdev->ep0_expect_in) field |= TRB_DIR_IN; + length_field = TRB_LEN(preq->request.length) | + TRB_TD_SIZE(zlp) | TRB_INTR_TARGET(0); + cdnsp_queue_trb(pdev, ep_ring, true, lower_32_bits(preq->request.dma), upper_32_bits(preq->request.dma), length_field, @@ -2046,6 +2054,20 @@ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) TRB_SETUPID(pdev->setup_id) | pdev->setup_speed); + if (zlp) { + field = TRB_TYPE(TRB_NORMAL) | TRB_IOC; + + if (!pdev->ep0_expect_in) + field = TRB_ISP; + + cdnsp_queue_trb(pdev, ep_ring, true, + lower_32_bits(preq->request.dma), + upper_32_bits(preq->request.dma), 0, + field | ep_ring->cycle_state | + TRB_SETUPID(pdev->setup_id) | + pdev->setup_speed); + } + pdev->ep0_stage = CDNSP_DATA_STAGE; } diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h index a4a3be049910990fc4eab2013402e95bd6ee4ccb..005c67cb3afb72cadc171d73cc5c25887a7939b3 100644 --- a/drivers/usb/chipidea/ci.h +++ b/drivers/usb/chipidea/ci.h @@ -127,12 +127,16 @@ enum ci_revision { * struct ci_role_driver - host/gadget role driver * @start: start this role * @stop: stop this role + * @suspend: system suspend handler for this role + * @resume: system resume handler for this role * @irq: irq handler for this role * @name: role name string (host/gadget) */ struct ci_role_driver { int (*start)(struct ci_hdrc *); void (*stop)(struct ci_hdrc *); + void (*suspend)(struct ci_hdrc *ci); + void (*resume)(struct ci_hdrc *ci, bool power_lost); irqreturn_t (*irq)(struct ci_hdrc *); const char *name; }; diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 9ffcecd3058c173d36d603ca386189f6a4cce6f5..0dc482542d8524f364232fe9fe77b155fbf9d0b2 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -355,7 +355,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) data->hsic_pad_regulator = devm_regulator_get_optional(dev, "hsic"); if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) { - /* no pad regualator is needed */ + /* no pad regulator is needed */ data->hsic_pad_regulator = NULL; } else if (IS_ERR(data->hsic_pad_regulator)) return dev_err_probe(dev, PTR_ERR(data->hsic_pad_regulator), @@ -527,16 +527,19 @@ static void ci_hdrc_imx_shutdown(struct platform_device *pdev) ci_hdrc_imx_remove(pdev); } -static int __maybe_unused imx_controller_suspend(struct device *dev) +static int __maybe_unused imx_controller_suspend(struct device *dev, + pm_message_t msg) { struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); int ret = 0; dev_dbg(dev, "at %s\n", __func__); - ret = 
imx_usbmisc_hsic_set_clk(data->usbmisc_data, false); + ret = imx_usbmisc_suspend(data->usbmisc_data, + PMSG_IS_AUTO(msg) || device_may_wakeup(dev)); if (ret) { - dev_err(dev, "usbmisc hsic_set_clk failed, ret=%d\n", ret); + dev_err(dev, + "usbmisc suspend failed, ret=%d\n", ret); return ret; } @@ -549,7 +552,8 @@ static int __maybe_unused imx_controller_suspend(struct device *dev) return 0; } -static int __maybe_unused imx_controller_resume(struct device *dev) +static int __maybe_unused imx_controller_resume(struct device *dev, + pm_message_t msg) { struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); int ret = 0; @@ -570,22 +574,15 @@ static int __maybe_unused imx_controller_resume(struct device *dev) data->in_lpm = false; - ret = imx_usbmisc_set_wakeup(data->usbmisc_data, false); + ret = imx_usbmisc_resume(data->usbmisc_data, + PMSG_IS_AUTO(msg) || device_may_wakeup(dev)); if (ret) { - dev_err(dev, "usbmisc set_wakeup failed, ret=%d\n", ret); + dev_err(dev, "usbmisc resume failed, ret=%d\n", ret); goto clk_disable; } - ret = imx_usbmisc_hsic_set_clk(data->usbmisc_data, true); - if (ret) { - dev_err(dev, "usbmisc hsic_set_clk failed, ret=%d\n", ret); - goto hsic_set_clk_fail; - } - return 0; -hsic_set_clk_fail: - imx_usbmisc_set_wakeup(data->usbmisc_data, true); clk_disable: imx_disable_unprepare_clks(dev); return ret; @@ -601,16 +598,7 @@ static int __maybe_unused ci_hdrc_imx_suspend(struct device *dev) /* The core's suspend doesn't run */ return 0; - if (device_may_wakeup(dev)) { - ret = imx_usbmisc_set_wakeup(data->usbmisc_data, true); - if (ret) { - dev_err(dev, "usbmisc set_wakeup failed, ret=%d\n", - ret); - return ret; - } - } - - ret = imx_controller_suspend(dev); + ret = imx_controller_suspend(dev, PMSG_SUSPEND); if (ret) return ret; @@ -624,7 +612,7 @@ static int __maybe_unused ci_hdrc_imx_resume(struct device *dev) int ret; pinctrl_pm_select_default_state(dev); - ret = imx_controller_resume(dev); + ret = imx_controller_resume(dev, PMSG_RESUME); if (!ret && data->supports_runtime_pm) { pm_runtime_disable(dev); pm_runtime_set_active(dev); @@ -637,25 +625,18 @@ static int __maybe_unused ci_hdrc_imx_resume(struct device *dev) static int __maybe_unused ci_hdrc_imx_runtime_suspend(struct device *dev) { struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); - int ret; if (data->in_lpm) { WARN_ON(1); return 0; } - ret = imx_usbmisc_set_wakeup(data->usbmisc_data, true); - if (ret) { - dev_err(dev, "usbmisc set_wakeup failed, ret=%d\n", ret); - return ret; - } - - return imx_controller_suspend(dev); + return imx_controller_suspend(dev, PMSG_AUTO_SUSPEND); } static int __maybe_unused ci_hdrc_imx_runtime_resume(struct device *dev) { - return imx_controller_resume(dev); + return imx_controller_resume(dev, PMSG_AUTO_RESUME); } static const struct dev_pm_ops ci_hdrc_imx_pm_ops = { diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h index 7daccb9c5006a35aaa5b40f8711ccafdc419ced2..7135b9a5d9138307d1936bf0079e65767a75e26a 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.h +++ b/drivers/usb/chipidea/ci_hdrc_imx.h @@ -32,9 +32,9 @@ struct imx_usbmisc_data { int imx_usbmisc_init(struct imx_usbmisc_data *data); int imx_usbmisc_init_post(struct imx_usbmisc_data *data); -int imx_usbmisc_set_wakeup(struct imx_usbmisc_data *data, bool enabled); int imx_usbmisc_hsic_set_connect(struct imx_usbmisc_data *data); -int imx_usbmisc_hsic_set_clk(struct imx_usbmisc_data *data, bool on); int imx_usbmisc_charger_detection(struct imx_usbmisc_data *data, bool connect); +int 
imx_usbmisc_suspend(struct imx_usbmisc_data *data, bool wakeup); +int imx_usbmisc_resume(struct imx_usbmisc_data *data, bool wakeup); #endif /* __DRIVER_USB_CHIPIDEA_CI_HDRC_IMX_H */ diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 6330fa9117926a59a2299f28579a163c4aab2e13..484b1cd234317076027c7c9986d8e769e54d552b 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -608,52 +608,59 @@ static int ci_usb_role_switch_set(struct usb_role_switch *sw, enum usb_role role) { struct ci_hdrc *ci = usb_role_switch_get_drvdata(sw); - struct ci_hdrc_cable *cable = NULL; - enum usb_role current_role = ci_role_to_usb_role(ci); - enum ci_role ci_role = usb_role_to_ci_role(role); - unsigned long flags; - - if ((ci_role != CI_ROLE_END && !ci->roles[ci_role]) || - (current_role == role)) - return 0; + struct ci_hdrc_cable *cable; - pm_runtime_get_sync(ci->dev); - /* Stop current role */ - spin_lock_irqsave(&ci->lock, flags); - if (current_role == USB_ROLE_DEVICE) + if (role == USB_ROLE_HOST) { + cable = &ci->platdata->id_extcon; + cable->changed = true; + cable->connected = true; cable = &ci->platdata->vbus_extcon; - else if (current_role == USB_ROLE_HOST) + cable->changed = true; + cable->connected = false; + } else if (role == USB_ROLE_DEVICE) { cable = &ci->platdata->id_extcon; - - if (cable) { cable->changed = true; cable->connected = false; - ci_irq(ci); - spin_unlock_irqrestore(&ci->lock, flags); - if (ci->wq && role != USB_ROLE_NONE) - flush_workqueue(ci->wq); - spin_lock_irqsave(&ci->lock, flags); - } - - cable = NULL; - - /* Start target role */ - if (role == USB_ROLE_DEVICE) cable = &ci->platdata->vbus_extcon; - else if (role == USB_ROLE_HOST) - cable = &ci->platdata->id_extcon; - - if (cable) { cable->changed = true; cable->connected = true; - ci_irq(ci); + } else { + cable = &ci->platdata->id_extcon; + cable->changed = true; + cable->connected = false; + cable = &ci->platdata->vbus_extcon; + cable->changed = true; + cable->connected = false; } - spin_unlock_irqrestore(&ci->lock, flags); - pm_runtime_put_sync(ci->dev); + ci_irq(ci); return 0; } +static enum ci_role ci_get_role(struct ci_hdrc *ci) +{ + enum ci_role role; + + if (ci->roles[CI_ROLE_HOST] && ci->roles[CI_ROLE_GADGET]) { + if (ci->is_otg) { + role = ci_otg_role(ci); + hw_write_otgsc(ci, OTGSC_IDIE, OTGSC_IDIE); + } else { + /* + * If the controller is not OTG capable, but supports + * role switch, the default role is gadget, and the + * user can switch it through debugfs. + */ + role = CI_ROLE_GADGET; + } + } else { + role = ci->roles[CI_ROLE_HOST] ? CI_ROLE_HOST + : CI_ROLE_GADGET; + } + + return role; +} + static struct usb_role_switch_desc ci_role_switch = { .set = ci_usb_role_switch_set, .get = ci_usb_role_switch_get, @@ -1151,25 +1158,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) } } - if (ci->roles[CI_ROLE_HOST] && ci->roles[CI_ROLE_GADGET]) { - if (ci->is_otg) { - ci->role = ci_otg_role(ci); - /* Enable ID change irq */ - hw_write_otgsc(ci, OTGSC_IDIE, OTGSC_IDIE); - } else { - /* - * If the controller is not OTG capable, but support - * role switch, the defalt role is gadget, and the - * user can switch it through debugfs. - */ - ci->role = CI_ROLE_GADGET; - } - } else { - ci->role = ci->roles[CI_ROLE_HOST] - ? CI_ROLE_HOST - : CI_ROLE_GADGET; - } - + ci->role = ci_get_role(ci); if (!ci_otg_is_fsm_mode(ci)) { /* only update vbus status for peripheral */ if (ci->role == CI_ROLE_GADGET) {
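With this rework, ci_usb_role_switch_set() no longer stops and starts roles itself; it just records the cable state implied by the requested role and fires ci_irq() once, letting the existing OTG machinery perform the switch. For context, a hedged sketch of how a caller reaches this callback through the generic role-switch API; example_force_host() is illustrative and not part of the patch:

#include <linux/usb/role.h>

static int example_force_host(struct device *dev)
{
	struct usb_role_switch *sw;
	int ret;

	sw = usb_role_switch_get(dev);	/* resolved via device links/fwnode */
	if (IS_ERR_OR_NULL(sw))
		return sw ? PTR_ERR(sw) : -ENODEV;

	/* Ends up in ci_usb_role_switch_set(), which marks the id/vbus
	 * extcon cables as changed and triggers ci_irq() once. */
	ret = usb_role_switch_set_role(sw, USB_ROLE_HOST);

	usb_role_switch_put(sw);
	return ret;
}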
@@ -1305,11 +1294,13 @@ static void ci_extcon_wakeup_int(struct ci_hdrc *ci) cable_id = &ci->platdata->id_extcon; cable_vbus = &ci->platdata->vbus_extcon; - if (!IS_ERR(cable_id->edev) && ci->is_otg && + if ((!IS_ERR(cable_id->edev) || !IS_ERR(ci->role_switch)) + && ci->is_otg && (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) ci_irq(ci); - if (!IS_ERR(cable_vbus->edev) && ci->is_otg && + if ((!IS_ERR(cable_vbus->edev) || !IS_ERR(ci->role_switch)) + && ci->is_otg && (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) ci_irq(ci); } @@ -1373,6 +1364,10 @@ static int ci_suspend(struct device *dev) return 0; } + /* Extra routine per role before system suspend */ + if (ci->role != CI_ROLE_END && ci_role(ci)->suspend) + ci_role(ci)->suspend(ci); + if (device_may_wakeup(dev)) { if (ci_otg_is_fsm_mode(ci)) ci_otg_fsm_suspend_for_srp(ci); @@ -1386,11 +1381,38 @@ static int ci_suspend(struct device *dev) return 0; } +static void ci_handle_power_lost(struct ci_hdrc *ci) +{ + enum ci_role role; + + disable_irq_nosync(ci->irq); + if (!ci_otg_is_fsm_mode(ci)) { + role = ci_get_role(ci); + + if (ci->role != role) { + ci_handle_id_switch(ci); + } else if (role == CI_ROLE_GADGET) { + if (ci->is_otg && hw_read_otgsc(ci, OTGSC_BSV)) + usb_gadget_vbus_connect(&ci->gadget); + } + } + + enable_irq(ci->irq); +} + static int ci_resume(struct device *dev) { struct ci_hdrc *ci = dev_get_drvdata(dev); + bool power_lost; int ret; + /* + * ASYNCLISTADDR (host mode) and ENDPTLISTADDR (device mode) share + * the same register address, and that register is reset to 0 when + * power is lost. We can therefore use it to check whether the + * controller is resuming from a power loss. + */ + power_lost = !hw_read(ci, OP_ENDPTLISTADDR, ~0); + if (device_may_wakeup(dev)) disable_irq_wake(ci->irq);
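The comment in ci_resume() above captures the whole detection trick: the controller clears ASYNCLISTADDR/ENDPTLISTADDR on power-up, and the driver makes sure the register is non-zero before suspending (the gadget side parks a dummy value there in udc_suspend(), further below), so reading back 0 on resume means the power domain was off. Reduced to a sketch with an illustrative helper name:

/* True only if the controller block lost power while suspended. */
static bool example_resumed_from_power_loss(struct ci_hdrc *ci)
{
	return hw_read(ci, OP_ENDPTLISTADDR, ~0) == 0;
}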
@@ -1398,6 +1420,19 @@ static int ci_resume(struct device *dev) if (ret) return ret; + if (power_lost) { + /* shut down and re-initialize the PHY */ + ci_usb_phy_exit(ci); + ci_usb_phy_init(ci); + } + + /* Extra routine per role after system resume */ + if (ci->role != CI_ROLE_END && ci_role(ci)->resume) + ci_role(ci)->resume(ci, power_lost); + + if (power_lost) + ci_handle_power_lost(ci); + if (ci->supports_runtime_pm) { pm_runtime_disable(dev); pm_runtime_set_active(dev); diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index bc3634a54c6b7f5e5e3aa95bac1e7af0e41e642e..ebe7400243b127baf9c503b0060b80f9c33b6541 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -459,6 +459,18 @@ static void ci_hdrc_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) ci_hdrc_free_dma_aligned_buffer(urb); } +#ifdef CONFIG_PM_SLEEP +static void ci_hdrc_host_suspend(struct ci_hdrc *ci) +{ + ehci_suspend(ci->hcd, device_may_wakeup(ci->dev)); +} + +static void ci_hdrc_host_resume(struct ci_hdrc *ci, bool power_lost) +{ + ehci_resume(ci->hcd, power_lost); +} +#endif + int ci_hdrc_host_init(struct ci_hdrc *ci) { struct ci_role_driver *rdrv; @@ -472,6 +484,10 @@ int ci_hdrc_host_init(struct ci_hdrc *ci) rdrv->start = host_start; rdrv->stop = host_stop; +#ifdef CONFIG_PM_SLEEP + rdrv->suspend = ci_hdrc_host_suspend; + rdrv->resume = ci_hdrc_host_resume; +#endif rdrv->irq = host_irq; rdrv->name = "host"; ci->roles[CI_ROLE_HOST] = rdrv; diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c index 7b53274ef9664d17fb5dd7e7cc42d7c13cecbd64..622c3b68aa1e6a4bf55370d42ab4d3768eb478d8 100644 --- a/drivers/usb/chipidea/otg.c +++ b/drivers/usb/chipidea/otg.c @@ -165,7 +165,7 @@ static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci) return 0; } -static void ci_handle_id_switch(struct ci_hdrc *ci) +void ci_handle_id_switch(struct ci_hdrc *ci) { enum ci_role role = ci_otg_role(ci); diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h index 5e7a6e571dd2f448f77c8b6eabd4555a62915f6b..87629b81e03ea705099fa30c67b518fd90ab488d 100644 --- a/drivers/usb/chipidea/otg.h +++ b/drivers/usb/chipidea/otg.h @@ -14,6 +14,7 @@ int ci_hdrc_otg_init(struct ci_hdrc *ci); void ci_hdrc_otg_destroy(struct ci_hdrc *ci); enum ci_role ci_otg_role(struct ci_hdrc *ci); void ci_handle_vbus_change(struct ci_hdrc *ci); +void ci_handle_id_switch(struct ci_hdrc *ci); static inline void ci_otg_queue_work(struct ci_hdrc *ci) { disable_irq_nosync(ci->irq); diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 8c3e3a635ac2deedba05f83e383d059ecca2c995..54c09245ad055dc8a66c8e6a83d6686b37d2ab54 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -2181,6 +2181,34 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci) ci->platdata->pins_default); } +#ifdef CONFIG_PM_SLEEP +static void udc_suspend(struct ci_hdrc *ci) +{ + /* + * Set OP_ENDPTLISTADDR to a non-zero value so that we can + * check whether the controller resumes from a power loss + * in non-host mode. + */ + if (hw_read(ci, OP_ENDPTLISTADDR, ~0) == 0) + hw_write(ci, OP_ENDPTLISTADDR, ~0, ~0); +} + +static void udc_resume(struct ci_hdrc *ci, bool power_lost) +{ + if (power_lost) { + if (ci->is_otg) + hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE, + OTGSC_BSVIS | OTGSC_BSVIE); + if (ci->vbus_active) + usb_gadget_vbus_disconnect(&ci->gadget); + } + + /* Restore the value 0 if it was set for the power-loss check */ + if (hw_read(ci, OP_ENDPTLISTADDR, ~0) == 0xFFFFFFFF) + hw_write(ci, OP_ENDPTLISTADDR, ~0, 0); +} +#endif + /** * ci_hdrc_gadget_init - initialize device related bits * @ci: the controller
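The host.c and udc.c hunks above are the two concrete users of the new ci_role_driver suspend/resume hooks. A role driver opts in by filling the pointers at registration time, and core.c only invokes them when non-NULL, so roles without hooks keep working unchanged. A sketch with illustrative example_* names:

#ifdef CONFIG_PM_SLEEP
static void example_role_suspend(struct ci_hdrc *ci)
{
	/* quiesce role-specific hardware state before system sleep */
}

static void example_role_resume(struct ci_hdrc *ci, bool power_lost)
{
	/* power_lost reports whether register state survived suspend */
}
#endif

static void example_register_pm_hooks(struct ci_role_driver *rdrv)
{
#ifdef CONFIG_PM_SLEEP
	rdrv->suspend = example_role_suspend;	/* called from ci_suspend() */
	rdrv->resume = example_role_resume;	/* called from ci_resume() */
#endif
}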
@@ -2201,6 +2229,10 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci) rdrv->start = udc_id_switch_for_device; rdrv->stop = udc_id_switch_for_host; +#ifdef CONFIG_PM_SLEEP + rdrv->suspend = udc_suspend; + rdrv->resume = udc_resume; +#endif rdrv->irq = udc_irq; rdrv->name = "gadget"; diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index bac0f5458cab9902c3a4a0ae77d0163decbf0ce2..acdb13316cd0dfbc6533b234bd408f8b6d68488b 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c @@ -150,6 +150,8 @@ struct usbmisc_ops { int (*hsic_set_clk)(struct imx_usbmisc_data *data, bool enabled); /* usb charger detection */ int (*charger_detection)(struct imx_usbmisc_data *data); + /* Called when the system resumes from a USB power loss */ + int (*power_lost_check)(struct imx_usbmisc_data *data); }; struct imx_usbmisc { @@ -937,6 +939,44 @@ static int usbmisc_imx7ulp_init(struct imx_usbmisc_data *data) return 0; } +static int usbmisc_imx7d_power_lost_check(struct imx_usbmisc_data *data) +{ + struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev); + unsigned long flags; + u32 val; + + spin_lock_irqsave(&usbmisc->lock, flags); + val = readl(usbmisc->base); + spin_unlock_irqrestore(&usbmisc->lock, flags); + /* + * Use the power-on reset value to judge whether + * the controller experienced a power loss + */ + if (val == 0x30001000) + return 1; + else + return 0; +} + +static int usbmisc_imx6sx_power_lost_check(struct imx_usbmisc_data *data) +{ + struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev); + unsigned long flags; + u32 val; + + spin_lock_irqsave(&usbmisc->lock, flags); + val = readl(usbmisc->base + data->index * 4); + spin_unlock_irqrestore(&usbmisc->lock, flags); + /* + * Use the power-on reset value to judge whether + * the controller experienced a power loss + */ + if (val == 0x30001000) + return 1; + else + return 0; +} + static const struct usbmisc_ops imx25_usbmisc_ops = { .init = usbmisc_imx25_init, .post = usbmisc_imx25_post, @@ -970,12 +1010,14 @@ static const struct usbmisc_ops imx6sx_usbmisc_ops = { .init = usbmisc_imx6sx_init, .hsic_set_connect = usbmisc_imx6_hsic_set_connect, .hsic_set_clk = usbmisc_imx6_hsic_set_clk, + .power_lost_check = usbmisc_imx6sx_power_lost_check, }; static const struct usbmisc_ops imx7d_usbmisc_ops = { .init = usbmisc_imx7d_init, .set_wakeup = usbmisc_imx7d_set_wakeup, .charger_detection = imx7d_charger_detection, + .power_lost_check = usbmisc_imx7d_power_lost_check, }; static const struct usbmisc_ops imx7ulp_usbmisc_ops = { @@ -983,6 +1025,7 @@ static const struct usbmisc_ops imx7ulp_usbmisc_ops = { .set_wakeup = usbmisc_imx7d_set_wakeup, .hsic_set_connect = usbmisc_imx6_hsic_set_connect, .hsic_set_clk = usbmisc_imx6_hsic_set_clk, + .power_lost_check = usbmisc_imx7d_power_lost_check, }; static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data)
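Both power_lost_check implementations above compare the usbmisc register with 0x30001000, which their comments identify as the power-on reset value on these SoCs: reading that value back implies the block was reset while suspended. The if/else returning 1/0 is behavior-equivalent to a single comparison, as in this sketch (name and parameter are illustrative):

#include <linux/io.h>

static int example_power_lost_check(void __iomem *reg)
{
	/* non-zero means the register is back at its power-on reset value */
	return readl(reg) == 0x30001000;
}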
@@ -1009,30 +1052,29 @@ EXPORT_SYMBOL_GPL(imx_usbmisc_init); int imx_usbmisc_init_post(struct imx_usbmisc_data *data) { struct imx_usbmisc *usbmisc; + int ret = 0; if (!data) return 0; usbmisc = dev_get_drvdata(data->dev); - if (!usbmisc->ops->post) - return 0; - return usbmisc->ops->post(data); -} -EXPORT_SYMBOL_GPL(imx_usbmisc_init_post); - -int imx_usbmisc_set_wakeup(struct imx_usbmisc_data *data, bool enabled) -{ - struct imx_usbmisc *usbmisc; + if (usbmisc->ops->post) + ret = usbmisc->ops->post(data); + if (ret) { + dev_err(data->dev, "post init failed, ret=%d\n", ret); + return ret; + } - if (!data) - return 0; + if (usbmisc->ops->set_wakeup) + ret = usbmisc->ops->set_wakeup(data, false); + if (ret) { + dev_err(data->dev, "set_wakeup failed, ret=%d\n", ret); + return ret; + } - usbmisc = dev_get_drvdata(data->dev); - if (!usbmisc->ops->set_wakeup) - return 0; - return usbmisc->ops->set_wakeup(data, enabled); + return 0; } -EXPORT_SYMBOL_GPL(imx_usbmisc_set_wakeup); +EXPORT_SYMBOL_GPL(imx_usbmisc_init_post); int imx_usbmisc_hsic_set_connect(struct imx_usbmisc_data *data) { @@ -1048,20 +1090,6 @@ int imx_usbmisc_hsic_set_connect(struct imx_usbmisc_data *data) } EXPORT_SYMBOL_GPL(imx_usbmisc_hsic_set_connect); -int imx_usbmisc_hsic_set_clk(struct imx_usbmisc_data *data, bool on) -{ - struct imx_usbmisc *usbmisc; - - if (!data) - return 0; - - usbmisc = dev_get_drvdata(data->dev); - if (!usbmisc->ops->hsic_set_clk || !data->hsic) - return 0; - return usbmisc->ops->hsic_set_clk(data, on); -} -EXPORT_SYMBOL_GPL(imx_usbmisc_hsic_set_clk); - int imx_usbmisc_charger_detection(struct imx_usbmisc_data *data, bool connect) { struct imx_usbmisc *usbmisc; @@ -1094,6 +1122,78 @@ int imx_usbmisc_charger_detection(struct imx_usbmisc_data *data, bool connect) } EXPORT_SYMBOL_GPL(imx_usbmisc_charger_detection); +int imx_usbmisc_suspend(struct imx_usbmisc_data *data, bool wakeup) +{ + struct imx_usbmisc *usbmisc; + int ret = 0; + + if (!data) + return 0; + + usbmisc = dev_get_drvdata(data->dev); + + if (wakeup && usbmisc->ops->set_wakeup) + ret = usbmisc->ops->set_wakeup(data, true); + if (ret) { + dev_err(data->dev, "set_wakeup failed, ret=%d\n", ret); + return ret; + } + + if (usbmisc->ops->hsic_set_clk && data->hsic) + ret = usbmisc->ops->hsic_set_clk(data, false); + if (ret) { + dev_err(data->dev, "hsic_set_clk failed, ret=%d\n", ret); + return ret; + } + + return ret; +} +EXPORT_SYMBOL_GPL(imx_usbmisc_suspend); + +int imx_usbmisc_resume(struct imx_usbmisc_data *data, bool wakeup) +{ + struct imx_usbmisc *usbmisc; + int ret = 0; + + if (!data) + return 0; + + usbmisc = dev_get_drvdata(data->dev); + + if (usbmisc->ops->power_lost_check) + ret = usbmisc->ops->power_lost_check(data); + if (ret > 0) { + /* re-init if resuming from a power loss */ + ret = imx_usbmisc_init(data); + if (ret) { + dev_err(data->dev, "re-init failed, ret=%d\n", ret); + return ret; + } + } + + if (wakeup && usbmisc->ops->set_wakeup) + ret = usbmisc->ops->set_wakeup(data, false); + if (ret) { + dev_err(data->dev, "set_wakeup failed, ret=%d\n", ret); + return ret; + } + + if (usbmisc->ops->hsic_set_clk && data->hsic) + ret = usbmisc->ops->hsic_set_clk(data, true); + if (ret) { + dev_err(data->dev, "hsic_set_clk failed, ret=%d\n", ret); + goto hsic_set_clk_fail; + } + + return 0; + +hsic_set_clk_fail: + if (wakeup && usbmisc->ops->set_wakeup) + usbmisc->ops->set_wakeup(data, true); + return ret; +} +EXPORT_SYMBOL_GPL(imx_usbmisc_resume); + static const struct of_device_id usbmisc_imx_dt_ids[] = { { .compatible = "fsl,imx25-usbmisc", diff --git a/drivers/usb/class/usblp.c
b/drivers/usb/class/usblp.c index f27b4aecff3d497f0122de042a8b5d06881565be..5a2e43331064ebce12bb15bd36766bbc41e3c6a2 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -1090,7 +1090,7 @@ static const struct file_operations usblp_fops = { .llseek = noop_llseek, }; -static char *usblp_devnode(struct device *dev, umode_t *mode) +static char *usblp_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); } diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index d7c8461976ce09133aeeaed474b826ce149738c2..60e8174686a17c731a0a476b90ac8de4c6a15497 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -207,7 +207,7 @@ static int ulpi_read_id(struct ulpi *ulpi) /* Test the interface */ ret = ulpi_write(ulpi, ULPI_SCRATCH, 0xaa); if (ret < 0) - goto err; + return ret; ret = ulpi_read(ulpi, ULPI_SCRATCH); if (ret < 0) diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 48bc8a4814ac4e985e118242eecf73202bc7148f..725b8dbcfe5f069e19e8de28e0ffcd25c3a41ca7 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -61,7 +61,7 @@ static void usb_parse_ssp_isoc_endpoint_companion(struct device *ddev, desc = (struct usb_ssp_isoc_ep_comp_descriptor *) buffer; if (desc->bDescriptorType != USB_DT_SSP_ISOC_ENDPOINT_COMP || size < USB_DT_SSP_ISOC_EP_COMP_SIZE) { - dev_warn(ddev, "Invalid SuperSpeedPlus isoc endpoint companion" + dev_notice(ddev, "Invalid SuperSpeedPlus isoc endpoint companion" "for config %d interface %d altsetting %d ep %d.\n", cfgno, inum, asnum, ep->desc.bEndpointAddress); return; @@ -83,7 +83,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP || size < USB_DT_SS_EP_COMP_SIZE) { - dev_warn(ddev, "No SuperSpeed endpoint companion for config %d " + dev_notice(ddev, "No SuperSpeed endpoint companion for config %d " " interface %d altsetting %d ep %d: " "using minimum values\n", cfgno, inum, asnum, ep->desc.bEndpointAddress); @@ -109,13 +109,13 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, /* Check the various values */ if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) { - dev_warn(ddev, "Control endpoint with bMaxBurst = %d in " + dev_notice(ddev, "Control endpoint with bMaxBurst = %d in " "config %d interface %d altsetting %d ep %d: " "setting to zero\n", desc->bMaxBurst, cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bMaxBurst = 0; } else if (desc->bMaxBurst > 15) { - dev_warn(ddev, "Endpoint with bMaxBurst = %d in " + dev_notice(ddev, "Endpoint with bMaxBurst = %d in " "config %d interface %d altsetting %d ep %d: " "setting to 15\n", desc->bMaxBurst, cfgno, inum, asnum, ep->desc.bEndpointAddress); @@ -125,7 +125,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, if ((usb_endpoint_xfer_control(&ep->desc) || usb_endpoint_xfer_int(&ep->desc)) && desc->bmAttributes != 0) { - dev_warn(ddev, "%s endpoint with bmAttributes = %d in " + dev_notice(ddev, "%s endpoint with bmAttributes = %d in " "config %d interface %d altsetting %d ep %d: " "setting to zero\n", usb_endpoint_xfer_control(&ep->desc) ? 
"Control" : "Bulk", @@ -134,7 +134,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, ep->ss_ep_comp.bmAttributes = 0; } else if (usb_endpoint_xfer_bulk(&ep->desc) && desc->bmAttributes > 16) { - dev_warn(ddev, "Bulk endpoint with more than 65536 streams in " + dev_notice(ddev, "Bulk endpoint with more than 65536 streams in " "config %d interface %d altsetting %d ep %d: " "setting to max\n", cfgno, inum, asnum, ep->desc.bEndpointAddress); @@ -142,7 +142,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, } else if (usb_endpoint_xfer_isoc(&ep->desc) && !USB_SS_SSP_ISOC_COMP(desc->bmAttributes) && USB_SS_MULT(desc->bmAttributes) > 3) { - dev_warn(ddev, "Isoc endpoint has Mult of %d in " + dev_notice(ddev, "Isoc endpoint has Mult of %d in " "config %d interface %d altsetting %d ep %d: " "setting to 3\n", USB_SS_MULT(desc->bmAttributes), @@ -160,7 +160,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, else max_tx = 999999; if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) { - dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in " + dev_notice(ddev, "%s endpoint with wBytesPerInterval of %d in " "config %d interface %d altsetting %d ep %d: " "setting to %d\n", usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int", @@ -273,7 +273,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, else if (d->bLength >= USB_DT_ENDPOINT_SIZE) n = USB_DT_ENDPOINT_SIZE; else { - dev_warn(ddev, "config %d interface %d altsetting %d has an " + dev_notice(ddev, "config %d interface %d altsetting %d has an " "invalid endpoint descriptor of length %d, skipping\n", cfgno, inum, asnum, d->bLength); goto skip_to_next_endpoint_or_interface_descriptor; @@ -281,7 +281,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, i = d->bEndpointAddress & ~USB_ENDPOINT_DIR_MASK; if (i >= 16 || i == 0) { - dev_warn(ddev, "config %d interface %d altsetting %d has an " + dev_notice(ddev, "config %d interface %d altsetting %d has an " "invalid endpoint with address 0x%X, skipping\n", cfgno, inum, asnum, d->bEndpointAddress); goto skip_to_next_endpoint_or_interface_descriptor; @@ -293,7 +293,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, /* Check for duplicate endpoint addresses */ if (config_endpoint_is_duplicate(config, inum, asnum, d)) { - dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", + dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", cfgno, inum, asnum, d->bEndpointAddress); goto skip_to_next_endpoint_or_interface_descriptor; } @@ -301,7 +301,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, /* Ignore some endpoints */ if (udev->quirks & USB_QUIRK_ENDPOINT_IGNORE) { if (usb_endpoint_is_ignored(udev, ifp, d)) { - dev_warn(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n", + dev_notice(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n", cfgno, inum, asnum, d->bEndpointAddress); goto skip_to_next_endpoint_or_interface_descriptor; @@ -378,7 +378,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, } } if (d->bInterval < i || d->bInterval > j) { - dev_warn(ddev, "config %d interface %d altsetting %d " + dev_notice(ddev, "config %d interface %d altsetting %d " "endpoint 0x%X has an invalid bInterval %d, " "changing to %d\n", cfgno, inum, asnum, @@ 
-391,7 +391,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, * them usable, we will try treating them as Interrupt endpoints. */ if (udev->speed == USB_SPEED_LOW && usb_endpoint_xfer_bulk(d)) { - dev_warn(ddev, "config %d interface %d altsetting %d " + dev_notice(ddev, "config %d interface %d altsetting %d " "endpoint 0x%X is Bulk; changing to Interrupt\n", cfgno, inum, asnum, d->bEndpointAddress); endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT; @@ -408,7 +408,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, */ maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize); if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) { - dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n", + dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n", cfgno, inum, asnum, d->bEndpointAddress); } @@ -439,7 +439,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)]; if (maxp > j) { - dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n", + dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n", cfgno, inum, asnum, d->bEndpointAddress, maxp, j); maxp = j; endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp); @@ -452,7 +452,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, */ if (udev->speed == USB_SPEED_HIGH && usb_endpoint_xfer_bulk(d)) { if (maxp != 512) - dev_warn(ddev, "config %d interface %d altsetting %d " + dev_notice(ddev, "config %d interface %d altsetting %d " "bulk endpoint 0x%X has invalid maxpacket %d\n", cfgno, inum, asnum, d->bEndpointAddress, maxp); @@ -533,7 +533,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno, i < intfc->num_altsetting; (++i, ++alt)) { if (alt->desc.bAlternateSetting == asnum) { - dev_warn(ddev, "Duplicate descriptor for config %d " + dev_notice(ddev, "Duplicate descriptor for config %d " "interface %d altsetting %d, skipping\n", cfgno, inum, asnum); goto skip_to_next_interface_descriptor; @@ -559,7 +559,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno, num_ep = num_ep_orig = alt->desc.bNumEndpoints; alt->desc.bNumEndpoints = 0; /* Use as a counter */ if (num_ep > USB_MAXENDPOINTS) { - dev_warn(ddev, "too many endpoints for config %d interface %d " + dev_notice(ddev, "too many endpoints for config %d interface %d " "altsetting %d: %d, using maximum allowed: %d\n", cfgno, inum, asnum, num_ep, USB_MAXENDPOINTS); num_ep = USB_MAXENDPOINTS; @@ -590,7 +590,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno, } if (n != num_ep_orig) - dev_warn(ddev, "config %d interface %d altsetting %d has %d " + dev_notice(ddev, "config %d interface %d altsetting %d has %d " "endpoint descriptor%s, different from the interface " "descriptor's value: %d\n", cfgno, inum, asnum, n, plural(n), num_ep_orig); @@ -625,7 +625,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, if (config->desc.bDescriptorType != USB_DT_CONFIG || config->desc.bLength < USB_DT_CONFIG_SIZE || config->desc.bLength > size) { - dev_err(ddev, "invalid descriptor for config index %d: " + dev_notice(ddev, "invalid descriptor for config index %d: " "type = 0x%X, length = %d\n", cfgidx, config->desc.bDescriptorType, config->desc.bLength); return -EINVAL; @@ -636,7 +636,7 @@ static int usb_parse_configuration(struct usb_device 
*dev, int cfgidx, size -= config->desc.bLength; if (nintf > USB_MAXINTERFACES) { - dev_warn(ddev, "config %d has too many interfaces: %d, " + dev_notice(ddev, "config %d has too many interfaces: %d, " "using maximum allowed: %d\n", cfgno, nintf, USB_MAXINTERFACES); nintf = USB_MAXINTERFACES; @@ -650,7 +650,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, (buffer2 += header->bLength, size2 -= header->bLength)) { if (size2 < sizeof(struct usb_descriptor_header)) { - dev_warn(ddev, "config %d descriptor has %d excess " + dev_notice(ddev, "config %d descriptor has %d excess " "byte%s, ignoring\n", cfgno, size2, plural(size2)); break; @@ -658,7 +658,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, header = (struct usb_descriptor_header *) buffer2; if ((header->bLength > size2) || (header->bLength < 2)) { - dev_warn(ddev, "config %d has an invalid descriptor " + dev_notice(ddev, "config %d has an invalid descriptor " "of length %d, skipping remainder of the config\n", cfgno, header->bLength); break; @@ -670,7 +670,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, d = (struct usb_interface_descriptor *) header; if (d->bLength < USB_DT_INTERFACE_SIZE) { - dev_warn(ddev, "config %d has an invalid " + dev_notice(ddev, "config %d has an invalid " "interface descriptor of length %d, " "skipping\n", cfgno, d->bLength); continue; @@ -680,7 +680,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, if ((dev->quirks & USB_QUIRK_HONOR_BNUMINTERFACES) && n >= nintf_orig) { - dev_warn(ddev, "config %d has more interface " + dev_notice(ddev, "config %d has more interface " "descriptors, than it declares in " "bNumInterfaces, ignoring interface " "number: %d\n", cfgno, inum); @@ -688,7 +688,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, } if (inum >= nintf_orig) - dev_warn(ddev, "config %d has an invalid " + dev_notice(ddev, "config %d has an invalid " "interface number: %d but max is %d\n", cfgno, inum, nintf_orig - 1); @@ -713,14 +713,14 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, d = (struct usb_interface_assoc_descriptor *)header; if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) { - dev_warn(ddev, + dev_notice(ddev, "config %d has an invalid interface association descriptor of length %d, skipping\n", cfgno, d->bLength); continue; } if (iad_num == USB_MAXIADS) { - dev_warn(ddev, "found more Interface " + dev_notice(ddev, "found more Interface " "Association Descriptors " "than allocated for in " "configuration %d\n", cfgno); @@ -731,7 +731,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, } else if (header->bDescriptorType == USB_DT_DEVICE || header->bDescriptorType == USB_DT_CONFIG) - dev_warn(ddev, "config %d contains an unexpected " + dev_notice(ddev, "config %d contains an unexpected " "descriptor of type 0x%X, skipping\n", cfgno, header->bDescriptorType); @@ -740,11 +740,11 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, config->desc.wTotalLength = cpu_to_le16(buffer2 - buffer0); if (n != nintf) - dev_warn(ddev, "config %d has %d interface%s, different from " + dev_notice(ddev, "config %d has %d interface%s, different from " "the descriptor's value: %d\n", cfgno, n, plural(n), nintf_orig); else if (n == 0) - dev_warn(ddev, "config %d has no interfaces?\n", cfgno); + dev_notice(ddev, "config %d has no interfaces?\n", cfgno); config->desc.bNumInterfaces = nintf = n; /* Check for missing 
interface numbers */ @@ -754,7 +754,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, break; } if (j >= nintf) - dev_warn(ddev, "config %d has no interface number " + dev_notice(ddev, "config %d has no interface number " "%d\n", cfgno, i); } @@ -762,7 +762,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, for (i = 0; i < nintf; ++i) { j = nalts[i]; if (j > USB_MAXALTSETTING) { - dev_warn(ddev, "too many alternate settings for " + dev_notice(ddev, "too many alternate settings for " "config %d interface %d: %d, " "using maximum allowed: %d\n", cfgno, inums[i], j, USB_MAXALTSETTING); @@ -811,7 +811,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, break; } if (n >= intfc->num_altsetting) - dev_warn(ddev, "config %d interface %d has no " + dev_notice(ddev, "config %d interface %d has no " "altsetting %d\n", cfgno, inums[i], j); } } @@ -868,7 +868,7 @@ int usb_get_configuration(struct usb_device *dev) int result; if (ncfg > USB_MAXCONFIG) { - dev_warn(ddev, "too many configurations: %d, " + dev_notice(ddev, "too many configurations: %d, " "using maximum allowed: %d\n", ncfg, USB_MAXCONFIG); dev->descriptor.bNumConfigurations = ncfg = USB_MAXCONFIG; } @@ -902,7 +902,7 @@ int usb_get_configuration(struct usb_device *dev) "descriptor/%s: %d\n", cfgno, "start", result); if (result != -EPIPE) goto err; - dev_err(ddev, "chopping to %d config(s)\n", cfgno); + dev_notice(ddev, "chopping to %d config(s)\n", cfgno); dev->descriptor.bNumConfigurations = cfgno; break; } else if (result < 4) { @@ -934,7 +934,7 @@ int usb_get_configuration(struct usb_device *dev) goto err; } if (result < length) { - dev_warn(ddev, "config index %d descriptor too short " + dev_notice(ddev, "config index %d descriptor too short " "(expected %i, got %i)\n", cfgno, length, result); length = result; } @@ -993,7 +993,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) /* Get BOS descriptor */ ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE); if (ret < USB_DT_BOS_SIZE || bos->bLength < USB_DT_BOS_SIZE) { - dev_err(ddev, "unable to get BOS descriptor or descriptor too short\n"); + dev_notice(ddev, "unable to get BOS descriptor or descriptor too short\n"); if (ret >= 0) ret = -ENOMSG; kfree(bos); @@ -1021,7 +1021,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) ret = usb_get_descriptor(dev, USB_DT_BOS, 0, buffer, total_len); if (ret < total_len) { - dev_err(ddev, "unable to get BOS descriptor set\n"); + dev_notice(ddev, "unable to get BOS descriptor set\n"); if (ret >= 0) ret = -ENOMSG; goto err; @@ -1046,7 +1046,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) } if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { - dev_warn(ddev, "descriptor type invalid, skip\n"); + dev_notice(ddev, "descriptor type invalid, skip\n"); continue; } diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index 558890ada0e5bdcf8c573af0cc5be59c52c99ef1..da7d88e069e6c098ec1823013cb8b15d5ff53787 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -62,7 +62,7 @@ static struct usb_class { struct class *class; } *usb_class; -static char *usb_devnode(struct device *dev, umode_t *mode) +static char *usb_devnode(const struct device *dev, umode_t *mode) { struct usb_class_driver *drv; diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 9b77f49b3560c1f61927f9b53134c99649949d1a..ab2f3737764e4331f3e1e9d037cb75d449c861b7 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -558,6 
+558,17 @@ static int hcd_pci_suspend_noirq(struct device *dev) return retval; } +static int hcd_pci_poweroff_late(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct usb_hcd *hcd = pci_get_drvdata(pci_dev); + + if (hcd->driver->pci_poweroff_late && !HCD_DEAD(hcd)) + return hcd->driver->pci_poweroff_late(hcd, device_may_wakeup(dev)); + + return 0; +} + static int hcd_pci_resume_noirq(struct device *dev) { powermac_set_asic(to_pci_dev(dev), 1); @@ -578,6 +589,7 @@ static int hcd_pci_restore(struct device *dev) #define hcd_pci_suspend NULL #define hcd_pci_suspend_noirq NULL +#define hcd_pci_poweroff_late NULL #define hcd_pci_resume_noirq NULL #define hcd_pci_resume NULL #define hcd_pci_restore NULL @@ -615,6 +627,7 @@ const struct dev_pm_ops usb_hcd_pci_pm_ops = { .thaw_noirq = NULL, .thaw = hcd_pci_resume, .poweroff = hcd_pci_suspend, + .poweroff_late = hcd_pci_poweroff_late, .poweroff_noirq = hcd_pci_suspend_noirq, .restore_noirq = hcd_pci_resume_noirq, .restore = hcd_pci_restore, diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index faeaace0d197dbb88739cebf8bd06617a5712171..8300baedafd20e2d188ae0f7e537c14118b677d7 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -3133,8 +3133,12 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, GFP_KERNEL, DMA_ATTR_WRITE_COMBINE); - if (IS_ERR(local_mem)) + if (IS_ERR_OR_NULL(local_mem)) { + if (!local_mem) + return -ENOMEM; + return PTR_ERR(local_mem); + } /* * Here we pass a dma_addr_t but the arg type is a phys_addr_t. diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index bbab424b0d5594a84144cfa4352307983fa7b735..77e73fc8d6736b46a4665e63ec593b4fa96663ba 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -3081,6 +3081,48 @@ done: return status; } +/* + * hub_port_stop_enumerate - stop USB enumeration or ignore port events + * @hub: target hub + * @port1: port number + * @retries: number of connection attempts made so far + * + * Return: + * true: ignore port actions/events or give up connection attempts. + * false: keep original behavior. + * + * Based on the retry count, this function checks whether a port marked + * with the early_stop attribute should stop enumeration or ignore events. + * + * Note: + * This function does not change anything if early_stop is not set; when + * early_stop is set, it prevents all further connection attempts once the + * port has already failed twice. + */ +static bool hub_port_stop_enumerate(struct usb_hub *hub, int port1, int retries) +{ + struct usb_port *port_dev = hub->ports[port1 - 1]; + + if (port_dev->early_stop) { + if (port_dev->ignore_event) + return true; + + /* + * We want unsuccessful attempts to fail quickly. + * Since some devices may need one failure during + * port initialization, we allow two tries but no + * more. + */ + if (retries < 2) + return false; + + port_dev->ignore_event = 1; + } else + port_dev->ignore_event = 0; + + return port_dev->ignore_event; +} + /* Check if a port is power on */ int usb_port_is_power_on(struct usb_hub *hub, unsigned int portstatus) {
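hub_port_stop_enumerate() above consumes a per-port early_stop flag; the port.c hunk further below exposes it through sysfs so user space can mark a known-problematic port. A user-space sketch, assuming a made-up topology; the sysfs path and the "2-1-port1" device name are illustrative only:

#include <stdio.h>

int main(void)
{
	/* Port devices sit under their hub's interface directory; the
	 * store callback parses the value with kstrtobool(), so "1",
	 * "y" or "on" all enable early_stop. */
	FILE *f = fopen("/sys/bus/usb/devices/2-1:1.0/2-1-port1/early_stop", "w");

	if (!f)
		return 1;
	fputs("1\n", f);
	return fclose(f) != 0;
}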
@@ -4796,6 +4838,11 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, do_new_scheme = use_new_scheme(udev, retry_counter, port_dev); for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) { + if (hub_port_stop_enumerate(hub, port1, retries)) { + retval = -ENODEV; + break; + } + if (do_new_scheme) { struct usb_device_descriptor *buf; int r = 0; @@ -5246,6 +5293,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, status = 0; for (i = 0; i < PORT_INIT_TRIES; i++) { + if (hub_port_stop_enumerate(hub, port1, i)) { + status = -ENODEV; + break; + } + usb_lock_port(port_dev); mutex_lock(hcd->address0_mutex); retry_locked = true; @@ -5614,6 +5666,10 @@ static void port_event(struct usb_hub *hub, int port1) if (!pm_runtime_active(&port_dev->dev)) return; + /* skip port actions if ignore_event and early_stop are true */ + if (port_dev->ignore_event && port_dev->early_stop) + return; + if (hub_handle_remote_wakeup(hub, port1, portstatus, portchange)) connect_change = 1; @@ -5927,6 +5983,10 @@ static int usb_reset_and_verify_device(struct usb_device *udev) mutex_lock(hcd->address0_mutex); for (i = 0; i < PORT_INIT_TRIES; ++i) { + if (hub_port_stop_enumerate(parent_hub, port1, i)) { + ret = -ENODEV; + break; + } /* ep0 maxpacket size may change; let the HCD know about it. * Other endpoints will be handled by re-enumeration. */ diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h index b2925856b4cb45ea8867171fb158e15d9967a63d..e23833562e4f2e2f7f28bbe0b22b4d26662d3d4e 100644 --- a/drivers/usb/core/hub.h +++ b/drivers/usb/core/hub.h @@ -90,6 +90,8 @@ struct usb_hub { * @is_superspeed cache super-speed status * @usb3_lpm_u1_permit: whether USB3 U1 LPM is permitted. * @usb3_lpm_u2_permit: whether USB3 U2 LPM is permitted. + * @early_stop: whether port initialization will be stopped earlier. + * @ignore_event: whether events of the port are ignored. */ struct usb_port { struct usb_device *child; @@ -103,6 +105,8 @@ struct usb_port { u32 over_current_count; u8 portnum; u32 quirks; + unsigned int early_stop:1; + unsigned int ignore_event:1; unsigned int is_superspeed:1; unsigned int usb3_lpm_u1_permit:1; unsigned int usb3_lpm_u2_permit:1; diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index 38c1a4f4fdeae5025633727774a15eace5b4492f..06a8f1f84f6f8741e014501238847e477a3624ec 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c @@ -7,6 +7,7 @@ * Author: Lan Tianyu <tianyu.lan@intel.com> */ +#include <linux/kstrtobool.h> #include <linux/slab.h> #include <linux/pm_qos.h> #include <linux/component.h> @@ -17,6 +18,32 @@ static int usb_port_block_power_off; static const struct attribute_group *port_dev_group[]; +static ssize_t early_stop_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_port *port_dev = to_usb_port(dev); + + return sysfs_emit(buf, "%s\n", port_dev->early_stop ?
"yes" : "no"); +} + +static ssize_t early_stop_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct usb_port *port_dev = to_usb_port(dev); + bool value; + + if (kstrtobool(buf, &value)) + return -EINVAL; + + if (value) + port_dev->early_stop = 1; + else + port_dev->early_stop = 0; + + return count; +} +static DEVICE_ATTR_RW(early_stop); + static ssize_t disable_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -63,7 +90,7 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr, bool disabled; int rc; - rc = strtobool(buf, &disabled); + rc = kstrtobool(buf, &disabled); if (rc) return rc; @@ -236,6 +263,7 @@ static struct attribute *port_dev_attrs[] = { &dev_attr_quirks.attr, &dev_attr_over_current_count.attr, &dev_attr_disable.attr, + &dev_attr_early_stop.attr, NULL, }; diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index 631574718d8ac80c0f6c22118b47c34277145411..8217032dfb85fa27200c1812ae068f317995494e 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -13,6 +13,7 @@ #include +#include #include #include #include @@ -505,7 +506,7 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev, if (ret < 0) return -EINTR; - ret = strtobool(buf, &value); + ret = kstrtobool(buf, &value); if (!ret) { udev->usb2_hw_lpm_allowed = value; @@ -975,7 +976,7 @@ static ssize_t interface_authorized_default_store(struct device *dev, int rc = count; bool val; - if (strtobool(buf, &val) != 0) + if (kstrtobool(buf, &val) != 0) return -EINVAL; if (val) @@ -1176,7 +1177,7 @@ static ssize_t interface_authorized_store(struct device *dev, struct usb_interface *intf = to_usb_interface(dev); bool val; - if (strtobool(buf, &val) != 0) + if (kstrtobool(buf, &val) != 0) return -EINVAL; if (val) diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 8b15742d9e8aa03301c41a7192207b33dd1a7293..62fa6378d2d73c63365e22ef0ea916517372a589 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -4549,7 +4549,8 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget, hsotg->gadget.dev.of_node = hsotg->dev->of_node; hsotg->gadget.speed = USB_SPEED_UNKNOWN; - if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) { + if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL || + (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg))) { ret = dwc2_lowlevel_hw_enable(hsotg); if (ret) goto err; @@ -4611,7 +4612,8 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget) if (!IS_ERR_OR_NULL(hsotg->uphy)) otg_set_peripheral(hsotg->uphy->otg, NULL); - if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) + if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL || + (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg))) dwc2_lowlevel_hw_disable(hsotg); return 0; diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 8eab5f38b1101debbc8a5872ffac4b257506767e..9ed9fd9569404d692f5681139c2d70cf8efb0ebc 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -113,6 +113,10 @@ static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg) p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT; p->power_down = DWC2_POWER_DOWN_PARAM_NONE; + p->lpm = false; + p->lpm_clock_gating = false; + p->besl = false; + p->hird_threshold_en = false; } static void dwc2_set_ltq_params(struct dwc2_hsotg *hsotg) diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index ec4ace0107f5f5723fc46435c688b5fe0a3c7f31..23ef759968231a80bab5e78cdf3217d7771298fc 
100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -321,7 +321,7 @@ static int dwc2_driver_remove(struct platform_device *dev) reset_control_assert(hsotg->reset); reset_control_assert(hsotg->reset_ecc); - return ret; + return 0; } /** @@ -576,7 +576,8 @@ static int dwc2_driver_probe(struct platform_device *dev) dwc2_debugfs_init(hsotg); /* Gadget code manages lowlevel hw on its own */ - if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) + if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL || + (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg))) dwc2_lowlevel_hw_disable(hsotg); #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig index 03ededa86da1f8de7f76461a7fd320ea063edb76..b2f72b0e75c6bbfcd763778425ae841576e4373d 100644 --- a/drivers/usb/dwc3/Kconfig +++ b/drivers/usb/dwc3/Kconfig @@ -152,11 +152,11 @@ config USB_DWC3_IMX8MP config USB_DWC3_XILINX tristate "Xilinx Platforms" - depends on (ARCH_ZYNQMP || ARCH_VERSAL) && OF + depends on (ARCH_ZYNQMP || COMPILE_TEST) && OF default USB_DWC3 help Support Xilinx SoCs with DesignWare Core USB3 IP. - This driver handles both ZynqMP and Versal SoC operations. + This driver handles ZynqMP SoC operations. Say 'Y' or 'M' if you have one such device. config USB_DWC3_AM62 diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 1f348bc867c22afa68da07e1a81419f97a7cdb57..476b63618511665dc297e8b1ee6272356c9260ad 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -122,21 +122,25 @@ static void __dwc3_set_mode(struct work_struct *work) unsigned long flags; int ret; u32 reg; + u32 desired_dr_role; mutex_lock(&dwc->mutex); + spin_lock_irqsave(&dwc->lock, flags); + desired_dr_role = dwc->desired_dr_role; + spin_unlock_irqrestore(&dwc->lock, flags); pm_runtime_get_sync(dwc->dev); if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG) dwc3_otg_update(dwc, 0); - if (!dwc->desired_dr_role) + if (!desired_dr_role) goto out; - if (dwc->desired_dr_role == dwc->current_dr_role) + if (desired_dr_role == dwc->current_dr_role) goto out; - if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) + if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) goto out; switch (dwc->current_dr_role) { @@ -164,7 +168,7 @@ static void __dwc3_set_mode(struct work_struct *work) */ if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC31, 190A)) && - dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) { + desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) { reg = dwc3_readl(dwc->regs, DWC3_GCTL); reg |= DWC3_GCTL_CORESOFTRESET; dwc3_writel(dwc->regs, DWC3_GCTL, reg); @@ -184,11 +188,11 @@ static void __dwc3_set_mode(struct work_struct *work) spin_lock_irqsave(&dwc->lock, flags); - dwc3_set_prtcap(dwc, dwc->desired_dr_role); + dwc3_set_prtcap(dwc, desired_dr_role); spin_unlock_irqrestore(&dwc->lock, flags); - switch (dwc->desired_dr_role) { + switch (desired_dr_role) { case DWC3_GCTL_PRTCAP_HOST: ret = dwc3_host_init(dwc); if (ret) { @@ -1096,8 +1100,13 @@ static int dwc3_core_init(struct dwc3 *dwc) if (!dwc->ulpi_ready) { ret = dwc3_core_ulpi_init(dwc); - if (ret) + if (ret) { + if (ret == -ETIMEDOUT) { + dwc3_core_soft_reset(dwc); + ret = -EPROBE_DEFER; + } goto err0; + } dwc->ulpi_ready = true; } diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index fb14511b1e10f98ce8b39841fb49a1f33a98646c..89c9ab2b19f85ea286012a1067f6fbb7a5febf36 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -45,7 +45,7 @@ #define 
PCI_DEVICE_ID_INTEL_ADLN 0x465e #define PCI_DEVICE_ID_INTEL_ADLN_PCH 0x54ee #define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1 -#define PCI_DEVICE_ID_INTEL_RPL 0x460e +#define PCI_DEVICE_ID_INTEL_RPL 0xa70e #define PCI_DEVICE_ID_INTEL_RPLS 0x7a61 #define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1 #define PCI_DEVICE_ID_INTEL_MTL 0x7e7e diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 7c40f3ffc054415aae6b2d03ea1fac0440a66bb6..b0a0351d2d8b5c7bf70320d475f93602e4442f49 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -261,7 +261,8 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom) if (IS_ERR(qcom->icc_path_apps)) { dev_err(dev, "failed to get apps-usb path: %ld\n", PTR_ERR(qcom->icc_path_apps)); - return PTR_ERR(qcom->icc_path_apps); + ret = PTR_ERR(qcom->icc_path_apps); + goto put_path_ddr; } max_speed = usb_get_maximum_speed(&qcom->dwc3->dev); @@ -274,16 +275,22 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom) } if (ret) { dev_err(dev, "failed to set bandwidth for usb-ddr path: %d\n", ret); - return ret; + goto put_path_apps; } ret = icc_set_bw(qcom->icc_path_apps, APPS_USB_AVG_BW, APPS_USB_PEAK_BW); if (ret) { dev_err(dev, "failed to set bandwidth for apps-usb path: %d\n", ret); - return ret; + goto put_path_apps; } return 0; + +put_path_apps: + icc_put(qcom->icc_path_apps); +put_path_ddr: + icc_put(qcom->icc_path_ddr); + return ret; } /** diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 6d524fa7644388ae7ef124f51b1f7a4e26963861..789976567f9f008c035e182841dbdf9d66757594 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1464,8 +1464,18 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep, */ if (num_trbs_left == 1 || (needs_extra_trb && num_trbs_left <= 2 && - sg_dma_len(sg_next(s)) >= length)) - must_interrupt = true; + sg_dma_len(sg_next(s)) >= length)) { + struct dwc3_request *r; + + /* Check if previous requests already set IOC */ + list_for_each_entry(r, &dep->started_list, list) { + if (r != req && !r->request.no_interrupt) + break; + + if (r == req) + must_interrupt = true; + } + } dwc3_prepare_one_trb(dep, req, trb_length, 1, i, false, must_interrupt); diff --git a/drivers/usb/fotg210/Kconfig b/drivers/usb/fotg210/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..2b05968735baa769687ef4507951481f7a01097d --- /dev/null +++ b/drivers/usb/fotg210/Kconfig @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: GPL-2.0 + +config USB_FOTG210 + tristate "Faraday FOTG210 USB2 Dual Role controller" + depends on USB || USB_GADGET + depends on HAS_DMA && HAS_IOMEM + depends on ARCH_GEMINI || COMPILE_TEST + default ARCH_GEMINI + select MFD_SYSCON + help + Faraday FOTG210 is a dual-mode USB controller that can act + in both host controller and peripheral controller mode. + +if USB_FOTG210 + +config USB_FOTG210_HCD + bool "Faraday FOTG210 USB Host Controller support" + depends on USB=y || USB=USB_FOTG210 + help + Faraday FOTG210 is an OTG controller which can be configured as + a USB2.0 host. It is designed to meet the USB2.0 EHCI specification + with minor modification. + + To compile this driver as a module, choose M here: the + module will be called fotg210-hcd. + +config USB_FOTG210_UDC + depends on USB_GADGET=y || USB_GADGET=USB_FOTG210 + bool "Faraday FOTG210 USB Peripheral Controller support" + help + Faraday USB2.0 OTG controller which can be configured as + high speed or full speed USB device. This driver supports + Bulk Transfer so far.
+ + Say "y" to link the driver statically, or "m" to build a + dynamically linked module called "fotg210-udc". + +endif diff --git a/drivers/usb/fotg210/Makefile b/drivers/usb/fotg210/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..5aecff21f24b8032de6f84ac3294817860c99d65 --- /dev/null +++ b/drivers/usb/fotg210/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +# This setup links the different object files into one single +# module so we don't have to EXPORT() a lot of internal symbols +# or create unnecessary submodules. +fotg210-objs-y += fotg210-core.o +fotg210-objs-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o +fotg210-objs-$(CONFIG_USB_FOTG210_UDC) += fotg210-udc.o +fotg210-objs := $(fotg210-objs-y) +obj-$(CONFIG_USB_FOTG210) += fotg210.o diff --git a/drivers/usb/fotg210/fotg210-core.c b/drivers/usb/fotg210/fotg210-core.c new file mode 100644 index 0000000000000000000000000000000000000000..8a54edf921ac24d5a61aeb3f2c51be5f5d4ad878 --- /dev/null +++ b/drivers/usb/fotg210/fotg210-core.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Central probing code for the FOTG210 dual role driver + * We register one driver for the hardware and then we decide + * whether to proceed with probing the host or the peripheral + * driver. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fotg210.h" + +/* + * Gemini-specific initialization function, only executed on the + * Gemini SoC using the global misc control register. + * + * The gemini USB blocks are connected to either Mini-A (host mode) or + * Mini-B (peripheral mode) plugs. There is no role switch support on the + * Gemini SoC, just either-or. + */ +#define GEMINI_GLOBAL_MISC_CTRL 0x30 +#define GEMINI_MISC_USB0_WAKEUP BIT(14) +#define GEMINI_MISC_USB1_WAKEUP BIT(15) +#define GEMINI_MISC_USB0_VBUS_ON BIT(22) +#define GEMINI_MISC_USB1_VBUS_ON BIT(23) +#define GEMINI_MISC_USB0_MINI_B BIT(29) +#define GEMINI_MISC_USB1_MINI_B BIT(30) + +static int fotg210_gemini_init(struct device *dev, struct resource *res, + enum usb_dr_mode mode) +{ + struct device_node *np = dev->of_node; + struct regmap *map; + bool wakeup; + u32 mask, val; + int ret; + + map = syscon_regmap_lookup_by_phandle(np, "syscon"); + if (IS_ERR(map)) { + dev_err(dev, "no syscon\n"); + return PTR_ERR(map); + } + wakeup = of_property_read_bool(np, "wakeup-source"); + + /* + * Figure out if this is USB0 or USB1 by simply checking the + * physical base address. + */ + mask = 0; + if (res->start == 0x69000000) { + mask = GEMINI_MISC_USB1_VBUS_ON | GEMINI_MISC_USB1_MINI_B | + GEMINI_MISC_USB1_WAKEUP; + if (mode == USB_DR_MODE_HOST) + val = GEMINI_MISC_USB1_VBUS_ON; + else + val = GEMINI_MISC_USB1_MINI_B; + if (wakeup) + val |= GEMINI_MISC_USB1_WAKEUP; + } else { + mask = GEMINI_MISC_USB0_VBUS_ON | GEMINI_MISC_USB0_MINI_B | + GEMINI_MISC_USB0_WAKEUP; + if (mode == USB_DR_MODE_HOST) + val = GEMINI_MISC_USB0_VBUS_ON; + else + val = GEMINI_MISC_USB0_MINI_B; + if (wakeup) + val |= GEMINI_MISC_USB0_WAKEUP; + } + + ret = regmap_update_bits(map, GEMINI_GLOBAL_MISC_CTRL, mask, val); + if (ret) { + dev_err(dev, "failed to initialize Gemini PHY\n"); + return ret; + } + + dev_info(dev, "initialized Gemini PHY in %s mode\n", + (mode == USB_DR_MODE_HOST) ? 
"host" : "gadget"); + return 0; +} + +static int fotg210_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + enum usb_dr_mode mode; + int ret; + + mode = usb_get_dr_mode(dev); + + if (of_device_is_compatible(dev->of_node, "cortina,gemini-usb")) { + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ret = fotg210_gemini_init(dev, res, mode); + if (ret) + return ret; + } + + if (mode == USB_DR_MODE_PERIPHERAL) + ret = fotg210_udc_probe(pdev); + else + ret = fotg210_hcd_probe(pdev); + + return ret; +} + +static int fotg210_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + enum usb_dr_mode mode; + + mode = usb_get_dr_mode(dev); + + if (mode == USB_DR_MODE_PERIPHERAL) + fotg210_udc_remove(pdev); + else + fotg210_hcd_remove(pdev); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id fotg210_of_match[] = { + { .compatible = "faraday,fotg210" }, + {}, +}; +MODULE_DEVICE_TABLE(of, fotg210_of_match); +#endif + +static struct platform_driver fotg210_driver = { + .driver = { + .name = "fotg210", + .of_match_table = of_match_ptr(fotg210_of_match), + }, + .probe = fotg210_probe, + .remove = fotg210_remove, +}; + +static int __init fotg210_init(void) +{ + if (usb_disabled()) + return -ENODEV; + + if (IS_ENABLED(CONFIG_USB_FOTG210_HCD)) + fotg210_hcd_init(); + return platform_driver_register(&fotg210_driver); +} +module_init(fotg210_init); + +static void __exit fotg210_cleanup(void) +{ + platform_driver_unregister(&fotg210_driver); + if (IS_ENABLED(CONFIG_USB_FOTG210_HCD)) + fotg210_hcd_cleanup(); +} +module_exit(fotg210_cleanup); + +MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("FOTG210 Dual Role Controller Driver"); diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/fotg210/fotg210-hcd.c similarity index 99% rename from drivers/usb/host/fotg210-hcd.c rename to drivers/usb/fotg210/fotg210-hcd.c index 3d1dbcf4c07326ff39a09fe1b25f7a9363e4bfc8..51ac93a2eb98e54509c3891521b3121f30ae7ab4 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/fotg210/fotg210-hcd.c @@ -39,8 +39,8 @@ #include #include -#define DRIVER_AUTHOR "Yuan-Hsin Chen" -#define DRIVER_DESC "FOTG210 Host Controller (EHCI) Driver" +#include "fotg210.h" + static const char hcd_name[] = "fotg210_hcd"; #undef FOTG210_URB_TRACE @@ -77,7 +77,7 @@ MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us"); #define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT) -#include "fotg210.h" +#include "fotg210-hcd.h" #define fotg210_dbg(fotg210, fmt, args...) \ dev_dbg(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args) @@ -5490,9 +5490,6 @@ static int fotg210_get_frame(struct usb_hcd *hcd) * functions and in order to facilitate role switching we cannot * give the fotg210 driver exclusive access to those. */ -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_AUTHOR(DRIVER_AUTHOR); -MODULE_LICENSE("GPL"); static const struct hc_driver fotg210_fotg210_hc_driver = { .description = hcd_name, @@ -5560,7 +5557,7 @@ static void fotg210_init(struct fotg210_hcd *fotg210) * then invokes the start() method for the HCD associated with it * through the hotplug entry's driver_data. 
*/ -static int fotg210_hcd_probe(struct platform_device *pdev) +int fotg210_hcd_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct usb_hcd *hcd; @@ -5652,7 +5649,7 @@ fail_create_hcd: * @dev: USB Host Controller being removed * */ -static int fotg210_hcd_remove(struct platform_device *pdev) +int fotg210_hcd_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd); @@ -5668,27 +5665,8 @@ static int fotg210_hcd_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_OF -static const struct of_device_id fotg210_of_match[] = { - { .compatible = "faraday,fotg210" }, - {}, -}; -MODULE_DEVICE_TABLE(of, fotg210_of_match); -#endif - -static struct platform_driver fotg210_hcd_driver = { - .driver = { - .name = "fotg210-hcd", - .of_match_table = of_match_ptr(fotg210_of_match), - }, - .probe = fotg210_hcd_probe, - .remove = fotg210_hcd_remove, -}; - -static int __init fotg210_hcd_init(void) +int __init fotg210_hcd_init(void) { - int retval = 0; - if (usb_disabled()) return -ENODEV; @@ -5704,24 +5682,11 @@ static int __init fotg210_hcd_init(void) fotg210_debug_root = debugfs_create_dir("fotg210", usb_debug_root); - retval = platform_driver_register(&fotg210_hcd_driver); - if (retval < 0) - goto clean; - return retval; - -clean: - debugfs_remove(fotg210_debug_root); - fotg210_debug_root = NULL; - - clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded); - return retval; + return 0; } -module_init(fotg210_hcd_init); -static void __exit fotg210_hcd_cleanup(void) +void __exit fotg210_hcd_cleanup(void) { - platform_driver_unregister(&fotg210_hcd_driver); debugfs_remove(fotg210_debug_root); clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded); } -module_exit(fotg210_hcd_cleanup); diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/fotg210/fotg210-hcd.h similarity index 100% rename from drivers/usb/host/fotg210.h rename to drivers/usb/fotg210/fotg210-hcd.h diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/fotg210/fotg210-udc.c similarity index 89% rename from drivers/usb/gadget/udc/fotg210-udc.c rename to drivers/usb/fotg210/fotg210-udc.c index fdca28e72a3b4d1441afa7044e418bb00994abfa..66e1b7ee3346eda9e75baf6759c137717bc0dc21 100644 --- a/drivers/usb/gadget/udc/fotg210-udc.c +++ b/drivers/usb/fotg210/fotg210-udc.c @@ -15,8 +15,12 @@ #include #include #include +#include +#include +#include #include "fotg210.h" +#include "fotg210-udc.h" #define DRIVER_DESC "FOTG210 USB Device Controller Driver" #define DRIVER_VERSION "30-April-2013" @@ -629,10 +633,10 @@ static void fotg210_request_error(struct fotg210_udc *fotg210) static void fotg210_set_address(struct fotg210_udc *fotg210, struct usb_ctrlrequest *ctrl) { - if (ctrl->wValue >= 0x0100) { + if (le16_to_cpu(ctrl->wValue) >= 0x0100) { fotg210_request_error(fotg210); } else { - fotg210_set_dev_addr(fotg210, ctrl->wValue); + fotg210_set_dev_addr(fotg210, le16_to_cpu(ctrl->wValue)); fotg210_set_cxdone(fotg210); } } @@ -713,17 +717,17 @@ static void fotg210_get_status(struct fotg210_udc *fotg210, switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: - fotg210->ep0_data = 1 << USB_DEVICE_SELF_POWERED; + fotg210->ep0_data = cpu_to_le16(1 << USB_DEVICE_SELF_POWERED); break; case USB_RECIP_INTERFACE: - fotg210->ep0_data = 0; + fotg210->ep0_data = cpu_to_le16(0); break; case USB_RECIP_ENDPOINT: epnum = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK; if (epnum) fotg210->ep0_data = - fotg210_is_epnstall(fotg210->ep[epnum]) - << USB_ENDPOINT_HALT; + 
cpu_to_le16(fotg210_is_epnstall(fotg210->ep[epnum]) + << USB_ENDPOINT_HALT); else fotg210_request_error(fotg210); break; @@ -1007,11 +1011,19 @@ static int fotg210_udc_start(struct usb_gadget *g, { struct fotg210_udc *fotg210 = gadget_to_fotg210(g); u32 value; + int ret; /* hook up the driver */ driver->driver.bus = NULL; fotg210->driver = driver; + if (!IS_ERR_OR_NULL(fotg210->phy)) { + ret = otg_set_peripheral(fotg210->phy->otg, + &fotg210->gadget); + if (ret) + dev_err(fotg210->dev, "can't bind to phy\n"); + } + /* enable device global interrupt */ value = ioread32(fotg210->reg + FOTG210_DMCR); value |= DMCR_GLINT_EN; @@ -1053,6 +1065,9 @@ static int fotg210_udc_stop(struct usb_gadget *g) struct fotg210_udc *fotg210 = gadget_to_fotg210(g); unsigned long flags; + if (!IS_ERR_OR_NULL(fotg210->phy)) + return otg_set_peripheral(fotg210->phy->otg, NULL); + spin_lock_irqsave(&fotg210->lock, flags); fotg210_init(fotg210); @@ -1068,28 +1083,71 @@ static const struct usb_gadget_ops fotg210_gadget_ops = { .udc_stop = fotg210_udc_stop, }; -static int fotg210_udc_remove(struct platform_device *pdev) +/** + * fotg210_phy_event - Called by phy upon VBus event + * @nb: notifier block + * @action: phy action: VBUS connect or disconnect + * @data: the usb_gadget structure in fotg210 + * + * Called by the USB Phy when a cable connect or disconnect is sensed. + * + * Returns NOTIFY_OK or NOTIFY_DONE + */ +static int fotg210_phy_event(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct usb_gadget *gadget = data; + + if (!gadget) + return NOTIFY_DONE; + + switch (action) { + case USB_EVENT_VBUS: + usb_gadget_vbus_connect(gadget); + return NOTIFY_OK; + case USB_EVENT_NONE: + usb_gadget_vbus_disconnect(gadget); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + +static struct notifier_block fotg210_phy_notifier = { + .notifier_call = fotg210_phy_event, +}; + +int fotg210_udc_remove(struct platform_device *pdev) { struct fotg210_udc *fotg210 = platform_get_drvdata(pdev); int i; usb_del_gadget_udc(&fotg210->gadget); + if (!IS_ERR_OR_NULL(fotg210->phy)) { + usb_unregister_notifier(fotg210->phy, &fotg210_phy_notifier); + usb_put_phy(fotg210->phy); + } iounmap(fotg210->reg); free_irq(platform_get_irq(pdev, 0), fotg210); fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); for (i = 0; i < FOTG210_MAX_NUM_EP; i++) kfree(fotg210->ep[i]); + + if (!IS_ERR(fotg210->pclk)) + clk_disable_unprepare(fotg210->pclk); + kfree(fotg210); return 0; } -static int fotg210_udc_probe(struct platform_device *pdev) +int fotg210_udc_probe(struct platform_device *pdev) { - struct resource *res, *ires; + struct resource *res; struct fotg210_udc *fotg210 = NULL; - struct fotg210_ep *_ep[FOTG210_MAX_NUM_EP]; + struct device *dev = &pdev->dev; + int irq; int ret = 0; int i; @@ -1099,29 +1157,59 @@ static int fotg210_udc_probe(struct platform_device *pdev) return -ENODEV; } - ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!ires) { - pr_err("platform_get_resource IORESOURCE_IRQ error.\n"); + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + pr_err("could not get irq\n"); return -ENODEV; } - ret = -ENOMEM; - /* initialize udc */ fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL); if (fotg210 == NULL) + return -ENOMEM; + + fotg210->dev = dev; + + /* It's OK not to supply this clock */ + fotg210->pclk = devm_clk_get(dev, "PCLK"); + if (!IS_ERR(fotg210->pclk)) { + ret = clk_prepare_enable(fotg210->pclk); + if (ret) { + dev_err(dev, "failed to enable PCLK\n"); + goto err; + } +
} else if (PTR_ERR(fotg210->pclk) == -EPROBE_DEFER) { + /* + * Percolate deferrals, for anything else, + * just live without the clocking. + */ + ret = -EPROBE_DEFER; goto err; + } + + fotg210->phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0); + if (IS_ERR(fotg210->phy)) { + ret = PTR_ERR(fotg210->phy); + if (ret == -EPROBE_DEFER) + goto err_pclk; + dev_info(dev, "no PHY found\n"); + fotg210->phy = NULL; + } else { + ret = usb_phy_init(fotg210->phy); + if (ret) + goto err_pclk; + dev_info(dev, "found and initialized PHY\n"); + } for (i = 0; i < FOTG210_MAX_NUM_EP; i++) { - _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL); - if (_ep[i] == NULL) + fotg210->ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL); + if (!fotg210->ep[i]) goto err_alloc; - fotg210->ep[i] = _ep[i]; } fotg210->reg = ioremap(res->start, resource_size(res)); if (fotg210->reg == NULL) { - pr_err("ioremap error.\n"); + dev_err(dev, "ioremap error\n"); goto err_alloc; } @@ -1132,8 +1220,8 @@ static int fotg210_udc_probe(struct platform_device *pdev) fotg210->gadget.ops = &fotg210_gadget_ops; fotg210->gadget.max_speed = USB_SPEED_HIGH; - fotg210->gadget.dev.parent = &pdev->dev; - fotg210->gadget.dev.dma_mask = pdev->dev.dma_mask; + fotg210->gadget.dev.parent = dev; + fotg210->gadget.dev.dma_mask = dev->dma_mask; fotg210->gadget.name = udc_name; INIT_LIST_HEAD(&fotg210->gadget.ep_list); @@ -1176,23 +1264,28 @@ static int fotg210_udc_probe(struct platform_device *pdev) fotg210_disable_unplug(fotg210); - ret = request_irq(ires->start, fotg210_irq, IRQF_SHARED, + ret = request_irq(irq, fotg210_irq, IRQF_SHARED, udc_name, fotg210); if (ret < 0) { - pr_err("request_irq error (%d)\n", ret); + dev_err(dev, "request_irq error (%d)\n", ret); goto err_req; } - ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget); + if (!IS_ERR_OR_NULL(fotg210->phy)) + usb_register_notifier(fotg210->phy, &fotg210_phy_notifier); + + ret = usb_add_gadget_udc(dev, &fotg210->gadget); if (ret) goto err_add_udc; - dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); + dev_info(dev, "version %s\n", DRIVER_VERSION); return 0; err_add_udc: - free_irq(ires->start, fotg210); + if (!IS_ERR_OR_NULL(fotg210->phy)) + usb_unregister_notifier(fotg210->phy, &fotg210_phy_notifier); + free_irq(irq, fotg210); err_req: fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); @@ -1203,22 +1296,11 @@ err_map: err_alloc: for (i = 0; i < FOTG210_MAX_NUM_EP; i++) kfree(fotg210->ep[i]); - kfree(fotg210); +err_pclk: + if (!IS_ERR(fotg210->pclk)) + clk_disable_unprepare(fotg210->pclk); err: + kfree(fotg210); return ret; } - -static struct platform_driver fotg210_driver = { - .driver = { - .name = udc_name, - }, - .probe = fotg210_udc_probe, - .remove = fotg210_udc_remove, -}; - -module_platform_driver(fotg210_driver); - -MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang "); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/usb/gadget/udc/fotg210.h b/drivers/usb/fotg210/fotg210-udc.h similarity index 99% rename from drivers/usb/gadget/udc/fotg210.h rename to drivers/usb/fotg210/fotg210-udc.h index 08c32957503be387cef13b6da7a4ff5d8fcb7a01..fadb57ca8d7863068d9c659f5aa6dc7e884561ea 100644 --- a/drivers/usb/gadget/udc/fotg210.h +++ b/drivers/usb/fotg210/fotg210-udc.h @@ -231,9 +231,12 @@ struct fotg210_ep { struct fotg210_udc { spinlock_t lock; /* protect the struct */ void __iomem *reg; + struct clk *pclk; unsigned long irq_trigger; + struct device *dev; + struct usb_phy *phy; struct usb_gadget gadget; struct usb_gadget_driver *driver; 
diff --git a/drivers/usb/fotg210/fotg210.h b/drivers/usb/fotg210/fotg210.h new file mode 100644 index 0000000000000000000000000000000000000000..ef79d8323d89793c33fdec18480da781cf30b3b5 --- /dev/null +++ b/drivers/usb/fotg210/fotg210.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __FOTG210_H +#define __FOTG210_H + +#ifdef CONFIG_USB_FOTG210_HCD +int fotg210_hcd_probe(struct platform_device *pdev); +int fotg210_hcd_remove(struct platform_device *pdev); +int fotg210_hcd_init(void); +void fotg210_hcd_cleanup(void); +#else +static inline int fotg210_hcd_probe(struct platform_device *pdev) +{ + return 0; +} +static inline int fotg210_hcd_remove(struct platform_device *pdev) +{ + return 0; +} +static inline int fotg210_hcd_init(void) +{ + return 0; +} +static inline void fotg210_hcd_cleanup(void) +{ +} +#endif + +#ifdef CONFIG_USB_FOTG210_UDC +int fotg210_udc_probe(struct platform_device *pdev); +int fotg210_udc_remove(struct platform_device *pdev); +#else +static inline int fotg210_udc_probe(struct platform_device *pdev) +{ + return 0; +} +static inline int fotg210_udc_remove(struct platform_device *pdev) +{ + return 0; +} +#endif + +#endif /* __FOTG210_H */ diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 3a6b4926193ef2b51aba2abdb1bc2476b378d5f4..96121d1c8df4c71e0d4d0077fa0c37121ddd90ba 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -800,7 +801,7 @@ static ssize_t os_desc_use_store(struct config_item *item, const char *page, bool use; mutex_lock(&gi->lock); - ret = strtobool(page, &use); + ret = kstrtobool(page, &use); if (!ret) { gi->use_os_desc = use; ret = len; diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c index ffe2486fce71c95956a21b2c921cf84a247ada02..a7ab30e603e20c2b72a54f550838ed6332ef6da7 100644 --- a/drivers/usb/gadget/function/f_ecm.c +++ b/drivers/usb/gadget/function/f_ecm.c @@ -685,7 +685,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) struct usb_composite_dev *cdev = c->cdev; struct f_ecm *ecm = func_to_ecm(f); struct usb_string *us; - int status; + int status = 0; struct usb_ep *ep; struct f_ecm_opts *ecm_opts; @@ -695,23 +695,19 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) ecm_opts = container_of(f->fi, struct f_ecm_opts, func_inst); - /* - * in drivers/usb/gadget/configfs.c:configfs_composite_bind() - * configurations are bound in sequence with list_for_each_entry, - * in each configuration its functions are bound in sequence - * with list_for_each_entry, so we assume no race condition - * with regard to ecm_opts->bound access - */ + mutex_lock(&ecm_opts->lock); + + gether_set_gadget(ecm_opts->net, cdev->gadget); + if (!ecm_opts->bound) { - mutex_lock(&ecm_opts->lock); - gether_set_gadget(ecm_opts->net, cdev->gadget); status = gether_register_netdev(ecm_opts->net); - mutex_unlock(&ecm_opts->lock); - if (status) - return status; ecm_opts->bound = true; } + mutex_unlock(&ecm_opts->lock); + if (status) + return status; + ecm_string_defs[1].s = ecm->ethaddr; us = usb_gstrings_attach(cdev, ecm_strings, diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index ca0a7d9eaa34e7f78637b036a96761dbf0cbb3b6..a8da3b4a2855a1afce86d1a3863754aad9c7da6a 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -71,7 +71,7 @@ struct f_hidg { wait_queue_head_t write_queue; struct 
usb_request *req; - int minor; + struct device dev; struct cdev cdev; struct usb_function func; @@ -84,6 +84,14 @@ static inline struct f_hidg *func_to_hidg(struct usb_function *f) return container_of(f, struct f_hidg, func); } +static void hidg_release(struct device *dev) +{ + struct f_hidg *hidg = container_of(dev, struct f_hidg, dev); + + kfree(hidg->set_report_buf); + kfree(hidg); +} + /*-------------------------------------------------------------------------*/ /* Static descriptors */ @@ -904,9 +912,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) struct usb_ep *ep; struct f_hidg *hidg = func_to_hidg(f); struct usb_string *us; - struct device *device; int status; - dev_t dev; /* maybe allocate device-global string IDs, and patch descriptors */ us = usb_gstrings_attach(c->cdev, ct_func_strings, @@ -999,21 +1005,11 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) /* create char device */ cdev_init(&hidg->cdev, &f_hidg_fops); - dev = MKDEV(major, hidg->minor); - status = cdev_add(&hidg->cdev, dev, 1); + status = cdev_device_add(&hidg->cdev, &hidg->dev); if (status) goto fail_free_descs; - device = device_create(hidg_class, NULL, dev, NULL, - "%s%d", "hidg", hidg->minor); - if (IS_ERR(device)) { - status = PTR_ERR(device); - goto del; - } - return 0; -del: - cdev_del(&hidg->cdev); fail_free_descs: usb_free_all_descriptors(f); fail: @@ -1244,9 +1240,7 @@ static void hidg_free(struct usb_function *f) hidg = func_to_hidg(f); opts = container_of(f->fi, struct f_hid_opts, func_inst); - kfree(hidg->report_desc); - kfree(hidg->set_report_buf); - kfree(hidg); + put_device(&hidg->dev); mutex_lock(&opts->lock); --opts->refcnt; mutex_unlock(&opts->lock); @@ -1256,8 +1250,7 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_hidg *hidg = func_to_hidg(f); - device_destroy(hidg_class, MKDEV(major, hidg->minor)); - cdev_del(&hidg->cdev); + cdev_device_del(&hidg->cdev, &hidg->dev); usb_free_all_descriptors(f); } @@ -1266,6 +1259,7 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi) { struct f_hidg *hidg; struct f_hid_opts *opts; + int ret; /* allocate and initialize one new instance */ hidg = kzalloc(sizeof(*hidg), GFP_KERNEL); @@ -1275,25 +1269,31 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi) opts = container_of(fi, struct f_hid_opts, func_inst); mutex_lock(&opts->lock); - ++opts->refcnt; - hidg->minor = opts->minor; + device_initialize(&hidg->dev); + hidg->dev.release = hidg_release; + hidg->dev.class = hidg_class; + hidg->dev.devt = MKDEV(major, opts->minor); + ret = dev_set_name(&hidg->dev, "hidg%d", opts->minor); + if (ret) + goto err_unlock; + hidg->bInterfaceSubClass = opts->subclass; hidg->bInterfaceProtocol = opts->protocol; hidg->report_length = opts->report_length; hidg->report_desc_length = opts->report_desc_length; if (opts->report_desc) { - hidg->report_desc = kmemdup(opts->report_desc, - opts->report_desc_length, - GFP_KERNEL); + hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc, + opts->report_desc_length, + GFP_KERNEL); if (!hidg->report_desc) { - kfree(hidg); - mutex_unlock(&opts->lock); - return ERR_PTR(-ENOMEM); + ret = -ENOMEM; + goto err_put_device; } } hidg->use_out_ep = !opts->no_out_endpoint; + ++opts->refcnt; mutex_unlock(&opts->lock); hidg->func.name = "hid"; @@ -1308,6 +1308,12 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi) hidg->qlen = 4; return &hidg->func; + +err_put_device: + 
put_device(&hidg->dev); +err_unlock: + mutex_unlock(&opts->lock); + return ERR_PTR(ret); } DECLARE_USB_FUNCTION_INIT(hid, hidg_alloc_inst, hidg_alloc); diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 3abf7f586e2afba4f08be76bbcb29ea6c7fcdfcd..3a30feb47073f0aeeac0bd13e5f82e36cfe2a030 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -176,6 +176,7 @@ #include #include #include +#include #include #include #include @@ -3387,7 +3388,7 @@ static ssize_t fsg_opts_stall_store(struct config_item *item, const char *page, return -EBUSY; } - ret = strtobool(page, &stall); + ret = kstrtobool(page, &stall); if (!ret) { opts->common->can_stall = stall; ret = len; diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index a881c69b1f2bf56cf20fda287efcd708e7abdbe3..4903d761a87241d85c8528285c052d5adb85c1e4 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -364,7 +364,7 @@ printer_open(struct inode *inode, struct file *fd) spin_unlock_irqrestore(&dev->lock, flags); kref_get(&dev->kref); - DBG(dev, "printer_open returned %x\n", ret); + return ret; } @@ -382,7 +382,6 @@ printer_close(struct inode *inode, struct file *fd) spin_unlock_irqrestore(&dev->lock, flags); kref_put(&dev->kref, printer_dev_free); - DBG(dev, "printer_close\n"); return 0; } @@ -848,8 +847,6 @@ static void printer_reset_interface(struct printer_dev *dev) if (dev->interface < 0) return; - DBG(dev, "%s\n", __func__); - if (dev->in_ep->desc) usb_ep_disable(dev->in_ep); @@ -887,8 +884,6 @@ static void printer_soft_reset(struct printer_dev *dev) { struct usb_request *req; - INFO(dev, "Received Printer Reset Request\n"); - if (usb_ep_disable(dev->in_ep)) DBG(dev, "Failed to disable USB in_ep\n"); if (usb_ep_disable(dev->out_ep)) @@ -1185,8 +1180,6 @@ static void printer_func_disable(struct usb_function *f) { struct printer_dev *dev = func_to_printer(f); - DBG(dev, "%s\n", __func__); - printer_reset_interface(dev); } diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index 6e196e06181ecf21b2bf225adbadefed9e0c9f2b..32f2c16454670030465f248d492cb53a9b1fdcfd 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -39,9 +39,6 @@ MODULE_PARM_DESC(trace, "Trace level bitmask"); /* string IDs are assigned dynamically */ -#define UVC_STRING_CONTROL_IDX 0 -#define UVC_STRING_STREAMING_IDX 1 - static struct usb_string uvc_en_us_strings[] = { /* [UVC_STRING_CONTROL_IDX].s = DYNAMIC, */ [UVC_STRING_STREAMING_IDX].s = "Video Streaming", @@ -216,8 +213,9 @@ uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req) memset(&v4l2_event, 0, sizeof(v4l2_event)); v4l2_event.type = UVC_EVENT_DATA; - uvc_event->data.length = req->actual; - memcpy(&uvc_event->data.data, req->buf, req->actual); + uvc_event->data.length = min_t(unsigned int, req->actual, + sizeof(uvc_event->data.data)); + memcpy(&uvc_event->data.data, req->buf, uvc_event->data.length); v4l2_event_queue(&uvc->vdev, &v4l2_event); } } @@ -228,6 +226,8 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) struct uvc_device *uvc = to_uvc(f); struct v4l2_event v4l2_event; struct uvc_event *uvc_event = (void *)&v4l2_event.u.data; + unsigned int interface = le16_to_cpu(ctrl->wIndex) & 0xff; + struct usb_ctrlrequest *mctrl; if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) { 
uvcg_info(f, "invalid request type\n"); @@ -248,6 +248,16 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) memset(&v4l2_event, 0, sizeof(v4l2_event)); v4l2_event.type = UVC_EVENT_SETUP; memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req)); + + /* check for the interface number, fixup the interface number in + * the ctrl request so the userspace doesn't have to bother with + * offset and configfs parsing + */ + mctrl = &uvc_event->req; + mctrl->wIndex &= ~cpu_to_le16(0xff); + if (interface == uvc->streaming_intf) + mctrl->wIndex = cpu_to_le16(UVC_STRING_STREAMING_IDX); + v4l2_event_queue(&uvc->vdev, &v4l2_event); return 0; diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c index 208c6a92780ab28744b31932c2cdd9961277453a..2a4163b0f6fe26b7e482646fc343a5856e71618b 100644 --- a/drivers/usb/gadget/function/storage_common.c +++ b/drivers/usb/gadget/function/storage_common.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include "storage_common.h" @@ -396,7 +397,7 @@ ssize_t fsg_store_ro(struct fsg_lun *curlun, struct rw_semaphore *filesem, ssize_t rc; bool ro; - rc = strtobool(buf, &ro); + rc = kstrtobool(buf, &ro); if (rc) return rc; @@ -419,7 +420,7 @@ ssize_t fsg_store_nofua(struct fsg_lun *curlun, const char *buf, size_t count) bool nofua; int ret; - ret = strtobool(buf, &nofua); + ret = kstrtobool(buf, &nofua); if (ret) return ret; @@ -470,7 +471,7 @@ ssize_t fsg_store_cdrom(struct fsg_lun *curlun, struct rw_semaphore *filesem, bool cdrom; int ret; - ret = strtobool(buf, &cdrom); + ret = kstrtobool(buf, &cdrom); if (ret) return ret; @@ -493,7 +494,7 @@ ssize_t fsg_store_removable(struct fsg_lun *curlun, const char *buf, bool removable; int ret; - ret = strtobool(buf, &removable); + ret = kstrtobool(buf, &removable); if (ret) return ret; diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index e06022873df16a357a2ba5b6f82f64b7c4cb081e..8f12f3f8f6eeb698582376ab305b80d7b4571dd5 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -798,7 +798,6 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, net->max_mtu = GETHER_MAX_MTU_SIZE; dev->gadget = g; - SET_NETDEV_DEV(net, &g->dev); SET_NETDEV_DEVTYPE(net, &gadget_type); status = register_netdev(net); @@ -873,8 +872,6 @@ int gether_register_netdev(struct net_device *net) struct usb_gadget *g; int status; - if (!net->dev.parent) - return -EINVAL; dev = netdev_priv(net); g = dev->gadget; @@ -905,7 +902,6 @@ void gether_set_gadget(struct net_device *net, struct usb_gadget *g) dev = netdev_priv(net); dev->gadget = g; - SET_NETDEV_DEV(net, &g->dev); } EXPORT_SYMBOL_GPL(gether_set_gadget); diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 7538279f98179a17b7ced9b182ebb36b6f655304..840626e064e13a5578930809eb03b3ab889b3426 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -1070,7 +1071,7 @@ ssize_t gserial_set_console(unsigned char port_num, const char *page, size_t cou bool enable; int ret; - ret = strtobool(page, &enable); + ret = kstrtobool(page, &enable); if (ret) return ret; diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index 4303a3283ba0a3a3b9927391553546fe2fbb3ccf..76cb60d13049f81328908b5cfcfa3ac954a47884 100644 --- 
a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c @@ -1512,7 +1512,7 @@ UVCG_UNCOMPRESSED_ATTR(b_bits_per_pixel, bBitsPerPixel, 8); UVCG_UNCOMPRESSED_ATTR(b_default_frame_index, bDefaultFrameIndex, 8); UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, 8); UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, 8); -UVCG_UNCOMPRESSED_ATTR_RO(bm_interface_flags, bmInterfaceFlags, 8); +UVCG_UNCOMPRESSED_ATTR_RO(bm_interlace_flags, bmInterlaceFlags, 8); #undef UVCG_UNCOMPRESSED_ATTR #undef UVCG_UNCOMPRESSED_ATTR_RO @@ -1541,7 +1541,7 @@ static struct configfs_attribute *uvcg_uncompressed_attrs[] = { &uvcg_uncompressed_attr_b_default_frame_index, &uvcg_uncompressed_attr_b_aspect_ratio_x, &uvcg_uncompressed_attr_b_aspect_ratio_y, - &uvcg_uncompressed_attr_bm_interface_flags, + &uvcg_uncompressed_attr_bm_interlace_flags, &uvcg_uncompressed_attr_bma_controls, NULL, }; @@ -1574,7 +1574,7 @@ static struct config_group *uvcg_uncompressed_make(struct config_group *group, h->desc.bDefaultFrameIndex = 1; h->desc.bAspectRatioX = 0; h->desc.bAspectRatioY = 0; - h->desc.bmInterfaceFlags = 0; + h->desc.bmInterlaceFlags = 0; h->desc.bCopyProtect = 0; INIT_LIST_HEAD(&h->fmt.frames); @@ -1700,7 +1700,7 @@ UVCG_MJPEG_ATTR(b_default_frame_index, bDefaultFrameIndex, 8); UVCG_MJPEG_ATTR_RO(bm_flags, bmFlags, 8); UVCG_MJPEG_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, 8); UVCG_MJPEG_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, 8); -UVCG_MJPEG_ATTR_RO(bm_interface_flags, bmInterfaceFlags, 8); +UVCG_MJPEG_ATTR_RO(bm_interlace_flags, bmInterlaceFlags, 8); #undef UVCG_MJPEG_ATTR #undef UVCG_MJPEG_ATTR_RO @@ -1728,7 +1728,7 @@ static struct configfs_attribute *uvcg_mjpeg_attrs[] = { &uvcg_mjpeg_attr_bm_flags, &uvcg_mjpeg_attr_b_aspect_ratio_x, &uvcg_mjpeg_attr_b_aspect_ratio_y, - &uvcg_mjpeg_attr_bm_interface_flags, + &uvcg_mjpeg_attr_bm_interlace_flags, &uvcg_mjpeg_attr_bma_controls, NULL, }; @@ -1755,7 +1755,7 @@ static struct config_group *uvcg_mjpeg_make(struct config_group *group, h->desc.bDefaultFrameIndex = 1; h->desc.bAspectRatioX = 0; h->desc.bAspectRatioY = 0; - h->desc.bmInterfaceFlags = 0; + h->desc.bmInterlaceFlags = 0; h->desc.bCopyProtect = 0; INIT_LIST_HEAD(&h->fmt.frames); diff --git a/drivers/usb/gadget/legacy/serial.c b/drivers/usb/gadget/legacy/serial.c index dcd3a6603d900f2a1fd743a2ab14f7ff9a087aa1..4974bee6049a6527b123bfe4fb7463dc48b35fc3 100644 --- a/drivers/usb/gadget/legacy/serial.c +++ b/drivers/usb/gadget/legacy/serial.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -109,7 +110,7 @@ static int enable_set(const char *s, const struct kernel_param *kp) if (!s) /* called for no-arg enable == default */ return 0; - ret = strtobool(s, &do_enable); + ret = kstrtobool(s, &do_enable); if (ret || enable == do_enable) return ret; diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c index 94e22867da1d0351dbcf1ff0062d3fbe1598eb73..53e38f87472b040cc953ba2dfd65dcdfbe53f82f 100644 --- a/drivers/usb/gadget/legacy/webcam.c +++ b/drivers/usb/gadget/legacy/webcam.c @@ -171,7 +171,7 @@ static const struct uvc_format_uncompressed uvc_format_yuv = { .bDefaultFrameIndex = 1, .bAspectRatioX = 0, .bAspectRatioY = 0, - .bmInterfaceFlags = 0, + .bmInterlaceFlags = 0, .bCopyProtect = 0, }; @@ -222,7 +222,7 @@ static const struct uvc_format_mjpeg uvc_format_mjpg = { .bDefaultFrameIndex = 1, .bAspectRatioX = 0, .bAspectRatioY = 0, - .bmInterfaceFlags = 0, + .bmInterlaceFlags = 0, .bCopyProtect = 0, }; diff --git 
a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index 5756acb07b8da810313d7d614adbf0c55d9666be..b3006d8b04ab37d81ca1dadf4215d59c869640bc 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -33,7 +33,7 @@ menu "USB Peripheral Controller" config USB_AT91 tristate "Atmel AT91 USB Device Port" depends on ARCH_AT91 - depends on OF || COMPILE_TEST + depends on OF help Many Atmel AT91 processors (such as the AT91RM2000) have a full speed USB Device Port with support for five configurable @@ -108,17 +108,6 @@ config USB_FUSB300 help Faraday usb device controller FUSB300 driver -config USB_FOTG210_UDC - depends on HAS_DMA - tristate "Faraday FOTG210 USB Peripheral Controller" - help - Faraday USB2.0 OTG controller which can be configured as - high speed or full speed USB device. This driver supppors - Bulk Transfer so far. - - Say "y" to link the driver statically, or "m" to build a - dynamically linked module called "fotg210_udc". - config USB_GR_UDC tristate "Aeroflex Gaisler GRUSBDC USB Peripheral Controller Driver" depends on HAS_DMA @@ -430,7 +419,7 @@ config USB_EG20T config USB_GADGET_XILINX tristate "Xilinx USB Driver" depends on HAS_DMA - depends on OF || COMPILE_TEST + depends on OF help USB peripheral controller driver for Xilinx USB2 device. Xilinx USB2 device is a soft IP which supports both full diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile index 12f9e4c9eb0c5721578b7195409a2878917e8ae2..39daf36a2baa29bbfa8dd3a9ca79cbb3fadd2753 100644 --- a/drivers/usb/gadget/udc/Makefile +++ b/drivers/usb/gadget/udc/Makefile @@ -34,7 +34,6 @@ obj-$(CONFIG_USB_EG20T) += pch_udc.o obj-$(CONFIG_USB_MV_UDC) += mv_udc.o mv_udc-y := mv_udc_core.o obj-$(CONFIG_USB_FUSB300) += fusb300_udc.o -obj-$(CONFIG_USB_FOTG210_UDC) += fotg210-udc.o obj-$(CONFIG_USB_MV_U3D) += mv_u3d_core.o obj-$(CONFIG_USB_GR_UDC) += gr_udc.o obj-$(CONFIG_USB_GADGET_XILINX) += udc-xilinx.o diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c index 7a635c4997774f1f84095781ae81d230a4ccd9c5..ac3ca24f8b0454dfa2ac547e517cad005a21664e 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/core.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c @@ -37,7 +37,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req, list_del_init(&req->queue); - if (req->req.status == -EINPROGRESS) + if ((req->req.status == -EINPROGRESS) || (status == -EOVERFLOW)) req->req.status = status; if (req->req.dma) { diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c index b5252880b3894c62a4e7adae7d1de477500ab64b..56e55472daa136cc8ea474d6dc5fb25367a888bc 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c @@ -84,6 +84,7 @@ static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep) { struct ast_vhub_req *req; unsigned int len; + int status = 0; u32 stat; /* Read EP status */ @@ -119,9 +120,15 @@ static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep) len = VHUB_EP_DMA_TX_SIZE(stat); /* If not using DMA, copy data out if needed */ - if (!req->req.dma && !ep->epn.is_in && len) - memcpy(req->req.buf + req->req.actual, ep->buf, len); - + if (!req->req.dma && !ep->epn.is_in && len) { + if (req->req.actual + len > req->req.length) { + req->last_desc = 1; + status = -EOVERFLOW; + goto done; + } else { + memcpy(req->req.buf + req->req.actual, ep->buf, len); + } + } /* Adjust size */ req->req.actual += len; @@ -129,9 +136,10 @@ static void 
ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep) if (len < ep->ep.maxpacket) req->last_desc = 1; +done: /* That's it ? complete the request and pick a new one */ if (req->last_desc >= 0) { - ast_vhub_done(ep, req, 0); + ast_vhub_done(ep, req, status); req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue); diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c index a9a7b3fc60ec961c544b994d1cb476c5217cbb40..922b4187004b03f93eb3ee4d4c4c2c29e3d31431 100644 --- a/drivers/usb/gadget/udc/at91_udc.c +++ b/drivers/usb/gadget/udc/at91_udc.c @@ -1628,10 +1628,7 @@ static int at91rm9200_udc_init(struct at91_udc *udc) static void at91rm9200_udc_pullup(struct at91_udc *udc, int is_on) { - if (is_on) - gpiod_set_value(udc->board.pullup_pin, 1); - else - gpiod_set_value(udc->board.pullup_pin, 0); + gpiod_set_value(udc->board.pullup_pin, is_on); } static const struct at91_udc_caps at91rm9200_udc_caps = { diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index c63c0c2cf649d531da684c377a051bc35f742902..23b0629a877431325c3f2f35f900388d9494d2cb 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -734,13 +734,13 @@ int usb_gadget_disconnect(struct usb_gadget *gadget) } ret = gadget->ops->pullup(gadget, 0); - if (!ret) { + if (!ret) gadget->connected = 0; - mutex_lock(&udc_lock); - if (gadget->udc->driver) - gadget->udc->driver->disconnect(gadget); - mutex_unlock(&udc_lock); - } + + mutex_lock(&udc_lock); + if (gadget->udc->driver) + gadget->udc->driver->disconnect(gadget); + mutex_unlock(&udc_lock); out: trace_usb_gadget_disconnect(gadget, ret); @@ -1723,9 +1723,9 @@ static const struct attribute_group *usb_udc_attr_groups[] = { NULL, }; -static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env) +static int usb_udc_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct usb_udc *udc = container_of(dev, struct usb_udc, dev); + const struct usb_udc *udc = container_of(dev, struct usb_udc, dev); int ret; ret = add_uevent_var(env, "USB_UDC_NAME=%s", udc->gadget->name); diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 247568bc17a2b7a7c828cb4f795463454634b6ad..8d799d23c476e1795c0b957da50cf15afc0c720c 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -47,7 +47,7 @@ config USB_XHCI_PCI_RENESAS tristate "Support for additional Renesas xHCI controller with firmware" help Say 'Y' to enable the support for the Renesas xHCI controller with - firmware. Make sure you have the firwmare for the device and + firmware. Make sure you have the firmware for the device and installed on your system for this device to work. If unsure, say 'N'. @@ -389,17 +389,6 @@ config USB_ISP1362_HCD To compile this driver as a module, choose M here: the module will be called isp1362-hcd. -config USB_FOTG210_HCD - tristate "FOTG210 HCD support" - depends on USB && HAS_DMA && HAS_IOMEM - help - Faraday FOTG210 is an OTG controller which can be configured as - an USB2.0 host. It is designed to meet USB2.0 EHCI specification - with minor modification. - - To compile this driver as a module, choose M here: the - module will be called fotg210-hcd. 
- config USB_MAX3421_HCD tristate "MAX3421 HCD (USB-over-SPI) support" depends on USB && SPI diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index 2c8a61be7e466c197949ed39980b438d1fb53f2a..6d8ee264c9b2bf85669f608c7c0f50a5e44cb514 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -84,6 +84,5 @@ obj-$(CONFIG_USB_EHCI_FSL) += ehci-fsl.o obj-$(CONFIG_USB_EHCI_MV) += ehci-mv.o obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o -obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o obj-$(CONFIG_USB_MAX3421_HCD) += max3421-hcd.o obj-$(CONFIG_USB_XEN_HCD) += xen-hcd.o diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c index a2c3b4ec8a8b5ff1183a9456a77b72cec8d1a4db..0717f2ccf49d52fa6a5a9812a885a47784dbcc5b 100644 --- a/drivers/usb/host/ehci-grlib.c +++ b/drivers/usb/host/ehci-grlib.c @@ -99,7 +99,7 @@ static int ehci_hcd_grlib_probe(struct platform_device *op) hcd->rsrc_len = resource_size(&res); irq = irq_of_parse_and_map(dn, 0); - if (irq == NO_IRQ) { + if (!irq) { dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 17f8b6ea0c356da98441e76f35bb0c7270ee4d0e..4b148fe5e43b28f0b64167c679a1625db0fcfb98 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -411,11 +411,12 @@ static struct pci_driver ehci_pci_driver = { .remove = ehci_pci_remove, .shutdown = usb_hcd_pci_shutdown, -#ifdef CONFIG_PM .driver = { - .pm = &usb_hcd_pci_pm_ops - }, +#ifdef CONFIG_PM + .pm = &usb_hcd_pci_pm_ops, #endif + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, }; static int __init ehci_pci_init(void) diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c index 28a19693c19fe9f70adfbfd22da3bff13c91d049..62a0a193798c2daf69b0c5921506d776b080caba 100644 --- a/drivers/usb/host/ehci-ppc-of.c +++ b/drivers/usb/host/ehci-ppc-of.c @@ -119,7 +119,7 @@ static int ehci_hcd_ppc_of_probe(struct platform_device *op) hcd->rsrc_len = resource_size(&res); irq = irq_of_parse_and_map(dn, 0); - if (irq == NO_IRQ) { + if (!irq) { dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index ad3f13a3eaf1b729a97e125a736b76b04078cfd3..c5c7f8782549352164035454f725c434bfff5efb 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -471,7 +471,7 @@ struct ehci_iso_sched { * acts like a qh would, if EHCI had them for ISO. */ struct ehci_iso_stream { - /* first field matches ehci_hq, but is NULL */ + /* first field matches ehci_qh, but is NULL */ struct ehci_qh_hw *hw; u8 bEndpointAddress; diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c index 1f666804fa9134af5400656f6888ac423a4a030a..92794ffc25c876286994e93f51f9bd62d48b898d 100644 --- a/drivers/usb/host/fhci-hcd.c +++ b/drivers/usb/host/fhci-hcd.c @@ -676,7 +676,7 @@ static int of_fhci_probe(struct platform_device *ofdev) /* USB Host interrupt. 
*/ usb_irq = irq_of_parse_and_map(node, 0); - if (usb_irq == NO_IRQ) { + if (!usb_irq) { dev_err(dev, "could not get usb irq\n"); ret = -EINVAL; goto err_usb_irq; diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c index 591f675cc9306ffc879478fcc2e56c4d7ed286f4..f2f6c832ec98c23f725f4f8bdc8539fa611f16a4 100644 --- a/drivers/usb/host/ohci-ppc-of.c +++ b/drivers/usb/host/ohci-ppc-of.c @@ -120,7 +120,7 @@ static int ohci_hcd_ppc_of_probe(struct platform_device *op) } irq = irq_of_parse_and_map(dn, 0); - if (irq == NO_IRQ) { + if (!irq) { dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c index 3ef6d52839e53782e8dddf4866854bf881fca0b6..907d5f01edfd47160bf8e00cd78247688330be88 100644 --- a/drivers/usb/host/uhci-grlib.c +++ b/drivers/usb/host/uhci-grlib.c @@ -116,7 +116,7 @@ static int uhci_hcd_grlib_probe(struct platform_device *op) hcd->rsrc_len = resource_size(&res); irq = irq_of_parse_and_map(dn, 0); - if (irq == NO_IRQ) { + if (!irq) { printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; goto err_usb; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 4619d5e89d5be1f0b5c3c52d05a29583ccf60e92..94c94db3faf6da4db1af0c9d9a5a3fc23a259c00 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -426,24 +426,37 @@ static unsigned int xhci_port_speed(unsigned int port_status) */ #define XHCI_PORT_RZ ((1<<2) | (1<<24) | (0xf<<28)) -/* +/** + * xhci_port_state_to_neutral() - Clean up a read portsc value back into a writeable state + * @state: u32 port value read from portsc register to be cleaned up + * * Given a port state, this function returns a value that would result in the * port being in the same state, if the value was written to the port status * control register. * Save Read Only (RO) bits and save read/write bits where * writing a 0 clears the bit and writing a 1 sets the bit (RWS). * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect. + * + * Return: u32 value that can be written back to portsc register without + * changing port state. */ + u32 xhci_port_state_to_neutral(u32 state) { /* Save read-only status and port state */ return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS); } +EXPORT_SYMBOL_GPL(xhci_port_state_to_neutral); -/* - * find slot id based on port number. - * @port: The one-based port number from one of the two split roothubs. +/** + * xhci_find_slot_id_by_port() - Find slot id of a usb device on a roothub port + * @hcd: pointer to hcd of the roothub + * @xhci: pointer to xhci structure + * @port: one-based port number of the port in this roothub.
+ * + * Return: Slot id of the usb device connected to the root port, 0 if not found */ + int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, u16 port) { @@ -465,6 +478,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, return slot_id; } +EXPORT_SYMBOL_GPL(xhci_find_slot_id_by_port); /* * Stop device diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 01705e559c4225d30f4df791c6dc1ecc41ddc856..f7cbb08fc5068da5f932885c138781b7e88c1bc0 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -485,6 +485,7 @@ static int xhci_mtk_probe(struct platform_device *pdev) const struct hc_driver *driver; struct xhci_hcd *xhci; struct resource *res; + struct usb_hcd *usb3_hcd; struct usb_hcd *hcd; int ret = -ENODEV; int wakeup_irq; @@ -593,6 +594,7 @@ static int xhci_mtk_probe(struct platform_device *pdev) xhci = hcd_to_xhci(hcd); xhci->main_hcd = hcd; + xhci->allow_single_roothub = 1; /* * imod_interval is the interrupt moderation value in nanoseconds. @@ -602,24 +604,29 @@ static int xhci_mtk_probe(struct platform_device *pdev) xhci->imod_interval = 5000; device_property_read_u32(dev, "imod-interval-ns", &xhci->imod_interval); - xhci->shared_hcd = usb_create_shared_hcd(driver, dev, - dev_name(dev), hcd); - if (!xhci->shared_hcd) { - ret = -ENOMEM; - goto disable_device_wakeup; - } - ret = usb_add_hcd(hcd, irq, IRQF_SHARED); if (ret) - goto put_usb3_hcd; + goto disable_device_wakeup; - if (HCC_MAX_PSA(xhci->hcc_params) >= 4 && + if (!xhci_has_one_roothub(xhci)) { + xhci->shared_hcd = usb_create_shared_hcd(driver, dev, + dev_name(dev), hcd); + if (!xhci->shared_hcd) { + ret = -ENOMEM; + goto dealloc_usb2_hcd; + } + } + + usb3_hcd = xhci_get_usb3_hcd(xhci); + if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4 && !(xhci->quirks & XHCI_BROKEN_STREAMS)) - xhci->shared_hcd->can_do_streams = 1; + usb3_hcd->can_do_streams = 1; - ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); - if (ret) - goto dealloc_usb2_hcd; + if (xhci->shared_hcd) { + ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); + if (ret) + goto put_usb3_hcd; + } if (wakeup_irq > 0) { ret = dev_pm_set_dedicated_wake_irq_reverse(dev, wakeup_irq); @@ -639,15 +646,14 @@ static int xhci_mtk_probe(struct platform_device *pdev) dealloc_usb3_hcd: usb_remove_hcd(xhci->shared_hcd); - xhci->shared_hcd = NULL; - -dealloc_usb2_hcd: - usb_remove_hcd(hcd); put_usb3_hcd: - xhci_mtk_sch_exit(mtk); usb_put_hcd(xhci->shared_hcd); +dealloc_usb2_hcd: + xhci_mtk_sch_exit(mtk); + usb_remove_hcd(hcd); + disable_device_wakeup: device_init_wakeup(dev, false); @@ -679,10 +685,15 @@ static int xhci_mtk_remove(struct platform_device *pdev) dev_pm_clear_wake_irq(dev); device_init_wakeup(dev, false); - usb_remove_hcd(shared_hcd); - xhci->shared_hcd = NULL; + if (shared_hcd) { + usb_remove_hcd(shared_hcd); + xhci->shared_hcd = NULL; + } usb_remove_hcd(hcd); - usb_put_hcd(shared_hcd); + + if (shared_hcd) + usb_put_hcd(shared_hcd); + usb_put_hcd(hcd); xhci_mtk_sch_exit(mtk); clk_bulk_disable_unprepare(BULK_CLKS_NUM, mtk->clks); @@ -700,13 +711,16 @@ static int __maybe_unused xhci_mtk_suspend(struct device *dev) struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev); struct usb_hcd *hcd = mtk->hcd; struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct usb_hcd *shared_hcd = xhci->shared_hcd; int ret; xhci_dbg(xhci, "%s: stop port polling\n", __func__); clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); del_timer_sync(&hcd->rh_timer); - clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); - 
del_timer_sync(&xhci->shared_hcd->rh_timer); + if (shared_hcd) { + clear_bit(HCD_FLAG_POLL_RH, &shared_hcd->flags); + del_timer_sync(&shared_hcd->rh_timer); + } ret = xhci_mtk_host_disable(mtk); if (ret) @@ -718,8 +732,10 @@ static int __maybe_unused xhci_mtk_suspend(struct device *dev) restart_poll_rh: xhci_dbg(xhci, "%s: restart port polling\n", __func__); - set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); - usb_hcd_poll_rh_status(xhci->shared_hcd); + if (shared_hcd) { + set_bit(HCD_FLAG_POLL_RH, &shared_hcd->flags); + usb_hcd_poll_rh_status(shared_hcd); + } set_bit(HCD_FLAG_POLL_RH, &hcd->flags); usb_hcd_poll_rh_status(hcd); return ret; @@ -730,6 +746,7 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev) struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev); struct usb_hcd *hcd = mtk->hcd; struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct usb_hcd *shared_hcd = xhci->shared_hcd; int ret; usb_wakeup_set(mtk, false); @@ -742,8 +759,10 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev) goto disable_clks; xhci_dbg(xhci, "%s: restart port polling\n", __func__); - set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); - usb_hcd_poll_rh_status(xhci->shared_hcd); + if (shared_hcd) { + set_bit(HCD_FLAG_POLL_RH, &shared_hcd->flags); + usb_hcd_poll_rh_status(shared_hcd); + } set_bit(HCD_FLAG_POLL_RH, &hcd->flags); usb_hcd_poll_rh_status(hcd); return 0; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 7bccbe50bab15100df6b752d930549cca17e05cf..79d679b3e07607b26349e211f1fa37f8248bd0f9 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -59,6 +59,7 @@ #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 @@ -246,7 +247,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_MISSING_CAS; if (pdev->vendor == PCI_VENDOR_ID_INTEL && - pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI) + (pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI)) xhci->quirks |= XHCI_RESET_TO_DEFAULT; if (pdev->vendor == PCI_VENDOR_ID_INTEL && @@ -620,6 +622,57 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) return retval; } +static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_port *port; + struct usb_device *udev; + unsigned int slot_id; + u32 portsc; + int i; + + /* + * Systems with XHCI_RESET_TO_DEFAULT quirk have boot firmware that + * cause significant boot delay if usb ports are in suspended U3 state + * during boot. 
Some USB devices survive in U3 state over S4 hibernate + * + * Disable ports that are in U3 if remote wake is not enabled for either + * host controller or connected device + */ + + if (!(xhci->quirks & XHCI_RESET_TO_DEFAULT)) + return 0; + + for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) { + port = &xhci->hw_ports[i]; + portsc = readl(port->addr); + + if ((portsc & PORT_PLS_MASK) != XDEV_U3) + continue; + + slot_id = xhci_find_slot_id_by_port(port->rhub->hcd, xhci, + port->hcd_portnum + 1); + if (!slot_id || !xhci->devs[slot_id]) { + xhci_err(xhci, "No dev for slot_id %d for port %d-%d in U3\n", + slot_id, port->rhub->hcd->self.busnum, port->hcd_portnum + 1); + continue; + } + + udev = xhci->devs[slot_id]->udev; + + /* if wakeup is enabled then don't disable the port */ + if (udev->do_remote_wakeup && do_wakeup) + continue; + + xhci_dbg(xhci, "port %d-%d in U3 without wakeup, disable it\n", + port->rhub->hcd->self.busnum, port->hcd_portnum + 1); + portsc = xhci_port_state_to_neutral(portsc); + writel(portsc | PORT_PE, port->addr); + } + + return 0; +} + static void xhci_pci_shutdown(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); @@ -673,11 +726,12 @@ static struct pci_driver xhci_pci_driver = { /* suspend and resume implemented later */ .shutdown = usb_hcd_pci_shutdown, -#ifdef CONFIG_PM .driver = { - .pm = &usb_hcd_pci_pm_ops - }, +#ifdef CONFIG_PM + .pm = &usb_hcd_pci_pm_ops, #endif + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, }; static int __init xhci_pci_init(void) @@ -686,6 +740,7 @@ static int __init xhci_pci_init(void) #ifdef CONFIG_PM xhci_pci_hc_driver.pci_suspend = xhci_pci_suspend; xhci_pci_hc_driver.pci_resume = xhci_pci_resume; + xhci_pci_hc_driver.pci_poweroff_late = xhci_pci_poweroff_late; xhci_pci_hc_driver.shutdown = xhci_pci_shutdown; #endif return pci_register_driver(&xhci_pci_driver); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index ad81e9a508b14b1bfd231e79db06a20d06699b25..ddc30037f9cefa6f606f590ea1ed79325b0ba4da 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -896,7 +896,7 @@ done: } static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci, - struct xhci_virt_ep *ep, unsigned int stream_id, + struct xhci_virt_ep *ep, struct xhci_td *td, enum xhci_ep_reset_type reset_type) { @@ -1110,8 +1110,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, td->status = -EPROTO; } /* reset ep, reset handler cleans up cancelled tds */ - err = xhci_handle_halted_endpoint(xhci, ep, 0, td, - reset_type); + err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type); if (err) break; ep->ep_state &= ~EP_STOP_CMD_PENDING; @@ -2183,8 +2182,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, } /* Almost same procedure as for STALL_ERROR below */ xhci_clear_hub_tt_buffer(xhci, td, ep); - xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, - EP_HARD_RESET); + xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET); return 0; case COMP_STALL_ERROR: /* @@ -2200,8 +2198,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, if (ep->ep_index != 0) xhci_clear_hub_tt_buffer(xhci, td, ep); - xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, - EP_HARD_RESET); + xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET); return 0; /* xhci_handle_halted_endpoint marked td cancelled */ default: @@ -2458,7 +2455,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, switch (trb_comp_code) { case 
COMP_SUCCESS: - ep_ring->err_count = 0; + ep->err_count = 0; /* handle success with untransferred data as short packet */ if (ep_trb != td->last_trb || remaining) { xhci_warn(xhci, "WARN Successful completion on short TX\n"); @@ -2484,14 +2481,13 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, break; case COMP_USB_TRANSACTION_ERROR: if (xhci->quirks & XHCI_NO_SOFT_RETRY || - (ep_ring->err_count++ > MAX_SOFT_RETRY) || + (ep->err_count++ > MAX_SOFT_RETRY) || le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) break; td->status = 0; - xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td, - EP_SOFT_RESET); + xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET); return 0; default: /* do nothing */ @@ -2565,8 +2561,14 @@ static int handle_tx_event(struct xhci_hcd *xhci, case COMP_USB_TRANSACTION_ERROR: case COMP_INVALID_STREAM_TYPE_ERROR: case COMP_INVALID_STREAM_ID_ERROR: - xhci_handle_halted_endpoint(xhci, ep, 0, NULL, - EP_SOFT_RESET); + xhci_dbg(xhci, "Stream transaction error ep %u no id\n", + ep_index); + if (ep->err_count++ > MAX_SOFT_RETRY) + xhci_handle_halted_endpoint(xhci, ep, NULL, + EP_HARD_RESET); + else + xhci_handle_halted_endpoint(xhci, ep, NULL, + EP_SOFT_RESET); goto cleanup; case COMP_RING_UNDERRUN: case COMP_RING_OVERRUN: @@ -2749,9 +2751,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, if (trb_comp_code == COMP_STALL_ERROR || xhci_requires_manual_halt_cleanup(xhci, ep_ctx, trb_comp_code)) { - xhci_handle_halted_endpoint(xhci, ep, - ep_ring->stream_id, - NULL, + xhci_handle_halted_endpoint(xhci, ep, NULL, EP_HARD_RESET); } goto cleanup; @@ -2844,9 +2844,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, if (trb_comp_code == COMP_STALL_ERROR || xhci_requires_manual_halt_cleanup(xhci, ep_ctx, trb_comp_code)) - xhci_handle_halted_endpoint(xhci, ep, - ep_ring->stream_id, - td, EP_HARD_RESET); + xhci_handle_halted_endpoint(xhci, ep, td, + EP_HARD_RESET); goto cleanup; } @@ -3031,6 +3030,11 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) if (!(status & STS_EINT)) goto out; + if (status & STS_HCE) { + xhci_warn(xhci, "WARNING: Host Controller Error\n"); + goto out; + } + if (status & STS_FATAL) { xhci_warn(xhci, "WARNING: Host System Error\n"); xhci_halt(xhci); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index cc084d9505cdf1f70c339149820c0a75ba0cb7b0..c9f06c5e4e9d2a7795aba70c9099e6bce937134f 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -933,6 +933,7 @@ struct xhci_virt_ep { * have to restore the device state to the previous state */ struct xhci_ring *new_ring; + unsigned int err_count; unsigned int ep_state; #define SET_DEQ_PENDING (1 << 0) #define EP_HALTED (1 << 1) /* For stall handling */ @@ -1627,7 +1628,6 @@ struct xhci_ring { * if we own the TRB (if we are the consumer). See section 4.9.1. 
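
Note on the hunks above: moving err_count from struct xhci_ring to struct xhci_virt_ep means endpoints that use streams (many rings, one endpoint) share a single retry counter, and stream transaction errors can now escalate from a soft to a hard reset instead of always soft-resetting. The retry policy itself is small enough to model standalone; this userspace sketch assumes only the MAX_SOFT_RETRY semantics visible in these hunks, with struct ep and classify_error() as illustrative stand-ins rather than driver API:

#include <stdio.h>

#define MAX_SOFT_RETRY 3	/* mirrors the driver's constant */

enum reset_type { EP_SOFT_RESET, EP_HARD_RESET };

struct ep {
	unsigned int err_count;	/* now per endpoint, not per ring */
};

/* Soft-retry a transaction error a few times, then escalate. */
static enum reset_type classify_error(struct ep *ep)
{
	if (ep->err_count++ > MAX_SOFT_RETRY)
		return EP_HARD_RESET;
	return EP_SOFT_RESET;
}

int main(void)
{
	struct ep ep = { 0 };
	int i;

	for (i = 0; i < 6; i++)	/* attempts 0-3 soft, then hard */
		printf("attempt %d -> %s reset\n", i,
		       classify_error(&ep) == EP_SOFT_RESET ? "soft" : "hard");

	ep.err_count = 0;	/* a successful transfer clears the counter */
	return 0;
}

Keeping the counter on the endpoint also lets the success path (ep->err_count = 0 above) clear the retry state once for all of that endpoint's stream rings.
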
*/ u32 cycle_state; - unsigned int err_count; unsigned int stream_id; unsigned int num_segs; unsigned int num_trbs_free; diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index 9367c12c7e6f8f5afa9d2ebabd63322a17b4d729..a5f7652db7da1b4810d2c83a1bd2b8a4362f2f24 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -298,7 +298,7 @@ config BRCM_USB_PINMAP config USB_ONBOARD_HUB tristate "Onboard USB hub support" - depends on OF || COMPILE_TEST + depends on OF help Say Y here if you want to support discrete onboard USB hubs that don't require an additional control bus for initialization, but diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c index 87067c3d6109b966fb7a35be4e0ed6da9f15c9af..6fb5140e29b9dd641be37ddd42034e4f6466a12f 100644 --- a/drivers/usb/misc/chaoskey.c +++ b/drivers/usb/misc/chaoskey.c @@ -200,7 +200,6 @@ static int chaoskey_probe(struct usb_interface *interface, dev->hwrng.name = dev->name ? dev->name : chaoskey_driver.name; dev->hwrng.read = chaoskey_rng_read; - dev->hwrng.quality = 1024; dev->hwrng_registered = (hwrng_register(&dev->hwrng) == 0); if (!dev->hwrng_registered) diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index b2f980409d0b5ead3161e29234e267e38a4416fd..8ce191e3a4c069e47530404bca7895a264cb6b50 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c @@ -1624,7 +1624,6 @@ wait:if (ftdi->disconnected > 0) { char data[30 *3 + 4]; char *d = data; int m = (sizeof(data) - 1) / 3 - 1; - int l = 0; struct u132_target *target = &ftdi->target[ed]; struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; @@ -1647,7 +1646,6 @@ wait:if (ftdi->disconnected > 0) { } else if (i++ < m) { int w = sprintf(d, " %02X", *b++); d += w; - l += w; } else d += sprintf(d, " .."); } @@ -1956,7 +1954,6 @@ static int ftdi_elan_synchronize(struct usb_ftdi *ftdi) int long_stop = 10; int retry_on_timeout = 5; int retry_on_empty = 10; - int err_count = 0; retval = ftdi_elan_flush_input_fifo(ftdi); if (retval) return retval; @@ -2051,7 +2048,6 @@ static int ftdi_elan_synchronize(struct usb_ftdi *ftdi) continue; } } else { - err_count += 1; dev_err(&ftdi->udev->dev, "error = %d\n", retval); if (read_stop-- > 0) { diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 988a8c02e7e24628cceeae85ef7074dce4198350..f9427a67789cf7e26a1c95ecb28f17087ad30d42 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -717,7 +717,7 @@ static const struct file_operations iowarrior_fops = { .llseek = noop_llseek, }; -static char *iowarrior_devnode(struct device *dev, umode_t *mode) +static char *iowarrior_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); } diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index 1c9e09138c1090353f01664b1093513dffe303aa..379cf01a6e96e901ef6565a7e6a5a9632beac015 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c @@ -245,7 +245,7 @@ static const struct file_operations tower_fops = { .llseek = tower_llseek, }; -static char *legousbtower_devnode(struct device *dev, umode_t *mode) +static char *legousbtower_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); } diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c index d63c63942af1b983ceddc9314c2a98a02d17d9fa..94e7966e199d19a2cf27e7f41de6b179d105d8d2 100644 --- 
a/drivers/usb/misc/onboard_usb_hub.c +++ b/drivers/usb/misc/onboard_usb_hub.c @@ -331,6 +331,7 @@ static struct platform_driver onboard_hub_driver = { /************************** USB driver **************************/ +#define VENDOR_ID_GENESYS 0x05e3 #define VENDOR_ID_MICROCHIP 0x0424 #define VENDOR_ID_REALTEK 0x0bda #define VENDOR_ID_TI 0x0451 @@ -407,6 +408,7 @@ static void onboard_hub_usbdev_disconnect(struct usb_device *udev) } static const struct usb_device_id onboard_hub_id_table[] = { + { USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */ { USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */ diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h index 34beab8bce3d67952af50cb6647dca82c740dd62..62129a6a1ba5a3c1eee1182461a4c256b1f86c0c 100644 --- a/drivers/usb/misc/onboard_usb_hub.h +++ b/drivers/usb/misc/onboard_usb_hub.h @@ -22,10 +22,15 @@ static const struct onboard_hub_pdata ti_tusb8041_data = { .reset_us = 3000, }; +static const struct onboard_hub_pdata genesys_gl850g_data = { + .reset_us = 3, +}; + static const struct of_device_id onboard_hub_match[] = { { .compatible = "usb424,2514", .data = &microchip_usb424_data, }, { .compatible = "usb451,8140", .data = &ti_tusb8041_data, }, { .compatible = "usb451,8142", .data = &ti_tusb8041_data, }, + { .compatible = "usb5e3,608", .data = &genesys_gl850g_data, }, { .compatible = "usbbda,411", .data = &realtek_rts5411_data, }, { .compatible = "usbbda,5411", .data = &realtek_rts5411_data, }, { .compatible = "usbbda,414", .data = &realtek_rts5411_data, }, diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig index c12cdd01541024b1704483b1d4120db6bbdeb1b0..42f81c8eaa92cfc0e7f23cc1f4ee8929008a47b1 100644 --- a/drivers/usb/misc/sisusbvga/Kconfig +++ b/drivers/usb/misc/sisusbvga/Kconfig @@ -3,7 +3,6 @@ config USB_SISUSBVGA tristate "USB 2.0 SVGA dongle support (Net2280/SiS315)" depends on (USB_MUSB_HDRC || USB_EHCI_HCD) - select FONT_SUPPORT if USB_SISUSBVGA_CON help Say Y here if you intend to attach a USB2VGA dongle based on a Net2280 and a SiS315 chip. @@ -13,36 +12,3 @@ config USB_SISUSBVGA To compile this driver as a module, choose M here; the module will be called sisusbvga. If unsure, say N. - -config USB_SISUSBVGA_CON - bool "Text console and mode switching support" if USB_SISUSBVGA - depends on VT && BROKEN - select FONT_8x16 - help - Say Y here if you want a VGA text console via the USB dongle or - want to support userland applications that utilize the driver's - display mode switching capabilities. - - Note that this console supports VGA/EGA text mode only. - - By default, the console part of the driver will not kick in when - the driver is initialized. If you want the driver to take over - one or more of the consoles, you need to specify the number of - the first and last consoles (starting at 1) as driver parameters. - - For example, if the driver is compiled as a module: - - modprobe sisusbvga first=1 last=5 - - If you use hotplug, add this to your modutils config files with - the "options" keyword, such as eg.
- - options sisusbvga first=1 last=5 - - If the driver is compiled into the kernel image, the parameters - must be given in the kernel command like, such as - - sisusbvga.first=1 sisusbvga.last=5 - - - diff --git a/drivers/usb/misc/sisusbvga/Makefile b/drivers/usb/misc/sisusbvga/Makefile index 6551bce68ac5de6690bec848386d9e5bf5688596..28aa1e6ef823f06a78d105a6434983cceac0975a 100644 --- a/drivers/usb/misc/sisusbvga/Makefile +++ b/drivers/usb/misc/sisusbvga/Makefile @@ -4,6 +4,3 @@ # obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga.o - -sisusbvga-y := sisusb.o -sisusbvga-$(CONFIG_USB_SISUSBVGA_CON) += sisusb_con.o sisusb_init.o diff --git a/drivers/usb/misc/sisusbvga/sisusb.h b/drivers/usb/misc/sisusbvga/sisusb.h index c0fb9e1c5361babf25b08332d3096cd0008f6ac9..e5b1228655d088d07c29182eff4fd521cb7dc7d8 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.h +++ b/drivers/usb/misc/sisusbvga/sisusb.h @@ -48,7 +48,6 @@ /* Include console and mode switching code? */ -#include #include #include "sisusb_struct.h" @@ -126,26 +125,6 @@ struct sisusb_usb_data { unsigned char gfxinit; /* graphics core initialized? */ unsigned short chipid, chipvendor; unsigned short chiprevision; -#ifdef CONFIG_USB_SISUSBVGA_CON - struct SiS_Private *SiS_Pr; - unsigned long scrbuf; - unsigned int scrbuf_size; - int haveconsole, con_first, con_last; - int havethisconsole[MAX_NR_CONSOLES]; - int textmodedestroyed; - unsigned int sisusb_num_columns; /* real number, not vt's idea */ - int cur_start_addr, con_rolled_over; - int sisusb_cursor_loc, bad_cursor_pos; - int sisusb_cursor_size_from; - int sisusb_cursor_size_to; - int current_font_height, current_font_512; - int font_backup_size, font_backup_height, font_backup_512; - char *font_backup; - int font_slot; - struct vc_data *sisusb_display_fg; - int is_gfx; - int con_blanked; -#endif }; #define to_sisusb_dev(d) container_of(d, struct sisusb_usb_data, kref) diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c deleted file mode 100644 index fcb95fb639e00abf2969afb7698f58cdea38bd49..0000000000000000000000000000000000000000 --- a/drivers/usb/misc/sisusbvga/sisusb_con.c +++ /dev/null @@ -1,1496 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) -/* - * sisusb - usb kernel driver for SiS315(E) based USB2VGA dongles - * - * VGA text mode console part - * - * Copyright (C) 2005 by Thomas Winischhofer, Vienna, Austria - * - * If distributed as part of the Linux kernel, this code is licensed under the - * terms of the GPL v2. - * - * Otherwise, the following license terms apply: - * - * * Redistribution and use in source and binary forms, with or without - * * modification, are permitted provided that the following conditions - * * are met: - * * 1) Redistributions of source code must retain the above copyright - * * notice, this list of conditions and the following disclaimer. - * * 2) Redistributions in binary form must reproduce the above copyright - * * notice, this list of conditions and the following disclaimer in the - * * documentation and/or other materials provided with the distribution. - * * 3) The name of the author may not be used to endorse or promote products - * * derived from this software without specific psisusbr written permission. - * * - * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESSED OR - * * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Author: Thomas Winischhofer - * - * Portions based on vgacon.c which are - * Created 28 Sep 1997 by Geert Uytterhoeven - * Rewritten by Martin Mares , July 1998 - * based on code Copyright (C) 1991, 1992 Linus Torvalds - * 1995 Jay Estabrook - * - * A note on using in_atomic() in here: We can't handle console - * calls from non-schedulable context due to our USB-dependend - * nature. For now, this driver just ignores any calls if it - * detects this state. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sisusb.h" -#include "sisusb_init.h" - -/* vc_data -> sisusb conversion table */ -static struct sisusb_usb_data *mysisusbs[MAX_NR_CONSOLES]; - -/* Forward declaration */ -static const struct consw sisusb_con; - -static inline void -sisusbcon_memsetw(u16 *s, u16 c, unsigned int count) -{ - memset16(s, c, count / 2); -} - -static inline void -sisusb_initialize(struct sisusb_usb_data *sisusb) -{ - /* Reset cursor and start address */ - if (sisusb_setidxreg(sisusb, SISCR, 0x0c, 0x00)) - return; - if (sisusb_setidxreg(sisusb, SISCR, 0x0d, 0x00)) - return; - if (sisusb_setidxreg(sisusb, SISCR, 0x0e, 0x00)) - return; - sisusb_setidxreg(sisusb, SISCR, 0x0f, 0x00); -} - -static inline void -sisusbcon_set_start_address(struct sisusb_usb_data *sisusb, struct vc_data *c) -{ - sisusb->cur_start_addr = (c->vc_visible_origin - sisusb->scrbuf) / 2; - - sisusb_setidxreg(sisusb, SISCR, 0x0c, (sisusb->cur_start_addr >> 8)); - sisusb_setidxreg(sisusb, SISCR, 0x0d, (sisusb->cur_start_addr & 0xff)); -} - -void -sisusb_set_cursor(struct sisusb_usb_data *sisusb, unsigned int location) -{ - if (sisusb->sisusb_cursor_loc == location) - return; - - sisusb->sisusb_cursor_loc = location; - - /* Hardware bug: Text cursor appears twice or not at all - * at some positions. Work around it with the cursor skew - * bits. - */ - - if ((location & 0x0007) == 0x0007) { - sisusb->bad_cursor_pos = 1; - location--; - if (sisusb_setidxregandor(sisusb, SISCR, 0x0b, 0x1f, 0x20)) - return; - } else if (sisusb->bad_cursor_pos) { - if (sisusb_setidxregand(sisusb, SISCR, 0x0b, 0x1f)) - return; - sisusb->bad_cursor_pos = 0; - } - - if (sisusb_setidxreg(sisusb, SISCR, 0x0e, (location >> 8))) - return; - sisusb_setidxreg(sisusb, SISCR, 0x0f, (location & 0xff)); -} - -static inline struct sisusb_usb_data * -sisusb_get_sisusb(unsigned short console) -{ - return mysisusbs[console]; -} - -static inline int -sisusb_sisusb_valid(struct sisusb_usb_data *sisusb) -{ - if (!sisusb->present || !sisusb->ready || !sisusb->sisusb_dev) - return 0; - - return 1; -} - -static struct sisusb_usb_data * -sisusb_get_sisusb_lock_and_check(unsigned short console) -{ - struct sisusb_usb_data *sisusb; - - /* We can't handle console calls in non-schedulable - * context due to our locks and the USB transport. - * So we simply ignore them. 
This should only affect - * some calls to printk. - */ - if (in_atomic()) - return NULL; - - sisusb = sisusb_get_sisusb(console); - if (!sisusb) - return NULL; - - mutex_lock(&sisusb->lock); - - if (!sisusb_sisusb_valid(sisusb) || - !sisusb->havethisconsole[console]) { - mutex_unlock(&sisusb->lock); - return NULL; - } - - return sisusb; -} - -static int -sisusb_is_inactive(struct vc_data *c, struct sisusb_usb_data *sisusb) -{ - if (sisusb->is_gfx || - sisusb->textmodedestroyed || - c->vc_mode != KD_TEXT) - return 1; - - return 0; -} - -/* con_startup console interface routine */ -static const char * -sisusbcon_startup(void) -{ - return "SISUSBCON"; -} - -/* con_init console interface routine */ -static void -sisusbcon_init(struct vc_data *c, int init) -{ - struct sisusb_usb_data *sisusb; - int cols, rows; - - /* This is called by do_take_over_console(), - * ie by us/under our control. It is - * only called after text mode and fonts - * are set up/restored. - */ - - sisusb = sisusb_get_sisusb(c->vc_num); - if (!sisusb) - return; - - mutex_lock(&sisusb->lock); - - if (!sisusb_sisusb_valid(sisusb)) { - mutex_unlock(&sisusb->lock); - return; - } - - c->vc_can_do_color = 1; - - c->vc_complement_mask = 0x7700; - - c->vc_hi_font_mask = sisusb->current_font_512 ? 0x0800 : 0; - - sisusb->haveconsole = 1; - - sisusb->havethisconsole[c->vc_num] = 1; - - /* We only support 640x400 */ - c->vc_scan_lines = 400; - - c->vc_font.height = sisusb->current_font_height; - - /* We only support width = 8 */ - cols = 80; - rows = c->vc_scan_lines / c->vc_font.height; - - /* Increment usage count for our sisusb. - * Doing so saves us from upping/downing - * the disconnect semaphore; we can't - * lose our sisusb until this is undone - * in con_deinit. For all other console - * interface functions, it suffices to - * use sisusb->lock and do a quick check - * of sisusb for device disconnection. - */ - kref_get(&sisusb->kref); - - if (!*c->uni_pagedict_loc) - con_set_default_unimap(c); - - mutex_unlock(&sisusb->lock); - - if (init) { - c->vc_cols = cols; - c->vc_rows = rows; - } else - vc_resize(c, cols, rows); -} - -/* con_deinit console interface routine */ -static void -sisusbcon_deinit(struct vc_data *c) -{ - struct sisusb_usb_data *sisusb; - int i; - - /* This is called by do_take_over_console() - * and others, ie not under our control. 
- */ - - sisusb = sisusb_get_sisusb(c->vc_num); - if (!sisusb) - return; - - mutex_lock(&sisusb->lock); - - /* Clear ourselves in mysisusbs */ - mysisusbs[c->vc_num] = NULL; - - sisusb->havethisconsole[c->vc_num] = 0; - - /* Free our font buffer if all consoles are gone */ - if (sisusb->font_backup) { - for(i = 0; i < MAX_NR_CONSOLES; i++) { - if (sisusb->havethisconsole[c->vc_num]) - break; - } - if (i == MAX_NR_CONSOLES) { - vfree(sisusb->font_backup); - sisusb->font_backup = NULL; - } - } - - mutex_unlock(&sisusb->lock); - - /* decrement the usage count on our sisusb */ - kref_put(&sisusb->kref, sisusb_delete); -} - -/* interface routine */ -static u8 -sisusbcon_build_attr(struct vc_data *c, u8 color, enum vc_intensity intensity, - bool blink, bool underline, bool reverse, - bool unused) -{ - u8 attr = color; - - if (underline) - attr = (attr & 0xf0) | c->vc_ulcolor; - else if (intensity == VCI_HALF_BRIGHT) - attr = (attr & 0xf0) | c->vc_halfcolor; - - if (reverse) - attr = ((attr) & 0x88) | - ((((attr) >> 4) | - ((attr) << 4)) & 0x77); - - if (blink) - attr ^= 0x80; - - if (intensity == VCI_BOLD) - attr ^= 0x08; - - return attr; -} - -/* Interface routine */ -static void -sisusbcon_invert_region(struct vc_data *vc, u16 *p, int count) -{ - /* Invert a region. This is called with a pointer - * to the console's internal screen buffer. So we - * simply do the inversion there and rely on - * a call to putc(s) to update the real screen. - */ - - while (count--) { - u16 a = *p; - - *p++ = ((a) & 0x88ff) | - (((a) & 0x7000) >> 4) | - (((a) & 0x0700) << 4); - } -} - -static inline void *sisusb_vaddr(const struct sisusb_usb_data *sisusb, - const struct vc_data *c, unsigned int x, unsigned int y) -{ - return (u16 *)c->vc_origin + y * sisusb->sisusb_num_columns + x; -} - -static inline unsigned long sisusb_haddr(const struct sisusb_usb_data *sisusb, - const struct vc_data *c, unsigned int x, unsigned int y) -{ - unsigned long offset = c->vc_origin - sisusb->scrbuf; - - /* 2 bytes per each character */ - offset += 2 * (y * sisusb->sisusb_num_columns + x); - - return sisusb->vrambase + offset; -} - -/* Interface routine */ -static void -sisusbcon_putc(struct vc_data *c, int ch, int y, int x) -{ - struct sisusb_usb_data *sisusb; - - sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); - if (!sisusb) - return; - - /* sisusb->lock is down */ - if (sisusb_is_inactive(c, sisusb)) { - mutex_unlock(&sisusb->lock); - return; - } - - sisusb_copy_memory(sisusb, sisusb_vaddr(sisusb, c, x, y), - sisusb_haddr(sisusb, c, x, y), 2); - - mutex_unlock(&sisusb->lock); -} - -/* Interface routine */ -static void -sisusbcon_putcs(struct vc_data *c, const unsigned short *s, - int count, int y, int x) -{ - struct sisusb_usb_data *sisusb; - - sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); - if (!sisusb) - return; - - /* sisusb->lock is down */ - - /* Need to put the characters into the buffer ourselves, - * because the vt does this AFTER calling us. 
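
The comment above is the key contract in the code being removed: the vt layer copies characters into its own buffer only after invoking the console driver, so putc/putcs must place the characters in the shadow buffer themselves before mirroring them out over USB. A compact userspace sketch of that write-through pattern, where copy_to_remote() stands in for sisusb_copy_memory() and all sizes and names are illustrative:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define COLS 80
#define ROWS 25

static uint16_t shadow[ROWS * COLS];	/* local copy of the text cells */
static uint16_t remote[ROWS * COLS];	/* stands in for the USB-side VRAM */

/* Stand-in for sisusb_copy_memory(): mirror a byte range downstream. */
static void copy_to_remote(size_t off, size_t len)
{
	memcpy((char *)remote + off, (char *)shadow + off, len);
}

static void putcs(const uint16_t *s, int count, int y, int x)
{
	size_t off = 2 * (size_t)(y * COLS + x);

	/* The vt layer fills its buffer only AFTER calling us, so the
	 * driver must place the characters in the shadow itself. */
	memcpy((char *)shadow + off, s, count * 2);
	copy_to_remote(off, count * 2);
}

int main(void)
{
	const uint16_t hello[5] = { 'H', 'e', 'l', 'l', 'o' };

	putcs(hello, 5, 0, 0);
	printf("remote cell 0 holds '%c'\n", (char)remote[0]);
	return 0;
}
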
- */ - - memcpy(sisusb_vaddr(sisusb, c, x, y), s, count * 2); - - if (sisusb_is_inactive(c, sisusb)) { - mutex_unlock(&sisusb->lock); - return; - } - - sisusb_copy_memory(sisusb, sisusb_vaddr(sisusb, c, x, y), - sisusb_haddr(sisusb, c, x, y), count * 2); - - mutex_unlock(&sisusb->lock); -} - -/* Interface routine */ -static void -sisusbcon_clear(struct vc_data *c, int y, int x, int height, int width) -{ - struct sisusb_usb_data *sisusb; - u16 eattr = c->vc_video_erase_char; - int i, length, cols; - u16 *dest; - - if (width <= 0 || height <= 0) - return; - - sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); - if (!sisusb) - return; - - /* sisusb->lock is down */ - - /* Need to clear buffer ourselves, because the vt does - * this AFTER calling us. - */ - - dest = sisusb_vaddr(sisusb, c, x, y); - - cols = sisusb->sisusb_num_columns; - - if (width > cols) - width = cols; - - if (x == 0 && width >= c->vc_cols) { - - sisusbcon_memsetw(dest, eattr, height * cols * 2); - - } else { - - for (i = height; i > 0; i--, dest += cols) - sisusbcon_memsetw(dest, eattr, width * 2); - - } - - if (sisusb_is_inactive(c, sisusb)) { - mutex_unlock(&sisusb->lock); - return; - } - - length = ((height * cols) - x - (cols - width - x)) * 2; - - - sisusb_copy_memory(sisusb, sisusb_vaddr(sisusb, c, x, y), - sisusb_haddr(sisusb, c, x, y), length); - - mutex_unlock(&sisusb->lock); -} - -/* interface routine */ -static int -sisusbcon_switch(struct vc_data *c) -{ - struct sisusb_usb_data *sisusb; - int length; - - /* Returnvalue 0 means we have fully restored screen, - * and vt doesn't need to call do_update_region(). - * Returnvalue != 0 naturally means the opposite. - */ - - sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); - if (!sisusb) - return 0; - - /* sisusb->lock is down */ - - /* Don't write to screen if in gfx mode */ - if (sisusb_is_inactive(c, sisusb)) { - mutex_unlock(&sisusb->lock); - return 0; - } - - /* That really should not happen. It would mean we are - * being called while the vc is using its private buffer - * as origin. - */ - if (c->vc_origin == (unsigned long)c->vc_screenbuf) { - mutex_unlock(&sisusb->lock); - dev_dbg(&sisusb->sisusb_dev->dev, "ASSERT ORIGIN != SCREENBUF!\n"); - return 0; - } - - /* Check that we don't copy too much */ - length = min((int)c->vc_screenbuf_size, - (int)(sisusb->scrbuf + sisusb->scrbuf_size - c->vc_origin)); - - /* Restore the screen contents */ - memcpy((u16 *)c->vc_origin, (u16 *)c->vc_screenbuf, length); - - sisusb_copy_memory(sisusb, (u8 *)c->vc_origin, - sisusb_haddr(sisusb, c, 0, 0), length); - - mutex_unlock(&sisusb->lock); - - return 0; -} - -/* interface routine */ -static void -sisusbcon_save_screen(struct vc_data *c) -{ - struct sisusb_usb_data *sisusb; - int length; - - /* Save the current screen contents to vc's private - * buffer. 
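
Both sisusbcon_switch() above and sisusbcon_save_screen() below clamp the copy length with min() so that an origin scrolled near the end of the shadow buffer can never cause an out-of-bounds copy. A tiny standalone illustration of that clamp, with invented numbers:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	long screenbuf_size = 4000;		/* vc's private buffer */
	long scrbuf = 0, scrbuf_size = 32768;	/* the 32K shadow buffer */
	long origin = scrbuf + 30000;		/* scrolled near the end */

	/* Copy whichever is smaller: the vc buffer, or what is left
	 * of the shadow buffer past the current origin. */
	long length = MIN(screenbuf_size, scrbuf + scrbuf_size - origin);

	printf("copy %ld bytes, not %ld\n", length, screenbuf_size);
	return 0;
}
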
- */ - - sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); - if (!sisusb) - return; - - /* sisusb->lock is down */ - - if (sisusb_is_inactive(c, sisusb)) { - mutex_unlock(&sisusb->lock); - return; - } - - /* Check that we don't copy too much */ - length = min((int)c->vc_screenbuf_size, - (int)(sisusb->scrbuf + sisusb->scrbuf_size - c->vc_origin)); - - /* Save the screen contents to vc's private buffer */ - memcpy((u16 *)c->vc_screenbuf, (u16 *)c->vc_origin, length); - - mutex_unlock(&sisusb->lock); -} - -/* interface routine */ -static void -sisusbcon_set_palette(struct vc_data *c, const unsigned char *table) -{ - struct sisusb_usb_data *sisusb; - int i, j; - - /* Return value not used by vt */ - - if (!con_is_visible(c)) - return; - - sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); - if (!sisusb) - return; - - /* sisusb->lock is down */ - - if (sisusb_is_inactive(c, sisusb)) { - mutex_unlock(&sisusb->lock); - return; - } - - for (i = j = 0; i < 16; i++) { - if (sisusb_setreg(sisusb, SISCOLIDX, table[i])) - break; - if (sisusb_setreg(sisusb, SISCOLDATA, c->vc_palette[j++] >> 2)) - break; - if (sisusb_setreg(sisusb, SISCOLDATA, c->vc_palette[j++] >> 2)) - break; - if (sisusb_setreg(sisusb, SISCOLDATA, c->vc_palette[j++] >> 2)) - break; - } - - mutex_unlock(&sisusb->lock); -} - -/* interface routine */ -static int -sisusbcon_blank(struct vc_data *c, int blank, int mode_switch) -{ - struct sisusb_usb_data *sisusb; - u8 sr1, cr17, pmreg, cr63; - int ret = 0; - - sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); - if (!sisusb) - return 0; - - /* sisusb->lock is down */ - - if (mode_switch) - sisusb->is_gfx = blank ? 1 : 0; - - if (sisusb_is_inactive(c, sisusb)) { - mutex_unlock(&sisusb->lock); - return 0; - } - - switch (blank) { - - case 1: /* Normal blanking: Clear screen */ - case -1: - sisusbcon_memsetw((u16 *)c->vc_origin, - c->vc_video_erase_char, - c->vc_screenbuf_size); - sisusb_copy_memory(sisusb, (u8 *)c->vc_origin, - sisusb_haddr(sisusb, c, 0, 0), - c->vc_screenbuf_size); - sisusb->con_blanked = 1; - ret = 1; - break; - - default: /* VESA blanking */ - switch (blank) { - case 0: /* Unblank */ - sr1 = 0x00; - cr17 = 0x80; - pmreg = 0x00; - cr63 = 0x00; - ret = 1; - sisusb->con_blanked = 0; - break; - case VESA_VSYNC_SUSPEND + 1: - sr1 = 0x20; - cr17 = 0x80; - pmreg = 0x80; - cr63 = 0x40; - break; - case VESA_HSYNC_SUSPEND + 1: - sr1 = 0x20; - cr17 = 0x80; - pmreg = 0x40; - cr63 = 0x40; - break; - case VESA_POWERDOWN + 1: - sr1 = 0x20; - cr17 = 0x00; - pmreg = 0xc0; - cr63 = 0x40; - break; - default: - mutex_unlock(&sisusb->lock); - return -EINVAL; - } - - sisusb_setidxregandor(sisusb, SISSR, 0x01, ~0x20, sr1); - sisusb_setidxregandor(sisusb, SISCR, 0x17, 0x7f, cr17); - sisusb_setidxregandor(sisusb, SISSR, 0x1f, 0x3f, pmreg); - sisusb_setidxregandor(sisusb, SISCR, 0x63, 0xbf, cr63); - - } - - mutex_unlock(&sisusb->lock); - - return ret; -} - -/* interface routine */ -static void -sisusbcon_scrolldelta(struct vc_data *c, int lines) -{ - struct sisusb_usb_data *sisusb; - - sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); - if (!sisusb) - return; - - /* sisusb->lock is down */ - - if (sisusb_is_inactive(c, sisusb)) { - mutex_unlock(&sisusb->lock); - return; - } - - vc_scrolldelta_helper(c, lines, sisusb->con_rolled_over, - (void *)sisusb->scrbuf, sisusb->scrbuf_size); - - sisusbcon_set_start_address(sisusb, c); - - mutex_unlock(&sisusb->lock); -} - -/* Interface routine */ -static void -sisusbcon_cursor(struct vc_data *c, int mode) -{ - struct sisusb_usb_data *sisusb; - 
int from, to, baseline; - - sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); - if (!sisusb) - return; - - /* sisusb->lock is down */ - - if (sisusb_is_inactive(c, sisusb)) { - mutex_unlock(&sisusb->lock); - return; - } - - if (c->vc_origin != c->vc_visible_origin) { - c->vc_visible_origin = c->vc_origin; - sisusbcon_set_start_address(sisusb, c); - } - - if (mode == CM_ERASE) { - sisusb_setidxregor(sisusb, SISCR, 0x0a, 0x20); - sisusb->sisusb_cursor_size_to = -1; - mutex_unlock(&sisusb->lock); - return; - } - - sisusb_set_cursor(sisusb, (c->vc_pos - sisusb->scrbuf) / 2); - - baseline = c->vc_font.height - (c->vc_font.height < 10 ? 1 : 2); - - switch (CUR_SIZE(c->vc_cursor_type)) { - case CUR_BLOCK: from = 1; - to = c->vc_font.height; - break; - case CUR_TWO_THIRDS: from = c->vc_font.height / 3; - to = baseline; - break; - case CUR_LOWER_HALF: from = c->vc_font.height / 2; - to = baseline; - break; - case CUR_LOWER_THIRD: from = (c->vc_font.height * 2) / 3; - to = baseline; - break; - case CUR_NONE: from = 31; - to = 30; - break; - default: - case CUR_UNDERLINE: from = baseline - 1; - to = baseline; - break; - } - - if (sisusb->sisusb_cursor_size_from != from || - sisusb->sisusb_cursor_size_to != to) { - - sisusb_setidxreg(sisusb, SISCR, 0x0a, from); - sisusb_setidxregandor(sisusb, SISCR, 0x0b, 0xe0, to); - - sisusb->sisusb_cursor_size_from = from; - sisusb->sisusb_cursor_size_to = to; - } - - mutex_unlock(&sisusb->lock); -} - -static bool -sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb, - unsigned int t, unsigned int b, enum con_scroll dir, - unsigned int lines) -{ - int cols = sisusb->sisusb_num_columns; - int length = ((b - t) * cols) * 2; - u16 eattr = c->vc_video_erase_char; - - /* sisusb->lock is down */ - - /* Scroll an area which does not match the - * visible screen's dimensions. This needs - * to be done separately, as it does not - * use hardware panning. - */ - - switch (dir) { - - case SM_UP: - memmove(sisusb_vaddr(sisusb, c, 0, t), - sisusb_vaddr(sisusb, c, 0, t + lines), - (b - t - lines) * cols * 2); - sisusbcon_memsetw(sisusb_vaddr(sisusb, c, 0, b - lines), - eattr, lines * cols * 2); - break; - - case SM_DOWN: - memmove(sisusb_vaddr(sisusb, c, 0, t + lines), - sisusb_vaddr(sisusb, c, 0, t), - (b - t - lines) * cols * 2); - sisusbcon_memsetw(sisusb_vaddr(sisusb, c, 0, t), eattr, - lines * cols * 2); - break; - } - - sisusb_copy_memory(sisusb, sisusb_vaddr(sisusb, c, 0, t), - sisusb_haddr(sisusb, c, 0, t), length); - - mutex_unlock(&sisusb->lock); - - return true; -} - -/* Interface routine */ -static bool -sisusbcon_scroll(struct vc_data *c, unsigned int t, unsigned int b, - enum con_scroll dir, unsigned int lines) -{ - struct sisusb_usb_data *sisusb; - u16 eattr = c->vc_video_erase_char; - int copyall = 0; - unsigned long oldorigin; - unsigned int delta = lines * c->vc_size_row; - - /* Returning != 0 means we have done the scrolling successfully. - * Returning 0 makes vt do the scrolling on its own. - * Note that con_scroll is only called if the console is - * visible. In that case, the origin should be our buffer, - * not the vt's private one. 
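
The scroll strategy described in the comment above is worth unpacking: while the console is visible, its origin lives inside the driver's large shadow buffer, so SM_UP usually just advances the origin (hardware panning via the start-address registers), and data is only copied back to the start of the buffer when the visible window would run off the end. A rough userspace model of the wrap logic follows; the sizes are made up and the USB mirroring is omitted:

#include <stdio.h>
#include <string.h>

#define BUF 1024	/* shadow buffer with scrollback headroom */
#define SCREEN 256	/* size of the visible window */

static char scrbuf[BUF];
static size_t origin;	/* offset of the visible screen in scrbuf */

static void scroll_up(size_t delta)
{
	if (origin + SCREEN + delta > BUF) {
		/* wrap: copy the still-visible part back to the start */
		memmove(scrbuf, scrbuf + origin + delta, SCREEN - delta);
		origin = 0;
	} else {
		origin += delta;	/* cheap path: pan, no copying */
	}
	memset(scrbuf + origin + SCREEN - delta, ' ', delta);	/* blank new lines */
}

int main(void)
{
	int i;

	memset(scrbuf, ' ', BUF);
	for (i = 0; i < 13; i++)
		scroll_up(64);
	/* the 13th scroll runs out of headroom and wraps to 0 */
	printf("origin after 13 scrolls: %zu\n", origin);
	return 0;
}
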
- */ - - if (!lines) - return true; - - sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); - if (!sisusb) - return false; - - /* sisusb->lock is down */ - - if (sisusb_is_inactive(c, sisusb)) { - mutex_unlock(&sisusb->lock); - return false; - } - - /* Special case */ - if (t || b != c->vc_rows) - return sisusbcon_scroll_area(c, sisusb, t, b, dir, lines); - - if (c->vc_origin != c->vc_visible_origin) { - c->vc_visible_origin = c->vc_origin; - sisusbcon_set_start_address(sisusb, c); - } - - /* limit amount to maximum realistic size */ - if (lines > c->vc_rows) - lines = c->vc_rows; - - oldorigin = c->vc_origin; - - switch (dir) { - - case SM_UP: - - if (c->vc_scr_end + delta >= - sisusb->scrbuf + sisusb->scrbuf_size) { - memcpy((u16 *)sisusb->scrbuf, - (u16 *)(oldorigin + delta), - c->vc_screenbuf_size - delta); - c->vc_origin = sisusb->scrbuf; - sisusb->con_rolled_over = oldorigin - sisusb->scrbuf; - copyall = 1; - } else - c->vc_origin += delta; - - sisusbcon_memsetw( - (u16 *)(c->vc_origin + c->vc_screenbuf_size - delta), - eattr, delta); - - break; - - case SM_DOWN: - - if (oldorigin - delta < sisusb->scrbuf) { - memmove((void *)sisusb->scrbuf + sisusb->scrbuf_size - - c->vc_screenbuf_size + delta, - (u16 *)oldorigin, - c->vc_screenbuf_size - delta); - c->vc_origin = sisusb->scrbuf + - sisusb->scrbuf_size - - c->vc_screenbuf_size; - sisusb->con_rolled_over = 0; - copyall = 1; - } else - c->vc_origin -= delta; - - c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; - - scr_memsetw((u16 *)(c->vc_origin), eattr, delta); - - break; - } - - if (copyall) - sisusb_copy_memory(sisusb, - (u8 *)c->vc_origin, - sisusb_haddr(sisusb, c, 0, 0), - c->vc_screenbuf_size); - else if (dir == SM_UP) - sisusb_copy_memory(sisusb, - (u8 *)c->vc_origin + c->vc_screenbuf_size - delta, - sisusb_haddr(sisusb, c, 0, 0) + - c->vc_screenbuf_size - delta, - delta); - else - sisusb_copy_memory(sisusb, - (u8 *)c->vc_origin, - sisusb_haddr(sisusb, c, 0, 0), - delta); - - c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; - c->vc_visible_origin = c->vc_origin; - - sisusbcon_set_start_address(sisusb, c); - - c->vc_pos = c->vc_pos - oldorigin + c->vc_origin; - - mutex_unlock(&sisusb->lock); - - return true; -} - -/* Interface routine */ -static int -sisusbcon_set_origin(struct vc_data *c) -{ - struct sisusb_usb_data *sisusb; - - /* Returning != 0 means we were successful. - * Returning 0 will vt make to use its own - * screenbuffer as the origin. - */ - - sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); - if (!sisusb) - return 0; - - /* sisusb->lock is down */ - - if (sisusb_is_inactive(c, sisusb) || sisusb->con_blanked) { - mutex_unlock(&sisusb->lock); - return 0; - } - - c->vc_origin = c->vc_visible_origin = sisusb->scrbuf; - - sisusbcon_set_start_address(sisusb, c); - - sisusb->con_rolled_over = 0; - - mutex_unlock(&sisusb->lock); - - return true; -} - -/* Interface routine */ -static int -sisusbcon_resize(struct vc_data *c, unsigned int newcols, unsigned int newrows, - unsigned int user) -{ - struct sisusb_usb_data *sisusb; - int fh; - - sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); - if (!sisusb) - return -ENODEV; - - fh = sisusb->current_font_height; - - mutex_unlock(&sisusb->lock); - - /* We are quite unflexible as regards resizing. The vt code - * handles sizes where the line length isn't equal the pitch - * quite badly. As regards the rows, our panning tricks only - * work well if the number of rows equals the visible number - * of rows. 
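
The resize rule stated above reduces to two checks: columns must be exactly 80 (8-pixel-wide fonts only), and the requested row count must equal scan_lines / font_height. A minimal standalone version of that validation, mirroring the -EINVAL result of the check that follows:

#include <stdio.h>
#include <errno.h>

/* Only 80 columns are supported, and the rows must match what the
 * fixed scan-line count yields for the current font height. */
static int con_resize(unsigned int cols, unsigned int rows,
		      unsigned int scan_lines, unsigned int font_height)
{
	if (cols != 80 || scan_lines / font_height != rows)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", con_resize(80, 25, 400, 16));	/* 0: 400 / 16 == 25 */
	printf("%d\n", con_resize(132, 43, 400, 16));	/* -EINVAL */
	return 0;
}
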
- */ - - if (newcols != 80 || c->vc_scan_lines / fh != newrows) - return -EINVAL; - - return 0; -} - -int -sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot, - u8 *arg, int cmapsz, int ch512, int dorecalc, - struct vc_data *c, int fh, int uplock) -{ - int font_select = 0x00, i, err = 0; - u32 offset = 0; - u8 dummy; - - /* sisusb->lock is down */ - - /* - * The default font is kept in slot 0. - * A user font is loaded in slot 2 (256 ch) - * or 2+3 (512 ch). - */ - - if ((slot != 0 && slot != 2) || !fh) { - if (uplock) - mutex_unlock(&sisusb->lock); - return -EINVAL; - } - - if (set) - sisusb->font_slot = slot; - - /* Default font is always 256 */ - if (slot == 0) - ch512 = 0; - else - offset = 4 * cmapsz; - - font_select = (slot == 0) ? 0x00 : (ch512 ? 0x0e : 0x0a); - - err |= sisusb_setidxreg(sisusb, SISSR, 0x00, 0x01); /* Reset */ - err |= sisusb_setidxreg(sisusb, SISSR, 0x02, 0x04); /* Write to plane 2 */ - err |= sisusb_setidxreg(sisusb, SISSR, 0x04, 0x07); /* Memory mode a0-bf */ - err |= sisusb_setidxreg(sisusb, SISSR, 0x00, 0x03); /* Reset */ - - if (err) - goto font_op_error; - - err |= sisusb_setidxreg(sisusb, SISGR, 0x04, 0x03); /* Select plane read 2 */ - err |= sisusb_setidxreg(sisusb, SISGR, 0x05, 0x00); /* Disable odd/even */ - err |= sisusb_setidxreg(sisusb, SISGR, 0x06, 0x00); /* Address range a0-bf */ - - if (err) - goto font_op_error; - - if (arg) { - if (set) - for (i = 0; i < cmapsz; i++) { - err |= sisusb_writeb(sisusb, - sisusb->vrambase + offset + i, - arg[i]); - if (err) - break; - } - else - for (i = 0; i < cmapsz; i++) { - err |= sisusb_readb(sisusb, - sisusb->vrambase + offset + i, - &arg[i]); - if (err) - break; - } - - /* - * In 512-character mode, the character map is not contiguous if - * we want to remain EGA compatible -- which we do - */ - - if (ch512) { - if (set) - for (i = 0; i < cmapsz; i++) { - err |= sisusb_writeb(sisusb, - sisusb->vrambase + offset + - (2 * cmapsz) + i, - arg[cmapsz + i]); - if (err) - break; - } - else - for (i = 0; i < cmapsz; i++) { - err |= sisusb_readb(sisusb, - sisusb->vrambase + offset + - (2 * cmapsz) + i, - &arg[cmapsz + i]); - if (err) - break; - } - } - } - - if (err) - goto font_op_error; - - err |= sisusb_setidxreg(sisusb, SISSR, 0x00, 0x01); /* Reset */ - err |= sisusb_setidxreg(sisusb, SISSR, 0x02, 0x03); /* Write to planes 0+1 */ - err |= sisusb_setidxreg(sisusb, SISSR, 0x04, 0x03); /* Memory mode a0-bf */ - if (set) - sisusb_setidxreg(sisusb, SISSR, 0x03, font_select); - err |= sisusb_setidxreg(sisusb, SISSR, 0x00, 0x03); /* Reset end */ - - if (err) - goto font_op_error; - - err |= sisusb_setidxreg(sisusb, SISGR, 0x04, 0x00); /* Select plane read 0 */ - err |= sisusb_setidxreg(sisusb, SISGR, 0x05, 0x10); /* Enable odd/even */ - err |= sisusb_setidxreg(sisusb, SISGR, 0x06, 0x06); /* Address range b8-bf */ - - if (err) - goto font_op_error; - - if ((set) && (ch512 != sisusb->current_font_512)) { - - /* Font is shared among all our consoles. - * And so is the hi_font_mask. - */ - for (i = 0; i < MAX_NR_CONSOLES; i++) { - struct vc_data *d = vc_cons[i].d; - if (d && d->vc_sw == &sisusb_con) - d->vc_hi_font_mask = ch512 ? 0x0800 : 0; - } - - sisusb->current_font_512 = ch512; - - /* color plane enable register: - 256-char: enable intensity bit - 512-char: disable intensity bit */ - sisusb_getreg(sisusb, SISINPSTAT, &dummy); - sisusb_setreg(sisusb, SISAR, 0x12); - sisusb_setreg(sisusb, SISAR, ch512 ? 
0x07 : 0x0f); - - sisusb_getreg(sisusb, SISINPSTAT, &dummy); - sisusb_setreg(sisusb, SISAR, 0x20); - sisusb_getreg(sisusb, SISINPSTAT, &dummy); - } - - if (dorecalc) { - - /* - * Adjust the screen to fit a font of a certain height - */ - - unsigned char ovr, vde, fsr; - int rows = 0, maxscan = 0; - - if (c) { - - /* Number of video rows */ - rows = c->vc_scan_lines / fh; - /* Scan lines to actually display-1 */ - maxscan = rows * fh - 1; - - /*printk(KERN_DEBUG "sisusb recalc rows %d maxscan %d fh %d sl %d\n", - rows, maxscan, fh, c->vc_scan_lines);*/ - - sisusb_getidxreg(sisusb, SISCR, 0x07, &ovr); - vde = maxscan & 0xff; - ovr = (ovr & 0xbd) | - ((maxscan & 0x100) >> 7) | - ((maxscan & 0x200) >> 3); - sisusb_setidxreg(sisusb, SISCR, 0x07, ovr); - sisusb_setidxreg(sisusb, SISCR, 0x12, vde); - - } - - sisusb_getidxreg(sisusb, SISCR, 0x09, &fsr); - fsr = (fsr & 0xe0) | (fh - 1); - sisusb_setidxreg(sisusb, SISCR, 0x09, fsr); - sisusb->current_font_height = fh; - - sisusb->sisusb_cursor_size_from = -1; - sisusb->sisusb_cursor_size_to = -1; - - } - - if (uplock) - mutex_unlock(&sisusb->lock); - - if (dorecalc && c) { - int rows = c->vc_scan_lines / fh; - - /* Now adjust our consoles' size */ - - for (i = 0; i < MAX_NR_CONSOLES; i++) { - struct vc_data *vc = vc_cons[i].d; - - if (vc && vc->vc_sw == &sisusb_con) { - if (con_is_visible(vc)) { - vc->vc_sw->con_cursor(vc, CM_DRAW); - } - vc->vc_font.height = fh; - vc_resize(vc, 0, rows); - } - } - } - - return 0; - -font_op_error: - if (uplock) - mutex_unlock(&sisusb->lock); - - return -EIO; -} - -/* Interface routine */ -static int -sisusbcon_font_set(struct vc_data *c, struct console_font *font, - unsigned int flags) -{ - struct sisusb_usb_data *sisusb; - unsigned charcount = font->charcount; - - if (font->width != 8 || (charcount != 256 && charcount != 512)) - return -EINVAL; - - sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); - if (!sisusb) - return -ENODEV; - - /* sisusb->lock is down */ - - /* Save the user-provided font into a buffer. This - * is used for restoring text mode after quitting - * from X and for the con_getfont routine. - */ - if (sisusb->font_backup) { - if (sisusb->font_backup_size < charcount) { - vfree(sisusb->font_backup); - sisusb->font_backup = NULL; - } - } - - if (!sisusb->font_backup) - sisusb->font_backup = vmalloc(array_size(charcount, 32)); - - if (sisusb->font_backup) { - memcpy(sisusb->font_backup, font->data, array_size(charcount, 32)); - sisusb->font_backup_size = charcount; - sisusb->font_backup_height = font->height; - sisusb->font_backup_512 = (charcount == 512) ? 1 : 0; - } - - /* do_font_op ups sisusb->lock */ - - return sisusbcon_do_font_op(sisusb, 1, 2, font->data, - 8192, (charcount == 512), - (!(flags & KD_FONT_FLAG_DONT_RECALC)) ? 
1 : 0, - c, font->height, 1); -} - -/* Interface routine */ -static int -sisusbcon_font_get(struct vc_data *c, struct console_font *font) -{ - struct sisusb_usb_data *sisusb; - - sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); - if (!sisusb) - return -ENODEV; - - /* sisusb->lock is down */ - - font->width = 8; - font->height = c->vc_font.height; - font->charcount = 256; - - if (!font->data) { - mutex_unlock(&sisusb->lock); - return 0; - } - - if (!sisusb->font_backup) { - mutex_unlock(&sisusb->lock); - return -ENODEV; - } - - /* Copy 256 chars only, like vgacon */ - memcpy(font->data, sisusb->font_backup, 256 * 32); - - mutex_unlock(&sisusb->lock); - - return 0; -} - -/* - * The console `switch' structure for the sisusb console - */ - -static const struct consw sisusb_con = { - .owner = THIS_MODULE, - .con_startup = sisusbcon_startup, - .con_init = sisusbcon_init, - .con_deinit = sisusbcon_deinit, - .con_clear = sisusbcon_clear, - .con_putc = sisusbcon_putc, - .con_putcs = sisusbcon_putcs, - .con_cursor = sisusbcon_cursor, - .con_scroll = sisusbcon_scroll, - .con_switch = sisusbcon_switch, - .con_blank = sisusbcon_blank, - .con_font_set = sisusbcon_font_set, - .con_font_get = sisusbcon_font_get, - .con_set_palette = sisusbcon_set_palette, - .con_scrolldelta = sisusbcon_scrolldelta, - .con_build_attr = sisusbcon_build_attr, - .con_invert_region = sisusbcon_invert_region, - .con_set_origin = sisusbcon_set_origin, - .con_save_screen = sisusbcon_save_screen, - .con_resize = sisusbcon_resize, -}; - -/* Our very own dummy console driver */ - -static const char *sisusbdummycon_startup(void) -{ - return "SISUSBVGADUMMY"; -} - -static void sisusbdummycon_init(struct vc_data *vc, int init) -{ - vc->vc_can_do_color = 1; - if (init) { - vc->vc_cols = 80; - vc->vc_rows = 25; - } else - vc_resize(vc, 80, 25); -} - -static void sisusbdummycon_deinit(struct vc_data *vc) { } -static void sisusbdummycon_clear(struct vc_data *vc, int sy, int sx, - int height, int width) { } -static void sisusbdummycon_putc(struct vc_data *vc, int c, int ypos, - int xpos) { } -static void sisusbdummycon_putcs(struct vc_data *vc, const unsigned short *s, - int count, int ypos, int xpos) { } -static void sisusbdummycon_cursor(struct vc_data *vc, int mode) { } - -static bool sisusbdummycon_scroll(struct vc_data *vc, unsigned int top, - unsigned int bottom, enum con_scroll dir, - unsigned int lines) -{ - return false; -} - -static int sisusbdummycon_switch(struct vc_data *vc) -{ - return 0; -} - -static int sisusbdummycon_blank(struct vc_data *vc, int blank, int mode_switch) -{ - return 0; -} - -static const struct consw sisusb_dummy_con = { - .owner = THIS_MODULE, - .con_startup = sisusbdummycon_startup, - .con_init = sisusbdummycon_init, - .con_deinit = sisusbdummycon_deinit, - .con_clear = sisusbdummycon_clear, - .con_putc = sisusbdummycon_putc, - .con_putcs = sisusbdummycon_putcs, - .con_cursor = sisusbdummycon_cursor, - .con_scroll = sisusbdummycon_scroll, - .con_switch = sisusbdummycon_switch, - .con_blank = sisusbdummycon_blank, -}; - -int -sisusb_console_init(struct sisusb_usb_data *sisusb, int first, int last) -{ - int i, ret; - - mutex_lock(&sisusb->lock); - - /* Erm.. 
that should not happen */ - if (sisusb->haveconsole || !sisusb->SiS_Pr) { - mutex_unlock(&sisusb->lock); - return 1; - } - - sisusb->con_first = first; - sisusb->con_last = last; - - if (first > last || - first > MAX_NR_CONSOLES || - last > MAX_NR_CONSOLES) { - mutex_unlock(&sisusb->lock); - return 1; - } - - /* If gfxcore not initialized or no consoles given, quit graciously */ - if (!sisusb->gfxinit || first < 1 || last < 1) { - mutex_unlock(&sisusb->lock); - return 0; - } - - sisusb->sisusb_cursor_loc = -1; - sisusb->sisusb_cursor_size_from = -1; - sisusb->sisusb_cursor_size_to = -1; - - /* Set up text mode (and upload default font) */ - if (sisusb_reset_text_mode(sisusb, 1)) { - mutex_unlock(&sisusb->lock); - dev_err(&sisusb->sisusb_dev->dev, "Failed to set up text mode\n"); - return 1; - } - - /* Initialize some gfx registers */ - sisusb_initialize(sisusb); - - for (i = first - 1; i <= last - 1; i++) { - /* Save sisusb for our interface routines */ - mysisusbs[i] = sisusb; - } - - /* Initial console setup */ - sisusb->sisusb_num_columns = 80; - - /* Use a 32K buffer (matches b8000-bffff area) */ - sisusb->scrbuf_size = 32 * 1024; - - /* Allocate screen buffer */ - if (!(sisusb->scrbuf = (unsigned long)vmalloc(sisusb->scrbuf_size))) { - mutex_unlock(&sisusb->lock); - dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate screen buffer\n"); - return 1; - } - - mutex_unlock(&sisusb->lock); - - /* Now grab the desired console(s) */ - console_lock(); - ret = do_take_over_console(&sisusb_con, first - 1, last - 1, 0); - console_unlock(); - if (!ret) - sisusb->haveconsole = 1; - else { - for (i = first - 1; i <= last - 1; i++) - mysisusbs[i] = NULL; - } - - return ret; -} - -void -sisusb_console_exit(struct sisusb_usb_data *sisusb) -{ - int i; - - /* This is called if the device is disconnected - * and while disconnect and lock semaphores - * are up. This should be save because we - * can't lose our sisusb any other way but by - * disconnection (and hence, the disconnect - * sema is for protecting all other access - * functions from disconnection, not the - * other way round). - */ - - /* Now what do we do in case of disconnection: - * One alternative would be to simply call - * give_up_console(). Nah, not a good idea. - * give_up_console() is obviously buggy as it - * only discards the consw pointer from the - * driver_map, but doesn't adapt vc->vc_sw - * of the affected consoles. Hence, the next - * call to any of the console functions will - * eventually take a trip to oops county. - * Also, give_up_console for some reason - * doesn't decrement our module refcount. - * Instead, we switch our consoles to a private - * dummy console. This, of course, keeps our - * refcount up as well, but it works perfectly. - */ - - if (sisusb->haveconsole) { - for (i = 0; i < MAX_NR_CONSOLES; i++) - if (sisusb->havethisconsole[i]) { - console_lock(); - do_take_over_console(&sisusb_dummy_con, i, i, 0); - console_unlock(); - /* At this point, con_deinit for all our - * consoles is executed by do_take_over_console(). 
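
The teardown strategy described in the comment above, taking over the consoles with a private dummy driver rather than calling give_up_console(), is essentially a vtable swap that leaves no stale function pointers behind. A userspace sketch of the pattern; consw, take_over() and the console array are simplified stand-ins for the real vt structures:

#include <stdio.h>

struct consw {
	void (*putc)(int c);
};

static void real_putc(int c)
{
	printf("real console: %c\n", c);
}

static void dummy_putc(int c)
{
	(void)c;	/* intentionally a no-op */
}

static const struct consw real_con = { .putc = real_putc };
static const struct consw dummy_con = { .putc = dummy_putc };

#define NR_CONSOLES 4
static const struct consw *vc_sw[NR_CONSOLES];

/* Models do_take_over_console(): repoint a range of consoles. */
static void take_over(const struct consw *con, int first, int last)
{
	int i;

	for (i = first; i <= last; i++)
		vc_sw[i] = con;
}

int main(void)
{
	take_over(&real_con, 0, NR_CONSOLES - 1);
	vc_sw[0]->putc('A');

	/* Device gone: reroute to the dummy, leaving no stale pointers. */
	take_over(&dummy_con, 0, NR_CONSOLES - 1);
	vc_sw[0]->putc('B');	/* silently ignored */
	return 0;
}
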
- */ - } - sisusb->haveconsole = 0; - } - - vfree((void *)sisusb->scrbuf); - sisusb->scrbuf = 0; - - vfree(sisusb->font_backup); - sisusb->font_backup = NULL; -} - -void __init sisusb_init_concode(void) -{ - int i; - - for (i = 0; i < MAX_NR_CONSOLES; i++) - mysisusbs[i] = NULL; -} diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.c b/drivers/usb/misc/sisusbvga/sisusb_init.c deleted file mode 100644 index 7c11198d5dda0db3d0bcbdf01d91acfca578b5e2..0000000000000000000000000000000000000000 --- a/drivers/usb/misc/sisusbvga/sisusb_init.c +++ /dev/null @@ -1,955 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) -/* - * sisusb - usb kernel driver for SiS315(E) based USB2VGA dongles - * - * Display mode initializing code - * - * Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria - * - * If distributed as part of the Linux kernel, this code is licensed under the - * terms of the GPL v2. - * - * Otherwise, the following license terms apply: - * - * * Redistribution and use in source and binary forms, with or without - * * modification, are permitted provided that the following conditions - * * are met: - * * 1) Redistributions of source code must retain the above copyright - * * notice, this list of conditions and the following disclaimer. - * * 2) Redistributions in binary form must reproduce the above copyright - * * notice, this list of conditions and the following disclaimer in the - * * documentation and/or other materials provided with the distribution. - * * 3) The name of the author may not be used to endorse or promote products - * * derived from this software without specific prior written permission. - * * - * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * Author: Thomas Winischhofer - * - */ - -#include -#include -#include -#include -#include - -#include "sisusb.h" -#include "sisusb_init.h" -#include "sisusb_tables.h" - -/*********************************************/ -/* POINTER INITIALIZATION */ -/*********************************************/ - -static void SiSUSB_InitPtr(struct SiS_Private *SiS_Pr) -{ - SiS_Pr->SiS_ModeResInfo = SiSUSB_ModeResInfo; - SiS_Pr->SiS_StandTable = SiSUSB_StandTable; - - SiS_Pr->SiS_SModeIDTable = SiSUSB_SModeIDTable; - SiS_Pr->SiS_EModeIDTable = SiSUSB_EModeIDTable; - SiS_Pr->SiS_RefIndex = SiSUSB_RefIndex; - SiS_Pr->SiS_CRT1Table = SiSUSB_CRT1Table; - - SiS_Pr->SiS_VCLKData = SiSUSB_VCLKData; -} - -/*********************************************/ -/* HELPER: SetReg, GetReg */ -/*********************************************/ - -static void -SiS_SetReg(struct SiS_Private *SiS_Pr, unsigned long port, - unsigned short index, unsigned short data) -{ - sisusb_setidxreg(SiS_Pr->sisusb, port, index, data); -} - -static void -SiS_SetRegByte(struct SiS_Private *SiS_Pr, unsigned long port, - unsigned short data) -{ - sisusb_setreg(SiS_Pr->sisusb, port, data); -} - -static unsigned char -SiS_GetReg(struct SiS_Private *SiS_Pr, unsigned long port, unsigned short index) -{ - u8 data; - - sisusb_getidxreg(SiS_Pr->sisusb, port, index, &data); - - return data; -} - -static unsigned char -SiS_GetRegByte(struct SiS_Private *SiS_Pr, unsigned long port) -{ - u8 data; - - sisusb_getreg(SiS_Pr->sisusb, port, &data); - - return data; -} - -static void -SiS_SetRegANDOR(struct SiS_Private *SiS_Pr, unsigned long port, - unsigned short index, unsigned short DataAND, - unsigned short DataOR) -{ - sisusb_setidxregandor(SiS_Pr->sisusb, port, index, DataAND, DataOR); -} - -static void -SiS_SetRegAND(struct SiS_Private *SiS_Pr, unsigned long port, - unsigned short index, unsigned short DataAND) -{ - sisusb_setidxregand(SiS_Pr->sisusb, port, index, DataAND); -} - -static void -SiS_SetRegOR(struct SiS_Private *SiS_Pr, unsigned long port, - unsigned short index, unsigned short DataOR) -{ - sisusb_setidxregor(SiS_Pr->sisusb, port, index, DataOR); -} - -/*********************************************/ -/* HELPER: DisplayOn, DisplayOff */ -/*********************************************/ - -static void SiS_DisplayOn(struct SiS_Private *SiS_Pr) -{ - SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, 0xDF); -} - -/*********************************************/ -/* HELPER: Init Port Addresses */ -/*********************************************/ - -static void SiSUSBRegInit(struct SiS_Private *SiS_Pr, unsigned long BaseAddr) -{ - SiS_Pr->SiS_P3c4 = BaseAddr + 0x14; - SiS_Pr->SiS_P3d4 = BaseAddr + 0x24; - SiS_Pr->SiS_P3c0 = BaseAddr + 0x10; - SiS_Pr->SiS_P3ce = BaseAddr + 0x1e; - SiS_Pr->SiS_P3c2 = BaseAddr + 0x12; - SiS_Pr->SiS_P3ca = BaseAddr + 0x1a; - SiS_Pr->SiS_P3c6 = BaseAddr + 0x16; - SiS_Pr->SiS_P3c7 = BaseAddr + 0x17; - SiS_Pr->SiS_P3c8 = BaseAddr + 0x18; - SiS_Pr->SiS_P3c9 = BaseAddr + 0x19; - SiS_Pr->SiS_P3cb = BaseAddr + 0x1b; - SiS_Pr->SiS_P3cc = BaseAddr + 0x1c; - SiS_Pr->SiS_P3cd = BaseAddr + 0x1d; - SiS_Pr->SiS_P3da = BaseAddr + 0x2a; - SiS_Pr->SiS_Part1Port = BaseAddr + SIS_CRT2_PORT_04; -} - -/*********************************************/ -/* HELPER: GetSysFlags */ -/*********************************************/ - -static void SiS_GetSysFlags(struct SiS_Private *SiS_Pr) -{ - SiS_Pr->SiS_MyCR63 = 0x63; -} - -/*********************************************/ -/* HELPER: Init PCI & Engines */ 
-/*********************************************/ - -static void SiSInitPCIetc(struct SiS_Private *SiS_Pr) -{ - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x20, 0xa1); - /* - Enable 2D (0x40) - * - Enable 3D (0x02) - * - Enable 3D vertex command fetch (0x10) - * - Enable 3D command parser (0x08) - * - Enable 3D G/L transformation engine (0x80) - */ - SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1E, 0xDA); -} - -/*********************************************/ -/* HELPER: SET SEGMENT REGISTERS */ -/*********************************************/ - -static void SiS_SetSegRegLower(struct SiS_Private *SiS_Pr, unsigned short value) -{ - unsigned short temp; - - value &= 0x00ff; - temp = SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb) & 0xf0; - temp |= (value >> 4); - SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb, temp); - temp = SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd) & 0xf0; - temp |= (value & 0x0f); - SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd, temp); -} - -static void SiS_SetSegRegUpper(struct SiS_Private *SiS_Pr, unsigned short value) -{ - unsigned short temp; - - value &= 0x00ff; - temp = SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb) & 0x0f; - temp |= (value & 0xf0); - SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb, temp); - temp = SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd) & 0x0f; - temp |= (value << 4); - SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd, temp); -} - -static void SiS_SetSegmentReg(struct SiS_Private *SiS_Pr, unsigned short value) -{ - SiS_SetSegRegLower(SiS_Pr, value); - SiS_SetSegRegUpper(SiS_Pr, value); -} - -static void SiS_ResetSegmentReg(struct SiS_Private *SiS_Pr) -{ - SiS_SetSegmentReg(SiS_Pr, 0); -} - -static void -SiS_SetSegmentRegOver(struct SiS_Private *SiS_Pr, unsigned short value) -{ - unsigned short temp = value >> 8; - - temp &= 0x07; - temp |= (temp << 4); - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1d, temp); - SiS_SetSegmentReg(SiS_Pr, value); -} - -static void SiS_ResetSegmentRegOver(struct SiS_Private *SiS_Pr) -{ - SiS_SetSegmentRegOver(SiS_Pr, 0); -} - -static void SiS_ResetSegmentRegisters(struct SiS_Private *SiS_Pr) -{ - SiS_ResetSegmentReg(SiS_Pr); - SiS_ResetSegmentRegOver(SiS_Pr); -} - -/*********************************************/ -/* HELPER: SearchModeID */ -/*********************************************/ - -static int -SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo, - unsigned short *ModeIdIndex) -{ - if ((*ModeNo) <= 0x13) { - - if ((*ModeNo) != 0x03) - return 0; - - (*ModeIdIndex) = 0; - - } else { - - for (*ModeIdIndex = 0;; (*ModeIdIndex)++) { - - if (SiS_Pr->SiS_EModeIDTable[*ModeIdIndex].Ext_ModeID == - (*ModeNo)) - break; - - if (SiS_Pr->SiS_EModeIDTable[*ModeIdIndex].Ext_ModeID == - 0xFF) - return 0; - } - - } - - return 1; -} - -/*********************************************/ -/* HELPER: ENABLE CRT1 */ -/*********************************************/ - -static void SiS_HandleCRT1(struct SiS_Private *SiS_Pr) -{ - /* Enable CRT1 gating */ - SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, SiS_Pr->SiS_MyCR63, 0xbf); -} - -/*********************************************/ -/* HELPER: GetColorDepth */ -/*********************************************/ - -static unsigned short -SiS_GetColorDepth(struct SiS_Private *SiS_Pr, unsigned short ModeNo, - unsigned short ModeIdIndex) -{ - static const unsigned short ColorDepth[6] = { 1, 2, 4, 4, 6, 8 }; - unsigned short modeflag; - short index; - - if (ModeNo <= 0x13) { - modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag; - } else { - modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; - } - - index = (modeflag & 
ModeTypeMask) - ModeEGA; - if (index < 0) - index = 0; - return ColorDepth[index]; -} - -/*********************************************/ -/* HELPER: GetOffset */ -/*********************************************/ - -static unsigned short -SiS_GetOffset(struct SiS_Private *SiS_Pr, unsigned short ModeNo, - unsigned short ModeIdIndex, unsigned short rrti) -{ - unsigned short xres, temp, colordepth, infoflag; - - infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag; - xres = SiS_Pr->SiS_RefIndex[rrti].XRes; - - colordepth = SiS_GetColorDepth(SiS_Pr, ModeNo, ModeIdIndex); - - temp = xres / 16; - - if (infoflag & InterlaceMode) - temp <<= 1; - - temp *= colordepth; - - if (xres % 16) - temp += (colordepth >> 1); - - return temp; -} - -/*********************************************/ -/* SEQ */ -/*********************************************/ - -static void -SiS_SetSeqRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex) -{ - unsigned char SRdata; - int i; - - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x00, 0x03); - - SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[0] | 0x20; - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, SRdata); - - for (i = 2; i <= 4; i++) { - SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[i - 1]; - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, i, SRdata); - } -} - -/*********************************************/ -/* MISC */ -/*********************************************/ - -static void -SiS_SetMiscRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex) -{ - unsigned char Miscdata = SiS_Pr->SiS_StandTable[StandTableIndex].MISC; - - SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c2, Miscdata); -} - -/*********************************************/ -/* CRTC */ -/*********************************************/ - -static void -SiS_SetCRTCRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex) -{ - unsigned char CRTCdata; - unsigned short i; - - SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, 0x11, 0x7f); - - for (i = 0; i <= 0x18; i++) { - CRTCdata = SiS_Pr->SiS_StandTable[StandTableIndex].CRTC[i]; - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, i, CRTCdata); - } -} - -/*********************************************/ -/* ATT */ -/*********************************************/ - -static void -SiS_SetATTRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex) -{ - unsigned char ARdata; - unsigned short i; - - for (i = 0; i <= 0x13; i++) { - ARdata = SiS_Pr->SiS_StandTable[StandTableIndex].ATTR[i]; - SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da); - SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, i); - SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, ARdata); - } - SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da); - SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x14); - SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x00); - - SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da); - SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x20); - SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da); -} - -/*********************************************/ -/* GRC */ -/*********************************************/ - -static void -SiS_SetGRCRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex) -{ - unsigned char GRdata; - unsigned short i; - - for (i = 0; i <= 0x08; i++) { - GRdata = SiS_Pr->SiS_StandTable[StandTableIndex].GRC[i]; - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3ce, i, GRdata); - } - - if (SiS_Pr->SiS_ModeType > ModeVGA) { - /* 256 color disable */ - SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3ce, 0x05, 0xBF); - } -} - -/*********************************************/ -/* CLEAR EXTENDED REGISTERS */ -/*********************************************/ - -static void 
SiS_ClearExt1Regs(struct SiS_Private *SiS_Pr, unsigned short ModeNo) -{ - int i; - - for (i = 0x0A; i <= 0x0E; i++) { - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, i, 0x00); - } - - SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x37, 0xFE); -} - -/*********************************************/ -/* Get rate index */ -/*********************************************/ - -static unsigned short -SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, - unsigned short ModeIdIndex) -{ - unsigned short rrti, i, index, temp; - - if (ModeNo <= 0x13) - return 0xFFFF; - - index = SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x33) & 0x0F; - if (index > 0) - index--; - - rrti = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].REFindex; - ModeNo = SiS_Pr->SiS_RefIndex[rrti].ModeID; - - i = 0; - do { - if (SiS_Pr->SiS_RefIndex[rrti + i].ModeID != ModeNo) - break; - - temp = - SiS_Pr->SiS_RefIndex[rrti + i].Ext_InfoFlag & ModeTypeMask; - if (temp < SiS_Pr->SiS_ModeType) - break; - - i++; - index--; - } while (index != 0xFFFF); - - i--; - - return (rrti + i); -} - -/*********************************************/ -/* SYNC */ -/*********************************************/ - -static void SiS_SetCRT1Sync(struct SiS_Private *SiS_Pr, unsigned short rrti) -{ - unsigned short sync = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag >> 8; - sync &= 0xC0; - sync |= 0x2f; - SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c2, sync); -} - -/*********************************************/ -/* CRTC/2 */ -/*********************************************/ - -static void -SiS_SetCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, - unsigned short ModeIdIndex, unsigned short rrti) -{ - unsigned char index; - unsigned short temp, i, j, modeflag; - - SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, 0x11, 0x7f); - - modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; - - index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRT1CRTC; - - for (i = 0, j = 0; i <= 7; i++, j++) { - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j, - SiS_Pr->SiS_CRT1Table[index].CR[i]); - } - for (j = 0x10; i <= 10; i++, j++) { - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j, - SiS_Pr->SiS_CRT1Table[index].CR[i]); - } - for (j = 0x15; i <= 12; i++, j++) { - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j, - SiS_Pr->SiS_CRT1Table[index].CR[i]); - } - for (j = 0x0A; i <= 15; i++, j++) { - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, j, - SiS_Pr->SiS_CRT1Table[index].CR[i]); - } - - temp = SiS_Pr->SiS_CRT1Table[index].CR[16] & 0xE0; - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0E, temp); - - temp = ((SiS_Pr->SiS_CRT1Table[index].CR[16]) & 0x01) << 5; - if (modeflag & DoubleScanMode) - temp |= 0x80; - SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3d4, 0x09, 0x5F, temp); - - if (SiS_Pr->SiS_ModeType > ModeVGA) - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x14, 0x4F); -} - -/*********************************************/ -/* OFFSET & PITCH */ -/*********************************************/ -/* (partly overruled by SetPitch() in XF86) */ -/*********************************************/ - -static void -SiS_SetCRT1Offset(struct SiS_Private *SiS_Pr, unsigned short ModeNo, - unsigned short ModeIdIndex, unsigned short rrti) -{ - unsigned short du = SiS_GetOffset(SiS_Pr, ModeNo, ModeIdIndex, rrti); - unsigned short infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag; - unsigned short temp; - - temp = (du >> 8) & 0x0f; - SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0E, 0xF0, temp); - - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x13, (du & 0xFF)); - - if (infoflag & InterlaceMode) - du >>= 1; - - du <<= 5; - temp = (du >> 8) & 0xff; - if (du & 0xff) - temp++; - temp++; - 
SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x10, temp); -} - -/*********************************************/ -/* VCLK */ -/*********************************************/ - -static void -SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo, - unsigned short rrti) -{ - unsigned short index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK; - unsigned short clka = SiS_Pr->SiS_VCLKData[index].SR2B; - unsigned short clkb = SiS_Pr->SiS_VCLKData[index].SR2C; - - SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x31, 0xCF); - - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2B, clka); - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2C, clkb); - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2D, 0x01); -} - -/*********************************************/ -/* FIFO */ -/*********************************************/ - -static void -SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo, - unsigned short mi) -{ - unsigned short modeflag = SiS_Pr->SiS_EModeIDTable[mi].Ext_ModeFlag; - - /* disable auto-threshold */ - SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x3D, 0xFE); - - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x08, 0xAE); - SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x09, 0xF0); - - if (ModeNo <= 0x13) - return; - - if ((!(modeflag & DoubleScanMode)) || (!(modeflag & HalfDCLK))) { - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x08, 0x34); - SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x3D, 0x01); - } -} - -/*********************************************/ -/* MODE REGISTERS */ -/*********************************************/ - -static void -SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo, - unsigned short rrti) -{ - unsigned short data = 0, VCLK = 0, index = 0; - - if (ModeNo > 0x13) { - index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK; - VCLK = SiS_Pr->SiS_VCLKData[index].CLOCK; - } - - if (VCLK >= 166) - data |= 0x0c; - SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x32, 0xf3, data); - - if (VCLK >= 166) - SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1f, 0xe7); - - /* DAC speed */ - data = 0x03; - if (VCLK >= 260) - data = 0x00; - else if (VCLK >= 160) - data = 0x01; - else if (VCLK >= 135) - data = 0x02; - - SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x07, 0xF8, data); -} - -static void -SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, - unsigned short ModeIdIndex, unsigned short rrti) -{ - unsigned short data, infoflag = 0, modeflag; - - if (ModeNo <= 0x13) - modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag; - else { - modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; - infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag; - } - - /* Disable DPMS */ - SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1F, 0x3F); - - data = 0; - if (ModeNo > 0x13) { - if (SiS_Pr->SiS_ModeType > ModeEGA) { - data |= 0x02; - data |= ((SiS_Pr->SiS_ModeType - ModeVGA) << 2); - } - if (infoflag & InterlaceMode) - data |= 0x20; - } - SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x06, 0xC0, data); - - data = 0; - if (infoflag & InterlaceMode) { - /* data = (Hsync / 8) - ((Htotal / 8) / 2) + 3 */ - unsigned short hrs = - (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x04) | - ((SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0b) & 0xc0) << 2)) - - 3; - unsigned short hto = - (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x00) | - ((SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0b) & 0x03) << 8)) - + 5; - data = hrs - (hto >> 1) + 3; - } - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x19, (data & 0xFF)); - SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3d4, 0x1a, 0xFC, (data >> 8)); - - if (modeflag & HalfDCLK) - SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, 0x08); - 
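/*
 * Editorial aside, not part of the original diff: SiS_SetCRT1ModeRegs
 * above derives the interlace half-line position written to CR19/CR1A
 * from the CRTC horizontal timings using
 *     data = (Hsync / 8) - ((Htotal / 8) / 2) + 3
 * in character-clock units. A minimal standalone sketch of that
 * arithmetic; the register values below are hypothetical examples,
 * not taken from any real mode table:
 */
#include <stdio.h>

int main(void)
{
	unsigned short cr4  = 0x54;	/* hsync start, low 8 bits (hypothetical) */
	unsigned short cr0  = 0x5f;	/* htotal - 5, low 8 bits (hypothetical) */
	unsigned short sr0b = 0x00;	/* extension bits: [7:6] hsync, [1:0] htotal */

	/* Same recovery of the full values as in the driver code above */
	unsigned short hrs  = (cr4 | ((sr0b & 0xc0) << 2)) - 3;
	unsigned short hto  = (cr0 | ((sr0b & 0x03) << 8)) + 5;
	unsigned short data = hrs - (hto >> 1) + 3;

	printf("CR19/CR1A interlace value: 0x%03x\n", data);
	return 0;
}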
- data = 0; - if (modeflag & LineCompareOff) - data = 0x08; - SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0F, 0xB7, data); - - if ((SiS_Pr->SiS_ModeType == ModeEGA) && (ModeNo > 0x13)) - SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0F, 0x40); - - SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x31, 0xfb); - - data = 0x60; - if (SiS_Pr->SiS_ModeType != ModeText) { - data ^= 0x60; - if (SiS_Pr->SiS_ModeType != ModeEGA) - data ^= 0xA0; - } - SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x21, 0x1F, data); - - SiS_SetVCLKState(SiS_Pr, ModeNo, rrti); - - if (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x31) & 0x40) - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x52, 0x2c); - else - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x52, 0x6c); -} - -/*********************************************/ -/* LOAD DAC */ -/*********************************************/ - -static void -SiS_WriteDAC(struct SiS_Private *SiS_Pr, unsigned long DACData, - unsigned short shiftflag, unsigned short dl, unsigned short ah, - unsigned short al, unsigned short dh) -{ - unsigned short d1, d2, d3; - - switch (dl) { - case 0: - d1 = dh; - d2 = ah; - d3 = al; - break; - case 1: - d1 = ah; - d2 = al; - d3 = dh; - break; - default: - d1 = al; - d2 = dh; - d3 = ah; - } - SiS_SetRegByte(SiS_Pr, DACData, (d1 << shiftflag)); - SiS_SetRegByte(SiS_Pr, DACData, (d2 << shiftflag)); - SiS_SetRegByte(SiS_Pr, DACData, (d3 << shiftflag)); -} - -static void -SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, - unsigned short mi) -{ - unsigned short data, data2, time, i, j, k, m, n, o; - unsigned short si, di, bx, sf; - unsigned long DACAddr, DACData; - const unsigned char *table = NULL; - - if (ModeNo < 0x13) - data = SiS_Pr->SiS_SModeIDTable[mi].St_ModeFlag; - else - data = SiS_Pr->SiS_EModeIDTable[mi].Ext_ModeFlag; - - data &= DACInfoFlag; - - j = time = 64; - if (data == 0x00) - table = SiS_MDA_DAC; - else if (data == 0x08) - table = SiS_CGA_DAC; - else if (data == 0x10) - table = SiS_EGA_DAC; - else { - j = 16; - time = 256; - table = SiS_VGA_DAC; - } - - DACAddr = SiS_Pr->SiS_P3c8; - DACData = SiS_Pr->SiS_P3c9; - sf = 0; - SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c6, 0xFF); - - SiS_SetRegByte(SiS_Pr, DACAddr, 0x00); - - for (i = 0; i < j; i++) { - data = table[i]; - for (k = 0; k < 3; k++) { - data2 = 0; - if (data & 0x01) - data2 += 0x2A; - if (data & 0x02) - data2 += 0x15; - SiS_SetRegByte(SiS_Pr, DACData, (data2 << sf)); - data >>= 2; - } - } - - if (time == 256) { - for (i = 16; i < 32; i++) { - data = table[i] << sf; - for (k = 0; k < 3; k++) - SiS_SetRegByte(SiS_Pr, DACData, data); - } - si = 32; - for (m = 0; m < 9; m++) { - di = si; - bx = si + 4; - for (n = 0; n < 3; n++) { - for (o = 0; o < 5; o++) { - SiS_WriteDAC(SiS_Pr, DACData, sf, n, - table[di], table[bx], - table[si]); - si++; - } - si -= 2; - for (o = 0; o < 3; o++) { - SiS_WriteDAC(SiS_Pr, DACData, sf, n, - table[di], table[si], - table[bx]); - si--; - } - } - si += 5; - } - } -} - -/*********************************************/ -/* SET CRT1 REGISTER GROUP */ -/*********************************************/ - -static void -SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, - unsigned short ModeIdIndex) -{ - unsigned short StandTableIndex, rrti; - - SiS_Pr->SiS_CRT1Mode = ModeNo; - - if (ModeNo <= 0x13) - StandTableIndex = 0; - else - StandTableIndex = 1; - - SiS_ResetSegmentRegisters(SiS_Pr); - SiS_SetSeqRegs(SiS_Pr, StandTableIndex); - SiS_SetMiscRegs(SiS_Pr, StandTableIndex); - SiS_SetCRTCRegs(SiS_Pr, StandTableIndex); - SiS_SetATTRegs(SiS_Pr, StandTableIndex); - 
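/*
 * Editorial aside, not part of the original diff: SiS_LoadDAC above
 * expands packed 2-bit colour fields into the 6-bit values a VGA RAMDAC
 * expects. Bit 0 of each field contributes 0x2A (2/3 intensity) and
 * bit 1 contributes 0x15 (1/3), so the field values 0/1/2/3 map to
 * 0x00/0x2A/0x15/0x3F. A minimal standalone sketch of that expansion
 * for one (hypothetical) packed table byte:
 */
#include <stdio.h>

int main(void)
{
	unsigned char packed = 0x3f;	/* three 2-bit fields, all 3 -> white */
	int k;

	for (k = 0; k < 3; k++) {
		unsigned char v = 0;	/* 6-bit DAC component, 0x00..0x3f */

		if (packed & 0x01)
			v += 0x2A;
		if (packed & 0x02)
			v += 0x15;
		printf("component %d -> 0x%02x\n", k, v);
		packed >>= 2;	/* advance to the next 2-bit field */
	}
	return 0;
}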
SiS_SetGRCRegs(SiS_Pr, StandTableIndex); - SiS_ClearExt1Regs(SiS_Pr, ModeNo); - - rrti = SiS_GetRatePtr(SiS_Pr, ModeNo, ModeIdIndex); - - if (rrti != 0xFFFF) { - SiS_SetCRT1Sync(SiS_Pr, rrti); - SiS_SetCRT1CRTC(SiS_Pr, ModeNo, ModeIdIndex, rrti); - SiS_SetCRT1Offset(SiS_Pr, ModeNo, ModeIdIndex, rrti); - SiS_SetCRT1VCLK(SiS_Pr, ModeNo, rrti); - } - - SiS_SetCRT1FIFO_310(SiS_Pr, ModeNo, ModeIdIndex); - - SiS_SetCRT1ModeRegs(SiS_Pr, ModeNo, ModeIdIndex, rrti); - - SiS_LoadDAC(SiS_Pr, ModeNo, ModeIdIndex); - - SiS_DisplayOn(SiS_Pr); -} - -/*********************************************/ -/* SiSSetMode() */ -/*********************************************/ - -int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) -{ - unsigned short ModeIdIndex; - unsigned long BaseAddr = SiS_Pr->IOAddress; - - SiSUSB_InitPtr(SiS_Pr); - SiSUSBRegInit(SiS_Pr, BaseAddr); - SiS_GetSysFlags(SiS_Pr); - - if (!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) - return 0; - - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x05, 0x86); - - SiSInitPCIetc(SiS_Pr); - - ModeNo &= 0x7f; - - SiS_Pr->SiS_ModeType = - SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag & ModeTypeMask; - - SiS_Pr->SiS_SetFlag = LowModeTests; - - /* Set mode on CRT1 */ - SiS_SetCRT1Group(SiS_Pr, ModeNo, ModeIdIndex); - - SiS_HandleCRT1(SiS_Pr); - - SiS_DisplayOn(SiS_Pr); - SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c6, 0xFF); - - /* Store mode number */ - SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x34, ModeNo); - - return 1; -} - -int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo) -{ - unsigned short ModeNo = 0; - int i; - - SiSUSB_InitPtr(SiS_Pr); - - if (VModeNo == 0x03) { - - ModeNo = 0x03; - - } else { - - i = 0; - do { - - if (SiS_Pr->SiS_EModeIDTable[i].Ext_VESAID == VModeNo) { - ModeNo = SiS_Pr->SiS_EModeIDTable[i].Ext_ModeID; - break; - } - - } while (SiS_Pr->SiS_EModeIDTable[i++].Ext_ModeID != 0xff); - - } - - if (!ModeNo) - return 0; - - return SiSUSBSetMode(SiS_Pr, ModeNo); -} diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h deleted file mode 100644 index b5cd77ae941d7853eb78b95c09483ee34bb0727d..0000000000000000000000000000000000000000 --- a/drivers/usb/misc/sisusbvga/sisusb_init.h +++ /dev/null @@ -1,180 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ -/* $XFree86$ */ -/* $XdotOrg$ */ -/* - * Data and prototypes for init.c - * - * Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria - * - * If distributed as part of the Linux kernel, the following license terms - * apply: - * - * * This program is free software; you can redistribute it and/or modify - * * it under the terms of the GNU General Public License as published by - * * the Free Software Foundation; either version 2 of the named License, - * * or any later version. - * * - * * This program is distributed in the hope that it will be useful, - * * but WITHOUT ANY WARRANTY; without even the implied warranty of - * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * * GNU General Public License for more details. 
- * * - * * You should have received a copy of the GNU General Public License - * * along with this program; if not, write to the Free Software - * * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA - * - * Otherwise, the following license terms apply: - * - * * Redistribution and use in source and binary forms, with or without - * * modification, are permitted provided that the following conditions - * * are met: - * * 1) Redistributions of source code must retain the above copyright - * * notice, this list of conditions and the following disclaimer. - * * 2) Redistributions in binary form must reproduce the above copyright - * * notice, this list of conditions and the following disclaimer in the - * * documentation and/or other materials provided with the distribution. - * * 3) The name of the author may not be used to endorse or promote products - * * derived from this software without specific prior written permission. - * * - * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Author: Thomas Winischhofer - * - */ - -#ifndef _SISUSB_INIT_H_ -#define _SISUSB_INIT_H_ - -/* SiS_ModeType */ -#define ModeText 0x00 -#define ModeCGA 0x01 -#define ModeEGA 0x02 -#define ModeVGA 0x03 -#define Mode15Bpp 0x04 -#define Mode16Bpp 0x05 -#define Mode24Bpp 0x06 -#define Mode32Bpp 0x07 - -#define ModeTypeMask 0x07 -#define IsTextMode 0x07 - -#define DACInfoFlag 0x0018 -#define MemoryInfoFlag 0x01E0 -#define MemorySizeShift 5 - -/* modeflag */ -#define Charx8Dot 0x0200 -#define LineCompareOff 0x0400 -#define CRT2Mode 0x0800 -#define HalfDCLK 0x1000 -#define NoSupportSimuTV 0x2000 -#define NoSupportLCDScale 0x4000 /* SiS bridge: No scaling possible (no matter what panel) */ -#define DoubleScanMode 0x8000 - -/* Infoflag */ -#define SupportTV 0x0008 -#define SupportTV1024 0x0800 -#define SupportCHTV 0x0800 -#define Support64048060Hz 0x0800 /* Special for 640x480 LCD */ -#define SupportHiVision 0x0010 -#define SupportYPbPr750p 0x1000 -#define SupportLCD 0x0020 -#define SupportRAMDAC2 0x0040 /* All (<= 100Mhz) */ -#define SupportRAMDAC2_135 0x0100 /* All except DH (<= 135Mhz) */ -#define SupportRAMDAC2_162 0x0200 /* B, C (<= 162Mhz) */ -#define SupportRAMDAC2_202 0x0400 /* C (<= 202Mhz) */ -#define InterlaceMode 0x0080 -#define SyncPP 0x0000 -#define SyncPN 0x4000 -#define SyncNP 0x8000 -#define SyncNN 0xc000 - -/* SetFlag */ -#define ProgrammingCRT2 0x0001 -#define LowModeTests 0x0002 -#define LCDVESATiming 0x0008 -#define EnableLVDSDDA 0x0010 -#define SetDispDevSwitchFlag 0x0020 -#define CheckWinDos 0x0040 -#define SetDOSMode 0x0080 - -/* Index in ModeResInfo table */ -#define SIS_RI_320x200 0 -#define SIS_RI_320x240 1 -#define SIS_RI_320x400 2 -#define SIS_RI_400x300 3 -#define SIS_RI_512x384 4 -#define SIS_RI_640x400 5 -#define SIS_RI_640x480 6 -#define SIS_RI_800x600 7 
-#define SIS_RI_1024x768 8 -#define SIS_RI_1280x1024 9 -#define SIS_RI_1600x1200 10 -#define SIS_RI_1920x1440 11 -#define SIS_RI_2048x1536 12 -#define SIS_RI_720x480 13 -#define SIS_RI_720x576 14 -#define SIS_RI_1280x960 15 -#define SIS_RI_800x480 16 -#define SIS_RI_1024x576 17 -#define SIS_RI_1280x720 18 -#define SIS_RI_856x480 19 -#define SIS_RI_1280x768 20 -#define SIS_RI_1400x1050 21 -#define SIS_RI_1152x864 22 /* Up to here SiS conforming */ -#define SIS_RI_848x480 23 -#define SIS_RI_1360x768 24 -#define SIS_RI_1024x600 25 -#define SIS_RI_1152x768 26 -#define SIS_RI_768x576 27 -#define SIS_RI_1360x1024 28 -#define SIS_RI_1680x1050 29 -#define SIS_RI_1280x800 30 -#define SIS_RI_1920x1080 31 -#define SIS_RI_960x540 32 -#define SIS_RI_960x600 33 - -#define SIS_VIDEO_CAPTURE 0x00 - 0x30 -#define SIS_VIDEO_PLAYBACK 0x02 - 0x30 -#define SIS_CRT2_PORT_04 0x04 - 0x30 - -int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo); -int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo); - -extern int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data); -extern int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 * data); -extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port, - u8 index, u8 data); -extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port, - u8 index, u8 * data); -extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, - u8 idx, u8 myand, u8 myor); -extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port, - u8 index, u8 myor); -extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port, - u8 idx, u8 myand); - -void sisusb_delete(struct kref *kref); -int sisusb_writeb(struct sisusb_usb_data *sisusb, u32 adr, u8 data); -int sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 * data); -int sisusb_copy_memory(struct sisusb_usb_data *sisusb, u8 *src, - u32 dest, int length); -int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init); -int sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot, - u8 * arg, int cmapsz, int ch512, int dorecalc, - struct vc_data *c, int fh, int uplock); -void sisusb_set_cursor(struct sisusb_usb_data *sisusb, unsigned int location); -int sisusb_console_init(struct sisusb_usb_data *sisusb, int first, int last); -void sisusb_console_exit(struct sisusb_usb_data *sisusb); -void sisusb_init_concode(void); - -#endif diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusbvga.c similarity index 91% rename from drivers/usb/misc/sisusbvga/sisusb.c rename to drivers/usb/misc/sisusbvga/sisusbvga.c index f08de33d9ff383475b18908115a09248e157d164..654a79fd3231eae54c09ea186c195cce9485266b 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusbvga.c @@ -51,25 +51,11 @@ #include #include "sisusb.h" -#include "sisusb_init.h" - -#ifdef CONFIG_USB_SISUSBVGA_CON -#include -#endif #define SISUSB_DONTSYNC /* Forward declarations / clean-up routines */ -#ifdef CONFIG_USB_SISUSBVGA_CON -static int sisusb_first_vc; -static int sisusb_last_vc; -module_param_named(first, sisusb_first_vc, int, 0); -module_param_named(last, sisusb_last_vc, int, 0); -MODULE_PARM_DESC(first, "Number of first console to take over (1 - MAX_NR_CONSOLES)"); -MODULE_PARM_DESC(last, "Number of last console to take over (1 - MAX_NR_CONSOLES)"); -#endif - static struct usb_driver sisusb_driver; static void sisusb_free_buffers(struct sisusb_usb_data *sisusb) @@ -1198,19 +1184,7 @@ static int 
sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr, /* High level: Gfx (indexed) register access */ -#ifdef CONFIG_USB_SISUSBVGA_CON -int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data) -{ - return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, data); -} - -int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 *data) -{ - return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port, data); -} -#endif - -int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port, +static int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 data) { int ret; @@ -1220,7 +1194,7 @@ int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port, return ret; } -int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port, +static int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 *data) { int ret; @@ -1230,7 +1204,7 @@ int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port, return ret; } -int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx, +static int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand, u8 myor) { int ret; @@ -1258,13 +1232,13 @@ static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb, return ret; } -int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port, +static int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 myor) { return sisusb_setidxregandor(sisusb, port, index, 0xff, myor); } -int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port, +static int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand) { return sisusb_setidxregandor(sisusb, port, idx, myand, 0x00); @@ -1272,38 +1246,6 @@ int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port, /* Write/read video ram */ -#ifdef CONFIG_USB_SISUSBVGA_CON -int sisusb_writeb(struct sisusb_usb_data *sisusb, u32 adr, u8 data) -{ - return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM, adr, data); -} - -int sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 *data) -{ - return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM, adr, data); -} - -int sisusb_copy_memory(struct sisusb_usb_data *sisusb, u8 *src, - u32 dest, int length) -{ - size_t dummy; - - return sisusb_write_mem_bulk(sisusb, dest, src, length, - NULL, 0, &dummy); -} - -#ifdef SISUSBENDIANTEST -static int sisusb_read_memory(struct sisusb_usb_data *sisusb, char *dest, - u32 src, int length) -{ - size_t dummy; - - return sisusb_read_mem_bulk(sisusb, src, dest, length, - NULL, &dummy); -} -#endif -#endif - #ifdef SISUSBENDIANTEST static void sisusb_testreadwrite(struct sisusb_usb_data *sisusb) { @@ -2252,131 +2194,6 @@ static int sisusb_init_gfxdevice(struct sisusb_usb_data *sisusb, int initscreen) return ret; } - -#ifdef CONFIG_USB_SISUSBVGA_CON - -/* Set up default text mode: - * - Set text mode (0x03) - * - Upload default font - * - Upload user font (if available) - */ - -int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init) -{ - int ret = 0, slot = sisusb->font_slot, i; - const struct font_desc *myfont; - u8 *tempbuf; - u16 *tempbufb; - static const char bootstring[] = - "SiSUSB VGA text console, (C) 2005 Thomas Winischhofer."; - static const char bootlogo[] = "(o_ //\\ V_/_"; - - /* sisusb->lock is down */ - - if (!sisusb->SiS_Pr) - return 1; - - sisusb->SiS_Pr->IOAddress = SISUSB_PCI_IOPORTBASE + 0x30; - sisusb->SiS_Pr->sisusb = (void *)sisusb; - - /* Set mode 0x03 */ - SiSUSBSetMode(sisusb->SiS_Pr, 0x03); - - myfont = 
find_font("VGA8x16"); - if (!myfont) - return 1; - - tempbuf = vmalloc(8192); - if (!tempbuf) - return 1; - - for (i = 0; i < 256; i++) - memcpy(tempbuf + (i * 32), myfont->data + (i * 16), 16); - - /* Upload default font */ - ret = sisusbcon_do_font_op(sisusb, 1, 0, tempbuf, 8192, - 0, 1, NULL, 16, 0); - - vfree(tempbuf); - - /* Upload user font (and reset current slot) */ - if (sisusb->font_backup) { - ret |= sisusbcon_do_font_op(sisusb, 1, 2, sisusb->font_backup, - 8192, sisusb->font_backup_512, 1, NULL, - sisusb->font_backup_height, 0); - if (slot != 2) - sisusbcon_do_font_op(sisusb, 1, 0, NULL, 0, 0, 1, - NULL, 16, 0); - } - - if (init && !sisusb->scrbuf) { - - tempbuf = vmalloc(8192); - if (tempbuf) { - - i = 4096; - tempbufb = (u16 *)tempbuf; - while (i--) - *(tempbufb++) = 0x0720; - - i = 0; - tempbufb = (u16 *)tempbuf; - while (bootlogo[i]) { - *(tempbufb++) = 0x0700 | bootlogo[i++]; - if (!(i % 4)) - tempbufb += 76; - } - - i = 0; - tempbufb = (u16 *)tempbuf + 6; - while (bootstring[i]) - *(tempbufb++) = 0x0700 | bootstring[i++]; - - ret |= sisusb_copy_memory(sisusb, tempbuf, - sisusb->vrambase, 8192); - - vfree(tempbuf); - - } - - } else if (sisusb->scrbuf) { - ret |= sisusb_copy_memory(sisusb, (u8 *)sisusb->scrbuf, - sisusb->vrambase, sisusb->scrbuf_size); - } - - if (sisusb->sisusb_cursor_size_from >= 0 && - sisusb->sisusb_cursor_size_to >= 0) { - sisusb_setidxreg(sisusb, SISCR, 0x0a, - sisusb->sisusb_cursor_size_from); - sisusb_setidxregandor(sisusb, SISCR, 0x0b, 0xe0, - sisusb->sisusb_cursor_size_to); - } else { - sisusb_setidxreg(sisusb, SISCR, 0x0a, 0x2d); - sisusb_setidxreg(sisusb, SISCR, 0x0b, 0x0e); - sisusb->sisusb_cursor_size_to = -1; - } - - slot = sisusb->sisusb_cursor_loc; - if (slot < 0) - slot = 0; - - sisusb->sisusb_cursor_loc = -1; - sisusb->bad_cursor_pos = 1; - - sisusb_set_cursor(sisusb, slot); - - sisusb_setidxreg(sisusb, SISCR, 0x0c, (sisusb->cur_start_addr >> 8)); - sisusb_setidxreg(sisusb, SISCR, 0x0d, (sisusb->cur_start_addr & 0xff)); - - sisusb->textmodedestroyed = 0; - - /* sisusb->lock is down */ - - return ret; -} - -#endif - /* fops */ static int sisusb_open(struct inode *inode, struct file *file) @@ -2434,7 +2251,7 @@ static int sisusb_open(struct inode *inode, struct file *file) return 0; } -void sisusb_delete(struct kref *kref) +static void sisusb_delete(struct kref *kref) { struct sisusb_usb_data *sisusb = to_sisusb_dev(kref); @@ -2446,9 +2263,6 @@ void sisusb_delete(struct kref *kref) sisusb->sisusb_dev = NULL; sisusb_free_buffers(sisusb); sisusb_free_urbs(sisusb); -#ifdef CONFIG_USB_SISUSBVGA_CON - kfree(sisusb->SiS_Pr); -#endif kfree(sisusb); } @@ -2842,54 +2656,8 @@ static int sisusb_handle_command(struct sisusb_usb_data *sisusb, case SUCMD_HANDLETEXTMODE: retval = 0; -#ifdef CONFIG_USB_SISUSBVGA_CON - /* Gfx core must be initialized, SiS_Pr must exist */ - if (!sisusb->gfxinit || !sisusb->SiS_Pr) - return -ENODEV; - - switch (y->data0) { - case 0: - retval = sisusb_reset_text_mode(sisusb, 0); - break; - case 1: - sisusb->textmodedestroyed = 1; - break; - } -#endif - break; - -#ifdef CONFIG_USB_SISUSBVGA_CON - case SUCMD_SETMODE: - /* Gfx core must be initialized, SiS_Pr must exist */ - if (!sisusb->gfxinit || !sisusb->SiS_Pr) - return -ENODEV; - - retval = 0; - - sisusb->SiS_Pr->IOAddress = SISUSB_PCI_IOPORTBASE + 0x30; - sisusb->SiS_Pr->sisusb = (void *)sisusb; - - if (SiSUSBSetMode(sisusb->SiS_Pr, y->data3)) - retval = -EINVAL; - break; - case SUCMD_SETVESAMODE: - /* Gfx core must be initialized, SiS_Pr must exist */ - if 
(!sisusb->gfxinit || !sisusb->SiS_Pr) - return -ENODEV; - - retval = 0; - - sisusb->SiS_Pr->IOAddress = SISUSB_PCI_IOPORTBASE + 0x30; - sisusb->SiS_Pr->sisusb = (void *)sisusb; - - if (SiSUSBSetVESAMode(sisusb->SiS_Pr, y->data3)) - retval = -EINVAL; - - break; -#endif - default: retval = -EINVAL; } @@ -2942,11 +2710,7 @@ static long sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) x.sisusb_vramsize = sisusb->vramsize; x.sisusb_minor = sisusb->minor; x.sisusb_fbdevactive = 0; -#ifdef CONFIG_USB_SISUSBVGA_CON - x.sisusb_conactive = sisusb->haveconsole ? 1 : 0; -#else x.sisusb_conactive = 0; -#endif memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved)); if (copy_to_user((void __user *)arg, &x, sizeof(x))) @@ -3090,15 +2854,6 @@ static int sisusb_probe(struct usb_interface *intf, dev_info(&sisusb->sisusb_dev->dev, "Allocated %d output buffers\n", sisusb->numobufs); -#ifdef CONFIG_USB_SISUSBVGA_CON - /* Allocate our SiS_Pr */ - sisusb->SiS_Pr = kmalloc(sizeof(struct SiS_Private), GFP_KERNEL); - if (!sisusb->SiS_Pr) { - retval = -ENOMEM; - goto error_4; - } -#endif - /* Do remaining init stuff */ init_waitqueue_head(&sisusb->wait_q); @@ -3111,12 +2866,6 @@ static int sisusb_probe(struct usb_interface *intf, if (dev->speed == USB_SPEED_HIGH || dev->speed >= USB_SPEED_SUPER) { int initscreen = 1; -#ifdef CONFIG_USB_SISUSBVGA_CON - if (sisusb_first_vc > 0 && sisusb_last_vc > 0 && - sisusb_first_vc <= sisusb_last_vc && - sisusb_last_vc <= MAX_NR_CONSOLES) - initscreen = 0; -#endif if (sisusb_init_gfxdevice(sisusb, initscreen)) dev_err(&sisusb->sisusb_dev->dev, "Failed to early initialize device\n"); @@ -3133,10 +2882,6 @@ static int sisusb_probe(struct usb_interface *intf, dev_dbg(&sisusb->sisusb_dev->dev, "*** RWTEST END ***\n"); #endif -#ifdef CONFIG_USB_SISUSBVGA_CON - sisusb_console_init(sisusb, sisusb_first_vc, sisusb_last_vc); -#endif - return 0; error_4: @@ -3159,10 +2904,6 @@ static void sisusb_disconnect(struct usb_interface *intf) if (!sisusb) return; -#ifdef CONFIG_USB_SISUSBVGA_CON - sisusb_console_exit(sisusb); -#endif - usb_deregister_dev(intf, &usb_sisusb_class); mutex_lock(&sisusb->lock); @@ -3206,23 +2947,7 @@ static struct usb_driver sisusb_driver = { .id_table = sisusb_table, }; -static int __init usb_sisusb_init(void) -{ - -#ifdef CONFIG_USB_SISUSBVGA_CON - sisusb_init_concode(); -#endif - - return usb_register(&sisusb_driver); -} - -static void __exit usb_sisusb_exit(void) -{ - usb_deregister(&sisusb_driver); -} - -module_init(usb_sisusb_init); -module_exit(usb_sisusb_exit); +module_usb_driver(sisusb_driver); MODULE_AUTHOR("Thomas Winischhofer "); MODULE_DESCRIPTION("sisusbvga - Driver for Net2280/SiS315-based USB2VGA dongles"); diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index 54337d72bb9ff2c1c9e3f1ed901e659aeef4140f..e3abe67a155d0b93358225d703745a97db89c8f4 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c @@ -699,8 +699,7 @@ static int usb251xb_probe(struct usb251xb *hub) return 0; } -static int usb251xb_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int usb251xb_i2c_probe(struct i2c_client *i2c) { struct usb251xb *hub; @@ -758,7 +757,7 @@ static struct i2c_driver usb251xb_i2c_driver = { .of_match_table = of_match_ptr(usb251xb_of_match), .pm = &usb251xb_pm_ops, }, - .probe = usb251xb_i2c_probe, + .probe_new = usb251xb_i2c_probe, .id_table = usb251xb_id, }; diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c index 
c70ca475c7c7e139aa1413edec5d154e4ad7969a..bd47c4437ca46b17c80cdaeaffb2f6cee2f9d93a 100644 --- a/drivers/usb/misc/usb3503.c +++ b/drivers/usb/misc/usb3503.c @@ -280,8 +280,7 @@ err_clk: return err; } -static int usb3503_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int usb3503_i2c_probe(struct i2c_client *i2c) { struct usb3503 *hub; int err; @@ -400,7 +399,7 @@ static struct i2c_driver usb3503_i2c_driver = { .pm = pm_ptr(&usb3503_i2c_pm_ops), .of_match_table = of_match_ptr(usb3503_of_match), }, - .probe = usb3503_i2c_probe, + .probe_new = usb3503_i2c_probe, .remove = usb3503_i2c_remove, .id_table = usb3503_id, }; diff --git a/drivers/usb/misc/usb4604.c b/drivers/usb/misc/usb4604.c index 2142af9bbdecd4ba72bea946a68ce6e0cafcdab8..6b5e77231efa175fe07fe9d910f064094d05ecfd 100644 --- a/drivers/usb/misc/usb4604.c +++ b/drivers/usb/misc/usb4604.c @@ -97,8 +97,7 @@ static int usb4604_probe(struct usb4604 *hub) return usb4604_switch_mode(hub, hub->mode); } -static int usb4604_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int usb4604_i2c_probe(struct i2c_client *i2c) { struct usb4604 *hub; @@ -155,7 +154,7 @@ static struct i2c_driver usb4604_i2c_driver = { .pm = pm_ptr(&usb4604_i2c_pm_ops), .of_match_table = of_match_ptr(usb4604_of_match), }, - .probe = usb4604_i2c_probe, + .probe_new = usb4604_i2c_probe, .id_table = usb4604_id, }; module_i2c_driver(usb4604_i2c_driver); diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 6c8f7763e75e49799a14d603e0582f3067dc2a98..3a1f4bcea80c0198dcca49c38b4b25b3669660ca 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -70,12 +70,6 @@ config USB_MUSB_SUNXI select GENERIC_PHY select SUNXI_SRAM -config USB_MUSB_DAVINCI - tristate "DaVinci" - depends on ARCH_DAVINCI_DMx - depends on NOP_USB_XCEIV - depends on BROKEN - config USB_MUSB_DA8XX tristate "DA8xx/OMAP-L1x" depends on ARCH_DAVINCI_DA8XX @@ -94,11 +88,6 @@ config USB_MUSB_OMAP2PLUS depends on OMAP_CONTROL_PHY || !OMAP_CONTROL_PHY select GENERIC_PHY -config USB_MUSB_AM35X - tristate "AM35x" - depends on ARCH_OMAP - depends on NOP_USB_XCEIV - config USB_MUSB_DSPS tristate "TI DSPS platforms" depends on ARCH_OMAP2PLUS || COMPILE_TEST @@ -113,7 +102,6 @@ config USB_MUSB_JZ4740 depends on OF depends on MIPS || COMPILE_TEST depends on USB_MUSB_GADGET - depends on USB=n || USB_OTG_DISABLE_EXTERNAL_HUB select USB_ROLE_SWITCH config USB_MUSB_MEDIATEK @@ -161,12 +149,6 @@ config USB_INVENTRA_DMA help Enable DMA transfers using Mentor's engine. -config USB_TI_CPPI_DMA - bool 'TI CPPI (Davinci)' - depends on USB_MUSB_DAVINCI - help - Enable DMA transfers when TI CPPI DMA is available. 
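/*
 * Editorial aside, not part of the original diff: the usb251xb, usb3503
 * and usb4604 hunks above all make the same conversion from the legacy
 * two-argument i2c probe to .probe_new, which drops the i2c_device_id
 * argument. A driver that still keys behaviour off the id table can look
 * the entry up itself with i2c_match_id(). A minimal sketch of the
 * pattern; "examplehub", hub_probe and hub_ids are hypothetical names:
 */
#include <linux/i2c.h>
#include <linux/module.h>

static const struct i2c_device_id hub_ids[] = {
	{ "examplehub", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, hub_ids);

static int hub_probe(struct i2c_client *client)
{
	/* Only needed if the driver used the old second argument: */
	const struct i2c_device_id *id = i2c_match_id(hub_ids, client);

	dev_info(&client->dev, "probed as %s\n", id ? id->name : "unknown");
	return 0;
}

static struct i2c_driver hub_driver = {
	.driver = {
		.name = "examplehub",
	},
	.probe_new = hub_probe,		/* single-argument probe */
	.id_table = hub_ids,
};
module_i2c_driver(hub_driver);

MODULE_DESCRIPTION("Sketch of an i2c .probe_new conversion");
MODULE_LICENSE("GPL");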
- config USB_TI_CPPI41_DMA bool 'TI CPPI 4.1' depends on (ARCH_OMAP || ARCH_DAVINCI_DA8XX) && DMADEVICES diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile index 51dd54a8de4977f4ed025974cc96f8ccb713ea50..5dccf0e453e1b05a133b58ab3023742c7c68bd30 100644 --- a/drivers/usb/musb/Makefile +++ b/drivers/usb/musb/Makefile @@ -16,10 +16,8 @@ musb_hdrc-$(CONFIG_DEBUG_FS) += musb_debugfs.o # Hardware Glue Layer obj-$(CONFIG_USB_MUSB_OMAP2PLUS) += omap2430.o -obj-$(CONFIG_USB_MUSB_AM35X) += am35x.o obj-$(CONFIG_USB_MUSB_DSPS) += musb_dsps.o obj-$(CONFIG_USB_MUSB_TUSB6010) += tusb6010.o -obj-$(CONFIG_USB_MUSB_DAVINCI) += davinci.o obj-$(CONFIG_USB_MUSB_DA8XX) += da8xx.o obj-$(CONFIG_USB_MUSB_UX500) += ux500.o obj-$(CONFIG_USB_MUSB_JZ4740) += jz4740.o @@ -33,7 +31,6 @@ obj-$(CONFIG_USB_MUSB_POLARFIRE_SOC) += mpfs.o # though PIO is always there to back up DMA, and for ep0 musb_hdrc-$(CONFIG_USB_INVENTRA_DMA) += musbhsdma.o -musb_hdrc-$(CONFIG_USB_TI_CPPI_DMA) += cppi_dma.o musb_hdrc-$(CONFIG_USB_TUSB_OMAP_DMA) += tusb6010_omap.o musb_hdrc-$(CONFIG_USB_UX500_DMA) += ux500_dma.o musb_hdrc-$(CONFIG_USB_TI_CPPI41_DMA) += musb_cppi41.o diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c deleted file mode 100644 index bf2c0fa6cb3202fd60958fb9692a4e7ef9651f6b..0000000000000000000000000000000000000000 --- a/drivers/usb/musb/am35x.c +++ /dev/null @@ -1,610 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* - * Texas Instruments AM35x "glue layer" - * - * Copyright (c) 2010, by Texas Instruments - * - * Based on the DA8xx "glue layer" code. - * Copyright (c) 2008-2009, MontaVista Software, Inc. - * - * This file is part of the Inventra Controller Driver for Linux. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "musb_core.h" - -/* - * AM35x specific definitions - */ -/* USB 2.0 OTG module registers */ -#define USB_REVISION_REG 0x00 -#define USB_CTRL_REG 0x04 -#define USB_STAT_REG 0x08 -#define USB_EMULATION_REG 0x0c -/* 0x10 Reserved */ -#define USB_AUTOREQ_REG 0x14 -#define USB_SRP_FIX_TIME_REG 0x18 -#define USB_TEARDOWN_REG 0x1c -#define EP_INTR_SRC_REG 0x20 -#define EP_INTR_SRC_SET_REG 0x24 -#define EP_INTR_SRC_CLEAR_REG 0x28 -#define EP_INTR_MASK_REG 0x2c -#define EP_INTR_MASK_SET_REG 0x30 -#define EP_INTR_MASK_CLEAR_REG 0x34 -#define EP_INTR_SRC_MASKED_REG 0x38 -#define CORE_INTR_SRC_REG 0x40 -#define CORE_INTR_SRC_SET_REG 0x44 -#define CORE_INTR_SRC_CLEAR_REG 0x48 -#define CORE_INTR_MASK_REG 0x4c -#define CORE_INTR_MASK_SET_REG 0x50 -#define CORE_INTR_MASK_CLEAR_REG 0x54 -#define CORE_INTR_SRC_MASKED_REG 0x58 -/* 0x5c Reserved */ -#define USB_END_OF_INTR_REG 0x60 - -/* Control register bits */ -#define AM35X_SOFT_RESET_MASK 1 - -/* USB interrupt register bits */ -#define AM35X_INTR_USB_SHIFT 16 -#define AM35X_INTR_USB_MASK (0x1ff << AM35X_INTR_USB_SHIFT) -#define AM35X_INTR_DRVVBUS 0x100 -#define AM35X_INTR_RX_SHIFT 16 -#define AM35X_INTR_TX_SHIFT 0 -#define AM35X_TX_EP_MASK 0xffff /* EP0 + 15 Tx EPs */ -#define AM35X_RX_EP_MASK 0xfffe /* 15 Rx EPs */ -#define AM35X_TX_INTR_MASK (AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT) -#define AM35X_RX_INTR_MASK (AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT) - -#define USB_MENTOR_CORE_OFFSET 0x400 - -struct am35x_glue { - struct device *dev; - struct platform_device *musb; - struct platform_device *phy; - struct clk *phy_clk; - struct clk *clk; -}; - -/* - * am35x_musb_enable - enable interrupts - */ -static void am35x_musb_enable(struct musb *musb) -{ - void __iomem *reg_base = musb->ctrl_base; - 
u32 epmask; - - /* Workaround: setup IRQs through both register sets. */ - epmask = ((musb->epmask & AM35X_TX_EP_MASK) << AM35X_INTR_TX_SHIFT) | - ((musb->epmask & AM35X_RX_EP_MASK) << AM35X_INTR_RX_SHIFT); - - musb_writel(reg_base, EP_INTR_MASK_SET_REG, epmask); - musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK); - - /* Force the DRVVBUS IRQ so we can start polling for ID change. */ - musb_writel(reg_base, CORE_INTR_SRC_SET_REG, - AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT); -} - -/* - * am35x_musb_disable - disable HDRC and flush interrupts - */ -static void am35x_musb_disable(struct musb *musb) -{ - void __iomem *reg_base = musb->ctrl_base; - - musb_writel(reg_base, CORE_INTR_MASK_CLEAR_REG, AM35X_INTR_USB_MASK); - musb_writel(reg_base, EP_INTR_MASK_CLEAR_REG, - AM35X_TX_INTR_MASK | AM35X_RX_INTR_MASK); - musb_writel(reg_base, USB_END_OF_INTR_REG, 0); -} - -#define portstate(stmt) stmt - -static void am35x_musb_set_vbus(struct musb *musb, int is_on) -{ - WARN_ON(is_on && is_peripheral_active(musb)); -} - -#define POLL_SECONDS 2 - -static void otg_timer(struct timer_list *t) -{ - struct musb *musb = from_timer(musb, t, dev_timer); - void __iomem *mregs = musb->mregs; - u8 devctl; - unsigned long flags; - - /* - * We poll because AM35x's won't expose several OTG-critical - * status change events (from the transceiver) otherwise. - */ - devctl = musb_readb(mregs, MUSB_DEVCTL); - dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, - usb_otg_state_string(musb->xceiv->otg->state)); - - spin_lock_irqsave(&musb->lock, flags); - switch (musb->xceiv->otg->state) { - case OTG_STATE_A_WAIT_BCON: - devctl &= ~MUSB_DEVCTL_SESSION; - musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); - - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - if (devctl & MUSB_DEVCTL_BDEVICE) { - musb->xceiv->otg->state = OTG_STATE_B_IDLE; - MUSB_DEV_MODE(musb); - } else { - musb->xceiv->otg->state = OTG_STATE_A_IDLE; - MUSB_HST_MODE(musb); - } - break; - case OTG_STATE_A_WAIT_VFALL: - musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; - musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG, - MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT); - break; - case OTG_STATE_B_IDLE: - devctl = musb_readb(mregs, MUSB_DEVCTL); - if (devctl & MUSB_DEVCTL_BDEVICE) - mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ); - else - musb->xceiv->otg->state = OTG_STATE_A_IDLE; - break; - default: - break; - } - spin_unlock_irqrestore(&musb->lock, flags); -} - -static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout) -{ - static unsigned long last_timer; - - if (timeout == 0) - timeout = jiffies + msecs_to_jiffies(3); - - /* Never idle if active, or when VBUS timeout is not set as host */ - if (musb->is_active || (musb->a_wait_bcon == 0 && - musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) { - dev_dbg(musb->controller, "%s active, deleting timer\n", - usb_otg_state_string(musb->xceiv->otg->state)); - del_timer(&musb->dev_timer); - last_timer = jiffies; - return; - } - - if (time_after(last_timer, timeout) && timer_pending(&musb->dev_timer)) { - dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n"); - return; - } - last_timer = timeout; - - dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n", - usb_otg_state_string(musb->xceiv->otg->state), - jiffies_to_msecs(timeout - jiffies)); - mod_timer(&musb->dev_timer, timeout); -} - -static irqreturn_t am35x_musb_interrupt(int irq, void *hci) -{ - struct musb *musb = hci; - void __iomem *reg_base = musb->ctrl_base; - 
struct device *dev = musb->controller; - struct musb_hdrc_platform_data *plat = dev_get_platdata(dev); - struct omap_musb_board_data *data = plat->board_data; - unsigned long flags; - irqreturn_t ret = IRQ_NONE; - u32 epintr, usbintr; - - spin_lock_irqsave(&musb->lock, flags); - - /* Get endpoint interrupts */ - epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG); - - if (epintr) { - musb_writel(reg_base, EP_INTR_SRC_CLEAR_REG, epintr); - - musb->int_rx = - (epintr & AM35X_RX_INTR_MASK) >> AM35X_INTR_RX_SHIFT; - musb->int_tx = - (epintr & AM35X_TX_INTR_MASK) >> AM35X_INTR_TX_SHIFT; - } - - /* Get usb core interrupts */ - usbintr = musb_readl(reg_base, CORE_INTR_SRC_MASKED_REG); - if (!usbintr && !epintr) - goto eoi; - - if (usbintr) { - musb_writel(reg_base, CORE_INTR_SRC_CLEAR_REG, usbintr); - - musb->int_usb = - (usbintr & AM35X_INTR_USB_MASK) >> AM35X_INTR_USB_SHIFT; - } - /* - * DRVVBUS IRQs are the only proxy we have (a very poor one!) for - * AM35x's missing ID change IRQ. We need an ID change IRQ to - * switch appropriately between halves of the OTG state machine. - * Managing DEVCTL.SESSION per Mentor docs requires that we know its - * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. - * Also, DRVVBUS pulses for SRP (but not at 5V) ... - */ - if (usbintr & (AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT)) { - int drvvbus = musb_readl(reg_base, USB_STAT_REG); - void __iomem *mregs = musb->mregs; - u8 devctl = musb_readb(mregs, MUSB_DEVCTL); - int err; - - err = musb->int_usb & MUSB_INTR_VBUSERROR; - if (err) { - /* - * The Mentor core doesn't debounce VBUS as needed - * to cope with device connect current spikes. This - * means it's not uncommon for bus-powered devices - * to get VBUS errors during enumeration. - * - * This is a workaround, but newer RTL from Mentor - * seems to allow a better one: "re"-starting sessions - * without waiting for VBUS to stop registering in - * devctl. - */ - musb->int_usb &= ~MUSB_INTR_VBUSERROR; - musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL; - mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ); - WARNING("VBUS error workaround (delay coming)\n"); - } else if (drvvbus) { - MUSB_HST_MODE(musb); - musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; - portstate(musb->port1_status |= USB_PORT_STAT_POWER); - del_timer(&musb->dev_timer); - } else { - musb->is_active = 0; - MUSB_DEV_MODE(musb); - musb->xceiv->otg->state = OTG_STATE_B_IDLE; - portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); - } - - /* NOTE: this must complete power-on within 100 ms. */ - dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n", - drvvbus ? "on" : "off", - usb_otg_state_string(musb->xceiv->otg->state), - err ? " ERROR" : "", - devctl); - ret = IRQ_HANDLED; - } - - /* Drop spurious RX and TX if device is disconnected */ - if (musb->int_usb & MUSB_INTR_DISCONNECT) { - musb->int_tx = 0; - musb->int_rx = 0; - } - - if (musb->int_tx || musb->int_rx || musb->int_usb) - ret |= musb_interrupt(musb); - -eoi: - /* EOI needs to be written for the IRQ to be re-asserted. 
*/ - if (ret == IRQ_HANDLED || epintr || usbintr) { - /* clear level interrupt */ - if (data->clear_irq) - data->clear_irq(); - /* write EOI */ - musb_writel(reg_base, USB_END_OF_INTR_REG, 0); - } - - /* Poll for ID change */ - if (musb->xceiv->otg->state == OTG_STATE_B_IDLE) - mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ); - - spin_unlock_irqrestore(&musb->lock, flags); - - return ret; -} - -static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode) -{ - struct device *dev = musb->controller; - struct musb_hdrc_platform_data *plat = dev_get_platdata(dev); - struct omap_musb_board_data *data = plat->board_data; - int retval = 0; - - if (data->set_mode) - data->set_mode(musb_mode); - else - retval = -EIO; - - return retval; -} - -static int am35x_musb_init(struct musb *musb) -{ - struct device *dev = musb->controller; - struct musb_hdrc_platform_data *plat = dev_get_platdata(dev); - struct omap_musb_board_data *data = plat->board_data; - void __iomem *reg_base = musb->ctrl_base; - u32 rev; - - musb->mregs += USB_MENTOR_CORE_OFFSET; - - /* Returns zero if e.g. not clocked */ - rev = musb_readl(reg_base, USB_REVISION_REG); - if (!rev) - return -ENODEV; - - musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); - if (IS_ERR_OR_NULL(musb->xceiv)) - return -EPROBE_DEFER; - - timer_setup(&musb->dev_timer, otg_timer, 0); - - /* Reset the musb */ - if (data->reset) - data->reset(); - - /* Reset the controller */ - musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK); - - /* Start the on-chip PHY and its PLL. */ - if (data->set_phy_power) - data->set_phy_power(1); - - msleep(5); - - musb->isr = am35x_musb_interrupt; - - /* clear level interrupt */ - if (data->clear_irq) - data->clear_irq(); - - return 0; -} - -static int am35x_musb_exit(struct musb *musb) -{ - struct device *dev = musb->controller; - struct musb_hdrc_platform_data *plat = dev_get_platdata(dev); - struct omap_musb_board_data *data = plat->board_data; - - del_timer_sync(&musb->dev_timer); - - /* Shutdown the on-chip PHY and its PLL. */ - if (data->set_phy_power) - data->set_phy_power(0); - - usb_put_phy(musb->xceiv); - - return 0; -} - -/* AM35x supports only 32bit read operation */ -static void am35x_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) -{ - void __iomem *fifo = hw_ep->fifo; - u32 val; - int i; - - /* Read for 32bit-aligned destination address */ - if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) { - readsl(fifo, dst, len >> 2); - dst += len & ~0x03; - len &= 0x03; - } - /* - * Now read the remaining 1 to 3 byte or complete length if - * unaligned address. 
- */ - if (len > 4) { - for (i = 0; i < (len >> 2); i++) { - *(u32 *) dst = musb_readl(fifo, 0); - dst += 4; - } - len &= 0x03; - } - if (len > 0) { - val = musb_readl(fifo, 0); - memcpy(dst, &val, len); - } -} - -static const struct musb_platform_ops am35x_ops = { - .quirks = MUSB_DMA_INVENTRA | MUSB_INDEXED_EP, - .init = am35x_musb_init, - .exit = am35x_musb_exit, - - .read_fifo = am35x_read_fifo, -#ifdef CONFIG_USB_INVENTRA_DMA - .dma_init = musbhs_dma_controller_create, - .dma_exit = musbhs_dma_controller_destroy, -#endif - .enable = am35x_musb_enable, - .disable = am35x_musb_disable, - - .set_mode = am35x_musb_set_mode, - .try_idle = am35x_musb_try_idle, - - .set_vbus = am35x_musb_set_vbus, -}; - -static const struct platform_device_info am35x_dev_info = { - .name = "musb-hdrc", - .id = PLATFORM_DEVID_AUTO, - .dma_mask = DMA_BIT_MASK(32), -}; - -static int am35x_probe(struct platform_device *pdev) -{ - struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev); - struct platform_device *musb; - struct am35x_glue *glue; - struct platform_device_info pinfo; - struct clk *phy_clk; - struct clk *clk; - - int ret = -ENOMEM; - - glue = kzalloc(sizeof(*glue), GFP_KERNEL); - if (!glue) - goto err0; - - phy_clk = clk_get(&pdev->dev, "fck"); - if (IS_ERR(phy_clk)) { - dev_err(&pdev->dev, "failed to get PHY clock\n"); - ret = PTR_ERR(phy_clk); - goto err3; - } - - clk = clk_get(&pdev->dev, "ick"); - if (IS_ERR(clk)) { - dev_err(&pdev->dev, "failed to get clock\n"); - ret = PTR_ERR(clk); - goto err4; - } - - ret = clk_enable(phy_clk); - if (ret) { - dev_err(&pdev->dev, "failed to enable PHY clock\n"); - goto err5; - } - - ret = clk_enable(clk); - if (ret) { - dev_err(&pdev->dev, "failed to enable clock\n"); - goto err6; - } - - glue->dev = &pdev->dev; - glue->phy_clk = phy_clk; - glue->clk = clk; - - pdata->platform_ops = &am35x_ops; - - glue->phy = usb_phy_generic_register(); - if (IS_ERR(glue->phy)) { - ret = PTR_ERR(glue->phy); - goto err7; - } - platform_set_drvdata(pdev, glue); - - pinfo = am35x_dev_info; - pinfo.parent = &pdev->dev; - pinfo.res = pdev->resource; - pinfo.num_res = pdev->num_resources; - pinfo.data = pdata; - pinfo.size_data = sizeof(*pdata); - pinfo.fwnode = of_fwnode_handle(pdev->dev.of_node); - pinfo.of_node_reused = true; - - glue->musb = musb = platform_device_register_full(&pinfo); - if (IS_ERR(musb)) { - ret = PTR_ERR(musb); - dev_err(&pdev->dev, "failed to register musb device: %d\n", ret); - goto err8; - } - - return 0; - -err8: - usb_phy_generic_unregister(glue->phy); - -err7: - clk_disable(clk); - -err6: - clk_disable(phy_clk); - -err5: - clk_put(clk); - -err4: - clk_put(phy_clk); - -err3: - kfree(glue); - -err0: - return ret; -} - -static int am35x_remove(struct platform_device *pdev) -{ - struct am35x_glue *glue = platform_get_drvdata(pdev); - - platform_device_unregister(glue->musb); - usb_phy_generic_unregister(glue->phy); - clk_disable(glue->clk); - clk_disable(glue->phy_clk); - clk_put(glue->clk); - clk_put(glue->phy_clk); - kfree(glue); - - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int am35x_suspend(struct device *dev) -{ - struct am35x_glue *glue = dev_get_drvdata(dev); - struct musb_hdrc_platform_data *plat = dev_get_platdata(dev); - struct omap_musb_board_data *data = plat->board_data; - - /* Shutdown the on-chip PHY and its PLL. 
*/ - if (data->set_phy_power) - data->set_phy_power(0); - - clk_disable(glue->phy_clk); - clk_disable(glue->clk); - - return 0; -} - -static int am35x_resume(struct device *dev) -{ - struct am35x_glue *glue = dev_get_drvdata(dev); - struct musb_hdrc_platform_data *plat = dev_get_platdata(dev); - struct omap_musb_board_data *data = plat->board_data; - int ret; - - /* Start the on-chip PHY and its PLL. */ - if (data->set_phy_power) - data->set_phy_power(1); - - ret = clk_enable(glue->phy_clk); - if (ret) { - dev_err(dev, "failed to enable PHY clock\n"); - return ret; - } - - ret = clk_enable(glue->clk); - if (ret) { - dev_err(dev, "failed to enable clock\n"); - return ret; - } - - return 0; -} -#endif - -static SIMPLE_DEV_PM_OPS(am35x_pm_ops, am35x_suspend, am35x_resume); - -static struct platform_driver am35x_driver = { - .probe = am35x_probe, - .remove = am35x_remove, - .driver = { - .name = "musb-am35x", - .pm = &am35x_pm_ops, - }, -}; - -MODULE_DESCRIPTION("AM35x MUSB Glue Layer"); -MODULE_AUTHOR("Ajay Kumar Gupta "); -MODULE_LICENSE("GPL v2"); -module_platform_driver(am35x_driver); diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c deleted file mode 100644 index edb5b63d70634d631e9a473947ff910adc162296..0000000000000000000000000000000000000000 --- a/drivers/usb/musb/cppi_dma.c +++ /dev/null @@ -1,1547 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2005-2006 by Texas Instruments - * - * This file implements a DMA interface using TI's CPPI DMA. - * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB. - * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci. - */ - -#include -#include -#include -#include - -#include "musb_core.h" -#include "musb_debug.h" -#include "cppi_dma.h" -#include "davinci.h" - - -/* CPPI DMA status 7-mar-2006: - * - * - See musb_{host,gadget}.c for more info - * - * - Correct RX DMA generally forces the engine into irq-per-packet mode, - * which can easily saturate the CPU under non-mass-storage loads. - * - * NOTES 24-aug-2006 (2.6.18-rc4): - * - * - peripheral RXDMA wedged in a test with packets of length 512/512/1. - * evidently after the 1 byte packet was received and acked, the queue - * of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003, - * and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401 - * 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx - * of its next (512 byte) packet. IRQ issues? - * - * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will - * evidently also directly update the RX and TX CSRs ... so audit all - * host and peripheral side DMA code to avoid CSR access after DMA has - * been started. - */ - -/* REVISIT now we can avoid preallocating these descriptors; or - * more simply, switch to a global freelist not per-channel ones. - * Note: at full speed, 64 descriptors == 4K bulk data. - */ -#define NUM_TXCHAN_BD 64 -#define NUM_RXCHAN_BD 64 - -static inline void cpu_drain_writebuffer(void) -{ - wmb(); -#ifdef CONFIG_CPU_ARM926T - /* REVISIT this "should not be needed", - * but lack of it sure seemed to hurt ... 
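[cpu_drain_writebuffer() pairs a wmb() with an ARM926-specific drain so that buffer-descriptor writes are visible to the DMA engine before the head-pointer "doorbell" write that starts it fetching. A generic sketch of that ordering rule, using plain writel() rather than the driver's musb_writel() wrapper (names illustrative):

    #include <asm/barrier.h>
    #include <linux/io.h>
    #include <linux/types.h>

    /* publish all descriptor writes before the MMIO write that lets
     * hardware start fetching them, as callers of
     * cpu_drain_writebuffer() do before writing the head pointer
     */
    static void example_kick_dma(void __iomem *head_reg, u32 first_bd_dma)
    {
            wmb();                          /* descriptor writes ... */
            writel(first_bd_dma, head_reg); /* ... before the doorbell */
    }
]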
- */ - asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n"); -#endif -} - -static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c) -{ - struct cppi_descriptor *bd = c->freelist; - - if (bd) - c->freelist = bd->next; - return bd; -} - -static inline void -cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd) -{ - if (!bd) - return; - bd->next = c->freelist; - c->freelist = bd; -} - -/* - * Start DMA controller - * - * Initialize the DMA controller as necessary. - */ - -/* zero out entire rx state RAM entry for the channel */ -static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx) -{ - musb_writel(&rx->rx_skipbytes, 0, 0); - musb_writel(&rx->rx_head, 0, 0); - musb_writel(&rx->rx_sop, 0, 0); - musb_writel(&rx->rx_current, 0, 0); - musb_writel(&rx->rx_buf_current, 0, 0); - musb_writel(&rx->rx_len_len, 0, 0); - musb_writel(&rx->rx_cnt_cnt, 0, 0); -} - -/* zero out entire tx state RAM entry for the channel */ -static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr) -{ - musb_writel(&tx->tx_head, 0, 0); - musb_writel(&tx->tx_buf, 0, 0); - musb_writel(&tx->tx_current, 0, 0); - musb_writel(&tx->tx_buf_current, 0, 0); - musb_writel(&tx->tx_info, 0, 0); - musb_writel(&tx->tx_rem_len, 0, 0); - /* musb_writel(&tx->tx_dummy, 0, 0); */ - musb_writel(&tx->tx_complete, 0, ptr); -} - -static void cppi_pool_init(struct cppi *cppi, struct cppi_channel *c) -{ - int j; - - /* initialize channel fields */ - c->head = NULL; - c->tail = NULL; - c->last_processed = NULL; - c->channel.status = MUSB_DMA_STATUS_UNKNOWN; - c->controller = cppi; - c->is_rndis = 0; - c->freelist = NULL; - - /* build the BD Free list for the channel */ - for (j = 0; j < NUM_TXCHAN_BD + 1; j++) { - struct cppi_descriptor *bd; - dma_addr_t dma; - - bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma); - bd->dma = dma; - cppi_bd_free(c, bd); - } -} - -static int cppi_channel_abort(struct dma_channel *); - -static void cppi_pool_free(struct cppi_channel *c) -{ - struct cppi *cppi = c->controller; - struct cppi_descriptor *bd; - - (void) cppi_channel_abort(&c->channel); - c->channel.status = MUSB_DMA_STATUS_UNKNOWN; - c->controller = NULL; - - /* free all its bds */ - bd = c->last_processed; - do { - if (bd) - dma_pool_free(cppi->pool, bd, bd->dma); - bd = cppi_bd_alloc(c); - } while (bd); - c->last_processed = NULL; -} - -static void cppi_controller_start(struct cppi *controller) -{ - void __iomem *tibase; - int i; - - /* do whatever is necessary to start controller */ - for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { - controller->tx[i].transmit = true; - controller->tx[i].index = i; - } - for (i = 0; i < ARRAY_SIZE(controller->rx); i++) { - controller->rx[i].transmit = false; - controller->rx[i].index = i; - } - - /* setup BD list on a per channel basis */ - for (i = 0; i < ARRAY_SIZE(controller->tx); i++) - cppi_pool_init(controller, controller->tx + i); - for (i = 0; i < ARRAY_SIZE(controller->rx); i++) - cppi_pool_init(controller, controller->rx + i); - - tibase = controller->tibase; - INIT_LIST_HEAD(&controller->tx_complete); - - /* initialise tx/rx channel head pointers to zero */ - for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { - struct cppi_channel *tx_ch = controller->tx + i; - struct cppi_tx_stateram __iomem *tx; - - INIT_LIST_HEAD(&tx_ch->tx_complete); - - tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i); - tx_ch->state_ram = tx; - cppi_reset_tx(tx, 0); - } - for (i = 0; i < ARRAY_SIZE(controller->rx); i++) { - struct cppi_channel *rx_ch = controller->rx + i; - struct 
cppi_rx_stateram __iomem *rx; - - INIT_LIST_HEAD(&rx_ch->tx_complete); - - rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i); - rx_ch->state_ram = rx; - cppi_reset_rx(rx); - } - - /* enable individual cppi channels */ - musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, - DAVINCI_DMA_ALL_CHANNELS_ENABLE); - musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG, - DAVINCI_DMA_ALL_CHANNELS_ENABLE); - - /* enable tx/rx CPPI control */ - musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE); - musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE); - - /* disable RNDIS mode, also host rx RNDIS autorequest */ - musb_writel(tibase, DAVINCI_RNDIS_REG, 0); - musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0); -} - -/* - * Stop DMA controller - * - * De-Init the DMA controller as necessary. - */ - -static void cppi_controller_stop(struct cppi *controller) -{ - void __iomem *tibase; - int i; - struct musb *musb; - - musb = controller->controller.musb; - - tibase = controller->tibase; - /* DISABLE INDIVIDUAL CHANNEL Interrupts */ - musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG, - DAVINCI_DMA_ALL_CHANNELS_ENABLE); - musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG, - DAVINCI_DMA_ALL_CHANNELS_ENABLE); - - musb_dbg(musb, "Tearing down RX and TX Channels"); - for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { - /* FIXME restructure of txdma to use bds like rxdma */ - controller->tx[i].last_processed = NULL; - cppi_pool_free(controller->tx + i); - } - for (i = 0; i < ARRAY_SIZE(controller->rx); i++) - cppi_pool_free(controller->rx + i); - - /* in Tx Case proper teardown is supported. We resort to disabling - * Tx/Rx CPPI after cleanup of Tx channels. Before TX teardown is - * complete TX CPPI cannot be disabled. - */ - /*disable tx/rx cppi */ - musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE); - musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE); -} - -/* While dma channel is allocated, we only want the core irqs active - * for fault reports, otherwise we'd get irqs that we don't care about. - * Except for TX irqs, where dma done != fifo empty and reusable ... - * - * NOTE: docs don't say either way, but irq masking **enables** irqs. - * - * REVISIT same issue applies to pure PIO usage too, and non-cppi dma... - */ -static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum) -{ - musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8)); -} - -static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum) -{ - musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8)); -} - - -/* - * Allocate a CPPI Channel for DMA. With CPPI, channels are bound to - * each transfer direction of a non-control endpoint, so allocating - * (and deallocating) is mostly a way to notice bad housekeeping on - * the software side. We assume the irqs are always active. - */ -static struct dma_channel * -cppi_channel_allocate(struct dma_controller *c, - struct musb_hw_ep *ep, u8 transmit) -{ - struct cppi *controller; - u8 index; - struct cppi_channel *cppi_ch; - void __iomem *tibase; - struct musb *musb; - - controller = container_of(c, struct cppi, controller); - tibase = controller->tibase; - musb = c->musb; - - /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */ - index = ep->epnum - 1; - - /* return the corresponding CPPI Channel Handle, and - * probably disable the non-CPPI irq until we need it. 
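[core_rxirq_enable()/core_rxirq_disable() below rely on dedicated mask SET and CLR registers, so enabling or disabling one endpoint's interrupt is a single write with no read-modify-write race against other contexts. The same shape, with hypothetical register offsets:

    #include <linux/io.h>

    #define EXAMPLE_INT_MASK_SET_REG        0x00    /* hypothetical */
    #define EXAMPLE_INT_MASK_CLR_REG        0x04    /* hypothetical */

    /* RX endpoint n is bit (n + 8), matching the DaVinci layout used
     * by core_rxirq_enable()/core_rxirq_disable()
     */
    static void example_rxirq_enable(void __iomem *base, unsigned int epnum)
    {
            writel(1 << (epnum + 8), base + EXAMPLE_INT_MASK_SET_REG);
    }

    static void example_rxirq_disable(void __iomem *base, unsigned int epnum)
    {
            writel(1 << (epnum + 8), base + EXAMPLE_INT_MASK_CLR_REG);
    }
]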
- */ - if (transmit) { - if (index >= ARRAY_SIZE(controller->tx)) { - musb_dbg(musb, "no %cX%d CPPI channel", 'T', index); - return NULL; - } - cppi_ch = controller->tx + index; - } else { - if (index >= ARRAY_SIZE(controller->rx)) { - musb_dbg(musb, "no %cX%d CPPI channel", 'R', index); - return NULL; - } - cppi_ch = controller->rx + index; - core_rxirq_disable(tibase, ep->epnum); - } - - /* REVISIT make this an error later once the same driver code works - * with the other DMA engine too - */ - if (cppi_ch->hw_ep) - musb_dbg(musb, "re-allocating DMA%d %cX channel %p", - index, transmit ? 'T' : 'R', cppi_ch); - cppi_ch->hw_ep = ep; - cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; - cppi_ch->channel.max_len = 0x7fffffff; - - musb_dbg(musb, "Allocate CPPI%d %cX", index, transmit ? 'T' : 'R'); - return &cppi_ch->channel; -} - -/* Release a CPPI Channel. */ -static void cppi_channel_release(struct dma_channel *channel) -{ - struct cppi_channel *c; - void __iomem *tibase; - - /* REVISIT: for paranoia, check state and abort if needed... */ - - c = container_of(channel, struct cppi_channel, channel); - tibase = c->controller->tibase; - if (!c->hw_ep) - musb_dbg(c->controller->controller.musb, - "releasing idle DMA channel %p", c); - else if (!c->transmit) - core_rxirq_enable(tibase, c->index + 1); - - /* for now, leave its cppi IRQ enabled (we won't trigger it) */ - c->hw_ep = NULL; - channel->status = MUSB_DMA_STATUS_UNKNOWN; -} - -/* Context: controller irqlocked */ -static void -cppi_dump_rx(int level, struct cppi_channel *c, const char *tag) -{ - void __iomem *base = c->controller->mregs; - struct cppi_rx_stateram __iomem *rx = c->state_ram; - - musb_ep_select(base, c->index + 1); - - musb_dbg(c->controller->controller.musb, - "RX DMA%d%s: %d left, csr %04x, " - "%08x H%08x S%08x C%08x, " - "B%08x L%08x %08x .. %08x", - c->index, tag, - musb_readl(c->controller->tibase, - DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index), - musb_readw(c->hw_ep->regs, MUSB_RXCSR), - - musb_readl(&rx->rx_skipbytes, 0), - musb_readl(&rx->rx_head, 0), - musb_readl(&rx->rx_sop, 0), - musb_readl(&rx->rx_current, 0), - - musb_readl(&rx->rx_buf_current, 0), - musb_readl(&rx->rx_len_len, 0), - musb_readl(&rx->rx_cnt_cnt, 0), - musb_readl(&rx->rx_complete, 0) - ); -} - -/* Context: controller irqlocked */ -static void -cppi_dump_tx(int level, struct cppi_channel *c, const char *tag) -{ - void __iomem *base = c->controller->mregs; - struct cppi_tx_stateram __iomem *tx = c->state_ram; - - musb_ep_select(base, c->index + 1); - - musb_dbg(c->controller->controller.musb, - "TX DMA%d%s: csr %04x, " - "H%08x S%08x C%08x %08x, " - "F%08x L%08x .. 
%08x", - c->index, tag, - musb_readw(c->hw_ep->regs, MUSB_TXCSR), - - musb_readl(&tx->tx_head, 0), - musb_readl(&tx->tx_buf, 0), - musb_readl(&tx->tx_current, 0), - musb_readl(&tx->tx_buf_current, 0), - - musb_readl(&tx->tx_info, 0), - musb_readl(&tx->tx_rem_len, 0), - /* dummy/unused word 6 */ - musb_readl(&tx->tx_complete, 0) - ); -} - -/* Context: controller irqlocked */ -static inline void -cppi_rndis_update(struct cppi_channel *c, int is_rx, - void __iomem *tibase, int is_rndis) -{ - /* we may need to change the rndis flag for this cppi channel */ - if (c->is_rndis != is_rndis) { - u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG); - u32 temp = 1 << (c->index); - - if (is_rx) - temp <<= 16; - if (is_rndis) - value |= temp; - else - value &= ~temp; - musb_writel(tibase, DAVINCI_RNDIS_REG, value); - c->is_rndis = is_rndis; - } -} - -static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd) -{ - pr_debug("RXBD/%s %08x: " - "nxt %08x buf %08x off.blen %08x opt.plen %08x\n", - tag, bd->dma, - bd->hw_next, bd->hw_bufp, bd->hw_off_len, - bd->hw_options); -} - -static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx) -{ - struct cppi_descriptor *bd; - - cppi_dump_rx(level, rx, tag); - if (rx->last_processed) - cppi_dump_rxbd("last", rx->last_processed); - for (bd = rx->head; bd; bd = bd->next) - cppi_dump_rxbd("active", bd); -} - - -/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX; - * so we won't ever use it (see "CPPI RX Woes" below). - */ -static inline int cppi_autoreq_update(struct cppi_channel *rx, - void __iomem *tibase, int onepacket, unsigned n_bds) -{ - u32 val; - -#ifdef RNDIS_RX_IS_USABLE - u32 tmp; - /* assert(is_host_active(musb)) */ - - /* start from "AutoReq never" */ - tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); - val = tmp & ~((0x3) << (rx->index * 2)); - - /* HCD arranged reqpkt for packet #1. we arrange int - * for all but the last one, maybe in two segments. - */ - if (!onepacket) { -#if 0 - /* use two segments, autoreq "all" then the last "never" */ - val |= ((0x3) << (rx->index * 2)); - n_bds--; -#else - /* one segment, autoreq "all-but-last" */ - val |= ((0x1) << (rx->index * 2)); -#endif - } - - if (val != tmp) { - int n = 100; - - /* make sure that autoreq is updated before continuing */ - musb_writel(tibase, DAVINCI_AUTOREQ_REG, val); - do { - tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); - if (tmp == val) - break; - cpu_relax(); - } while (n-- > 0); - } -#endif - - /* REQPKT is turned off after each segment */ - if (n_bds && rx->channel.actual_len) { - void __iomem *regs = rx->hw_ep->regs; - - val = musb_readw(regs, MUSB_RXCSR); - if (!(val & MUSB_RXCSR_H_REQPKT)) { - val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS; - musb_writew(regs, MUSB_RXCSR, val); - /* flush writebuffer */ - val = musb_readw(regs, MUSB_RXCSR); - } - } - return n_bds; -} - - -/* Buffer enqueuing Logic: - * - * - RX builds new queues each time, to help handle routine "early - * termination" cases (faults, including errors and short reads) - * more correctly. - * - * - for now, TX reuses the same queue of BDs every time - * - * REVISIT long term, we want a normal dynamic model. - * ... the goal will be to append to the - * existing queue, processing completed "dma buffers" (segments) on the fly. - * - * Otherwise we force an IRQ latency between requests, which slows us a lot - * (especially in "transparent" dma). 
Unfortunately that model seems to be - * inherent in the DMA model from the Mentor code, except in the rare case - * of transfers big enough (~128+ KB) that we could append "middle" segments - * in the TX paths. (RX can't do this, see below.) - * - * That's true even in the CPPI- friendly iso case, where most urbs have - * several small segments provided in a group and where the "packet at a time" - * "transparent" DMA model is always correct, even on the RX side. - */ - -/* - * CPPI TX: - * ======== - * TX is a lot more reasonable than RX; it doesn't need to run in - * irq-per-packet mode very often. RNDIS mode seems to behave too - * (except how it handles the exactly-N-packets case). Building a - * txdma queue with multiple requests (urb or usb_request) looks - * like it would work ... but fault handling would need much testing. - * - * The main issue with TX mode RNDIS relates to transfer lengths that - * are an exact multiple of the packet length. It appears that there's - * a hiccup in that case (maybe the DMA completes before the ZLP gets - * written?) boiling down to not being able to rely on CPPI writing any - * terminating zero length packet before the next transfer is written. - * So that's punted to PIO; better yet, gadget drivers can avoid it. - * - * Plus, there's allegedly an undocumented constraint that rndis transfer - * length be a multiple of 64 bytes ... but the chip doesn't act that - * way, and we really don't _want_ that behavior anyway. - * - * On TX, "transparent" mode works ... although experiments have shown - * problems trying to use the SOP/EOP bits in different USB packets. - * - * REVISIT try to handle terminating zero length packets using CPPI - * instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet - * links avoid that issue by forcing them to avoid zlps.) - */ -static void -cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) -{ - unsigned maxpacket = tx->maxpacket; - dma_addr_t addr = tx->buf_dma + tx->offset; - size_t length = tx->buf_len - tx->offset; - struct cppi_descriptor *bd; - unsigned n_bds; - unsigned i; - struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram; - int rndis; - - /* TX can use the CPPI "rndis" mode, where we can probably fit this - * transfer in one BD and one IRQ. The only time we would NOT want - * to use it is when hardware constraints prevent it, or if we'd - * trigger the "send a ZLP?" confusion. - */ - rndis = (maxpacket & 0x3f) == 0 - && length > maxpacket - && length < 0xffff - && (length % maxpacket) != 0; - - if (rndis) { - maxpacket = length; - n_bds = 1; - } else { - if (length) - n_bds = DIV_ROUND_UP(length, maxpacket); - else - n_bds = 1; - n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD); - length = min(n_bds * maxpacket, length); - } - - musb_dbg(musb, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u", - tx->index, - maxpacket, - rndis ? "rndis" : "transparent", - n_bds, - (unsigned long long)addr, length); - - cppi_rndis_update(tx, 0, musb->ctrl_base, rndis); - - /* assuming here that channel_program is called during - * transfer initiation ... current code maintains state - * for one outstanding request only (no queues, not even - * the implicit ones of an iso urb). - */ - - bd = tx->freelist; - tx->head = bd; - tx->last_processed = NULL; - - /* FIXME use BD pool like RX side does, and just queue - * the minimum number for this request. - */ - - /* Prepare queue of BDs first, then hand it to hardware. 
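[For reference, the rndis test computed above reduces to the predicate below; the worked values in the trailing comment are illustrative, not driver output.

    #include <linux/types.h>

    /* TX mode choice from cppi_next_tx_segment(), restated */
    static int tx_can_use_rndis(unsigned int maxpacket, size_t length)
    {
            return (maxpacket & 0x3f) == 0  /* maxpacket multiple of 64 */
                    && length > maxpacket   /* more than one packet ... */
                    && length < 0xffff      /* ... but fits one BD */
                    && (length % maxpacket) != 0; /* short pkt terminates */
    }

    /*
     * tx_can_use_rndis(512, 1500) != 0: 1500 = 2*512 + 476, so the
     * short final packet ends the transfer -- one BD, one IRQ.
     * tx_can_use_rndis(512, 1024) == 0: an exact multiple would need
     * the ZLP the hardware can't be trusted to send, so fall back to
     * one BD per packet ("transparent" mode).
     */
]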
- * All BDs except maybe the last should be of full packet - * size; for RNDIS there _is_ only that last packet. - */ - for (i = 0; i < n_bds; ) { - if (++i < n_bds && bd->next) - bd->hw_next = bd->next->dma; - else - bd->hw_next = 0; - - bd->hw_bufp = tx->buf_dma + tx->offset; - - /* FIXME set EOP only on the last packet, - * SOP only on the first ... avoid IRQs - */ - if ((tx->offset + maxpacket) <= tx->buf_len) { - tx->offset += maxpacket; - bd->hw_off_len = maxpacket; - bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET - | CPPI_OWN_SET | maxpacket; - } else { - /* only this one may be a partial USB Packet */ - u32 partial_len; - - partial_len = tx->buf_len - tx->offset; - tx->offset = tx->buf_len; - bd->hw_off_len = partial_len; - - bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET - | CPPI_OWN_SET | partial_len; - if (partial_len == 0) - bd->hw_options |= CPPI_ZERO_SET; - } - - musb_dbg(musb, "TXBD %p: nxt %08x buf %08x len %04x opt %08x", - bd, bd->hw_next, bd->hw_bufp, - bd->hw_off_len, bd->hw_options); - - /* update the last BD enqueued to the list */ - tx->tail = bd; - bd = bd->next; - } - - /* BDs live in DMA-coherent memory, but writes might be pending */ - cpu_drain_writebuffer(); - - /* Write to the HeadPtr in state RAM to trigger */ - musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma); - - cppi_dump_tx(5, tx, "/S"); -} - -/* - * CPPI RX Woes: - * ============= - * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte - * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back. - * (Full speed transfers have similar scenarios.) - * - * The correct behavior for Linux is that (a) fills the buffer with 300 bytes, - * and the next packet goes into a buffer that's queued later; while (b) fills - * the buffer with 1024 bytes. How to do that with CPPI? - * - * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but - * (b) loses **BADLY** because nothing (!) happens when that second packet - * fills the buffer, much less when a third one arrives. (Which makes this - * not a "true" RNDIS mode. In the RNDIS protocol short-packet termination - * is optional, and it's fine if peripherals -- not hosts! -- pad messages - * out to end-of-buffer. Standard PCI host controller DMA descriptors - * implement that mode by default ... which is no accident.) - * - * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have - * converse problems: (b) is handled right, but (a) loses badly. CPPI RX - * ignores SOP/EOP markings and processes both of those BDs; so both packets - * are loaded into the buffer (with a 212 byte gap between them), and the next - * buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP - * are intended as outputs for RX queues, not inputs...) - * - * - A variant of "transparent" mode -- one BD at a time -- is the only way to - * reliably make both cases work, with software handling both cases correctly - * and at the significant penalty of needing an IRQ per packet. (The lack of - * I/O overlap can be slightly ameliorated by enabling double buffering.) - * - * So how to get rid of IRQ-per-packet? The transparent multi-BD case could - * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK - * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors - * with guaranteed driver level fault recovery and scrubbing out what's left - * of that garbaged datastream. 
- * - * But there seems to be no way to identify the cases where CPPI RNDIS mode - * is appropriate -- which do NOT include RNDIS host drivers, but do include - * the CDC Ethernet driver! -- and the documentation is incomplete/wrong. - * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic - * that applies best on the peripheral side (and which could fail rudely). - * - * Leaving only "transparent" mode; we avoid multi-bd modes in almost all - * cases other than mass storage class. Otherwise we're correct but slow, - * since CPPI penalizes our need for a "true RNDIS" default mode. - */ - - -/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY - * - * IFF - * (a) peripheral mode ... since rndis peripherals could pad their - * writes to hosts, causing i/o failure; or we'd have to cope with - * a largely unknowable variety of host side protocol variants - * (b) and short reads are NOT errors ... since full reads would - * cause those same i/o failures - * (c) and read length is - * - less than 64KB (max per cppi descriptor) - * - not a multiple of 4096 (g_zero default, full reads typical) - * - N (>1) packets long, ditto (full reads not EXPECTED) - * THEN - * try rx rndis mode - * - * Cost of heuristic failing: RXDMA wedges at the end of transfers that - * fill out the whole buffer. Buggy host side usb network drivers could - * trigger that, but "in the field" such bugs seem to be all but unknown. - * - * So this module parameter lets the heuristic be disabled. When using - * gadgetfs, the heuristic will probably need to be disabled. - */ -static bool cppi_rx_rndis = 1; - -module_param(cppi_rx_rndis, bool, 0); -MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic"); - - -/** - * cppi_next_rx_segment - dma read for the next chunk of a buffer - * @musb: the controller - * @rx: dma channel - * @onepacket: true unless caller treats short reads as errors, and - * performs fault recovery above usbcore. - * Context: controller irqlocked - * - * See above notes about why we can't use multi-BD RX queues except in - * rare cases (mass storage class), and can never use the hardware "rndis" - * mode (since it's not a "true" RNDIS mode) with complete safety.. - * - * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in - * code to recover from corrupted datastreams after each short transfer. 
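[Conditions (a)-(c) above reduce to the following predicate, mirroring the test inside cppi_next_rx_segment() below; the peripheral-mode and module-parameter checks are left to the caller, and maxpacket is assumed to be a power of two:

    #include <linux/types.h>

    static int rx_heuristic_rndis_ok(unsigned int maxpacket, size_t length)
    {
            return length > maxpacket               /* N > 1 packets */
                    && (length & ~0xffff) == 0      /* under 64KB per BD */
                    && (length & 0x0fff) != 0       /* not a 4096 multiple */
                    && (length & (maxpacket - 1)) == 0; /* full pkts only */
    }
]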
- */ -static void -cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) -{ - unsigned maxpacket = rx->maxpacket; - dma_addr_t addr = rx->buf_dma + rx->offset; - size_t length = rx->buf_len - rx->offset; - struct cppi_descriptor *bd, *tail; - unsigned n_bds; - unsigned i; - void __iomem *tibase = musb->ctrl_base; - int is_rndis = 0; - struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram; - struct cppi_descriptor *d; - - if (onepacket) { - /* almost every USB driver, host or peripheral side */ - n_bds = 1; - - /* maybe apply the heuristic above */ - if (cppi_rx_rndis - && is_peripheral_active(musb) - && length > maxpacket - && (length & ~0xffff) == 0 - && (length & 0x0fff) != 0 - && (length & (maxpacket - 1)) == 0) { - maxpacket = length; - is_rndis = 1; - } - } else { - /* virtually nothing except mass storage class */ - if (length > 0xffff) { - n_bds = 0xffff / maxpacket; - length = n_bds * maxpacket; - } else { - n_bds = DIV_ROUND_UP(length, maxpacket); - } - if (n_bds == 1) - onepacket = 1; - else - n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD); - } - - /* In host mode, autorequest logic can generate some IN tokens; it's - * tricky since we can't leave REQPKT set in RXCSR after the transfer - * finishes. So: multipacket transfers involve two or more segments. - * And always at least two IRQs ... RNDIS mode is not an option. - */ - if (is_host_active(musb)) - n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds); - - cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis); - - length = min(n_bds * maxpacket, length); - - musb_dbg(musb, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) " - "dma 0x%llx len %u %u/%u", - rx->index, maxpacket, - onepacket - ? (is_rndis ? "rndis" : "onepacket") - : "multipacket", - n_bds, - musb_readl(tibase, - DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) - & 0xffff, - (unsigned long long)addr, length, - rx->channel.actual_len, rx->buf_len); - - /* only queue one segment at a time, since the hardware prevents - * correct queue shutdown after unexpected short packets - */ - bd = cppi_bd_alloc(rx); - rx->head = bd; - - /* Build BDs for all packets in this segment */ - for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) { - u32 bd_len; - - if (i) { - bd = cppi_bd_alloc(rx); - if (!bd) - break; - tail->next = bd; - tail->hw_next = bd->dma; - } - bd->hw_next = 0; - - /* all but the last packet will be maxpacket size */ - if (maxpacket < length) - bd_len = maxpacket; - else - bd_len = length; - - bd->hw_bufp = addr; - addr += bd_len; - rx->offset += bd_len; - - bd->hw_off_len = (0 /*offset*/ << 16) + bd_len; - bd->buflen = bd_len; - - bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0); - length -= bd_len; - } - - /* we always expect at least one reusable BD! */ - if (!tail) { - WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds); - return; - } else if (i < n_bds) - WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds); - - tail->next = NULL; - tail->hw_next = 0; - - bd = rx->head; - rx->tail = tail; - - /* short reads and other faults should terminate this entire - * dma segment. we want one "dma packet" per dma segment, not - * one per USB packet, terminating the whole queue at once... - * NOTE that current hardware seems to ignore SOP and EOP. - */ - bd->hw_options |= CPPI_SOP_SET; - tail->hw_options |= CPPI_EOP_SET; - - for (d = rx->head; d; d = d->next) - cppi_dump_rxbd("S", d); - - /* in case the preceding transfer left some state... 
*/ - tail = rx->last_processed; - if (tail) { - tail->next = bd; - tail->hw_next = bd->dma; - } - - core_rxirq_enable(tibase, rx->index + 1); - - /* BDs live in DMA-coherent memory, but writes might be pending */ - cpu_drain_writebuffer(); - - /* REVISIT specs say to write this AFTER the BUFCNT register - * below ... but that loses badly. - */ - musb_writel(&rx_ram->rx_head, 0, bd->dma); - - /* bufferCount must be at least 3, and zeroes on completion - * unless it underflows below zero, or stops at two, or keeps - * growing ... grr. - */ - i = musb_readl(tibase, - DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) - & 0xffff; - - if (!i) - musb_writel(tibase, - DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), - n_bds + 2); - else if (n_bds > (i - 3)) - musb_writel(tibase, - DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), - n_bds - (i - 3)); - - i = musb_readl(tibase, - DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) - & 0xffff; - if (i < (2 + n_bds)) { - musb_dbg(musb, "bufcnt%d underrun - %d (for %d)", - rx->index, i, n_bds); - musb_writel(tibase, - DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), - n_bds + 2); - } - - cppi_dump_rx(4, rx, "/S"); -} - -/** - * cppi_channel_program - program channel for data transfer - * @ch: the channel - * @maxpacket: max packet size - * @mode: For RX, 1 unless the usb protocol driver promised to treat - * all short reads as errors and kick in high level fault recovery. - * For TX, ignored because of RNDIS mode races/glitches. - * @dma_addr: dma address of buffer - * @len: length of buffer - * Context: controller irqlocked - */ -static int cppi_channel_program(struct dma_channel *ch, - u16 maxpacket, u8 mode, - dma_addr_t dma_addr, u32 len) -{ - struct cppi_channel *cppi_ch; - struct cppi *controller; - struct musb *musb; - - cppi_ch = container_of(ch, struct cppi_channel, channel); - controller = cppi_ch->controller; - musb = controller->controller.musb; - - switch (ch->status) { - case MUSB_DMA_STATUS_BUS_ABORT: - case MUSB_DMA_STATUS_CORE_ABORT: - /* fault irq handler should have handled cleanup */ - WARNING("%cX DMA%d not cleaned up after abort!\n", - cppi_ch->transmit ? 'T' : 'R', - cppi_ch->index); - /* WARN_ON(1); */ - break; - case MUSB_DMA_STATUS_BUSY: - WARNING("program active channel? %cX DMA%d\n", - cppi_ch->transmit ? 'T' : 'R', - cppi_ch->index); - /* WARN_ON(1); */ - break; - case MUSB_DMA_STATUS_UNKNOWN: - musb_dbg(musb, "%cX DMA%d not allocated!", - cppi_ch->transmit ? 'T' : 'R', - cppi_ch->index); - fallthrough; - case MUSB_DMA_STATUS_FREE: - break; - } - - ch->status = MUSB_DMA_STATUS_BUSY; - - /* set transfer parameters, then queue up its first segment */ - cppi_ch->buf_dma = dma_addr; - cppi_ch->offset = 0; - cppi_ch->maxpacket = maxpacket; - cppi_ch->buf_len = len; - cppi_ch->channel.actual_len = 0; - - /* TX channel? or RX? */ - if (cppi_ch->transmit) - cppi_next_tx_segment(musb, cppi_ch); - else - cppi_next_rx_segment(musb, cppi_ch, mode); - - return true; -} - -static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) -{ - struct cppi_channel *rx = &cppi->rx[ch]; - struct cppi_rx_stateram __iomem *state = rx->state_ram; - struct cppi_descriptor *bd; - struct cppi_descriptor *last = rx->last_processed; - bool completed = false; - bool acked = false; - int i; - dma_addr_t safe2ack; - void __iomem *regs = rx->hw_ep->regs; - struct musb *musb = cppi->controller.musb; - - cppi_dump_rx(6, rx, "/K"); - - bd = last ? 
last->next : rx->head; - if (!bd) - return false; - - /* run through all completed BDs */ - for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0); - (safe2ack || completed) && bd && i < NUM_RXCHAN_BD; - i++, bd = bd->next) { - u16 len; - - /* catch latest BD writes from CPPI */ - rmb(); - if (!completed && (bd->hw_options & CPPI_OWN_SET)) - break; - - musb_dbg(musb, "C/RXBD %llx: nxt %08x buf %08x " - "off.len %08x opt.len %08x (%d)", - (unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp, - bd->hw_off_len, bd->hw_options, - rx->channel.actual_len); - - /* actual packet received length */ - if ((bd->hw_options & CPPI_SOP_SET) && !completed) - len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK; - else - len = 0; - - if (bd->hw_options & CPPI_EOQ_MASK) - completed = true; - - if (!completed && len < bd->buflen) { - /* NOTE: when we get a short packet, RXCSR_H_REQPKT - * must have been cleared, and no more DMA packets may - * active be in the queue... TI docs didn't say, but - * CPPI ignores those BDs even though OWN is still set. - */ - completed = true; - musb_dbg(musb, "rx short %d/%d (%d)", - len, bd->buflen, - rx->channel.actual_len); - } - - /* If we got here, we expect to ack at least one BD; meanwhile - * CPPI may completing other BDs while we scan this list... - * - * RACE: we can notice OWN cleared before CPPI raises the - * matching irq by writing that BD as the completion pointer. - * In such cases, stop scanning and wait for the irq, avoiding - * lost acks and states where BD ownership is unclear. - */ - if (bd->dma == safe2ack) { - musb_writel(&state->rx_complete, 0, safe2ack); - safe2ack = musb_readl(&state->rx_complete, 0); - acked = true; - if (bd->dma == safe2ack) - safe2ack = 0; - } - - rx->channel.actual_len += len; - - cppi_bd_free(rx, last); - last = bd; - - /* stop scanning on end-of-segment */ - if (bd->hw_next == 0) - completed = true; - } - rx->last_processed = last; - - /* dma abort, lost ack, or ... */ - if (!acked && last) { - int csr; - - if (safe2ack == 0 || safe2ack == rx->last_processed->dma) - musb_writel(&state->rx_complete, 0, safe2ack); - if (safe2ack == 0) { - cppi_bd_free(rx, last); - rx->last_processed = NULL; - - /* if we land here on the host side, H_REQPKT will - * be clear and we need to restart the queue... - */ - WARN_ON(rx->head); - } - musb_ep_select(cppi->mregs, rx->index + 1); - csr = musb_readw(regs, MUSB_RXCSR); - if (csr & MUSB_RXCSR_DMAENAB) { - musb_dbg(musb, "list%d %p/%p, last %llx%s, csr %04x", - rx->index, - rx->head, rx->tail, - rx->last_processed - ? (unsigned long long) - rx->last_processed->dma - : 0, - completed ? ", completed" : "", - csr); - cppi_dump_rxq(4, "/what?", rx); - } - } - if (!completed) { - int csr; - - rx->head = bd; - - /* REVISIT seems like "autoreq all but EOP" doesn't... - * setting it here "should" be racey, but seems to work - */ - csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); - if (is_host_active(cppi->controller.musb) - && bd - && !(csr & MUSB_RXCSR_H_REQPKT)) { - csr |= MUSB_RXCSR_H_REQPKT; - musb_writew(regs, MUSB_RXCSR, - MUSB_RXCSR_H_WZC_BITS | csr); - csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); - } - } else { - rx->head = NULL; - rx->tail = NULL; - } - - cppi_dump_rx(6, rx, completed ? 
"/completed" : "/cleaned"); - return completed; -} - -irqreturn_t cppi_interrupt(int irq, void *dev_id) -{ - struct musb *musb = dev_id; - struct cppi *cppi; - void __iomem *tibase; - struct musb_hw_ep *hw_ep = NULL; - u32 rx, tx; - int i, index; - unsigned long flags; - - cppi = container_of(musb->dma_controller, struct cppi, controller); - if (cppi->irq) - spin_lock_irqsave(&musb->lock, flags); - - tibase = musb->ctrl_base; - - tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); - rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); - - if (!tx && !rx) { - if (cppi->irq) - spin_unlock_irqrestore(&musb->lock, flags); - return IRQ_NONE; - } - - musb_dbg(musb, "CPPI IRQ Tx%x Rx%x", tx, rx); - - /* process TX channels */ - for (index = 0; tx; tx = tx >> 1, index++) { - struct cppi_channel *tx_ch; - struct cppi_tx_stateram __iomem *tx_ram; - bool completed = false; - struct cppi_descriptor *bd; - - if (!(tx & 1)) - continue; - - tx_ch = cppi->tx + index; - tx_ram = tx_ch->state_ram; - - /* FIXME need a cppi_tx_scan() routine, which - * can also be called from abort code - */ - - cppi_dump_tx(5, tx_ch, "/E"); - - bd = tx_ch->head; - - /* - * If Head is null then this could mean that a abort interrupt - * that needs to be acknowledged. - */ - if (NULL == bd) { - musb_dbg(musb, "null BD"); - musb_writel(&tx_ram->tx_complete, 0, 0); - continue; - } - - /* run through all completed BDs */ - for (i = 0; !completed && bd && i < NUM_TXCHAN_BD; - i++, bd = bd->next) { - u16 len; - - /* catch latest BD writes from CPPI */ - rmb(); - if (bd->hw_options & CPPI_OWN_SET) - break; - - musb_dbg(musb, "C/TXBD %p n %x b %x off %x opt %x", - bd, bd->hw_next, bd->hw_bufp, - bd->hw_off_len, bd->hw_options); - - len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK; - tx_ch->channel.actual_len += len; - - tx_ch->last_processed = bd; - - /* write completion register to acknowledge - * processing of completed BDs, and possibly - * release the IRQ; EOQ might not be set ... - * - * REVISIT use the same ack strategy as rx - * - * REVISIT have observed bit 18 set; huh?? - */ - /* if ((bd->hw_options & CPPI_EOQ_MASK)) */ - musb_writel(&tx_ram->tx_complete, 0, bd->dma); - - /* stop scanning on end-of-segment */ - if (bd->hw_next == 0) - completed = true; - } - - /* on end of segment, maybe go to next one */ - if (completed) { - /* cppi_dump_tx(4, tx_ch, "/complete"); */ - - /* transfer more, or report completion */ - if (tx_ch->offset >= tx_ch->buf_len) { - tx_ch->head = NULL; - tx_ch->tail = NULL; - tx_ch->channel.status = MUSB_DMA_STATUS_FREE; - - hw_ep = tx_ch->hw_ep; - - musb_dma_completion(musb, index + 1, 1); - - } else { - /* Bigger transfer than we could fit in - * that first batch of descriptors... - */ - cppi_next_tx_segment(musb, tx_ch); - } - } else - tx_ch->head = bd; - } - - /* Start processing the RX block */ - for (index = 0; rx; rx = rx >> 1, index++) { - - if (rx & 1) { - struct cppi_channel *rx_ch; - - rx_ch = cppi->rx + index; - - /* let incomplete dma segments finish */ - if (!cppi_rx_scan(cppi, index)) - continue; - - /* start another dma segment if needed */ - if (rx_ch->channel.actual_len != rx_ch->buf_len - && rx_ch->channel.actual_len - == rx_ch->offset) { - cppi_next_rx_segment(musb, rx_ch, 1); - continue; - } - - /* all segments completed! 
*/ - rx_ch->channel.status = MUSB_DMA_STATUS_FREE; - - hw_ep = rx_ch->hw_ep; - - core_rxirq_disable(tibase, index + 1); - musb_dma_completion(musb, index + 1, 0); - } - } - - /* write to CPPI EOI register to re-enable interrupts */ - musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0); - - if (cppi->irq) - spin_unlock_irqrestore(&musb->lock, flags); - - return IRQ_HANDLED; -} -EXPORT_SYMBOL_GPL(cppi_interrupt); - -/* Instantiate a software object representing a DMA controller. */ -struct dma_controller * -cppi_dma_controller_create(struct musb *musb, void __iomem *mregs) -{ - struct cppi *controller; - struct device *dev = musb->controller; - struct platform_device *pdev = to_platform_device(dev); - int irq = platform_get_irq_byname(pdev, "dma"); - - controller = kzalloc(sizeof *controller, GFP_KERNEL); - if (!controller) - return NULL; - - controller->mregs = mregs; - controller->tibase = mregs - DAVINCI_BASE_OFFSET; - - controller->controller.musb = musb; - controller->controller.channel_alloc = cppi_channel_allocate; - controller->controller.channel_release = cppi_channel_release; - controller->controller.channel_program = cppi_channel_program; - controller->controller.channel_abort = cppi_channel_abort; - - /* NOTE: allocating from on-chip SRAM would give the least - * contention for memory access, if that ever matters here. - */ - - /* setup BufferPool */ - controller->pool = dma_pool_create("cppi", - controller->controller.musb->controller, - sizeof(struct cppi_descriptor), - CPPI_DESCRIPTOR_ALIGN, 0); - if (!controller->pool) { - kfree(controller); - return NULL; - } - - if (irq > 0) { - if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) { - dev_err(dev, "request_irq %d failed!\n", irq); - musb_dma_controller_destroy(&controller->controller); - return NULL; - } - controller->irq = irq; - } - - cppi_controller_start(controller); - return &controller->controller; -} -EXPORT_SYMBOL_GPL(cppi_dma_controller_create); - -/* - * Destroy a previously-instantiated DMA controller. - */ -void cppi_dma_controller_destroy(struct dma_controller *c) -{ - struct cppi *cppi; - - cppi = container_of(c, struct cppi, controller); - - cppi_controller_stop(cppi); - - if (cppi->irq) - free_irq(cppi->irq, cppi->controller.musb); - - /* assert: caller stopped the controller first */ - dma_pool_destroy(cppi->pool); - - kfree(cppi); -} -EXPORT_SYMBOL_GPL(cppi_dma_controller_destroy); - -/* - * Context: controller irqlocked, endpoint selected - */ -static int cppi_channel_abort(struct dma_channel *channel) -{ - struct cppi_channel *cppi_ch; - struct cppi *controller; - void __iomem *mbase; - void __iomem *tibase; - void __iomem *regs; - u32 value; - struct cppi_descriptor *queue; - - cppi_ch = container_of(channel, struct cppi_channel, channel); - - controller = cppi_ch->controller; - - switch (channel->status) { - case MUSB_DMA_STATUS_BUS_ABORT: - case MUSB_DMA_STATUS_CORE_ABORT: - /* from RX or TX fault irq handler */ - case MUSB_DMA_STATUS_BUSY: - /* the hardware needs shutting down */ - regs = cppi_ch->hw_ep->regs; - break; - case MUSB_DMA_STATUS_UNKNOWN: - case MUSB_DMA_STATUS_FREE: - return 0; - default: - return -EINVAL; - } - - if (!cppi_ch->transmit && cppi_ch->head) - cppi_dump_rxq(3, "/abort", cppi_ch); - - mbase = controller->mregs; - tibase = controller->tibase; - - queue = cppi_ch->head; - cppi_ch->head = NULL; - cppi_ch->tail = NULL; - - /* REVISIT should rely on caller having done this, - * and caller should rely on us not changing it. - * peripheral code is safe ... check host too. 
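[cppi_dma_controller_create() above draws its BDs from a dma_pool, which hands out fixed-size, alignment-guaranteed blocks of DMA-coherent memory together with their bus addresses. A minimal sketch of that allocation step (field names are illustrative, loosely modeled on struct cppi_descriptor):

    #include <linux/dmapool.h>
    #include <linux/gfp.h>
    #include <linux/types.h>

    struct example_bd {
            u32             hw_next;        /* bus address of next BD, or 0 */
            u32             hw_bufp;
            u32             hw_off_len;
            u32             hw_options;
            dma_addr_t      dma;            /* bus address of this BD */
    };

    static struct example_bd *example_bd_get(struct dma_pool *pool)
    {
            dma_addr_t dma;
            struct example_bd *bd;

            bd = dma_pool_alloc(pool, GFP_KERNEL, &dma);
            if (bd)
                    bd->dma = dma;  /* kept so hw_next chains can be built */
            return bd;
    }
]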
- */ - musb_ep_select(mbase, cppi_ch->index + 1); - - if (cppi_ch->transmit) { - struct cppi_tx_stateram __iomem *tx_ram; - /* REVISIT put timeouts on these controller handshakes */ - - cppi_dump_tx(6, cppi_ch, " (teardown)"); - - /* teardown DMA engine then usb core */ - do { - value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG); - } while (!(value & CPPI_TEAR_READY)); - musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index); - - tx_ram = cppi_ch->state_ram; - do { - value = musb_readl(&tx_ram->tx_complete, 0); - } while (0xFFFFFFFC != value); - - /* FIXME clean up the transfer state ... here? - * the completion routine should get called with - * an appropriate status code. - */ - - value = musb_readw(regs, MUSB_TXCSR); - value &= ~MUSB_TXCSR_DMAENAB; - value |= MUSB_TXCSR_FLUSHFIFO; - musb_writew(regs, MUSB_TXCSR, value); - musb_writew(regs, MUSB_TXCSR, value); - - /* - * 1. Write to completion Ptr value 0x1(bit 0 set) - * (write back mode) - * 2. Wait for abort interrupt and then put the channel in - * compare mode by writing 1 to the tx_complete register. - */ - cppi_reset_tx(tx_ram, 1); - cppi_ch->head = NULL; - musb_writel(&tx_ram->tx_complete, 0, 1); - cppi_dump_tx(5, cppi_ch, " (done teardown)"); - - /* REVISIT tx side _should_ clean up the same way - * as the RX side ... this does no cleanup at all! - */ - - } else /* RX */ { - u16 csr; - - /* NOTE: docs don't guarantee any of this works ... we - * expect that if the usb core stops telling the cppi core - * to pull more data from it, then it'll be safe to flush - * current RX DMA state iff any pending fifo transfer is done. - */ - - core_rxirq_disable(tibase, cppi_ch->index + 1); - - /* for host, ensure ReqPkt is never set again */ - if (is_host_active(cppi_ch->controller->controller.musb)) { - value = musb_readl(tibase, DAVINCI_AUTOREQ_REG); - value &= ~((0x3) << (cppi_ch->index * 2)); - musb_writel(tibase, DAVINCI_AUTOREQ_REG, value); - } - - csr = musb_readw(regs, MUSB_RXCSR); - - /* for host, clear (just) ReqPkt at end of current packet(s) */ - if (is_host_active(cppi_ch->controller->controller.musb)) { - csr |= MUSB_RXCSR_H_WZC_BITS; - csr &= ~MUSB_RXCSR_H_REQPKT; - } else - csr |= MUSB_RXCSR_P_WZC_BITS; - - /* clear dma enable */ - csr &= ~(MUSB_RXCSR_DMAENAB); - musb_writew(regs, MUSB_RXCSR, csr); - csr = musb_readw(regs, MUSB_RXCSR); - - /* Quiesce: wait for current dma to finish (if not cleanup). - * We can't use bit zero of stateram->rx_sop, since that - * refers to an entire "DMA packet" not just emptying the - * current fifo. Most segments need multiple usb packets. - */ - if (channel->status == MUSB_DMA_STATUS_BUSY) - udelay(50); - - /* scan the current list, reporting any data that was - * transferred and acking any IRQ - */ - cppi_rx_scan(controller, cppi_ch->index); - - /* clobber the existing state once it's idle - * - * NOTE: arguably, we should also wait for all the other - * RX channels to quiesce (how??) and then temporarily - * disable RXCPPI_CTRL_REG ... but it seems that we can - * rely on the controller restarting from state ram, with - * only RXCPPI_BUFCNT state being bogus. BUFCNT will - * correct itself after the next DMA transfer though. - * - * REVISIT does using rndis mode change that? - */ - cppi_reset_rx(cppi_ch->state_ram); - - /* next DMA request _should_ load cppi head ptr */ - - /* ... we don't "free" that list, only mutate it in place. 
*/ - cppi_dump_rx(5, cppi_ch, " (done abort)"); - - /* clean up previously pending bds */ - cppi_bd_free(cppi_ch, cppi_ch->last_processed); - cppi_ch->last_processed = NULL; - - while (queue) { - struct cppi_descriptor *tmp = queue->next; - - cppi_bd_free(cppi_ch, queue); - queue = tmp; - } - } - - channel->status = MUSB_DMA_STATUS_FREE; - cppi_ch->buf_dma = 0; - cppi_ch->offset = 0; - cppi_ch->buf_len = 0; - cppi_ch->maxpacket = 0; - return 0; -} - -/* TBD Queries: - * - * Power Management ... probably turn off cppi during suspend, restart; - * check state ram? Clocking is presumably shared with usb core. - */ diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c deleted file mode 100644 index 704435526394383d1d73b84cfc642e85e589f440..0000000000000000000000000000000000000000 --- a/drivers/usb/musb/davinci.c +++ /dev/null @@ -1,606 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2005-2006 by Texas Instruments - * - * This file is part of the Inventra Controller Driver for Linux. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -#include "musb_core.h" - -#include "davinci.h" -#include "cppi_dma.h" - - -#define USB_PHY_CTRL IO_ADDRESS(USBPHY_CTL_PADDR) -#define DM355_DEEPSLEEP IO_ADDRESS(DM355_DEEPSLEEP_PADDR) - -struct davinci_glue { - struct device *dev; - struct platform_device *musb; - struct clk *clk; - bool vbus_state; - struct gpio_desc *vbus; - struct work_struct vbus_work; -}; - -/* REVISIT (PM) we should be able to keep the PHY in low power mode most - * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0 - * and, when in host mode, autosuspending idle root ports... PHYPLLON - * (overriding SUSPENDM?) then likely needs to stay off. 
- */ - -static inline void phy_on(void) -{ - u32 phy_ctrl = __raw_readl(USB_PHY_CTRL); - - /* power everything up; start the on-chip PHY and its PLL */ - phy_ctrl &= ~(USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN); - phy_ctrl |= USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON; - __raw_writel(phy_ctrl, USB_PHY_CTRL); - - /* wait for PLL to lock before proceeding */ - while ((__raw_readl(USB_PHY_CTRL) & USBPHY_PHYCLKGD) == 0) - cpu_relax(); -} - -static inline void phy_off(void) -{ - u32 phy_ctrl = __raw_readl(USB_PHY_CTRL); - - /* powerdown the on-chip PHY, its PLL, and the OTG block */ - phy_ctrl &= ~(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON); - phy_ctrl |= USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN; - __raw_writel(phy_ctrl, USB_PHY_CTRL); -} - -static int dma_off = 1; - -static void davinci_musb_enable(struct musb *musb) -{ - u32 tmp, old, val; - - /* workaround: setup irqs through both register sets */ - tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK) - << DAVINCI_USB_TXINT_SHIFT; - musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); - old = tmp; - tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK)) - << DAVINCI_USB_RXINT_SHIFT; - musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); - tmp |= old; - - val = ~MUSB_INTR_SOF; - tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT); - musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); - - if (is_dma_capable() && !dma_off) - printk(KERN_WARNING "%s %s: dma not reactivated\n", - __FILE__, __func__); - else - dma_off = 0; - - /* force a DRVVBUS irq so we can start polling for ID change */ - musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, - DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT); -} - -/* - * Disable the HDRC and flush interrupts - */ -static void davinci_musb_disable(struct musb *musb) -{ - /* because we don't set CTRLR.UINT, "important" to: - * - not read/write INTRUSB/INTRUSBE - * - (except during initial setup, as workaround) - * - use INTSETR/INTCLRR instead - */ - musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG, - DAVINCI_USB_USBINT_MASK - | DAVINCI_USB_TXINT_MASK - | DAVINCI_USB_RXINT_MASK); - musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0); - - if (is_dma_capable() && !dma_off) - WARNING("dma still active\n"); -} - - -#define portstate(stmt) stmt - -/* - * VBUS SWITCHING IS BOARD-SPECIFIC ... at least for the DM6446 EVM, - * which doesn't wire DRVVBUS to the FET that switches it. Unclear - * if that's a problem with the DM6446 chip or just with that board. - * - * In either case, the DM355 EVM automates DRVVBUS the normal way, - * when J10 is out, and TI documents it as handling OTG. - */ - -/* I2C operations are always synchronous, and require a task context. - * With unloaded systems, using the shared workqueue seems to suffice - * to satisfy the 100msec A_WAIT_VRISE timeout... 
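[That comment is the whole design: switching DRVVBUS may take an I2C write, which can sleep, so interrupt context only records the desired state and punts to a work item. A minimal version of the pattern (names illustrative; the driver's own evm_deferred_drvvbus() follows):

    #include <linux/gpio/consumer.h>
    #include <linux/workqueue.h>

    struct example_vbus {
            struct gpio_desc        *gpiod;
            int                     want_on;
            struct work_struct      work;
    };

    static void example_vbus_work(struct work_struct *work)
    {
            struct example_vbus *v =
                    container_of(work, struct example_vbus, work);

            /* task context: sleeping GPIO/I2C accessors are fine here */
            gpiod_set_value_cansleep(v->gpiod, v->want_on);
    }

    static void example_set_vbus(struct example_vbus *v, int on)
    {
            v->want_on = on;
            schedule_work(&v->work);        /* safe from irq context */
    }
]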
- */ -static void evm_deferred_drvvbus(struct work_struct *work) -{ - struct davinci_glue *glue = container_of(work, struct davinci_glue, - vbus_work); - - gpiod_set_value_cansleep(glue->vbus, glue->vbus_state); - glue->vbus_state = !glue->vbus_state; -} - -static void davinci_musb_source_power(struct musb *musb, int is_on, - int immediate) -{ - struct davinci_glue *glue = dev_get_drvdata(musb->controller->parent); - - /* This GPIO handling is entirely optional */ - if (!glue->vbus) - return; - - if (is_on) - is_on = 1; - - if (glue->vbus_state == is_on) - return; - /* 0/1 vs "-1 == unknown/init" */ - glue->vbus_state = !is_on; - - if (machine_is_davinci_evm()) { - if (immediate) - gpiod_set_value_cansleep(glue->vbus, glue->vbus_state); - else - schedule_work(&glue->vbus_work); - } - if (immediate) - glue->vbus_state = is_on; -} - -static void davinci_musb_set_vbus(struct musb *musb, int is_on) -{ - WARN_ON(is_on && is_peripheral_active(musb)); - davinci_musb_source_power(musb, is_on, 0); -} - - -#define POLL_SECONDS 2 - -static void otg_timer(struct timer_list *t) -{ - struct musb *musb = from_timer(musb, t, dev_timer); - void __iomem *mregs = musb->mregs; - u8 devctl; - unsigned long flags; - - /* We poll because DaVinci's won't expose several OTG-critical - * status change events (from the transceiver) otherwise. - */ - devctl = musb_readb(mregs, MUSB_DEVCTL); - dev_dbg(musb->controller, "poll devctl %02x (%s)\n", devctl, - usb_otg_state_string(musb->xceiv->otg->state)); - - spin_lock_irqsave(&musb->lock, flags); - switch (musb->xceiv->otg->state) { - case OTG_STATE_A_WAIT_VFALL: - /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL - * seems to mis-handle session "start" otherwise (or in our - * case "recover"), in routine "VBUS was valid by the time - * VBUSERR got reported during enumeration" cases. - */ - if (devctl & MUSB_DEVCTL_VBUS) { - mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ); - break; - } - musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; - musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, - MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT); - break; - case OTG_STATE_B_IDLE: - /* - * There's no ID-changed IRQ, so we have no good way to tell - * when to switch to the A-Default state machine (by setting - * the DEVCTL.SESSION flag). - * - * Workaround: whenever we're in B_IDLE, try setting the - * session flag every few seconds. If it works, ID was - * grounded and we're now in the A-Default state machine. - * - * NOTE setting the session flag is _supposed_ to trigger - * SRP, but clearly it doesn't. - */ - musb_writeb(mregs, MUSB_DEVCTL, - devctl | MUSB_DEVCTL_SESSION); - devctl = musb_readb(mregs, MUSB_DEVCTL); - if (devctl & MUSB_DEVCTL_BDEVICE) - mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ); - else - musb->xceiv->otg->state = OTG_STATE_A_IDLE; - break; - default: - break; - } - spin_unlock_irqrestore(&musb->lock, flags); -} - -static irqreturn_t davinci_musb_interrupt(int irq, void *__hci) -{ - unsigned long flags; - irqreturn_t retval = IRQ_NONE; - struct musb *musb = __hci; - struct usb_otg *otg = musb->xceiv->otg; - void __iomem *tibase = musb->ctrl_base; - struct cppi *cppi; - u32 tmp; - - spin_lock_irqsave(&musb->lock, flags); - - /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through - * the Mentor registers (except for setup), use the TI ones and EOI. - * - * Docs describe irq "vector" registers associated with the CPPI and - * USB EOI registers. 
These hold a bitmask corresponding to the - * current IRQ, not an irq handler address. Would using those bits - * resolve some of the races observed in this dispatch code?? - */ - - /* CPPI interrupts share the same IRQ line, but have their own - * mask, state, "vector", and EOI registers. - */ - cppi = container_of(musb->dma_controller, struct cppi, controller); - if (is_cppi_enabled(musb) && musb->dma_controller && !cppi->irq) - retval = cppi_interrupt(irq, __hci); - - /* ack and handle non-CPPI interrupts */ - tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG); - musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp); - dev_dbg(musb->controller, "IRQ %08x\n", tmp); - - musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK) - >> DAVINCI_USB_RXINT_SHIFT; - musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK) - >> DAVINCI_USB_TXINT_SHIFT; - musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK) - >> DAVINCI_USB_USBINT_SHIFT; - - /* DRVVBUS irqs are the only proxy we have (a very poor one!) for - * DaVinci's missing ID change IRQ. We need an ID change IRQ to - * switch appropriately between halves of the OTG state machine. - * Managing DEVCTL.SESSION per Mentor docs requires we know its - * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. - * Also, DRVVBUS pulses for SRP (but not at 5V) ... - */ - if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) { - int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG); - void __iomem *mregs = musb->mregs; - u8 devctl = musb_readb(mregs, MUSB_DEVCTL); - int err = musb->int_usb & MUSB_INTR_VBUSERROR; - - err = musb->int_usb & MUSB_INTR_VBUSERROR; - if (err) { - /* The Mentor core doesn't debounce VBUS as needed - * to cope with device connect current spikes. This - * means it's not uncommon for bus-powered devices - * to get VBUS errors during enumeration. - * - * This is a workaround, but newer RTL from Mentor - * seems to allow a better one: "re"starting sessions - * without waiting (on EVM, a **long** time) for VBUS - * to stop registering in devctl. - */ - musb->int_usb &= ~MUSB_INTR_VBUSERROR; - musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL; - mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ); - WARNING("VBUS error workaround (delay coming)\n"); - } else if (drvvbus) { - MUSB_HST_MODE(musb); - musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; - portstate(musb->port1_status |= USB_PORT_STAT_POWER); - del_timer(&musb->dev_timer); - } else { - musb->is_active = 0; - MUSB_DEV_MODE(musb); - musb->xceiv->otg->state = OTG_STATE_B_IDLE; - portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); - } - - /* NOTE: this must complete poweron within 100 msec - * (OTG_TIME_A_WAIT_VRISE) but we don't check for that. - */ - davinci_musb_source_power(musb, drvvbus, 0); - dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n", - drvvbus ? "on" : "off", - usb_otg_state_string(musb->xceiv->otg->state), - err ? " ERROR" : "", - devctl); - retval = IRQ_HANDLED; - } - - if (musb->int_tx || musb->int_rx || musb->int_usb) - retval |= musb_interrupt(musb); - - /* irq stays asserted until EOI is written */ - musb_writel(tibase, DAVINCI_USB_EOI_REG, 0); - - /* poll for ID change */ - if (musb->xceiv->otg->state == OTG_STATE_B_IDLE) - mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ); - - spin_unlock_irqrestore(&musb->lock, flags); - - return retval; -} - -static int davinci_musb_set_mode(struct musb *musb, u8 mode) -{ - /* EVM can't do this (right?) 
*/ - return -EIO; -} - -static int davinci_musb_init(struct musb *musb) -{ - void __iomem *tibase = musb->ctrl_base; - u32 revision; - int ret = -ENODEV; - - musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); - if (IS_ERR_OR_NULL(musb->xceiv)) { - ret = -EPROBE_DEFER; - goto unregister; - } - - musb->mregs += DAVINCI_BASE_OFFSET; - - /* returns zero if e.g. not clocked */ - revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG); - if (revision == 0) - goto fail; - - timer_setup(&musb->dev_timer, otg_timer, 0); - - davinci_musb_source_power(musb, 0, 1); - - /* dm355 EVM swaps D+/D- for signal integrity, and - * is clocked from the main 24 MHz crystal. - */ - if (machine_is_davinci_dm355_evm()) { - u32 phy_ctrl = __raw_readl(USB_PHY_CTRL); - - phy_ctrl &= ~(3 << 9); - phy_ctrl |= USBPHY_DATAPOL; - __raw_writel(phy_ctrl, USB_PHY_CTRL); - } - - /* On dm355, the default-A state machine needs DRVVBUS control. - * If we won't be a host, there's no need to turn it on. - */ - if (cpu_is_davinci_dm355()) { - u32 deepsleep = __raw_readl(DM355_DEEPSLEEP); - - deepsleep &= ~DRVVBUS_FORCE; - __raw_writel(deepsleep, DM355_DEEPSLEEP); - } - - /* reset the controller */ - musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1); - - /* start the on-chip PHY and its PLL */ - phy_on(); - - msleep(5); - - /* NOTE: irqs are in mixed mode, not bypass to pure-musb */ - pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n", - revision, __raw_readl(USB_PHY_CTRL), - musb_readb(tibase, DAVINCI_USB_CTRL_REG)); - - musb->isr = davinci_musb_interrupt; - return 0; - -fail: - usb_put_phy(musb->xceiv); -unregister: - usb_phy_generic_unregister(); - return ret; -} - -static int davinci_musb_exit(struct musb *musb) -{ - int maxdelay = 30; - u8 devctl, warn = 0; - - del_timer_sync(&musb->dev_timer); - - /* force VBUS off */ - if (cpu_is_davinci_dm355()) { - u32 deepsleep = __raw_readl(DM355_DEEPSLEEP); - - deepsleep &= ~DRVVBUS_FORCE; - deepsleep |= DRVVBUS_OVERRIDE; - __raw_writel(deepsleep, DM355_DEEPSLEEP); - } - - davinci_musb_source_power(musb, 0 /*off*/, 1); - - /* - * delay, to avoid problems with module reload. - * if there's no peripheral connected, this can take a - * long time to fall, especially on EVM with huge C133. 
- */ - do { - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - if (!(devctl & MUSB_DEVCTL_VBUS)) - break; - if ((devctl & MUSB_DEVCTL_VBUS) != warn) { - warn = devctl & MUSB_DEVCTL_VBUS; - dev_dbg(musb->controller, "VBUS %d\n", - warn >> MUSB_DEVCTL_VBUS_SHIFT); - } - msleep(1000); - maxdelay--; - } while (maxdelay > 0); - - /* in OTG mode, another host might be connected */ - if (devctl & MUSB_DEVCTL_VBUS) - dev_dbg(musb->controller, "VBUS off timeout (devctl %02x)\n", devctl); - - phy_off(); - - usb_put_phy(musb->xceiv); - - return 0; -} - -static const struct musb_platform_ops davinci_ops = { - .quirks = MUSB_DMA_CPPI, - .init = davinci_musb_init, - .exit = davinci_musb_exit, - -#ifdef CONFIG_USB_TI_CPPI_DMA - .dma_init = cppi_dma_controller_create, - .dma_exit = cppi_dma_controller_destroy, -#endif - .enable = davinci_musb_enable, - .disable = davinci_musb_disable, - - .set_mode = davinci_musb_set_mode, - - .set_vbus = davinci_musb_set_vbus, -}; - -static const struct platform_device_info davinci_dev_info = { - .name = "musb-hdrc", - .id = PLATFORM_DEVID_AUTO, - .dma_mask = DMA_BIT_MASK(32), -}; - -static int davinci_probe(struct platform_device *pdev) -{ - struct resource musb_resources[3]; - struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev); - struct platform_device *musb; - struct davinci_glue *glue; - struct platform_device_info pinfo; - struct clk *clk; - - int ret = -ENOMEM; - - glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL); - if (!glue) - goto err0; - - clk = devm_clk_get(&pdev->dev, "usb"); - if (IS_ERR(clk)) { - dev_err(&pdev->dev, "failed to get clock\n"); - ret = PTR_ERR(clk); - goto err0; - } - - ret = clk_enable(clk); - if (ret) { - dev_err(&pdev->dev, "failed to enable clock\n"); - goto err0; - } - - glue->dev = &pdev->dev; - glue->clk = clk; - - pdata->platform_ops = &davinci_ops; - - glue->vbus = devm_gpiod_get_optional(&pdev->dev, NULL, GPIOD_OUT_LOW); - if (IS_ERR(glue->vbus)) { - ret = PTR_ERR(glue->vbus); - goto err0; - } else { - glue->vbus_state = -1; - INIT_WORK(&glue->vbus_work, evm_deferred_drvvbus); - } - - usb_phy_generic_register(); - platform_set_drvdata(pdev, glue); - - memset(musb_resources, 0x00, sizeof(*musb_resources) * - ARRAY_SIZE(musb_resources)); - - musb_resources[0].name = pdev->resource[0].name; - musb_resources[0].start = pdev->resource[0].start; - musb_resources[0].end = pdev->resource[0].end; - musb_resources[0].flags = pdev->resource[0].flags; - - musb_resources[1].name = pdev->resource[1].name; - musb_resources[1].start = pdev->resource[1].start; - musb_resources[1].end = pdev->resource[1].end; - musb_resources[1].flags = pdev->resource[1].flags; - - /* - * For DM6467 3 resources are passed. A placeholder for the 3rd - * resource is always there, so it's safe to always copy it... 
- */ - musb_resources[2].name = pdev->resource[2].name; - musb_resources[2].start = pdev->resource[2].start; - musb_resources[2].end = pdev->resource[2].end; - musb_resources[2].flags = pdev->resource[2].flags; - - pinfo = davinci_dev_info; - pinfo.parent = &pdev->dev; - pinfo.res = musb_resources; - pinfo.num_res = ARRAY_SIZE(musb_resources); - pinfo.data = pdata; - pinfo.size_data = sizeof(*pdata); - - glue->musb = musb = platform_device_register_full(&pinfo); - if (IS_ERR(musb)) { - ret = PTR_ERR(musb); - dev_err(&pdev->dev, "failed to register musb device: %d\n", ret); - goto err1; - } - - return 0; - -err1: - clk_disable(clk); - -err0: - return ret; -} - -static int davinci_remove(struct platform_device *pdev) -{ - struct davinci_glue *glue = platform_get_drvdata(pdev); - - platform_device_unregister(glue->musb); - usb_phy_generic_unregister(); - clk_disable(glue->clk); - - return 0; -} - -static struct platform_driver davinci_driver = { - .probe = davinci_probe, - .remove = davinci_remove, - .driver = { - .name = "musb-davinci", - }, -}; - -MODULE_DESCRIPTION("DaVinci MUSB Glue Layer"); -MODULE_AUTHOR("Felipe Balbi "); -MODULE_LICENSE("GPL v2"); -module_platform_driver(davinci_driver); diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h deleted file mode 100644 index c8e67d15b510b7966e0675fb8f1028758c85e250..0000000000000000000000000000000000000000 --- a/drivers/usb/musb/davinci.h +++ /dev/null @@ -1,103 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2005-2006 by Texas Instruments - */ - -#ifndef __MUSB_HDRDF_H__ -#define __MUSB_HDRDF_H__ - -/* - * DaVinci-specific definitions - */ - -/* Integrated highspeed/otg PHY */ -#define USBPHY_CTL_PADDR 0x01c40034 -#define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */ -#define USBPHY_PHYCLKGD BIT(8) -#define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */ -#define USBPHY_VBDTCTEN BIT(6) /* v(bus) comparator */ -#define USBPHY_VBUSSENS BIT(5) /* (dm355,ro) is vbus > 0.5V */ -#define USBPHY_PHYPLLON BIT(4) /* override pll suspend */ -#define USBPHY_CLKO1SEL BIT(3) -#define USBPHY_OSCPDWN BIT(2) -#define USBPHY_OTGPDWN BIT(1) -#define USBPHY_PHYPDWN BIT(0) - -#define DM355_DEEPSLEEP_PADDR 0x01c40048 -#define DRVVBUS_FORCE BIT(2) -#define DRVVBUS_OVERRIDE BIT(1) - -/* For now include usb OTG module registers here */ -#define DAVINCI_USB_VERSION_REG 0x00 -#define DAVINCI_USB_CTRL_REG 0x04 -#define DAVINCI_USB_STAT_REG 0x08 -#define DAVINCI_RNDIS_REG 0x10 -#define DAVINCI_AUTOREQ_REG 0x14 -#define DAVINCI_USB_INT_SOURCE_REG 0x20 -#define DAVINCI_USB_INT_SET_REG 0x24 -#define DAVINCI_USB_INT_SRC_CLR_REG 0x28 -#define DAVINCI_USB_INT_MASK_REG 0x2c -#define DAVINCI_USB_INT_MASK_SET_REG 0x30 -#define DAVINCI_USB_INT_MASK_CLR_REG 0x34 -#define DAVINCI_USB_INT_SRC_MASKED_REG 0x38 -#define DAVINCI_USB_EOI_REG 0x3c -#define DAVINCI_USB_EOI_INTVEC 0x40 - -/* BEGIN CPPI-generic (?) 
*/ - -/* CPPI related registers */ -#define DAVINCI_TXCPPI_CTRL_REG 0x80 -#define DAVINCI_TXCPPI_TEAR_REG 0x84 -#define DAVINCI_CPPI_EOI_REG 0x88 -#define DAVINCI_CPPI_INTVEC_REG 0x8c -#define DAVINCI_TXCPPI_MASKED_REG 0x90 -#define DAVINCI_TXCPPI_RAW_REG 0x94 -#define DAVINCI_TXCPPI_INTENAB_REG 0x98 -#define DAVINCI_TXCPPI_INTCLR_REG 0x9c - -#define DAVINCI_RXCPPI_CTRL_REG 0xC0 -#define DAVINCI_RXCPPI_MASKED_REG 0xD0 -#define DAVINCI_RXCPPI_RAW_REG 0xD4 -#define DAVINCI_RXCPPI_INTENAB_REG 0xD8 -#define DAVINCI_RXCPPI_INTCLR_REG 0xDC - -#define DAVINCI_RXCPPI_BUFCNT0_REG 0xE0 -#define DAVINCI_RXCPPI_BUFCNT1_REG 0xE4 -#define DAVINCI_RXCPPI_BUFCNT2_REG 0xE8 -#define DAVINCI_RXCPPI_BUFCNT3_REG 0xEC - -/* CPPI state RAM entries */ -#define DAVINCI_CPPI_STATERAM_BASE_OFFSET 0x100 - -#define DAVINCI_TXCPPI_STATERAM_OFFSET(chnum) \ - (DAVINCI_CPPI_STATERAM_BASE_OFFSET + ((chnum) * 0x40)) -#define DAVINCI_RXCPPI_STATERAM_OFFSET(chnum) \ - (DAVINCI_CPPI_STATERAM_BASE_OFFSET + 0x20 + ((chnum) * 0x40)) - -/* CPPI masks */ -#define DAVINCI_DMA_CTRL_ENABLE 1 -#define DAVINCI_DMA_CTRL_DISABLE 0 - -#define DAVINCI_DMA_ALL_CHANNELS_ENABLE 0xF -#define DAVINCI_DMA_ALL_CHANNELS_DISABLE 0xF - -/* END CPPI-generic (?) */ - -#define DAVINCI_USB_TX_ENDPTS_MASK 0x1f /* ep0 + 4 tx */ -#define DAVINCI_USB_RX_ENDPTS_MASK 0x1e /* 4 rx */ - -#define DAVINCI_USB_USBINT_SHIFT 16 -#define DAVINCI_USB_TXINT_SHIFT 0 -#define DAVINCI_USB_RXINT_SHIFT 8 - -#define DAVINCI_INTR_DRVVBUS 0x0100 - -#define DAVINCI_USB_USBINT_MASK 0x01ff0000 /* 8 Mentor, DRVVBUS */ -#define DAVINCI_USB_TXINT_MASK \ - (DAVINCI_USB_TX_ENDPTS_MASK << DAVINCI_USB_TXINT_SHIFT) -#define DAVINCI_USB_RXINT_MASK \ - (DAVINCI_USB_RX_ENDPTS_MASK << DAVINCI_USB_RXINT_SHIFT) - -#define DAVINCI_BASE_OFFSET 0x400 - -#endif /* __MUSB_HDRDF_H__ */ diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c index d1e4e0deb7535f6ccb153c602c9f3760b65f6241..c7b1d2a394d9ad7cb62f7196368147466947b544 100644 --- a/drivers/usb/musb/jz4740.c +++ b/drivers/usb/musb/jz4740.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -81,6 +82,9 @@ static int jz4740_musb_role_switch_set(struct usb_role_switch *sw, struct jz4740_glue *glue = usb_role_switch_get_drvdata(sw); struct usb_phy *phy = glue->musb->xceiv; + if (!phy) + return 0; + switch (role) { case USB_ROLE_NONE: atomic_notifier_call_chain(&phy->notifier, USB_EVENT_NONE, phy); @@ -105,21 +109,51 @@ static int jz4740_musb_init(struct musb *musb) .driver_data = glue, .fwnode = dev_fwnode(dev), }; + int err; glue->musb = musb; - if (dev->of_node) - musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0); - else - musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); - if (IS_ERR(musb->xceiv)) - return dev_err_probe(dev, PTR_ERR(musb->xceiv), - "No transceiver configured\n"); + if (IS_ENABLED(CONFIG_GENERIC_PHY)) { + musb->phy = devm_of_phy_get_by_index(dev, dev->of_node, 0); + if (IS_ERR(musb->phy)) { + err = PTR_ERR(musb->phy); + if (err != -ENODEV) { + dev_err(dev, "Unable to get PHY\n"); + return err; + } + + musb->phy = NULL; + } + } + + if (musb->phy) { + err = phy_init(musb->phy); + if (err) { + dev_err(dev, "Failed to init PHY\n"); + return err; + } + + err = phy_power_on(musb->phy); + if (err) { + dev_err(dev, "Unable to power on PHY\n"); + goto err_phy_shutdown; + } + } else { + if (dev->of_node) + musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0); + else + musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); + if (IS_ERR(musb->xceiv)) { + dev_err(dev, "No 
transceiver configured\n"); + return PTR_ERR(musb->xceiv); + } + } glue->role_sw = usb_role_switch_register(dev, &role_sw_desc); if (IS_ERR(glue->role_sw)) { dev_err(dev, "Failed to register USB role switch\n"); - return PTR_ERR(glue->role_sw); + err = PTR_ERR(glue->role_sw); + goto err_phy_power_down; } /* @@ -131,6 +165,14 @@ static int jz4740_musb_init(struct musb *musb) musb->isr = jz4740_musb_interrupt; return 0; + +err_phy_power_down: + if (musb->phy) + phy_power_off(musb->phy); +err_phy_shutdown: + if (musb->phy) + phy_exit(musb->phy); + return err; } static int jz4740_musb_exit(struct musb *musb) @@ -138,6 +180,10 @@ static int jz4740_musb_exit(struct musb *musb) struct jz4740_glue *glue = dev_get_drvdata(musb->controller->parent); usb_role_switch_unregister(glue->role_sw); + if (musb->phy) { + phy_power_off(musb->phy); + phy_exit(musb->phy); + } return 0; } diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 03027c6fa3aba3e7205af52ab13e08f87dc99506..648bb6021c5ef5e11269aaf04ba76bf545d6e7fa 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -502,7 +502,7 @@ int musb_set_host(struct musb *musb) init_data: musb->is_active = 1; - musb->xceiv->otg->state = OTG_STATE_A_IDLE; + musb_set_state(musb, OTG_STATE_A_IDLE); MUSB_HST_MODE(musb); return error; @@ -549,7 +549,7 @@ int musb_set_peripheral(struct musb *musb) init_data: musb->is_active = 0; - musb->xceiv->otg->state = OTG_STATE_B_IDLE; + musb_set_state(musb, OTG_STATE_B_IDLE); MUSB_DEV_MODE(musb); return error; @@ -599,24 +599,24 @@ static void musb_otg_timer_func(struct timer_list *t) unsigned long flags; spin_lock_irqsave(&musb->lock, flags); - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_B_WAIT_ACON: musb_dbg(musb, "HNP: b_wait_acon timeout; back to b_peripheral"); musb_g_disconnect(musb); - musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; + musb_set_state(musb, OTG_STATE_B_PERIPHERAL); musb->is_active = 0; break; case OTG_STATE_A_SUSPEND: case OTG_STATE_A_WAIT_BCON: musb_dbg(musb, "HNP: %s timeout", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_otg_state_string(musb)); musb_platform_set_vbus(musb, 0); - musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL; + musb_set_state(musb, OTG_STATE_A_WAIT_VFALL); break; default: musb_dbg(musb, "HNP: Unhandled mode %s", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_otg_state_string(musb)); } spin_unlock_irqrestore(&musb->lock, flags); } @@ -630,20 +630,18 @@ void musb_hnp_stop(struct musb *musb) void __iomem *mbase = musb->mregs; u8 reg; - musb_dbg(musb, "HNP: stop from %s", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_dbg(musb, "HNP: stop from %s", musb_otg_state_string(musb)); - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_A_PERIPHERAL: musb_g_disconnect(musb); - musb_dbg(musb, "HNP: back to %s", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_dbg(musb, "HNP: back to %s", musb_otg_state_string(musb)); break; case OTG_STATE_B_HOST: musb_dbg(musb, "HNP: Disabling HR"); if (hcd) hcd->self.is_b_host = 0; - musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; + musb_set_state(musb, OTG_STATE_B_PERIPHERAL); MUSB_DEV_MODE(musb); reg = musb_readb(mbase, MUSB_POWER); reg |= MUSB_POWER_SUSPENDM; @@ -652,7 +650,7 @@ void musb_hnp_stop(struct musb *musb) break; default: musb_dbg(musb, "HNP: Stopping in unknown state %s", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_otg_state_string(musb)); } /* @@ -667,11 +665,10 @@ 
static void musb_recover_from_babble(struct musb *musb); static void musb_handle_intr_resume(struct musb *musb, u8 devctl) { - musb_dbg(musb, "RESUME (%s)", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_dbg(musb, "RESUME (%s)", musb_otg_state_string(musb)); if (devctl & MUSB_DEVCTL_HM) { - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_A_SUSPEND: /* remote wakeup? */ musb->port1_status |= @@ -679,27 +676,27 @@ static void musb_handle_intr_resume(struct musb *musb, u8 devctl) | MUSB_PORT_STAT_RESUME; musb->rh_timer = jiffies + msecs_to_jiffies(USB_RESUME_TIMEOUT); - musb->xceiv->otg->state = OTG_STATE_A_HOST; + musb_set_state(musb, OTG_STATE_A_HOST); musb->is_active = 1; musb_host_resume_root_hub(musb); schedule_delayed_work(&musb->finish_resume_work, msecs_to_jiffies(USB_RESUME_TIMEOUT)); break; case OTG_STATE_B_WAIT_ACON: - musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; + musb_set_state(musb, OTG_STATE_B_PERIPHERAL); musb->is_active = 1; MUSB_DEV_MODE(musb); break; default: WARNING("bogus %s RESUME (%s)\n", "host", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_otg_state_string(musb)); } } else { - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_A_SUSPEND: /* possibly DISCONNECT is upcoming */ - musb->xceiv->otg->state = OTG_STATE_A_HOST; + musb_set_state(musb, OTG_STATE_A_HOST); musb_host_resume_root_hub(musb); break; case OTG_STATE_B_WAIT_ACON: @@ -722,7 +719,7 @@ static void musb_handle_intr_resume(struct musb *musb, u8 devctl) default: WARNING("bogus %s RESUME (%s)\n", "peripheral", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_otg_state_string(musb)); } } } @@ -738,8 +735,7 @@ static irqreturn_t musb_handle_intr_sessreq(struct musb *musb, u8 devctl) return IRQ_HANDLED; } - musb_dbg(musb, "SESSION_REQUEST (%s)", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_dbg(musb, "SESSION_REQUEST (%s)", musb_otg_state_string(musb)); /* IRQ arrives from ID pin sense or (later, if VBUS power * is removed) SRP. responses are time critical: @@ -750,7 +746,7 @@ static irqreturn_t musb_handle_intr_sessreq(struct musb *musb, u8 devctl) */ musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); musb->ep0_stage = MUSB_EP0_START; - musb->xceiv->otg->state = OTG_STATE_A_IDLE; + musb_set_state(musb, OTG_STATE_A_IDLE); MUSB_HST_MODE(musb); musb_platform_set_vbus(musb, 1); @@ -777,7 +773,7 @@ static void musb_handle_intr_vbuserr(struct musb *musb, u8 devctl) * REVISIT: do delays from lots of DEBUG_KERNEL checks * make trouble here, keeping VBUS < 4.4V ? */ - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_A_HOST: /* recovery is dicey once we've gotten past the * initial stages of enumeration, but if VBUS @@ -806,7 +802,7 @@ static void musb_handle_intr_vbuserr(struct musb *musb, u8 devctl) dev_printk(ignore ? 
KERN_DEBUG : KERN_ERR, musb->controller, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n", - usb_otg_state_string(musb->xceiv->otg->state), + musb_otg_state_string(musb), devctl, ({ char *s; switch (devctl & MUSB_DEVCTL_VBUS) { @@ -831,9 +827,9 @@ static void musb_handle_intr_vbuserr(struct musb *musb, u8 devctl) static void musb_handle_intr_suspend(struct musb *musb, u8 devctl) { musb_dbg(musb, "SUSPEND (%s) devctl %02x", - usb_otg_state_string(musb->xceiv->otg->state), devctl); + musb_otg_state_string(musb), devctl); - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_A_PERIPHERAL: /* We also come here if the cable is removed, since * this silicon doesn't report ID-no-longer-grounded. @@ -858,7 +854,7 @@ static void musb_handle_intr_suspend(struct musb *musb, u8 devctl) musb_g_suspend(musb); musb->is_active = musb->g.b_hnp_enable; if (musb->is_active) { - musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON; + musb_set_state(musb, OTG_STATE_B_WAIT_ACON); musb_dbg(musb, "HNP: Setting timer for b_ase0_brst"); mod_timer(&musb->otg_timer, jiffies + msecs_to_jiffies( @@ -871,7 +867,7 @@ static void musb_handle_intr_suspend(struct musb *musb, u8 devctl) + msecs_to_jiffies(musb->a_wait_bcon)); break; case OTG_STATE_A_HOST: - musb->xceiv->otg->state = OTG_STATE_A_SUSPEND; + musb_set_state(musb, OTG_STATE_A_SUSPEND); musb->is_active = musb->hcd->self.b_hnp_enable; break; case OTG_STATE_B_HOST: @@ -909,7 +905,7 @@ static void musb_handle_intr_connect(struct musb *musb, u8 devctl, u8 int_usb) musb->port1_status |= USB_PORT_STAT_LOW_SPEED; /* indicate new connection to OTG machine */ - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_B_PERIPHERAL: if (int_usb & MUSB_INTR_SUSPEND) { musb_dbg(musb, "HNP: SUSPEND+CONNECT, now b_host"); @@ -921,7 +917,7 @@ static void musb_handle_intr_connect(struct musb *musb, u8 devctl, u8 int_usb) case OTG_STATE_B_WAIT_ACON: musb_dbg(musb, "HNP: CONNECT, now b_host"); b_host: - musb->xceiv->otg->state = OTG_STATE_B_HOST; + musb_set_state(musb, OTG_STATE_B_HOST); if (musb->hcd) musb->hcd->self.is_b_host = 1; del_timer(&musb->otg_timer); @@ -929,7 +925,7 @@ b_host: default: if ((devctl & MUSB_DEVCTL_VBUS) == (3 << MUSB_DEVCTL_VBUS_SHIFT)) { - musb->xceiv->otg->state = OTG_STATE_A_HOST; + musb_set_state(musb, OTG_STATE_A_HOST); if (hcd) hcd->self.is_b_host = 0; } @@ -939,16 +935,16 @@ b_host: musb_host_poke_root_hub(musb); musb_dbg(musb, "CONNECT (%s) devctl %02x", - usb_otg_state_string(musb->xceiv->otg->state), devctl); + musb_otg_state_string(musb), devctl); } static void musb_handle_intr_disconnect(struct musb *musb, u8 devctl) { musb_dbg(musb, "DISCONNECT (%s) as %s, devctl %02x", - usb_otg_state_string(musb->xceiv->otg->state), + musb_otg_state_string(musb), MUSB_MODE(musb), devctl); - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_A_HOST: case OTG_STATE_A_SUSPEND: musb_host_resume_root_hub(musb); @@ -966,7 +962,7 @@ static void musb_handle_intr_disconnect(struct musb *musb, u8 devctl) musb_root_disconnect(musb); if (musb->hcd) musb->hcd->self.is_b_host = 0; - musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; + musb_set_state(musb, OTG_STATE_B_PERIPHERAL); MUSB_DEV_MODE(musb); musb_g_disconnect(musb); break; @@ -981,7 +977,7 @@ static void musb_handle_intr_disconnect(struct musb *musb, u8 devctl) break; default: WARNING("unhandled DISCONNECT transition (%s)\n", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_otg_state_string(musb)); break; } } @@ 
-1004,16 +1000,15 @@ static void musb_handle_intr_reset(struct musb *musb) dev_err(musb->controller, "Babble\n"); musb_recover_from_babble(musb); } else { - musb_dbg(musb, "BUS RESET as %s", - usb_otg_state_string(musb->xceiv->otg->state)); - switch (musb->xceiv->otg->state) { + musb_dbg(musb, "BUS RESET as %s", musb_otg_state_string(musb)); + switch (musb_get_state(musb)) { case OTG_STATE_A_SUSPEND: musb_g_reset(musb); fallthrough; case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */ /* never use invalid T(a_wait_bcon) */ musb_dbg(musb, "HNP: in %s, %d msec timeout", - usb_otg_state_string(musb->xceiv->otg->state), + musb_otg_state_string(musb), TA_WAIT_BCON(musb)); mod_timer(&musb->otg_timer, jiffies + msecs_to_jiffies(TA_WAIT_BCON(musb))); @@ -1024,19 +1019,19 @@ static void musb_handle_intr_reset(struct musb *musb) break; case OTG_STATE_B_WAIT_ACON: musb_dbg(musb, "HNP: RESET (%s), to b_peripheral", - usb_otg_state_string(musb->xceiv->otg->state)); - musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; + musb_otg_state_string(musb)); + musb_set_state(musb, OTG_STATE_B_PERIPHERAL); musb_g_reset(musb); break; case OTG_STATE_B_IDLE: - musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; + musb_set_state(musb, OTG_STATE_B_PERIPHERAL); fallthrough; case OTG_STATE_B_PERIPHERAL: musb_g_reset(musb); break; default: musb_dbg(musb, "Unhandled BUS RESET as %s", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_otg_state_string(musb)); } } } @@ -1216,8 +1211,8 @@ void musb_start(struct musb *musb) * (c) peripheral initiates, using SRP */ if (musb->port_mode != MUSB_HOST && - musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON && - (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) { + musb_get_state(musb) != OTG_STATE_A_WAIT_BCON && + (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) { musb->is_active = 1; } else { devctl |= MUSB_DEVCTL_SESSION; @@ -1863,7 +1858,7 @@ mode_show(struct device *dev, struct device_attribute *attr, char *buf) int ret; spin_lock_irqsave(&musb->lock, flags); - ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->otg->state)); + ret = sprintf(buf, "%s\n", musb_otg_state_string(musb)); spin_unlock_irqrestore(&musb->lock, flags); return ret; @@ -1908,7 +1903,7 @@ vbus_store(struct device *dev, struct device_attribute *attr, spin_lock_irqsave(&musb->lock, flags); /* force T(a_wait_bcon) to be zero/unlimited *OR* valid */ musb->a_wait_bcon = val ? 
max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0 ; - if (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON) + if (musb_get_state(musb) == OTG_STATE_A_WAIT_BCON) musb->is_active = 0; musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val)); spin_unlock_irqrestore(&musb->lock, flags); @@ -2089,8 +2084,8 @@ static void musb_irq_work(struct work_struct *data) musb_pm_runtime_check_session(musb); - if (musb->xceiv->otg->state != musb->xceiv_old_state) { - musb->xceiv_old_state = musb->xceiv->otg->state; + if (musb_get_state(musb) != musb->xceiv_old_state) { + musb->xceiv_old_state = musb_get_state(musb); sysfs_notify(&musb->controller->kobj, NULL, "mode"); } @@ -2453,7 +2448,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) else musb->io.set_toggle = musb_default_set_toggle; - if (!musb->xceiv->io_ops) { + if (IS_ENABLED(CONFIG_USB_PHY) && musb->xceiv && !musb->xceiv->io_ops) { musb->xceiv->io_dev = musb->controller; musb->xceiv->io_priv = musb->mregs; musb->xceiv->io_ops = &musb_ulpi_access; @@ -2532,7 +2527,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) } MUSB_DEV_MODE(musb); - musb->xceiv->otg->state = OTG_STATE_B_IDLE; + musb_set_state(musb, OTG_STATE_B_IDLE); switch (musb->port_mode) { case MUSB_HOST: diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index a8a65effe68b56db5c86fe952957e68a54a07341..b7588d11cfc59d172119130dbef9154816966c73 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -339,6 +339,8 @@ struct musb { struct usb_phy *xceiv; struct phy *phy; + enum usb_otg_state otg_state; + int nIrq; unsigned irq_wake:1; @@ -592,6 +594,28 @@ static inline void musb_platform_clear_ep_rxintr(struct musb *musb, int epnum) musb->ops->clear_ep_rxintr(musb, epnum); } +static inline void musb_set_state(struct musb *musb, + enum usb_otg_state otg_state) +{ + if (musb->xceiv) + musb->xceiv->otg->state = otg_state; + else + musb->otg_state = otg_state; +} + +static inline enum usb_otg_state musb_get_state(struct musb *musb) +{ + if (musb->xceiv) + return musb->xceiv->otg->state; + + return musb->otg_state; +} + +static inline const char *musb_otg_state_string(struct musb *musb) +{ + return usb_otg_state_string(musb_get_state(musb)); +} + /* * gets the "dr_mode" property from DT and converts it into musb_mode * if the property is not found or not recognized returns MUSB_OTG diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c index 30a89aa8a3e7ad3cf9cb2085247cd500d845f242..78c726a71b17753c677b97018ab3902bc9f6c7de 100644 --- a/drivers/usb/musb/musb_debugfs.c +++ b/drivers/usb/musb/musb_debugfs.c @@ -235,7 +235,7 @@ static int musb_softconnect_show(struct seq_file *s, void *unused) u8 reg; int connect; - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_A_HOST: case OTG_STATE_A_WAIT_BCON: pm_runtime_get_sync(musb->controller); @@ -275,7 +275,7 @@ static ssize_t musb_softconnect_write(struct file *file, pm_runtime_get_sync(musb->controller); if (!strncmp(buf, "0", 1)) { - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_A_HOST: musb_root_disconnect(musb); reg = musb_readb(musb->mregs, MUSB_DEVCTL); @@ -286,7 +286,7 @@ static ssize_t musb_softconnect_write(struct file *file, break; } } else if (!strncmp(buf, "1", 1)) { - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_A_WAIT_BCON: /* * musb_save_context() called in musb_runtime_suspend() diff --git 
a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h index 7d67b69df0a07d5edf47edb23630a64d0219a107..e2445ca3356d09be62f9f0f3c37f1f11ca045f08 100644 --- a/drivers/usb/musb/musb_dma.h +++ b/drivers/usb/musb/musb_dma.h @@ -61,12 +61,6 @@ struct musb_hw_ep; #define musb_dma_cppi41(musb) 0 #endif -#ifdef CONFIG_USB_TI_CPPI_DMA -#define musb_dma_cppi(musb) (musb->ops->quirks & MUSB_DMA_CPPI) -#else -#define musb_dma_cppi(musb) 0 -#endif - #ifdef CONFIG_USB_TUSB_OMAP_DMA #define tusb_dma_omap(musb) (musb->ops->quirks & MUSB_DMA_TUSB_OMAP) #else @@ -79,11 +73,10 @@ struct musb_hw_ep; #define musb_dma_inventra(musb) 0 #endif -#if defined(CONFIG_USB_TI_CPPI_DMA) || defined(CONFIG_USB_TI_CPPI41_DMA) -#define is_cppi_enabled(musb) \ - (musb_dma_cppi(musb) || musb_dma_cppi41(musb)) +#if defined(CONFIG_USB_TI_CPPI41_DMA) +#define is_cppi_enabled(musb) musb_dma_cppi41(musb) #else -#define is_cppi_enabled(musb) 0 +#define is_cppi_enabled(musb) 0 #endif /* diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 6704a62a166592f4df150dfbc71ff0a676599aa8..31c44325e82872ac8558ff8465250e8f8e159f2d 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -1523,7 +1523,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget) spin_lock_irqsave(&musb->lock, flags); - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_B_PERIPHERAL: /* NOTE: OTG state machine doesn't include B_SUSPENDED; * that's part of the standard usb 1.1 state machine, and @@ -1552,9 +1552,11 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget) break; } - spin_unlock_irqrestore(&musb->lock, flags); - otg_start_srp(musb->xceiv->otg); - spin_lock_irqsave(&musb->lock, flags); + if (musb->xceiv) { + spin_unlock_irqrestore(&musb->lock, flags); + otg_start_srp(musb->xceiv->otg); + spin_lock_irqsave(&musb->lock, flags); + } /* Block idling for at least 1s */ musb_platform_try_idle(musb, @@ -1564,7 +1566,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget) goto done; default: musb_dbg(musb, "Unhandled wake: %s", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_otg_state_string(musb)); goto done; } @@ -1628,8 +1630,6 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) { struct musb *musb = gadget_to_musb(gadget); - if (!musb->xceiv->set_power) - return -EOPNOTSUPP; return usb_phy_set_power(musb->xceiv, mA); } @@ -1787,7 +1787,7 @@ int musb_gadget_setup(struct musb *musb) musb->g.speed = USB_SPEED_UNKNOWN; MUSB_DEV_MODE(musb); - musb->xceiv->otg->state = OTG_STATE_B_IDLE; + musb_set_state(musb, OTG_STATE_B_IDLE); /* this "gadget" abstracts/virtualizes the controller */ musb->g.name = musb_driver_name; @@ -1834,7 +1834,6 @@ static int musb_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver) { struct musb *musb = gadget_to_musb(g); - struct usb_otg *otg = musb->xceiv->otg; unsigned long flags; int retval = 0; @@ -1851,8 +1850,12 @@ static int musb_gadget_start(struct usb_gadget *g, spin_lock_irqsave(&musb->lock, flags); musb->is_active = 1; - otg_set_peripheral(otg, &musb->g); - musb->xceiv->otg->state = OTG_STATE_B_IDLE; + if (musb->xceiv) + otg_set_peripheral(musb->xceiv->otg, &musb->g); + else + phy_set_mode(musb->phy, PHY_MODE_USB_DEVICE); + + musb_set_state(musb, OTG_STATE_B_IDLE); spin_unlock_irqrestore(&musb->lock, flags); musb_start(musb); @@ -1861,7 +1864,7 @@ static int musb_gadget_start(struct usb_gadget *g, * handles power budgeting ... 
this way also * ensures HdrcStart is indirectly called. */ - if (musb->xceiv->last_event == USB_EVENT_ID) + if (musb->xceiv && musb->xceiv->last_event == USB_EVENT_ID) musb_platform_set_vbus(musb, 1); pm_runtime_mark_last_busy(musb->controller); @@ -1897,9 +1900,13 @@ static int musb_gadget_stop(struct usb_gadget *g) (void) musb_gadget_vbus_draw(&musb->g, 0); - musb->xceiv->otg->state = OTG_STATE_UNDEFINED; + musb_set_state(musb, OTG_STATE_UNDEFINED); musb_stop(musb); - otg_set_peripheral(musb->xceiv->otg, NULL); + + if (musb->xceiv) + otg_set_peripheral(musb->xceiv->otg, NULL); + else + phy_set_mode(musb->phy, PHY_MODE_INVALID); musb->is_active = 0; musb->gadget_driver = NULL; @@ -1926,7 +1933,7 @@ static int musb_gadget_stop(struct usb_gadget *g) void musb_g_resume(struct musb *musb) { musb->is_suspended = 0; - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_B_IDLE: break; case OTG_STATE_B_WAIT_ACON: @@ -1940,7 +1947,7 @@ void musb_g_resume(struct musb *musb) break; default: WARNING("unhandled RESUME transition (%s)\n", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_otg_state_string(musb)); } } @@ -1952,10 +1959,10 @@ void musb_g_suspend(struct musb *musb) devctl = musb_readb(musb->mregs, MUSB_DEVCTL); musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl); - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_B_IDLE: if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) - musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; + musb_set_state(musb, OTG_STATE_B_PERIPHERAL); break; case OTG_STATE_B_PERIPHERAL: musb->is_suspended = 1; @@ -1970,7 +1977,7 @@ void musb_g_suspend(struct musb *musb) * A_PERIPHERAL may need care too */ WARNING("unhandled SUSPEND transition (%s)", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_otg_state_string(musb)); } } @@ -2001,22 +2008,22 @@ void musb_g_disconnect(struct musb *musb) spin_lock(&musb->lock); } - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { default: musb_dbg(musb, "Unhandled disconnect %s, setting a_idle", - usb_otg_state_string(musb->xceiv->otg->state)); - musb->xceiv->otg->state = OTG_STATE_A_IDLE; + musb_otg_state_string(musb)); + musb_set_state(musb, OTG_STATE_A_IDLE); MUSB_HST_MODE(musb); break; case OTG_STATE_A_PERIPHERAL: - musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON; + musb_set_state(musb, OTG_STATE_A_WAIT_BCON); MUSB_HST_MODE(musb); break; case OTG_STATE_B_WAIT_ACON: case OTG_STATE_B_HOST: case OTG_STATE_B_PERIPHERAL: case OTG_STATE_B_IDLE: - musb->xceiv->otg->state = OTG_STATE_B_IDLE; + musb_set_state(musb, OTG_STATE_B_IDLE); break; case OTG_STATE_B_SRP_INIT: break; @@ -2080,13 +2087,13 @@ __acquires(musb->lock) * In that case, do not rely on devctl for setting * peripheral mode. 
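	 * (These state writes are now funneled through musb_set_state();
	 * that helper, added in musb_core.h in this same series, updates
	 * the legacy PHY's otg->state when a usb_phy is bound and falls
	 * back to the driver-local musb->otg_state otherwise.)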
*/ - musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; + musb_set_state(musb, OTG_STATE_B_PERIPHERAL); musb->g.is_a_peripheral = 0; } else if (devctl & MUSB_DEVCTL_BDEVICE) { - musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; + musb_set_state(musb, OTG_STATE_B_PERIPHERAL); musb->g.is_a_peripheral = 0; } else { - musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL; + musb_set_state(musb, OTG_STATE_A_PERIPHERAL); musb->g.is_a_peripheral = 1; } diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 9ff7d891b4b7659e5bf44c6758158dfa3c9316d6..a02c29216955a551cb5772dc70913f97e13eddb3 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -2501,7 +2501,7 @@ static int musb_bus_suspend(struct usb_hcd *hcd) if (!is_host_active(musb)) return 0; - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_A_SUSPEND: return 0; case OTG_STATE_A_WAIT_VRISE: @@ -2511,7 +2511,7 @@ static int musb_bus_suspend(struct usb_hcd *hcd) */ devctl = musb_readb(musb->mregs, MUSB_DEVCTL); if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) - musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON; + musb_set_state(musb, OTG_STATE_A_WAIT_BCON); break; default: break; @@ -2519,7 +2519,7 @@ static int musb_bus_suspend(struct usb_hcd *hcd) if (musb->is_active) { WARNING("trying to suspend as %s while active\n", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_otg_state_string(musb)); return -EBUSY; } else return 0; @@ -2720,12 +2720,18 @@ int musb_host_setup(struct musb *musb, int power_budget) if (musb->port_mode == MUSB_HOST) { MUSB_HST_MODE(musb); - musb->xceiv->otg->state = OTG_STATE_A_IDLE; + musb_set_state(musb, OTG_STATE_A_IDLE); } - otg_set_host(musb->xceiv->otg, &hcd->self); + + if (musb->xceiv) { + otg_set_host(musb->xceiv->otg, &hcd->self); + musb->xceiv->otg->host = &hcd->self; + } else { + phy_set_mode(musb->phy, PHY_MODE_USB_HOST); + } + /* don't support otg protocols */ hcd->self.otg_port = 0; - musb->xceiv->otg->host = &hcd->self; hcd->power_budget = 2 * (power_budget ? : 250); hcd->skip_phy_initialization = 1; diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index cafc69536e1d787679a14d7c3d74b97da9e34b08..2b2164e028b3187e14dab8192acba89a8dd24732 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c @@ -43,14 +43,13 @@ void musb_host_finish_resume(struct work_struct *work) musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; usb_hcd_poll_rh_status(musb->hcd); /* NOTE: it might really be A_WAIT_BCON ... 
*/ - musb->xceiv->otg->state = OTG_STATE_A_HOST; + musb_set_state(musb, OTG_STATE_A_HOST); spin_unlock_irqrestore(&musb->lock, flags); } int musb_port_suspend(struct musb *musb, bool do_suspend) { - struct usb_otg *otg = musb->xceiv->otg; u8 power; void __iomem *mbase = musb->mregs; @@ -85,10 +84,11 @@ int musb_port_suspend(struct musb *musb, bool do_suspend) musb_dbg(musb, "Root port suspended, power %02x", power); musb->port1_status |= USB_PORT_STAT_SUSPEND; - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_A_HOST: - musb->xceiv->otg->state = OTG_STATE_A_SUSPEND; - musb->is_active = otg->host->b_hnp_enable; + musb_set_state(musb, OTG_STATE_A_SUSPEND); + musb->is_active = musb->xceiv && + musb->xceiv->otg->host->b_hnp_enable; if (musb->is_active) mod_timer(&musb->otg_timer, jiffies + msecs_to_jiffies( @@ -96,13 +96,14 @@ int musb_port_suspend(struct musb *musb, bool do_suspend) musb_platform_try_idle(musb, 0); break; case OTG_STATE_B_HOST: - musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON; - musb->is_active = otg->host->b_hnp_enable; + musb_set_state(musb, OTG_STATE_B_WAIT_ACON); + musb->is_active = musb->xceiv && + musb->xceiv->otg->host->b_hnp_enable; musb_platform_try_idle(musb, 0); break; default: musb_dbg(musb, "bogus rh suspend? %s", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_otg_state_string(musb)); } } else if (power & MUSB_POWER_SUSPENDM) { power &= ~MUSB_POWER_SUSPENDM; @@ -123,7 +124,7 @@ void musb_port_reset(struct musb *musb, bool do_reset) u8 power; void __iomem *mbase = musb->mregs; - if (musb->xceiv->otg->state == OTG_STATE_B_IDLE) { + if (musb_get_state(musb) == OTG_STATE_B_IDLE) { musb_dbg(musb, "HNP: Returning from HNP; no hub reset from b_idle"); musb->port1_status &= ~USB_PORT_STAT_RESET; return; @@ -196,32 +197,30 @@ void musb_port_reset(struct musb *musb, bool do_reset) void musb_root_disconnect(struct musb *musb) { - struct usb_otg *otg = musb->xceiv->otg; - musb->port1_status = USB_PORT_STAT_POWER | (USB_PORT_STAT_C_CONNECTION << 16); usb_hcd_poll_rh_status(musb->hcd); musb->is_active = 0; - switch (musb->xceiv->otg->state) { + switch (musb_get_state(musb)) { case OTG_STATE_A_SUSPEND: - if (otg->host->b_hnp_enable) { - musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL; + if (musb->xceiv && musb->xceiv->otg->host->b_hnp_enable) { + musb_set_state(musb, OTG_STATE_A_PERIPHERAL); musb->g.is_a_peripheral = 1; break; } fallthrough; case OTG_STATE_A_HOST: - musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON; + musb_set_state(musb, OTG_STATE_A_WAIT_BCON); musb->is_active = 0; break; case OTG_STATE_A_WAIT_VFALL: - musb->xceiv->otg->state = OTG_STATE_B_IDLE; + musb_set_state(musb, OTG_STATE_B_IDLE); break; default: musb_dbg(musb, "host disconnect (%s)", - usb_otg_state_string(musb->xceiv->otg->state)); + musb_otg_state_string(musb)); } } EXPORT_SYMBOL_GPL(musb_root_disconnect); diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index f571a65ae6ee231a0385a8d8efeff18bf56ba9a0..476f55d1fec3084a33ecd586855d626e01ad4caf 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -310,6 +311,7 @@ static int omap2430_probe(struct platform_device *pdev) struct device_node *control_node; struct platform_device *control_pdev; int ret = -ENOMEM, val; + bool populate_irqs = false; if (!np) return -ENODEV; @@ -328,6 +330,18 @@ static int omap2430_probe(struct platform_device *pdev) musb->dev.dma_mask = &omap2430_dmamask; 
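	/* hand the glue layer's DMA mask down to the musb child device */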
musb->dev.coherent_dma_mask = omap2430_dmamask; + /* + * Legacy SoCs using omap_device get confused if node is moved + * because of interconnect properties mixed into the node. + */ + if (of_get_property(np, "ti,hwmods", NULL)) { + dev_warn(&pdev->dev, "please update to probe with ti-sysc\n"); + populate_irqs = true; + } else { + device_set_of_node_from_dev(&musb->dev, &pdev->dev); + } + of_node_put(np); + glue->dev = &pdev->dev; glue->musb = musb; glue->status = MUSB_UNKNOWN; @@ -389,6 +403,46 @@ static int omap2430_probe(struct platform_device *pdev) goto err2; } + if (populate_irqs) { + struct resource musb_res[3]; + struct resource *res; + int i = 0; + + memset(musb_res, 0, sizeof(*musb_res) * ARRAY_SIZE(musb_res)); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + goto err2; + + musb_res[i].start = res->start; + musb_res[i].end = res->end; + musb_res[i].flags = res->flags; + musb_res[i].name = res->name; + i++; + + ret = of_irq_get_byname(np, "mc"); + if (ret > 0) { + musb_res[i].start = ret; + musb_res[i].flags = IORESOURCE_IRQ; + musb_res[i].name = "mc"; + i++; + } + + ret = of_irq_get_byname(np, "dma"); + if (ret > 0) { + musb_res[i].start = ret; + musb_res[i].flags = IORESOURCE_IRQ; + musb_res[i].name = "dma"; + i++; + } + + ret = platform_device_add_resources(musb, musb_res, i); + if (ret) { + dev_err(&pdev->dev, "failed to add IRQ resources\n"); + goto err2; + } + } + ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); if (ret) { dev_err(&pdev->dev, "failed to add platform_data\n"); diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 2acbe41fbf7e0edfef40ad54a79dabc26bb14f63..915df5726a5ccfc88599bf3215978df642091eea 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -93,12 +93,16 @@ config USB_GPIO_VBUS tristate "GPIO based peripheral-only VBUS sensing 'transceiver'" depends on GPIOLIB || COMPILE_TEST depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' + depends on !USB_CONN_GPIO select USB_PHY help Provides simple GPIO VBUS sensing for controllers with an internal transceiver via the usb_phy interface, and optionally control of a D+ pullup GPIO as well as a VBUS - current limit regulator. + current limit regulator. This driver is for devices that do + NOT support role switch. OTG devices that can do role switch + (master/peripheral) shall use the USB based connection + detection driver USB_CONN_GPIO. config OMAP_OTG tristate "OMAP USB OTG controller driver" @@ -185,12 +189,4 @@ config USB_ULPI_VIEWPORT Provides read/write operations to the ULPI phy register set for controllers with a viewport register (e.g. Chipidea/ARC controllers). -config JZ4770_PHY - tristate "Ingenic SoCs Transceiver Driver" - depends on MIPS || COMPILE_TEST - select USB_PHY - help - This driver provides PHY support for the USB controller found - on the JZ-series and X-series SoCs from Ingenic. 
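# NOTE: the jz4740 glue above now requests a generic PHY via
# devm_of_phy_get_by_index() and only falls back to a usb_phy
# transceiver, so this legacy driver is removed along with it.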
- endmenu diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile index b352bdbe871214bfb3da11a85f73daae7a9bd736..df1d99010079fa69dbcdbf5d64c99c8dd7ac8195 100644 --- a/drivers/usb/phy/Makefile +++ b/drivers/usb/phy/Makefile @@ -24,4 +24,3 @@ obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o obj-$(CONFIG_USB_ULPI) += phy-ulpi.o obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o obj-$(CONFIG_KEYSTONE_USB_PHY) += phy-keystone.o -obj-$(CONFIG_JZ4770_PHY) += phy-jz4770.o diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c index 3dc5c04e7cbf91bedf49d6387d609c6239d76ee0..c1309ea24a52e11be6e9ced6d9d187822d43f16c 100644 --- a/drivers/usb/phy/phy-generic.c +++ b/drivers/usb/phy/phy-generic.c @@ -209,7 +209,7 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop) int err = 0; u32 clk_rate = 0; - bool needs_vcc = false, needs_clk = false; + bool needs_clk = false; if (dev->of_node) { struct device_node *node = dev->of_node; @@ -217,7 +217,6 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop) if (of_property_read_u32(node, "clock-frequency", &clk_rate)) clk_rate = 0; - needs_vcc = of_property_read_bool(node, "vcc-supply"); needs_clk = of_property_read_bool(node, "clocks"); } nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset", @@ -257,13 +256,10 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop) } } - nop->vcc = devm_regulator_get(dev, "vcc"); - if (IS_ERR(nop->vcc)) { - dev_dbg(dev, "Error getting vcc regulator: %ld\n", - PTR_ERR(nop->vcc)); - if (needs_vcc) - return -EPROBE_DEFER; - } + nop->vcc = devm_regulator_get_optional(dev, "vcc"); + if (IS_ERR(nop->vcc) && PTR_ERR(nop->vcc) != -ENODEV) + return dev_err_probe(dev, PTR_ERR(nop->vcc), + "could not get vcc regulator\n"); nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus"); if (PTR_ERR(nop->vbus_draw) == -ENODEV) @@ -290,6 +286,7 @@ EXPORT_SYMBOL_GPL(usb_phy_gen_create_phy); static int usb_phy_generic_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct device_node *dn = dev->of_node; struct usb_phy_generic *nop; int err; @@ -327,6 +324,9 @@ static int usb_phy_generic_probe(struct platform_device *pdev) platform_set_drvdata(pdev, nop); + device_set_wakeup_capable(&pdev->dev, + of_property_read_bool(dn, "wakeup-source")); + return 0; } diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c index f13f5530746ccac46dc2637df4b96c37b1c18829..12dfeff7de3d0b666e632ce3171f315bdcdfcb77 100644 --- a/drivers/usb/phy/phy-gpio-vbus-usb.c +++ b/drivers/usb/phy/phy-gpio-vbus-usb.c @@ -366,12 +366,24 @@ static const struct dev_pm_ops gpio_vbus_dev_pm_ops = { MODULE_ALIAS("platform:gpio-vbus"); +/* + * NOTE: this driver matches against "gpio-usb-b-connector" for + * devices that do NOT support role switch. 
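+ * Controllers that can switch roles should use the USB_CONN_GPIO
+ * connection-detection driver instead; the USB_GPIO_VBUS Kconfig
+ * entry above is now mutually exclusive with it ("depends on
+ * !USB_CONN_GPIO").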
+ */ +static const struct of_device_id gpio_vbus_of_match[] = { + { + .compatible = "gpio-usb-b-connector", + }, + {}, +}; + static struct platform_driver gpio_vbus_driver = { .driver = { .name = "gpio-vbus", #ifdef CONFIG_PM .pm = &gpio_vbus_dev_pm_ops, #endif + .of_match_table = gpio_vbus_of_match, }, .probe = gpio_vbus_probe, .remove = gpio_vbus_remove, diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c index e5d3f206097c7034ac2d32db5f4dab35f70b4796..931610b76f3d88e1ac09760d7093895c0e3f9ce1 100644 --- a/drivers/usb/phy/phy-isp1301-omap.c +++ b/drivers/usb/phy/phy-isp1301-omap.c @@ -1471,7 +1471,7 @@ isp1301_start_hnp(struct usb_otg *otg) /*-------------------------------------------------------------------------*/ static int -isp1301_probe(struct i2c_client *i2c, const struct i2c_device_id *id) +isp1301_probe(struct i2c_client *i2c) { int status; struct isp1301 *isp; @@ -1616,7 +1616,7 @@ static struct i2c_driver isp1301_driver = { .driver = { .name = "isp1301_omap", }, - .probe = isp1301_probe, + .probe_new = isp1301_probe, .remove = isp1301_remove, .id_table = isp1301_id, }; diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c index c2777a5c1f4ed2516ffdf5018f1d85b5495ed3c0..f4ee14d985853c0310a69704579d565ba7a40d4f 100644 --- a/drivers/usb/phy/phy-isp1301.c +++ b/drivers/usb/phy/phy-isp1301.c @@ -92,8 +92,7 @@ static int isp1301_phy_set_vbus(struct usb_phy *phy, int on) return 0; } -static int isp1301_probe(struct i2c_client *client, - const struct i2c_device_id *i2c_id) +static int isp1301_probe(struct i2c_client *client) { struct isp1301 *isp; struct usb_phy *phy; @@ -133,7 +132,7 @@ static struct i2c_driver isp1301_driver = { .name = DRV_NAME, .of_match_table = isp1301_of_match, }, - .probe = isp1301_probe, + .probe_new = isp1301_probe, .remove = isp1301_remove, .id_table = isp1301_id, }; diff --git a/drivers/usb/phy/phy-jz4770.c b/drivers/usb/phy/phy-jz4770.c deleted file mode 100644 index f16adcacdce3f7223bfc9b6b36af605bf1c862f0..0000000000000000000000000000000000000000 --- a/drivers/usb/phy/phy-jz4770.c +++ /dev/null @@ -1,353 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Ingenic SoCs USB PHY driver - * Copyright (c) Paul Cercueil - * Copyright (c) 漆鹏振 (Qi Pengzhen) - * Copyright (c) 周琰杰 (Zhou Yanjie) - */ - -#include -#include -#include -#include -#include -#include -#include - -/* OTGPHY register offsets */ -#define REG_USBPCR_OFFSET 0x00 -#define REG_USBRDT_OFFSET 0x04 -#define REG_USBVBFIL_OFFSET 0x08 -#define REG_USBPCR1_OFFSET 0x0c - -/* bits within the USBPCR register */ -#define USBPCR_USB_MODE BIT(31) -#define USBPCR_AVLD_REG BIT(30) -#define USBPCR_COMMONONN BIT(25) -#define USBPCR_VBUSVLDEXT BIT(24) -#define USBPCR_VBUSVLDEXTSEL BIT(23) -#define USBPCR_POR BIT(22) -#define USBPCR_SIDDQ BIT(21) -#define USBPCR_OTG_DISABLE BIT(20) -#define USBPCR_TXPREEMPHTUNE BIT(6) - -#define USBPCR_IDPULLUP_LSB 28 -#define USBPCR_IDPULLUP_MASK GENMASK(29, USBPCR_IDPULLUP_LSB) -#define USBPCR_IDPULLUP_ALWAYS (0x2 << USBPCR_IDPULLUP_LSB) -#define USBPCR_IDPULLUP_SUSPEND (0x1 << USBPCR_IDPULLUP_LSB) -#define USBPCR_IDPULLUP_OTG (0x0 << USBPCR_IDPULLUP_LSB) - -#define USBPCR_COMPDISTUNE_LSB 17 -#define USBPCR_COMPDISTUNE_MASK GENMASK(19, USBPCR_COMPDISTUNE_LSB) -#define USBPCR_COMPDISTUNE_DFT (0x4 << USBPCR_COMPDISTUNE_LSB) - -#define USBPCR_OTGTUNE_LSB 14 -#define USBPCR_OTGTUNE_MASK GENMASK(16, USBPCR_OTGTUNE_LSB) -#define USBPCR_OTGTUNE_DFT (0x4 << USBPCR_OTGTUNE_LSB) - -#define USBPCR_SQRXTUNE_LSB 11 -#define 
USBPCR_SQRXTUNE_MASK GENMASK(13, USBPCR_SQRXTUNE_LSB) -#define USBPCR_SQRXTUNE_DCR_20PCT (0x7 << USBPCR_SQRXTUNE_LSB) -#define USBPCR_SQRXTUNE_DFT (0x3 << USBPCR_SQRXTUNE_LSB) - -#define USBPCR_TXFSLSTUNE_LSB 7 -#define USBPCR_TXFSLSTUNE_MASK GENMASK(10, USBPCR_TXFSLSTUNE_LSB) -#define USBPCR_TXFSLSTUNE_DCR_50PPT (0xf << USBPCR_TXFSLSTUNE_LSB) -#define USBPCR_TXFSLSTUNE_DCR_25PPT (0x7 << USBPCR_TXFSLSTUNE_LSB) -#define USBPCR_TXFSLSTUNE_DFT (0x3 << USBPCR_TXFSLSTUNE_LSB) -#define USBPCR_TXFSLSTUNE_INC_25PPT (0x1 << USBPCR_TXFSLSTUNE_LSB) -#define USBPCR_TXFSLSTUNE_INC_50PPT (0x0 << USBPCR_TXFSLSTUNE_LSB) - -#define USBPCR_TXHSXVTUNE_LSB 4 -#define USBPCR_TXHSXVTUNE_MASK GENMASK(5, USBPCR_TXHSXVTUNE_LSB) -#define USBPCR_TXHSXVTUNE_DFT (0x3 << USBPCR_TXHSXVTUNE_LSB) -#define USBPCR_TXHSXVTUNE_DCR_15MV (0x1 << USBPCR_TXHSXVTUNE_LSB) - -#define USBPCR_TXRISETUNE_LSB 4 -#define USBPCR_TXRISETUNE_MASK GENMASK(5, USBPCR_TXRISETUNE_LSB) -#define USBPCR_TXRISETUNE_DFT (0x3 << USBPCR_TXRISETUNE_LSB) - -#define USBPCR_TXVREFTUNE_LSB 0 -#define USBPCR_TXVREFTUNE_MASK GENMASK(3, USBPCR_TXVREFTUNE_LSB) -#define USBPCR_TXVREFTUNE_INC_25PPT (0x7 << USBPCR_TXVREFTUNE_LSB) -#define USBPCR_TXVREFTUNE_DFT (0x5 << USBPCR_TXVREFTUNE_LSB) - -/* bits within the USBRDTR register */ -#define USBRDT_UTMI_RST BIT(27) -#define USBRDT_HB_MASK BIT(26) -#define USBRDT_VBFIL_LD_EN BIT(25) -#define USBRDT_IDDIG_EN BIT(24) -#define USBRDT_IDDIG_REG BIT(23) -#define USBRDT_VBFIL_EN BIT(2) - -/* bits within the USBPCR1 register */ -#define USBPCR1_BVLD_REG BIT(31) -#define USBPCR1_DPPD BIT(29) -#define USBPCR1_DMPD BIT(28) -#define USBPCR1_USB_SEL BIT(28) -#define USBPCR1_WORD_IF_16BIT BIT(19) - -enum ingenic_usb_phy_version { - ID_JZ4770, - ID_JZ4780, - ID_X1000, - ID_X1830, -}; - -struct ingenic_soc_info { - enum ingenic_usb_phy_version version; - - void (*usb_phy_init)(struct usb_phy *phy); -}; - -struct jz4770_phy { - const struct ingenic_soc_info *soc_info; - - struct usb_phy phy; - struct usb_otg otg; - struct device *dev; - void __iomem *base; - struct clk *clk; - struct regulator *vcc_supply; -}; - -static inline struct jz4770_phy *otg_to_jz4770_phy(struct usb_otg *otg) -{ - return container_of(otg, struct jz4770_phy, otg); -} - -static inline struct jz4770_phy *phy_to_jz4770_phy(struct usb_phy *phy) -{ - return container_of(phy, struct jz4770_phy, phy); -} - -static int ingenic_usb_phy_set_peripheral(struct usb_otg *otg, - struct usb_gadget *gadget) -{ - struct jz4770_phy *priv = otg_to_jz4770_phy(otg); - u32 reg; - - if (priv->soc_info->version >= ID_X1000) { - reg = readl(priv->base + REG_USBPCR1_OFFSET); - reg |= USBPCR1_BVLD_REG; - writel(reg, priv->base + REG_USBPCR1_OFFSET); - } - - reg = readl(priv->base + REG_USBPCR_OFFSET); - reg &= ~USBPCR_USB_MODE; - reg |= USBPCR_VBUSVLDEXT | USBPCR_VBUSVLDEXTSEL | USBPCR_OTG_DISABLE; - writel(reg, priv->base + REG_USBPCR_OFFSET); - - return 0; -} - -static int ingenic_usb_phy_set_host(struct usb_otg *otg, struct usb_bus *host) -{ - struct jz4770_phy *priv = otg_to_jz4770_phy(otg); - u32 reg; - - reg = readl(priv->base + REG_USBPCR_OFFSET); - reg &= ~(USBPCR_VBUSVLDEXT | USBPCR_VBUSVLDEXTSEL | USBPCR_OTG_DISABLE); - reg |= USBPCR_USB_MODE; - writel(reg, priv->base + REG_USBPCR_OFFSET); - - return 0; -} - -static int ingenic_usb_phy_init(struct usb_phy *phy) -{ - struct jz4770_phy *priv = phy_to_jz4770_phy(phy); - int err; - u32 reg; - - err = regulator_enable(priv->vcc_supply); - if (err) { - dev_err(priv->dev, "Unable to enable VCC: %d\n", err); - return err; - } - - err 
= clk_prepare_enable(priv->clk); - if (err) { - dev_err(priv->dev, "Unable to start clock: %d\n", err); - return err; - } - - priv->soc_info->usb_phy_init(phy); - - /* Wait for PHY to reset */ - usleep_range(30, 300); - reg = readl(priv->base + REG_USBPCR_OFFSET); - writel(reg & ~USBPCR_POR, priv->base + REG_USBPCR_OFFSET); - usleep_range(300, 1000); - - return 0; -} - -static void ingenic_usb_phy_shutdown(struct usb_phy *phy) -{ - struct jz4770_phy *priv = phy_to_jz4770_phy(phy); - - clk_disable_unprepare(priv->clk); - regulator_disable(priv->vcc_supply); -} - -static void ingenic_usb_phy_remove(void *phy) -{ - usb_remove_phy(phy); -} - -static void jz4770_usb_phy_init(struct usb_phy *phy) -{ - struct jz4770_phy *priv = phy_to_jz4770_phy(phy); - u32 reg; - - reg = USBPCR_AVLD_REG | USBPCR_COMMONONN | USBPCR_IDPULLUP_ALWAYS | - USBPCR_COMPDISTUNE_DFT | USBPCR_OTGTUNE_DFT | USBPCR_SQRXTUNE_DFT | - USBPCR_TXFSLSTUNE_DFT | USBPCR_TXRISETUNE_DFT | USBPCR_TXVREFTUNE_DFT | - USBPCR_POR; - writel(reg, priv->base + REG_USBPCR_OFFSET); -} - -static void jz4780_usb_phy_init(struct usb_phy *phy) -{ - struct jz4770_phy *priv = phy_to_jz4770_phy(phy); - u32 reg; - - reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_USB_SEL | - USBPCR1_WORD_IF_16BIT; - writel(reg, priv->base + REG_USBPCR1_OFFSET); - - reg = USBPCR_TXPREEMPHTUNE | USBPCR_COMMONONN | USBPCR_POR; - writel(reg, priv->base + REG_USBPCR_OFFSET); -} - -static void x1000_usb_phy_init(struct usb_phy *phy) -{ - struct jz4770_phy *priv = phy_to_jz4770_phy(phy); - u32 reg; - - reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_WORD_IF_16BIT; - writel(reg, priv->base + REG_USBPCR1_OFFSET); - - reg = USBPCR_SQRXTUNE_DCR_20PCT | USBPCR_TXPREEMPHTUNE | - USBPCR_TXHSXVTUNE_DCR_15MV | USBPCR_TXVREFTUNE_INC_25PPT | - USBPCR_COMMONONN | USBPCR_POR; - writel(reg, priv->base + REG_USBPCR_OFFSET); -} - -static void x1830_usb_phy_init(struct usb_phy *phy) -{ - struct jz4770_phy *priv = phy_to_jz4770_phy(phy); - u32 reg; - - /* rdt */ - writel(USBRDT_VBFIL_EN | USBRDT_UTMI_RST, priv->base + REG_USBRDT_OFFSET); - - reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_WORD_IF_16BIT | - USBPCR1_DMPD | USBPCR1_DPPD; - writel(reg, priv->base + REG_USBPCR1_OFFSET); - - reg = USBPCR_IDPULLUP_OTG | USBPCR_VBUSVLDEXT | USBPCR_TXPREEMPHTUNE | - USBPCR_COMMONONN | USBPCR_POR; - writel(reg, priv->base + REG_USBPCR_OFFSET); -} - -static const struct ingenic_soc_info jz4770_soc_info = { - .version = ID_JZ4770, - - .usb_phy_init = jz4770_usb_phy_init, -}; - -static const struct ingenic_soc_info jz4780_soc_info = { - .version = ID_JZ4780, - - .usb_phy_init = jz4780_usb_phy_init, -}; - -static const struct ingenic_soc_info x1000_soc_info = { - .version = ID_X1000, - - .usb_phy_init = x1000_usb_phy_init, -}; - -static const struct ingenic_soc_info x1830_soc_info = { - .version = ID_X1830, - - .usb_phy_init = x1830_usb_phy_init, -}; - -static const struct of_device_id ingenic_usb_phy_of_matches[] = { - { .compatible = "ingenic,jz4770-phy", .data = &jz4770_soc_info }, - { .compatible = "ingenic,jz4780-phy", .data = &jz4780_soc_info }, - { .compatible = "ingenic,x1000-phy", .data = &x1000_soc_info }, - { .compatible = "ingenic,x1830-phy", .data = &x1830_soc_info }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, ingenic_usb_phy_of_matches); - -static int jz4770_phy_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct jz4770_phy *priv; - int err; - - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - 
priv->soc_info = device_get_match_data(&pdev->dev); - if (!priv->soc_info) { - dev_err(&pdev->dev, "Error: No device match found\n"); - return -ENODEV; - } - - platform_set_drvdata(pdev, priv); - priv->dev = dev; - priv->phy.dev = dev; - priv->phy.otg = &priv->otg; - priv->phy.label = "ingenic-usb-phy"; - priv->phy.init = ingenic_usb_phy_init; - priv->phy.shutdown = ingenic_usb_phy_shutdown; - - priv->otg.state = OTG_STATE_UNDEFINED; - priv->otg.usb_phy = &priv->phy; - priv->otg.set_host = ingenic_usb_phy_set_host; - priv->otg.set_peripheral = ingenic_usb_phy_set_peripheral; - - priv->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(priv->base)) { - dev_err(dev, "Failed to map registers\n"); - return PTR_ERR(priv->base); - } - - priv->clk = devm_clk_get(dev, NULL); - if (IS_ERR(priv->clk)) - return dev_err_probe(dev, PTR_ERR(priv->clk), - "Failed to get clock\n"); - - priv->vcc_supply = devm_regulator_get(dev, "vcc"); - if (IS_ERR(priv->vcc_supply)) - return dev_err_probe(dev, PTR_ERR(priv->vcc_supply), - "Failed to get regulator\n"); - - err = usb_add_phy(&priv->phy, USB_PHY_TYPE_USB2); - if (err) - return dev_err_probe(dev, err, "Unable to register PHY\n"); - - return devm_add_action_or_reset(dev, ingenic_usb_phy_remove, &priv->phy); -} - -static struct platform_driver ingenic_phy_driver = { - .probe = jz4770_phy_probe, - .driver = { - .name = "jz4770-phy", - .of_match_table = ingenic_usb_phy_of_matches, - }, -}; -module_platform_driver(ingenic_phy_driver); - -MODULE_AUTHOR("周琰杰 (Zhou Yanjie) "); -MODULE_AUTHOR("漆鹏振 (Qi Pengzhen) "); -MODULE_AUTHOR("Paul Cercueil "); -MODULE_DESCRIPTION("Ingenic SoCs USB PHY driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c index dfaed7eee94fcadda9ecdf95012b1945ec366c22..eacb46ec2ab30379e96b68672fef92553facf76c 100644 --- a/drivers/usb/roles/class.c +++ b/drivers/usb/roles/class.c @@ -87,7 +87,7 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw) } EXPORT_SYMBOL_GPL(usb_role_switch_get_role); -static void *usb_role_switch_match(struct fwnode_handle *fwnode, const char *id, +static void *usb_role_switch_match(const struct fwnode_handle *fwnode, const char *id, void *data) { struct device *dev; @@ -106,10 +106,13 @@ usb_role_switch_is_parent(struct fwnode_handle *fwnode) struct fwnode_handle *parent = fwnode_get_parent(fwnode); struct device *dev; - if (!parent || !fwnode_property_present(parent, "usb-role-switch")) + if (!fwnode_property_present(parent, "usb-role-switch")) { + fwnode_handle_put(parent); return NULL; + } dev = class_find_device_by_fwnode(role_class, parent); + fwnode_handle_put(parent); return dev ? 
to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER); } diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 3bcec419f463200c5174da993f2050d94865f6b6..67372acc23529da1cb5c4e0b2f1e07ca15d7dee6 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -195,6 +195,8 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */ { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */ + { USB_DEVICE(0x17A8, 0x0011) }, /* Kamstrup 444 MHz RF sniffer */ + { USB_DEVICE(0x17A8, 0x0013) }, /* Kamstrup 870 MHz RF sniffer */ { USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */ { USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ @@ -1047,11 +1049,12 @@ static void cp210x_change_speed(struct tty_struct *tty, struct cp210x_serial_private *priv = usb_get_serial_data(serial); u32 baud; + if (tty->termios.c_ospeed == 0) + return; + /* * This maps the requested rate to the actual rate, a valid rate on * cp2102 or cp2103, or to an arbitrary rate in [1M, max_speed]. - * - * NOTE: B0 is not implemented. */ baud = clamp(tty->termios.c_ospeed, priv->min_speed, priv->max_speed); @@ -1144,7 +1147,8 @@ static void cp210x_set_flow_control(struct tty_struct *tty, tty->termios.c_iflag &= ~(IXON | IXOFF); } - if (old_termios && + if (tty->termios.c_ospeed != 0 && + old_termios && old_termios->c_ospeed != 0 && C_CRTSCTS(tty) == (old_termios->c_cflag & CRTSCTS) && I_IXON(tty) == (old_termios->c_iflag & IXON) && I_IXOFF(tty) == (old_termios->c_iflag & IXOFF) && @@ -1169,6 +1173,14 @@ static void cp210x_set_flow_control(struct tty_struct *tty, mutex_lock(&port_priv->mutex); + if (tty->termios.c_ospeed == 0) { + port_priv->dtr = false; + port_priv->rts = false; + } else if (old_termios && old_termios->c_ospeed == 0) { + port_priv->dtr = true; + port_priv->rts = true; + } + ret = cp210x_read_reg_block(port, CP210X_GET_FLOW, &flow_ctl, sizeof(flow_ctl)); if (ret) @@ -1241,7 +1253,8 @@ static void cp210x_set_termios(struct tty_struct *tty, u16 bits; int ret; - if (old_termios && !cp210x_termios_change(&tty->termios, old_termios)) + if (old_termios && !cp210x_termios_change(&tty->termios, old_termios) && + tty->termios.c_ospeed != 0) return; if (!old_termios || tty->termios.c_ospeed != old_termios->c_ospeed) diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c index 2dd58cd9f0ccbae0cc67a2bc7a1c7b881d66fa4f..891fb1fe69df739eb20374d41681efef968f9fdf 100644 --- a/drivers/usb/serial/f81232.c +++ b/drivers/usb/serial/f81232.c @@ -130,9 +130,6 @@ static u8 const clock_table[] = { F81232_CLK_1_846_MHZ, F81232_CLK_14_77_MHZ, static int calc_baud_divisor(speed_t baudrate, speed_t clockrate) { - if (!baudrate) - return 0; - return DIV_ROUND_CLOSEST(clockrate, baudrate); } @@ -498,9 +495,14 @@ static void f81232_set_baudrate(struct tty_struct *tty, speed_t baud_list[] = { baudrate, old_baudrate, F81232_DEF_BAUDRATE }; for (i = 0; i < ARRAY_SIZE(baud_list); ++i) { - idx = f81232_find_clk(baud_list[i]); + baudrate = baud_list[i]; + if (baudrate == 0) { + tty_encode_baud_rate(tty, 0, 0); + return; + } + + idx = f81232_find_clk(baudrate); if (idx >= 0) { - baudrate = baud_list[i]; tty_encode_baud_rate(tty, baudrate, baudrate); break; } diff --git 
a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c index ddfcd72eb0ae7ae9b1b934c8b1dcad646230dd87..4083ae961be430cbaa21823890eaabbed5d46ac9 100644 --- a/drivers/usb/serial/f81534.c +++ b/drivers/usb/serial/f81534.c @@ -536,9 +536,6 @@ static int f81534_submit_writer(struct usb_serial_port *port, gfp_t mem_flags) static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate) { - if (!baudrate) - return 0; - /* Round to nearest divisor */ return DIV_ROUND_CLOSEST(clockrate, baudrate); } @@ -568,9 +565,14 @@ static int f81534_set_port_config(struct usb_serial_port *port, u32 baud_list[] = {baudrate, old_baudrate, F81534_DEFAULT_BAUD_RATE}; for (i = 0; i < ARRAY_SIZE(baud_list); ++i) { - idx = f81534_find_clk(baud_list[i]); + baudrate = baud_list[i]; + if (baudrate == 0) { + tty_encode_baud_rate(tty, 0, 0); + return 0; + } + + idx = f81534_find_clk(baudrate); if (idx >= 0) { - baudrate = baud_list[i]; tty_encode_baud_rate(tty, baudrate, baudrate); break; } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index c3b7f1d98e781fa0c39ed48d393db2467d39f704..dee79c7d82d5ce475f67477e8ed68709065ff16a 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -255,6 +255,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EP06 0x0306 #define QUECTEL_PRODUCT_EM05G 0x030a #define QUECTEL_PRODUCT_EM060K 0x030b +#define QUECTEL_PRODUCT_EM05G_SG 0x0311 #define QUECTEL_PRODUCT_EM12 0x0512 #define QUECTEL_PRODUCT_RM500Q 0x0800 #define QUECTEL_PRODUCT_RM520N 0x0801 @@ -1160,6 +1161,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff), .driver_info = RSVD(6) | ZLP }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff), + .driver_info = RSVD(6) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) }, diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c index f3811e060a44164ff110c7cd2601ce10ac25d795..fdb0aae546c333762d22b71cdf38b51dcd12e99f 100644 --- a/drivers/usb/serial/xr_serial.c +++ b/drivers/usb/serial/xr_serial.c @@ -749,8 +749,6 @@ static void xr_cdc_set_line_coding(struct tty_struct *tty, if (tty->termios.c_ospeed) lc->dwDTERate = cpu_to_le32(tty->termios.c_ospeed); - else if (old_termios) - lc->dwDTERate = cpu_to_le32(old_termios->c_ospeed); else lc->dwDTERate = cpu_to_le32(9600); diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c index 747be69e5e6992ee624b747550184d16cb3a950f..5e912dd29b4c99dd85600cb3d4c8f187cc0146da 100644 --- a/drivers/usb/storage/alauda.c +++ b/drivers/usb/storage/alauda.c @@ -438,6 +438,8 @@ static int alauda_init_media(struct us_data *us) + MEDIA_INFO(us).blockshift + MEDIA_INFO(us).pageshift); MEDIA_INFO(us).pba_to_lba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO); MEDIA_INFO(us).lba_to_pba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO); + if (MEDIA_INFO(us).pba_to_lba == NULL || MEDIA_INFO(us).lba_to_pba == NULL) + return USB_STOR_TRANSPORT_ERROR; if (alauda_reset_media(us) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c index 
b8f3b75fd7eb910d09042fc63b1bba004f5eed0d..3d5edce270a44c533050f27fd1a82575e13d26e9 100644 --- a/drivers/usb/typec/anx7411.c +++ b/drivers/usb/typec/anx7411.c @@ -1440,8 +1440,7 @@ static int anx7411_psy_register(struct anx7411_data *ctx) return PTR_ERR_OR_ZERO(ctx->psy); } -static int anx7411_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int anx7411_i2c_probe(struct i2c_client *client) { struct anx7411_data *plat; struct device *dev = &client->dev; @@ -1585,7 +1584,7 @@ static struct i2c_driver anx7411_driver = { .of_match_table = anx_match_table, .pm = &anx7411_pm_ops, }, - .probe = anx7411_i2c_probe, + .probe_new = anx7411_i2c_probe, .remove = anx7411_i2c_remove, .id_table = anx7411_id, diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c index 26ea2fdec17dca6518cb10ce12901dd124c97802..31c2a3130cadbbf48432b3d03b957b1be8762da4 100644 --- a/drivers/usb/typec/bus.c +++ b/drivers/usb/typec/bus.c @@ -134,7 +134,7 @@ int typec_altmode_exit(struct typec_altmode *adev) if (!adev || !adev->active) return 0; - if (!pdev->ops || !pdev->ops->enter) + if (!pdev->ops || !pdev->ops->exit) return -EOPNOTSUPP; /* Moving to USB Safe State */ diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index bd5e5dd70431357001f5e22a3e037772859c1e1e..5897905cb4f08bf095ce4389338ac454df5bf911 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -821,6 +821,25 @@ void typec_partner_set_svdm_version(struct typec_partner *partner, } EXPORT_SYMBOL_GPL(typec_partner_set_svdm_version); +/** + * typec_partner_usb_power_delivery_register - Register Type-C partner USB Power Delivery Support + * @partner: Type-C partner device. + * @desc: Description of the USB PD contract. + * + * This routine is a wrapper around usb_power_delivery_register(). It registers + * USB Power Delivery Capabilities for a Type-C partner device. Specifically, + * it sets the Type-C partner device as a parent for the resulting USB Power Delivery object. + * + * Returns handle to struct usb_power_delivery or ERR_PTR. 
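As context for the wrapper documented above, a minimal sketch (not part of this patch) of how a port driver might call it once it has learned the partner's PD revision; the descriptor contents and surrounding driver state are assumed:

/* Hedged sketch: "partner" comes from typec_register_partner() elsewhere. */
static struct usb_power_delivery *
example_register_partner_pd(struct typec_partner *partner)
{
	struct usb_power_delivery_desc desc = {
		.revision = 0x0300,	/* assumed: USB PD 3.0 */
	};

	/* The PD object becomes a child of the partner device. */
	return typec_partner_usb_power_delivery_register(partner, &desc);
}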
+ */ +struct usb_power_delivery * +typec_partner_usb_power_delivery_register(struct typec_partner *partner, + struct usb_power_delivery_desc *desc) +{ + return usb_power_delivery_register(&partner->dev, desc); +} +EXPORT_SYMBOL_GPL(typec_partner_usb_power_delivery_register); + /** * typec_register_partner - Register a USB Type-C Partner * @port: The USB Type-C Port the partner is connected to diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c index 2a58185fb14c4f1edeef173afa76c25db038ebf0..f128664cb130ada1d4e9e5861e3480d5222da917 100644 --- a/drivers/usb/typec/hd3ss3220.c +++ b/drivers/usb/typec/hd3ss3220.c @@ -148,8 +148,7 @@ static const struct regmap_config config = { .max_register = 0x0A, }; -static int hd3ss3220_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int hd3ss3220_probe(struct i2c_client *client) { struct typec_capability typec_cap = { }; struct hd3ss3220 *hd3ss3220; @@ -264,7 +263,7 @@ static struct i2c_driver hd3ss3220_driver = { .name = "hd3ss3220", .of_match_table = of_match_ptr(dev_ids), }, - .probe = hd3ss3220_probe, + .probe_new = hd3ss3220_probe, .remove = hd3ss3220_remove, }; diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c index 941735c731619c9841d57d32599006c2180bbe31..c7177ddd4f127c9149e290005989731eff9910db 100644 --- a/drivers/usb/typec/mux.c +++ b/drivers/usb/typec/mux.c @@ -32,8 +32,8 @@ static int switch_fwnode_match(struct device *dev, const void *fwnode) return device_match_fwnode(dev, fwnode); } -static void *typec_switch_match(struct fwnode_handle *fwnode, const char *id, - void *data) +static void *typec_switch_match(const struct fwnode_handle *fwnode, + const char *id, void *data) { struct device *dev; @@ -262,8 +262,8 @@ static int mux_fwnode_match(struct device *dev, const void *fwnode) return device_match_fwnode(dev, fwnode); } -static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id, - void *data) +static void *typec_mux_match(const struct fwnode_handle *fwnode, + const char *id, void *data) { const struct typec_altmode_desc *desc = data; struct device *dev; diff --git a/drivers/usb/typec/retimer.c b/drivers/usb/typec/retimer.c index ee94dbbe4745306115bb4be5e0796e247b4b9084..0481e82f6bbc8ed9d89d50d6404d62c40d091b96 100644 --- a/drivers/usb/typec/retimer.c +++ b/drivers/usb/typec/retimer.c @@ -17,24 +17,12 @@ #include "class.h" #include "retimer.h" -static bool dev_name_ends_with(struct device *dev, const char *suffix) -{ - const char *name = dev_name(dev); - const int name_len = strlen(name); - const int suffix_len = strlen(suffix); - - if (suffix_len > name_len) - return false; - - return strcmp(name + (name_len - suffix_len), suffix) == 0; -} - static int retimer_fwnode_match(struct device *dev, const void *fwnode) { - return device_match_fwnode(dev, fwnode) && dev_name_ends_with(dev, "-retimer"); + return is_typec_retimer(dev) && device_match_fwnode(dev, fwnode); } -static void *typec_retimer_match(struct fwnode_handle *fwnode, const char *id, void *data) +static void *typec_retimer_match(const struct fwnode_handle *fwnode, const char *id, void *data) { struct device *dev; @@ -97,7 +85,7 @@ static void typec_retimer_release(struct device *dev) kfree(to_typec_retimer(dev)); } -static const struct device_type typec_retimer_dev_type = { +const struct device_type typec_retimer_dev_type = { .name = "typec_retimer", .release = typec_retimer_release, }; diff --git a/drivers/usb/typec/retimer.h b/drivers/usb/typec/retimer.h index 
fa15951d4846855a8b7a091bb2043fa6181f7581..e34bd23323be50f3d255fbffc51869fd2ed377a2 100644
--- a/drivers/usb/typec/retimer.h
+++ b/drivers/usb/typec/retimer.h
@@ -12,4 +12,8 @@ struct typec_retimer {
 
 #define to_typec_retimer(_dev_) container_of(_dev_, struct typec_retimer, dev)
 
+extern const struct device_type typec_retimer_dev_type;
+
+#define is_typec_retimer(dev) ((dev)->type == &typec_retimer_dev_type)
+
 #endif /* __USB_TYPEC_RETIMER__ */
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 721b2a548084c5a58ff6e0c0167020b5dfcaf7b8..1ffce00d94b46217c1d07d6b94cf4595e68d6590 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1677,8 +1677,7 @@ static struct fwnode_handle *fusb302_fwnode_get(struct device *dev)
 	return fwnode;
 }
 
-static int fusb302_probe(struct i2c_client *client,
-			 const struct i2c_device_id *id)
+static int fusb302_probe(struct i2c_client *client)
 {
 	struct fusb302_chip *chip;
 	struct i2c_adapter *adapter = client->adapter;
@@ -1837,7 +1836,7 @@ static struct i2c_driver fusb302_driver = {
 		.pm = &fusb302_pm_ops,
 		.of_match_table = of_match_ptr(fusb302_dt_match),
 	},
-	.probe = fusb302_probe,
+	.probe_new = fusb302_probe,
 	.remove = fusb302_remove,
 	.id_table = fusb302_i2c_device_id,
 };
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index b2bfcebe218f04f8009b026fea352cae6e6f32d8..fe781a38dc82637bb61599e53194454966b6ad05 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -794,8 +794,10 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
 		return ERR_PTR(err);
 
 	tcpci->port = tcpm_register_port(tcpci->dev, &tcpci->tcpc);
-	if (IS_ERR(tcpci->port))
+	if (IS_ERR(tcpci->port)) {
+		fwnode_handle_put(tcpci->tcpc.fwnode);
 		return ERR_CAST(tcpci->port);
+	}
 
 	return tcpci;
 }
@@ -804,11 +806,11 @@ EXPORT_SYMBOL_GPL(tcpci_register_port);
 void tcpci_unregister_port(struct tcpci *tcpci)
 {
 	tcpm_unregister_port(tcpci->port);
+	fwnode_handle_put(tcpci->tcpc.fwnode);
 }
 EXPORT_SYMBOL_GPL(tcpci_unregister_port);
 
-static int tcpci_probe(struct i2c_client *client,
-		       const struct i2c_device_id *i2c_id)
+static int tcpci_probe(struct i2c_client *client)
 {
 	struct tcpci_chip *chip;
 	int err;
@@ -878,7 +880,7 @@ static struct i2c_driver tcpci_i2c_driver = {
 		.name = "tcpci",
 		.of_match_table = of_match_ptr(tcpci_of_match),
 	},
-	.probe = tcpci_probe,
+	.probe_new = tcpci_probe,
 	.remove = tcpci_remove,
 	.id_table = tcpci_id,
 };
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.c b/drivers/usb/typec/tcpm/tcpci_maxim.c
index 03f89e6f1a78e53d72db9d6ee6d742cb24a69161..83e140ffcc3e329e8aa30775b67a0bdbf11520d6 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.c
@@ -438,7 +438,7 @@ static int tcpci_init(struct tcpci *tcpci, struct tcpci_data *data)
 	return -1;
 }
 
-static int max_tcpci_probe(struct i2c_client *client, const struct i2c_device_id *i2c_id)
+static int max_tcpci_probe(struct i2c_client *client)
 {
 	int ret;
 	struct max_tcpci_chip *chip;
@@ -519,7 +519,7 @@ static struct i2c_driver max_tcpci_i2c_driver = {
 		.name = "maxtcpc",
 		.of_match_table = of_match_ptr(max_tcpci_of_match),
 	},
-	.probe = max_tcpci_probe,
+	.probe_new = max_tcpci_probe,
 	.remove = max_tcpci_remove,
 	.id_table = max_tcpci_id,
 };
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 7b217c712c11a668ec902907cd7717babfda555d..a0e9e3fe8564ce726516e1bd0441ec00d871672f 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ 
b/drivers/usb/typec/tcpm/tcpci_rt1711h.c @@ -327,8 +327,7 @@ static int rt1711h_check_revision(struct i2c_client *i2c, struct rt1711h_chip *c return ret; } -static int rt1711h_probe(struct i2c_client *client, - const struct i2c_device_id *i2c_id) +static int rt1711h_probe(struct i2c_client *client) { int ret; struct rt1711h_chip *chip; @@ -413,7 +412,7 @@ static struct i2c_driver rt1711h_i2c_driver = { .name = "rt1711h", .of_match_table = of_match_ptr(rt1711h_of_match), }, - .probe = rt1711h_probe, + .probe_new = rt1711h_probe, .remove = rt1711h_remove, .id_table = rt1711h_id, }; diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index 2a77bab948f54973ca846a3171badb01d256d87c..46a4d8b128f0ed8fb749771f349d49b1154efe97 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include "tps6598x.h" @@ -257,6 +258,7 @@ static int tps6598x_connect(struct tps6598x *tps, u32 status) typec_set_orientation(tps->port, TYPEC_ORIENTATION_REVERSE); else typec_set_orientation(tps->port, TYPEC_ORIENTATION_NORMAL); + typec_set_mode(tps->port, TYPEC_STATE_USB); tps6598x_set_data_role(tps, TPS_STATUS_TO_TYPEC_DATAROLE(status), true); tps->partner = typec_register_partner(tps->port, &desc); @@ -280,6 +282,7 @@ static void tps6598x_disconnect(struct tps6598x *tps, u32 status) typec_set_pwr_role(tps->port, TPS_STATUS_TO_TYPEC_PORTROLE(status)); typec_set_vconn_role(tps->port, TPS_STATUS_TO_TYPEC_VCONN(status)); typec_set_orientation(tps->port, TYPEC_ORIENTATION_NONE); + typec_set_mode(tps->port, TYPEC_STATE_SAFE); tps6598x_set_data_role(tps, TPS_STATUS_TO_TYPEC_DATAROLE(status), false); power_supply_changed(tps->psy); @@ -814,20 +817,19 @@ static int tps6598x_probe(struct i2c_client *client) ret = devm_tps6598_psy_register(tps); if (ret) - return ret; + goto err_role_put; tps->port = typec_register_port(&client->dev, &typec_cap); if (IS_ERR(tps->port)) { ret = PTR_ERR(tps->port); goto err_role_put; } - fwnode_handle_put(fwnode); if (status & TPS_STATUS_PLUG_PRESENT) { ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &tps->pwr_status); if (ret < 0) { dev_err(tps->dev, "failed to read power status: %d\n", ret); - goto err_role_put; + goto err_unregister_port; } ret = tps6598x_connect(tps, status); if (ret) @@ -838,16 +840,18 @@ static int tps6598x_probe(struct i2c_client *client) irq_handler, IRQF_SHARED | IRQF_ONESHOT, dev_name(&client->dev), tps); - if (ret) { - tps6598x_disconnect(tps, 0); - typec_unregister_port(tps->port); - goto err_role_put; - } + if (ret) + goto err_disconnect; i2c_set_clientdata(client, tps); + fwnode_handle_put(fwnode); return 0; +err_disconnect: + tps6598x_disconnect(tps, 0); +err_unregister_port: + typec_unregister_port(tps->port); err_role_put: usb_role_switch_put(tps->role_sw); err_fwnode_put: diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index a7987fc764cc66949b108ef2007917bafa2b0f06..eabe519013e7850837c56d60f718d55a27f07fc3 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -1270,8 +1270,9 @@ err: return ret; } -int ucsi_resume(struct ucsi *ucsi) +static void ucsi_resume_work(struct work_struct *work) { + struct ucsi *ucsi = container_of(work, struct ucsi, resume_work); struct ucsi_connector *con; u64 command; int ret; @@ -1279,15 +1280,21 @@ int ucsi_resume(struct ucsi *ucsi) /* Restore UCSI notification enable mask after system resume */ command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy; ret = 
ucsi_send_command(ucsi, command, NULL, 0); - if (ret < 0) - return ret; + if (ret < 0) { + dev_err(ucsi->dev, "failed to re-enable notifications (%d)\n", ret); + return; + } for (con = ucsi->connector; con->port; con++) { mutex_lock(&con->lock); - ucsi_check_connection(con); + ucsi_partner_task(con, ucsi_check_connection, 1, 0); mutex_unlock(&con->lock); } +} +int ucsi_resume(struct ucsi *ucsi) +{ + queue_work(system_long_wq, &ucsi->resume_work); return 0; } EXPORT_SYMBOL_GPL(ucsi_resume); @@ -1347,6 +1354,7 @@ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops) if (!ucsi) return ERR_PTR(-ENOMEM); + INIT_WORK(&ucsi->resume_work, ucsi_resume_work); INIT_DELAYED_WORK(&ucsi->work, ucsi_init_work); mutex_init(&ucsi->ppm_lock); ucsi->dev = dev; @@ -1401,6 +1409,7 @@ void ucsi_unregister(struct ucsi *ucsi) /* Make sure that we are not in the middle of driver initialization */ cancel_delayed_work_sync(&ucsi->work); + cancel_work_sync(&ucsi->resume_work); /* Disable notifications */ ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd)); diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index 8eb391e3e592ca2bf7ecf6b3b390ba63df170061..c968474ee547396fa64d4469b5805e37ed18efb4 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -287,6 +287,7 @@ struct ucsi { struct ucsi_capability cap; struct ucsi_connector *connector; + struct work_struct resume_work; struct delayed_work work; int work_count; #define UCSI_ROLE_SWITCH_RETRY_PER_HZ 10 diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index 835f1c4372ba24c4278ea3e605dfb309fe2859ff..46441f1477f28187ff1266b0b8f916e8e82861d2 100644 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -1338,8 +1338,7 @@ static struct attribute *ucsi_ccg_attrs[] = { }; ATTRIBUTE_GROUPS(ucsi_ccg); -static int ucsi_ccg_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ucsi_ccg_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct ucsi_ccg *uc; @@ -1482,7 +1481,7 @@ static struct i2c_driver ucsi_ccg_driver = { .dev_groups = ucsi_ccg_groups, .acpi_match_table = amd_i2c_ucsi_match, }, - .probe = ucsi_ccg_probe, + .probe_new = ucsi_ccg_probe, .remove = ucsi_ccg_remove, .id_table = ucsi_ccg_device_id, }; diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c index 7b92f0c8de7088aa5f9ab81d6abf5a4c3fd78e2c..93fead0096b7bc10caf7f21cca95bd505579ebc6 100644 --- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c +++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c @@ -626,7 +626,7 @@ static int ucsi_stm32g0_probe_bootloader(struct ucsi *ucsi) return 0; } -static int ucsi_stm32g0_probe(struct i2c_client *client, const struct i2c_device_id *id) +static int ucsi_stm32g0_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct ucsi_stm32g0 *g0; @@ -763,7 +763,7 @@ static struct i2c_driver ucsi_stm32g0_i2c_driver = { .of_match_table = of_match_ptr(ucsi_stm32g0_typec_of_match), .pm = pm_sleep_ptr(&ucsi_stm32g0_pm_ops), }, - .probe = ucsi_stm32g0_probe, + .probe_new = ucsi_stm32g0_probe, .remove = ucsi_stm32g0_remove, .id_table = ucsi_stm32g0_typec_i2c_devid }; diff --git a/drivers/usb/typec/wusb3801.c b/drivers/usb/typec/wusb3801.c index 3cc7a15ecbd31d56c755acc774c455132672e3ca..a43a18d4b02edc232c9bc58ff2fca833437985c0 100644 --- a/drivers/usb/typec/wusb3801.c +++ b/drivers/usb/typec/wusb3801.c @@ -364,7 +364,7 @@ static int wusb3801_probe(struct 
i2c_client *client) /* Initialize the hardware with the devicetree settings. */ ret = wusb3801_hw_init(wusb3801); if (ret) - return ret; + goto err_put_connector; wusb3801->cap.revision = USB_TYPEC_REV_1_2; wusb3801->cap.accessory[0] = TYPEC_ACCESSORY_AUDIO; diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c index d87deee3e26e17327b18225f2189e1a35571288d..900a64ad25e4fd09330550bad9cb1b7120cfc615 100644 --- a/drivers/usb/usb-skeleton.c +++ b/drivers/usb/usb-skeleton.c @@ -564,7 +564,6 @@ static void skel_disconnect(struct usb_interface *interface) int minor = interface->minor; dev = usb_get_intfdata(interface); - usb_set_intfdata(interface, NULL); /* give back our minor */ usb_deregister_dev(interface, &skel_class); diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 3c6d452e3bf40e58ddd0fd952c0a15c4435dd108..9c6954aad6c882c804f0750fd9f939623362521a 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -30,7 +30,7 @@ static ssize_t usbip_status_show(struct device *dev, status = sdev->ud.status; spin_unlock_irq(&sdev->ud.lock); - return snprintf(buf, PAGE_SIZE, "%d\n", status); + return sysfs_emit(buf, "%d\n", status); } static DEVICE_ATTR_RO(usbip_status); @@ -118,6 +118,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a } else { dev_info(dev, "stub down\n"); + mutex_lock(&sdev->ud.sysfs_lock); + spin_lock_irq(&sdev->ud.lock); if (sdev->ud.status != SDEV_ST_USED) goto err; diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index f8b326eed54dcab356fa18025f8f34b4a11174a6..a2b2da1255dda0c1b282171ead393dc9ac0a9c68 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -315,6 +315,7 @@ int usbip_recv(struct socket *sock, void *buf, int size) do { sock->sk->sk_allocation = GFP_NOIO; + sock->sk->sk_use_task_frag = false; result = sock_recvmsg(sock, &msg, MSG_WAITALL); if (result <= 0) diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c index d4a2f30a7580937389d844810b35826eee74d374..51bb70837b9020bdb446c422e90f8d488b7c9456 100644 --- a/drivers/usb/usbip/vudc_rx.c +++ b/drivers/usb/usbip/vudc_rx.c @@ -149,7 +149,9 @@ static int v_recv_cmd_submit(struct vudc *udc, urb_p->urb->status = -EINPROGRESS; /* FIXME: more pipe setup to please usbip_common */ - urb_p->urb->pipe &= ~(3 << 30); + BUILD_BUG_ON_MSG(PIPE_BULK != 3, "PIPE_* doesn't range from 0 to 3"); + + urb_p->urb->pipe &= ~(PIPE_BULK << 30); switch (urb_p->ep->type) { case USB_ENDPOINT_XFER_BULK: urb_p->urb->pipe |= (PIPE_BULK << 30); diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c index c95e6b2bfd32a5eca37390b213f70aeed7460136..907a43a008964d647d19f9631faf4a443bc00019 100644 --- a/drivers/usb/usbip/vudc_sysfs.c +++ b/drivers/usb/usbip/vudc_sysfs.c @@ -242,7 +242,7 @@ static ssize_t usbip_status_show(struct device *dev, status = udc->ud.status; spin_unlock_irq(&udc->ud.lock); - return snprintf(out, PAGE_SIZE, "%d\n", status); + return sysfs_emit(out, "%d\n", status); } static DEVICE_ATTR_RO(usbip_status); diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index 35dceee3ed56086af3e105a9451dc91b8af5d743..0dd3c1f291da30cb8be62ac7742a841195ad22dd 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -1656,7 +1656,7 @@ static const struct file_operations vduse_ctrl_fops = { .llseek = noop_llseek, }; -static char *vduse_devnode(struct device *dev, umode_t *mode) 
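The two usbip hunks above follow the standard conversion from snprintf(buf, PAGE_SIZE, ...) to sysfs_emit(). A generic sketch of the resulting show() shape, illustrative only (names and locking are assumed):

static ssize_t example_status_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	int status = 0;	/* assumed: driver state read under its own lock */

	/* sysfs_emit() knows buf is a full PAGE_SIZE sysfs buffer. */
	return sysfs_emit(buf, "%d\n", status);
}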
+static char *vduse_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev)); } diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index 86c381ceb9a1e95e95643dcce8a20fc74c5b1559..a8f54462946742661820ed9dd42560a54aa0ceb4 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -2,8 +2,9 @@ menuconfig VFIO tristate "VFIO Non-Privileged userspace driver framework" select IOMMU_API - select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64) + depends on IOMMUFD || !IOMMUFD select INTERVAL_TREE + select VFIO_CONTAINER if IOMMUFD=n help VFIO provides a framework for secure userspace device drivers. See Documentation/driver-api/vfio.rst for more details. @@ -11,6 +12,18 @@ menuconfig VFIO If you don't know what to do here, say N. if VFIO +config VFIO_CONTAINER + bool "Support for the VFIO container /dev/vfio/vfio" + select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64) + default y + help + The VFIO container is the classic interface to VFIO for establishing + IOMMU mappings. If N is selected here then IOMMUFD must be used to + manage the mappings. + + Unless testing IOMMUFD say Y here. + +if VFIO_CONTAINER config VFIO_IOMMU_TYPE1 tristate default n @@ -20,16 +33,6 @@ config VFIO_IOMMU_SPAPR_TCE depends on SPAPR_TCE_IOMMU default VFIO -config VFIO_SPAPR_EEH - tristate - depends on EEH && VFIO_IOMMU_SPAPR_TCE - default VFIO - -config VFIO_VIRQFD - tristate - select EVENTFD - default n - config VFIO_NOIOMMU bool "VFIO No-IOMMU support" help @@ -43,6 +46,12 @@ config VFIO_NOIOMMU this mode since there is no IOMMU to provide DMA translation. If you don't know what to do here, say N. +endif + +config VFIO_VIRQFD + bool + select EVENTFD + default n source "drivers/vfio/pci/Kconfig" source "drivers/vfio/platform/Kconfig" diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile index b693a1169286f8771f7d019e600268e45d208ba5..70e7dcb302efd20ddc2d4cb31fbffc1ea5e6b49f 100644 --- a/drivers/vfio/Makefile +++ b/drivers/vfio/Makefile @@ -1,16 +1,15 @@ # SPDX-License-Identifier: GPL-2.0 -vfio_virqfd-y := virqfd.o - obj-$(CONFIG_VFIO) += vfio.o vfio-y += vfio_main.o \ - iova_bitmap.o \ - container.o + group.o \ + iova_bitmap.o +vfio-$(CONFIG_IOMMUFD) += iommufd.o +vfio-$(CONFIG_VFIO_CONTAINER) += container.o +vfio-$(CONFIG_VFIO_VIRQFD) += virqfd.o -obj-$(CONFIG_VFIO_VIRQFD) += vfio_virqfd.o obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o -obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spapr_eeh.o obj-$(CONFIG_VFIO_PCI) += pci/ obj-$(CONFIG_VFIO_PLATFORM) += platform/ obj-$(CONFIG_VFIO_MDEV) += mdev/ diff --git a/drivers/vfio/container.c b/drivers/vfio/container.c index d74164abbf401d42cbcf42b6f5f7282dd7769de7..b7a9560ab25e486ae88c4dfe9feab750972a22fb 100644 --- a/drivers/vfio/container.c +++ b/drivers/vfio/container.c @@ -188,8 +188,9 @@ void vfio_device_container_unregister(struct vfio_device *device) device->group->container->iommu_data, device); } -long vfio_container_ioctl_check_extension(struct vfio_container *container, - unsigned long arg) +static long +vfio_container_ioctl_check_extension(struct vfio_container *container, + unsigned long arg) { struct vfio_iommu_driver *driver; long ret = 0; @@ -511,14 +512,15 @@ void vfio_group_detach_container(struct vfio_group *group) vfio_container_put(container); } -int vfio_device_assign_container(struct vfio_device *device) +int vfio_group_use_container(struct vfio_group *group) { - struct vfio_group *group = device->group; - 
lockdep_assert_held(&group->group_lock); - if (!group->container || !group->container->iommu_driver || - WARN_ON(!group->container_users)) + /* + * The container fd has been assigned with VFIO_GROUP_SET_CONTAINER but + * VFIO_SET_IOMMU hasn't been done yet. + */ + if (!group->container->iommu_driver) return -EINVAL; if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) @@ -529,122 +531,56 @@ int vfio_device_assign_container(struct vfio_device *device) return 0; } -void vfio_device_unassign_container(struct vfio_device *device) +void vfio_group_unuse_container(struct vfio_group *group) { - mutex_lock(&device->group->group_lock); - WARN_ON(device->group->container_users <= 1); - device->group->container_users--; - fput(device->group->opened_file); - mutex_unlock(&device->group->group_lock); + lockdep_assert_held(&group->group_lock); + + WARN_ON(group->container_users <= 1); + group->container_users--; + fput(group->opened_file); } -/* - * Pin contiguous user pages and return their associated host pages for local - * domain only. - * @device [in] : device - * @iova [in] : starting IOVA of user pages to be pinned. - * @npage [in] : count of pages to be pinned. This count should not - * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. - * @prot [in] : protection flags - * @pages[out] : array of host pages - * Return error or number of pages pinned. - * - * A driver may only call this function if the vfio_device was created - * by vfio_register_emulated_iommu_dev(). - */ -int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova, - int npage, int prot, struct page **pages) +int vfio_device_container_pin_pages(struct vfio_device *device, + dma_addr_t iova, int npage, + int prot, struct page **pages) { - struct vfio_container *container; - struct vfio_group *group = device->group; - struct vfio_iommu_driver *driver; - int ret; - - if (!pages || !npage || !vfio_assert_device_open(device)) - return -EINVAL; + struct vfio_container *container = device->group->container; + struct iommu_group *iommu_group = device->group->iommu_group; + struct vfio_iommu_driver *driver = container->iommu_driver; if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) return -E2BIG; - /* group->container cannot change while a vfio device is open */ - container = group->container; - driver = container->iommu_driver; - if (likely(driver && driver->ops->pin_pages)) - ret = driver->ops->pin_pages(container->iommu_data, - group->iommu_group, iova, - npage, prot, pages); - else - ret = -ENOTTY; - - return ret; + if (unlikely(!driver || !driver->ops->pin_pages)) + return -ENOTTY; + return driver->ops->pin_pages(container->iommu_data, iommu_group, iova, + npage, prot, pages); } -EXPORT_SYMBOL(vfio_pin_pages); -/* - * Unpin contiguous host pages for local domain only. - * @device [in] : device - * @iova [in] : starting address of user pages to be unpinned. - * @npage [in] : count of pages to be unpinned. This count should not - * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. 
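The kernel-doc being removed here travels with the driver-facing vfio_pin_pages()/vfio_unpin_pages() wrappers, which this refactor moves out of container.c; their calling convention is unchanged. A minimal sketch of a caller under the documented constraints (emulated-IOMMU device, npage <= VFIO_PIN_PAGES_MAX_ENTRIES); the function and its use are assumed, not from this patch:

static int example_pin_one(struct vfio_device *vdev, dma_addr_t iova)
{
	struct page *page;
	int ret;

	/* prot flags are the IOMMU_* values from linux/iommu.h */
	ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... access the backing page ... */

	vfio_unpin_pages(vdev, iova, 1);
	return 0;
}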
- */ -void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage) +void vfio_device_container_unpin_pages(struct vfio_device *device, + dma_addr_t iova, int npage) { - struct vfio_container *container; - struct vfio_iommu_driver *driver; + struct vfio_container *container = device->group->container; if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES)) return; - if (WARN_ON(!vfio_assert_device_open(device))) - return; - - /* group->container cannot change while a vfio device is open */ - container = device->group->container; - driver = container->iommu_driver; - - driver->ops->unpin_pages(container->iommu_data, iova, npage); + container->iommu_driver->ops->unpin_pages(container->iommu_data, iova, + npage); } -EXPORT_SYMBOL(vfio_unpin_pages); -/* - * This interface allows the CPUs to perform some sort of virtual DMA on - * behalf of the device. - * - * CPUs read/write from/into a range of IOVAs pointing to user space memory - * into/from a kernel buffer. - * - * As the read/write of user space memory is conducted via the CPUs and is - * not a real device DMA, it is not necessary to pin the user space memory. - * - * @device [in] : VFIO device - * @iova [in] : base IOVA of a user space buffer - * @data [in] : pointer to kernel buffer - * @len [in] : kernel buffer length - * @write : indicate read or write - * Return error code on failure or 0 on success. - */ -int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data, - size_t len, bool write) +int vfio_device_container_dma_rw(struct vfio_device *device, + dma_addr_t iova, void *data, + size_t len, bool write) { - struct vfio_container *container; - struct vfio_iommu_driver *driver; - int ret = 0; - - if (!data || len <= 0 || !vfio_assert_device_open(device)) - return -EINVAL; - - /* group->container cannot change while a vfio device is open */ - container = device->group->container; - driver = container->iommu_driver; + struct vfio_container *container = device->group->container; + struct vfio_iommu_driver *driver = container->iommu_driver; - if (likely(driver && driver->ops->dma_rw)) - ret = driver->ops->dma_rw(container->iommu_data, - iova, data, len, write); - else - ret = -ENOTTY; - return ret; + if (unlikely(!driver || !driver->ops->dma_rw)) + return -ENOTTY; + return driver->ops->dma_rw(container->iommu_data, iova, data, len, + write); } -EXPORT_SYMBOL(vfio_dma_rw); int __init vfio_container_init(void) { @@ -678,3 +614,6 @@ void vfio_container_cleanup(void) misc_deregister(&vfio_dev); mutex_destroy(&vfio.iommu_drivers_lock); } + +MODULE_ALIAS_MISCDEV(VFIO_MINOR); +MODULE_ALIAS("devname:vfio/vfio"); diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c index b16874e913e4f527457f5864be5b942d60729a16..defeb8510ace52dd64921193cef5ef6aeca49866 100644 --- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c @@ -568,7 +568,6 @@ static void vfio_fsl_mc_release_dev(struct vfio_device *core_vdev) vfio_fsl_uninit_device(vdev); mutex_destroy(&vdev->igate); - vfio_free_device(core_vdev); } static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev) @@ -592,6 +591,9 @@ static const struct vfio_device_ops vfio_fsl_mc_ops = { .read = vfio_fsl_mc_read, .write = vfio_fsl_mc_write, .mmap = vfio_fsl_mc_mmap, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, }; static struct fsl_mc_driver vfio_fsl_mc_driver = { diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c 
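The fsl-mc hunk above shows the pattern this series applies to every physical-device vfio driver: the three iommufd callbacks are added verbatim to the driver's vfio_device_ops. Sketched for a hypothetical driver; only the three iommufd lines are from the patch, the rest is assumed scaffolding:

static const struct vfio_device_ops example_physical_vfio_ops = {
	.name		= "vfio-example",		/* assumed */
	.open_device	= example_open_device,		/* assumed */
	.close_device	= example_close_device,	/* assumed */
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
};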
new file mode 100644
index 0000000000000000000000000000000000000000..bb24b2f0271e03c3b1a027b0ef8e717951f157ab
--- /dev/null
+++ b/drivers/vfio/group.c
@@ -0,0 +1,877 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VFIO core
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ *	Author: Alex Williamson
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+#include
+#include
+#include
+#include "vfio.h"
+
+static struct vfio {
+	struct class		*class;
+	struct list_head	group_list;
+	struct mutex		group_lock; /* locks group_list */
+	struct ida		group_ida;
+	dev_t			group_devt;
+} vfio;
+
+static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
+						     char *buf)
+{
+	struct vfio_device *it, *device = ERR_PTR(-ENODEV);
+
+	mutex_lock(&group->device_lock);
+	list_for_each_entry(it, &group->device_list, group_next) {
+		int ret;
+
+		if (it->ops->match) {
+			ret = it->ops->match(it, buf);
+			if (ret < 0) {
+				device = ERR_PTR(ret);
+				break;
+			}
+		} else {
+			ret = !strcmp(dev_name(it->dev), buf);
+		}
+
+		if (ret && vfio_device_try_get_registration(it)) {
+			device = it;
+			break;
+		}
+	}
+	mutex_unlock(&group->device_lock);
+
+	return device;
+}
+
+/*
+ * VFIO Group fd, /dev/vfio/$GROUP
+ */
+static bool vfio_group_has_iommu(struct vfio_group *group)
+{
+	lockdep_assert_held(&group->group_lock);
+	/*
+	 * There can only be users if there is a container, and if there is a
+	 * container there must be users.
+	 */
+	WARN_ON(!group->container != !group->container_users);
+
+	return group->container || group->iommufd;
+}
+
+/*
+ * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
+ * if there was no container to unset. Since the ioctl is called on
+ * the group, we know that still exists, therefore the only valid
+ * transition here is 1->0.
+ */
+static int vfio_group_ioctl_unset_container(struct vfio_group *group)
+{
+	int ret = 0;
+
+	mutex_lock(&group->group_lock);
+	if (!vfio_group_has_iommu(group)) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+	if (group->container) {
+		if (group->container_users != 1) {
+			ret = -EBUSY;
+			goto out_unlock;
+		}
+		vfio_group_detach_container(group);
+	}
+	if (group->iommufd) {
+		iommufd_ctx_put(group->iommufd);
+		group->iommufd = NULL;
+	}
+
+out_unlock:
+	mutex_unlock(&group->group_lock);
+	return ret;
+}
+
+static int vfio_group_ioctl_set_container(struct vfio_group *group,
+					  int __user *arg)
+{
+	struct vfio_container *container;
+	struct iommufd_ctx *iommufd;
+	struct fd f;
+	int ret;
+	int fd;
+
+	if (get_user(fd, arg))
+		return -EFAULT;
+
+	f = fdget(fd);
+	if (!f.file)
+		return -EBADF;
+
+	mutex_lock(&group->group_lock);
+	if (vfio_group_has_iommu(group)) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+	if (!group->iommu_group) {
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
+	container = vfio_container_from_file(f.file);
+	if (container) {
+		ret = vfio_container_attach_group(container, group);
+		goto out_unlock;
+	}
+
+	iommufd = iommufd_ctx_from_file(f.file);
+	if (!IS_ERR(iommufd)) {
+		u32 ioas_id;
+
+		ret = iommufd_vfio_compat_ioas_id(iommufd, &ioas_id);
+		if (ret) {
+			iommufd_ctx_put(iommufd);
+			goto out_unlock;
+		}
+
+		group->iommufd = iommufd;
+		goto out_unlock;
+	}
+
+	/* The FD passed is not recognized. */
+	ret = -EBADFD;
+
+out_unlock:
+	mutex_unlock(&group->group_lock);
+	fdput(f);
+	return ret;
+}
+
+static int vfio_device_group_open(struct vfio_device *device)
+{
+	int ret;
+
+	mutex_lock(&device->group->group_lock);
+	if (!vfio_group_has_iommu(device->group)) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	/*
+	 * Here we pass the KVM pointer with the group under the lock. If the
+	 * device driver will use it, it must obtain a reference and release it
+	 * during close_device.
+	 */
+	ret = vfio_device_open(device, device->group->iommufd,
+			       device->group->kvm);
+
+out_unlock:
+	mutex_unlock(&device->group->group_lock);
+	return ret;
+}
+
+void vfio_device_group_close(struct vfio_device *device)
+{
+	mutex_lock(&device->group->group_lock);
+	vfio_device_close(device, device->group->iommufd);
+	mutex_unlock(&device->group->group_lock);
+}
+
+static struct file *vfio_device_open_file(struct vfio_device *device)
+{
+	struct file *filep;
+	int ret;
+
+	ret = vfio_device_group_open(device);
+	if (ret)
+		goto err_out;
+
+	/*
+	 * We can't use anon_inode_getfd() because we need to modify
+	 * the f_mode flags directly to allow more than just ioctls
+	 */
+	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
+				   device, O_RDWR);
+	if (IS_ERR(filep)) {
+		ret = PTR_ERR(filep);
+		goto err_close_device;
+	}
+
+	/*
+	 * TODO: add an anon_inode interface to do this.
+	 * Appears to be missing by lack of need rather than
+	 * explicitly prevented. Now there's need.
+	 */
+	filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);
+
+	if (device->group->type == VFIO_NO_IOMMU)
+		dev_warn(device->dev, "vfio-noiommu device opened by user "
+			 "(%s:%d)\n", current->comm, task_pid_nr(current));
+	/*
+	 * On success the ref of device is moved to the file and
+	 * put in vfio_device_fops_release()
+	 */
+	return filep;
+
+err_close_device:
+	vfio_device_group_close(device);
+err_out:
+	return ERR_PTR(ret);
+}
+
+static int vfio_group_ioctl_get_device_fd(struct vfio_group *group,
+					  char __user *arg)
+{
+	struct vfio_device *device;
+	struct file *filep;
+	char *buf;
+	int fdno;
+	int ret;
+
+	buf = strndup_user(arg, PAGE_SIZE);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
+	device = vfio_device_get_from_name(group, buf);
+	kfree(buf);
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	fdno = get_unused_fd_flags(O_CLOEXEC);
+	if (fdno < 0) {
+		ret = fdno;
+		goto err_put_device;
+	}
+
+	filep = vfio_device_open_file(device);
+	if (IS_ERR(filep)) {
+		ret = PTR_ERR(filep);
+		goto err_put_fdno;
+	}
+
+	fd_install(fdno, filep);
+	return fdno;
+
+err_put_fdno:
+	put_unused_fd(fdno);
+err_put_device:
+	vfio_device_put_registration(device);
+	return ret;
+}
+
+static int vfio_group_ioctl_get_status(struct vfio_group *group,
+				       struct vfio_group_status __user *arg)
+{
+	unsigned long minsz = offsetofend(struct vfio_group_status, flags);
+	struct vfio_group_status status;
+
+	if (copy_from_user(&status, arg, minsz))
+		return -EFAULT;
+
+	if (status.argsz < minsz)
+		return -EINVAL;
+
+	status.flags = 0;
+
+	mutex_lock(&group->group_lock);
+	if (!group->iommu_group) {
+		mutex_unlock(&group->group_lock);
+		return -ENODEV;
+	}
+
+	/*
+	 * With the container FD the iommu_group_claim_dma_owner() is done
+	 * during SET_CONTAINER but for IOMMUFD this is done during
+	 * VFIO_GROUP_GET_DEVICE_FD. Meaning that with iommufd
+	 * VFIO_GROUP_FLAGS_VIABLE could be set but GET_DEVICE_FD will fail due
+	 * to viability.
+ */ + if (vfio_group_has_iommu(group)) + status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET | + VFIO_GROUP_FLAGS_VIABLE; + else if (!iommu_group_dma_owner_claimed(group->iommu_group)) + status.flags |= VFIO_GROUP_FLAGS_VIABLE; + mutex_unlock(&group->group_lock); + + if (copy_to_user(arg, &status, minsz)) + return -EFAULT; + return 0; +} + +static long vfio_group_fops_unl_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + struct vfio_group *group = filep->private_data; + void __user *uarg = (void __user *)arg; + + switch (cmd) { + case VFIO_GROUP_GET_DEVICE_FD: + return vfio_group_ioctl_get_device_fd(group, uarg); + case VFIO_GROUP_GET_STATUS: + return vfio_group_ioctl_get_status(group, uarg); + case VFIO_GROUP_SET_CONTAINER: + return vfio_group_ioctl_set_container(group, uarg); + case VFIO_GROUP_UNSET_CONTAINER: + return vfio_group_ioctl_unset_container(group); + default: + return -ENOTTY; + } +} + +static int vfio_group_fops_open(struct inode *inode, struct file *filep) +{ + struct vfio_group *group = + container_of(inode->i_cdev, struct vfio_group, cdev); + int ret; + + mutex_lock(&group->group_lock); + + /* + * drivers can be zero if this races with vfio_device_remove_group(), it + * will be stable at 0 under the group rwsem + */ + if (refcount_read(&group->drivers) == 0) { + ret = -ENODEV; + goto out_unlock; + } + + if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) { + ret = -EPERM; + goto out_unlock; + } + + /* + * Do we need multiple instances of the group open? Seems not. + */ + if (group->opened_file) { + ret = -EBUSY; + goto out_unlock; + } + group->opened_file = filep; + filep->private_data = group; + ret = 0; +out_unlock: + mutex_unlock(&group->group_lock); + return ret; +} + +static int vfio_group_fops_release(struct inode *inode, struct file *filep) +{ + struct vfio_group *group = filep->private_data; + + filep->private_data = NULL; + + mutex_lock(&group->group_lock); + /* + * Device FDs hold a group file reference, therefore the group release + * is only called when there are no open devices. + */ + WARN_ON(group->notifier.head); + if (group->container) + vfio_group_detach_container(group); + if (group->iommufd) { + iommufd_ctx_put(group->iommufd); + group->iommufd = NULL; + } + group->opened_file = NULL; + mutex_unlock(&group->group_lock); + return 0; +} + +static const struct file_operations vfio_group_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = vfio_group_fops_unl_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .open = vfio_group_fops_open, + .release = vfio_group_fops_release, +}; + +/* + * Group objects - create, release, get, put, search + */ +static struct vfio_group * +vfio_group_find_from_iommu(struct iommu_group *iommu_group) +{ + struct vfio_group *group; + + lockdep_assert_held(&vfio.group_lock); + + /* + * group->iommu_group from the vfio.group_list cannot be NULL + * under the vfio.group_lock. 
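For reference, the userspace sequence the group ioctls above implement is the long-standing one from Documentation/driver-api/vfio.rst; with the compat path added in this series, the fd passed to VFIO_GROUP_SET_CONTAINER may now also be an iommufd. A condensed sketch, with the group number and device name as placeholder examples:

	int container = open("/dev/vfio/vfio", O_RDWR);	/* or an iommufd */
	int group = open("/dev/vfio/26", O_RDWR);
	struct vfio_group_status status = { .argsz = sizeof(status) };

	ioctl(group, VFIO_GROUP_GET_STATUS, &status);	/* expect VIABLE */
	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	/* container path only; not used when an iommufd was passed: */
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
	int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");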
+ */ + list_for_each_entry(group, &vfio.group_list, vfio_next) { + if (group->iommu_group == iommu_group) + return group; + } + return NULL; +} + +static void vfio_group_release(struct device *dev) +{ + struct vfio_group *group = container_of(dev, struct vfio_group, dev); + + mutex_destroy(&group->device_lock); + mutex_destroy(&group->group_lock); + WARN_ON(group->iommu_group); + ida_free(&vfio.group_ida, MINOR(group->dev.devt)); + kfree(group); +} + +static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group, + enum vfio_group_type type) +{ + struct vfio_group *group; + int minor; + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) + return ERR_PTR(-ENOMEM); + + minor = ida_alloc_max(&vfio.group_ida, MINORMASK, GFP_KERNEL); + if (minor < 0) { + kfree(group); + return ERR_PTR(minor); + } + + device_initialize(&group->dev); + group->dev.devt = MKDEV(MAJOR(vfio.group_devt), minor); + group->dev.class = vfio.class; + group->dev.release = vfio_group_release; + cdev_init(&group->cdev, &vfio_group_fops); + group->cdev.owner = THIS_MODULE; + + refcount_set(&group->drivers, 1); + mutex_init(&group->group_lock); + INIT_LIST_HEAD(&group->device_list); + mutex_init(&group->device_lock); + group->iommu_group = iommu_group; + /* put in vfio_group_release() */ + iommu_group_ref_get(iommu_group); + group->type = type; + BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); + + return group; +} + +static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group, + enum vfio_group_type type) +{ + struct vfio_group *group; + struct vfio_group *ret; + int err; + + lockdep_assert_held(&vfio.group_lock); + + group = vfio_group_alloc(iommu_group, type); + if (IS_ERR(group)) + return group; + + err = dev_set_name(&group->dev, "%s%d", + group->type == VFIO_NO_IOMMU ? 
"noiommu-" : "", + iommu_group_id(iommu_group)); + if (err) { + ret = ERR_PTR(err); + goto err_put; + } + + err = cdev_device_add(&group->cdev, &group->dev); + if (err) { + ret = ERR_PTR(err); + goto err_put; + } + + list_add(&group->vfio_next, &vfio.group_list); + + return group; + +err_put: + put_device(&group->dev); + return ret; +} + +static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev, + enum vfio_group_type type) +{ + struct iommu_group *iommu_group; + struct vfio_group *group; + int ret; + + iommu_group = iommu_group_alloc(); + if (IS_ERR(iommu_group)) + return ERR_CAST(iommu_group); + + ret = iommu_group_set_name(iommu_group, "vfio-noiommu"); + if (ret) + goto out_put_group; + ret = iommu_group_add_device(iommu_group, dev); + if (ret) + goto out_put_group; + + mutex_lock(&vfio.group_lock); + group = vfio_create_group(iommu_group, type); + mutex_unlock(&vfio.group_lock); + if (IS_ERR(group)) { + ret = PTR_ERR(group); + goto out_remove_device; + } + iommu_group_put(iommu_group); + return group; + +out_remove_device: + iommu_group_remove_device(dev); +out_put_group: + iommu_group_put(iommu_group); + return ERR_PTR(ret); +} + +static bool vfio_group_has_device(struct vfio_group *group, struct device *dev) +{ + struct vfio_device *device; + + mutex_lock(&group->device_lock); + list_for_each_entry(device, &group->device_list, group_next) { + if (device->dev == dev) { + mutex_unlock(&group->device_lock); + return true; + } + } + mutex_unlock(&group->device_lock); + return false; +} + +static struct vfio_group *vfio_group_find_or_alloc(struct device *dev) +{ + struct iommu_group *iommu_group; + struct vfio_group *group; + + iommu_group = iommu_group_get(dev); + if (!iommu_group && vfio_noiommu) { + /* + * With noiommu enabled, create an IOMMU group for devices that + * don't already have one, implying no IOMMU hardware/driver + * exists. Taint the kernel because we're about to give a DMA + * capable device to a user without IOMMU protection. + */ + group = vfio_noiommu_group_alloc(dev, VFIO_NO_IOMMU); + if (!IS_ERR(group)) { + add_taint(TAINT_USER, LOCKDEP_STILL_OK); + dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n"); + } + return group; + } + + if (!iommu_group) + return ERR_PTR(-EINVAL); + + /* + * VFIO always sets IOMMU_CACHE because we offer no way for userspace to + * restore cache coherency. It has to be checked here because it is only + * valid for cases where we are using iommu groups. 
+	 */
+	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
+		iommu_group_put(iommu_group);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_lock(&vfio.group_lock);
+	group = vfio_group_find_from_iommu(iommu_group);
+	if (group) {
+		if (WARN_ON(vfio_group_has_device(group, dev)))
+			group = ERR_PTR(-EINVAL);
+		else
+			refcount_inc(&group->drivers);
+	} else {
+		group = vfio_create_group(iommu_group, VFIO_IOMMU);
+	}
+	mutex_unlock(&vfio.group_lock);
+
+	/* The vfio_group holds a reference to the iommu_group */
+	iommu_group_put(iommu_group);
+	return group;
+}
+
+int vfio_device_set_group(struct vfio_device *device,
+			  enum vfio_group_type type)
+{
+	struct vfio_group *group;
+
+	if (type == VFIO_IOMMU)
+		group = vfio_group_find_or_alloc(device->dev);
+	else
+		group = vfio_noiommu_group_alloc(device->dev, type);
+
+	if (IS_ERR(group))
+		return PTR_ERR(group);
+
+	/* Our reference on group is moved to the device */
+	device->group = group;
+	return 0;
+}
+
+void vfio_device_remove_group(struct vfio_device *device)
+{
+	struct vfio_group *group = device->group;
+	struct iommu_group *iommu_group;
+
+	if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
+		iommu_group_remove_device(device->dev);
+
+	/* Pairs with vfio_create_group() / vfio_group_get_from_iommu() */
+	if (!refcount_dec_and_mutex_lock(&group->drivers, &vfio.group_lock))
+		return;
+	list_del(&group->vfio_next);
+
+	/*
+	 * We could concurrently probe another driver in the group that might
+	 * race vfio_device_remove_group() with vfio_get_group(), so we have to
+	 * ensure that the sysfs is all cleaned up under lock otherwise the
+	 * cdev_device_add() will fail due to the name already existing.
+	 */
+	cdev_device_del(&group->cdev, &group->dev);
+
+	mutex_lock(&group->group_lock);
+	/*
+	 * These data structures all have paired operations that can only be
+	 * undone when the caller holds a live reference on the device. Since
+	 * all pairs must be undone these WARN_ON's indicate some caller did not
+	 * properly hold the group reference.
+	 */
+	WARN_ON(!list_empty(&group->device_list));
+	WARN_ON(group->notifier.head);
+
+	/*
+	 * Revoke all users of group->iommu_group. At this point we know there
+	 * are no devices active because we are unplugging the last one. Setting
+	 * iommu_group to NULL blocks all new users.
+	 */
+	if (group->container)
+		vfio_group_detach_container(group);
+	iommu_group = group->iommu_group;
+	group->iommu_group = NULL;
+	mutex_unlock(&group->group_lock);
+	mutex_unlock(&vfio.group_lock);
+
+	iommu_group_put(iommu_group);
+	put_device(&group->dev);
+}
+
+void vfio_device_group_register(struct vfio_device *device)
+{
+	mutex_lock(&device->group->device_lock);
+	list_add(&device->group_next, &device->group->device_list);
+	mutex_unlock(&device->group->device_lock);
+}
+
+void vfio_device_group_unregister(struct vfio_device *device)
+{
+	mutex_lock(&device->group->device_lock);
+	list_del(&device->group_next);
+	mutex_unlock(&device->group->device_lock);
+}
+
+int vfio_device_group_use_iommu(struct vfio_device *device)
+{
+	struct vfio_group *group = device->group;
+	int ret = 0;
+
+	lockdep_assert_held(&group->group_lock);
+
+	if (WARN_ON(!group->container))
+		return -EINVAL;
+
+	ret = vfio_group_use_container(group);
+	if (ret)
+		return ret;
+	vfio_device_container_register(device);
+	return 0;
+}
+
+void vfio_device_group_unuse_iommu(struct vfio_device *device)
+{
+	struct vfio_group *group = device->group;
+
+	lockdep_assert_held(&group->group_lock);
+
+	if (WARN_ON(!group->container))
+		return;
+
+	vfio_device_container_unregister(device);
+	vfio_group_unuse_container(group);
+}
+
+bool vfio_device_has_container(struct vfio_device *device)
+{
+	return device->group->container;
+}
+
+/**
+ * vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
+ * @file: VFIO group file
+ *
+ * The returned iommu_group is valid as long as a ref is held on the file. This
+ * returns a reference on the group. This function is deprecated, only the SPAPR
+ * path in kvm should call it.
+ */
+struct iommu_group *vfio_file_iommu_group(struct file *file)
+{
+	struct vfio_group *group = file->private_data;
+	struct iommu_group *iommu_group = NULL;
+
+	if (!IS_ENABLED(CONFIG_SPAPR_TCE_IOMMU))
+		return NULL;
+
+	if (!vfio_file_is_group(file))
+		return NULL;
+
+	mutex_lock(&group->group_lock);
+	if (group->iommu_group) {
+		iommu_group = group->iommu_group;
+		iommu_group_ref_get(iommu_group);
+	}
+	mutex_unlock(&group->group_lock);
+	return iommu_group;
+}
+EXPORT_SYMBOL_GPL(vfio_file_iommu_group);
+
+/**
+ * vfio_file_is_group - True if the file is usable with VFIO APIs
+ * @file: VFIO group file
+ */
+bool vfio_file_is_group(struct file *file)
+{
+	return file->f_op == &vfio_group_fops;
+}
+EXPORT_SYMBOL_GPL(vfio_file_is_group);
+
+/**
+ * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
+ *	is always CPU cache coherent
+ * @file: VFIO group file
+ *
+ * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
+ * bit in DMA transactions. A return of false indicates that the user has
+ * rights to access additional instructions such as wbinvd on x86.
+ */
+bool vfio_file_enforced_coherent(struct file *file)
+{
+	struct vfio_group *group = file->private_data;
+	struct vfio_device *device;
+	bool ret = true;
+
+	if (!vfio_file_is_group(file))
+		return true;
+
+	/*
+	 * If the device does not have IOMMU_CAP_ENFORCE_CACHE_COHERENCY then
+	 * any domain later attached to it will also not support it. If the cap
+	 * is set then the iommu_domain eventually attached to the device/group
+	 * must use a domain with enforce_cache_coherency().
+ */ + mutex_lock(&group->device_lock); + list_for_each_entry(device, &group->device_list, group_next) { + if (!device_iommu_capable(device->dev, + IOMMU_CAP_ENFORCE_CACHE_COHERENCY)) { + ret = false; + break; + } + } + mutex_unlock(&group->device_lock); + return ret; +} +EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent); + +/** + * vfio_file_set_kvm - Link a kvm with VFIO drivers + * @file: VFIO group file + * @kvm: KVM to link + * + * When a VFIO device is first opened the KVM will be available in + * device->kvm if one was associated with the group. + */ +void vfio_file_set_kvm(struct file *file, struct kvm *kvm) +{ + struct vfio_group *group = file->private_data; + + if (!vfio_file_is_group(file)) + return; + + mutex_lock(&group->group_lock); + group->kvm = kvm; + mutex_unlock(&group->group_lock); +} +EXPORT_SYMBOL_GPL(vfio_file_set_kvm); + +/** + * vfio_file_has_dev - True if the VFIO file is a handle for device + * @file: VFIO file to check + * @device: Device that must be part of the file + * + * Returns true if given file has permission to manipulate the given device. + */ +bool vfio_file_has_dev(struct file *file, struct vfio_device *device) +{ + struct vfio_group *group = file->private_data; + + if (!vfio_file_is_group(file)) + return false; + + return group == device->group; +} +EXPORT_SYMBOL_GPL(vfio_file_has_dev); + +static char *vfio_devnode(const struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev)); +} + +int __init vfio_group_init(void) +{ + int ret; + + ida_init(&vfio.group_ida); + mutex_init(&vfio.group_lock); + INIT_LIST_HEAD(&vfio.group_list); + + ret = vfio_container_init(); + if (ret) + return ret; + + /* /dev/vfio/$GROUP */ + vfio.class = class_create(THIS_MODULE, "vfio"); + if (IS_ERR(vfio.class)) { + ret = PTR_ERR(vfio.class); + goto err_group_class; + } + + vfio.class->devnode = vfio_devnode; + + ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio"); + if (ret) + goto err_alloc_chrdev; + return 0; + +err_alloc_chrdev: + class_destroy(vfio.class); + vfio.class = NULL; +err_group_class: + vfio_container_cleanup(); + return ret; +} + +void vfio_group_cleanup(void) +{ + WARN_ON(!list_empty(&vfio.group_list)); + ida_destroy(&vfio.group_ida); + unregister_chrdev_region(vfio.group_devt, MINORMASK + 1); + class_destroy(vfio.class); + vfio.class = NULL; + vfio_container_cleanup(); +} diff --git a/drivers/vfio/iommufd.c b/drivers/vfio/iommufd.c new file mode 100644 index 0000000000000000000000000000000000000000..4f82a6fa7c6c7fcc25da29c4a497ee673f34d618 --- /dev/null +++ b/drivers/vfio/iommufd.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES + */ +#include +#include + +#include "vfio.h" + +MODULE_IMPORT_NS(IOMMUFD); +MODULE_IMPORT_NS(IOMMUFD_VFIO); + +int vfio_iommufd_bind(struct vfio_device *vdev, struct iommufd_ctx *ictx) +{ + u32 ioas_id; + u32 device_id; + int ret; + + lockdep_assert_held(&vdev->dev_set->lock); + + /* + * If the driver doesn't provide this op then it means the device does + * not do DMA at all. So nothing to do. 
+ */ + if (!vdev->ops->bind_iommufd) + return 0; + + ret = vdev->ops->bind_iommufd(vdev, ictx, &device_id); + if (ret) + return ret; + + ret = iommufd_vfio_compat_ioas_id(ictx, &ioas_id); + if (ret) + goto err_unbind; + ret = vdev->ops->attach_ioas(vdev, &ioas_id); + if (ret) + goto err_unbind; + + /* + * The legacy path has no way to return the device id or the selected + * pt_id + */ + return 0; + +err_unbind: + if (vdev->ops->unbind_iommufd) + vdev->ops->unbind_iommufd(vdev); + return ret; +} + +void vfio_iommufd_unbind(struct vfio_device *vdev) +{ + lockdep_assert_held(&vdev->dev_set->lock); + + if (vdev->ops->unbind_iommufd) + vdev->ops->unbind_iommufd(vdev); +} + +/* + * The physical standard ops mean that the iommufd_device is bound to the + * physical device vdev->dev that was provided to vfio_init_group_dev(). Drivers + * using this ops set should call vfio_register_group_dev() + */ +int vfio_iommufd_physical_bind(struct vfio_device *vdev, + struct iommufd_ctx *ictx, u32 *out_device_id) +{ + struct iommufd_device *idev; + + idev = iommufd_device_bind(ictx, vdev->dev, out_device_id); + if (IS_ERR(idev)) + return PTR_ERR(idev); + vdev->iommufd_device = idev; + return 0; +} +EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind); + +void vfio_iommufd_physical_unbind(struct vfio_device *vdev) +{ + lockdep_assert_held(&vdev->dev_set->lock); + + if (vdev->iommufd_attached) { + iommufd_device_detach(vdev->iommufd_device); + vdev->iommufd_attached = false; + } + iommufd_device_unbind(vdev->iommufd_device); + vdev->iommufd_device = NULL; +} +EXPORT_SYMBOL_GPL(vfio_iommufd_physical_unbind); + +int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id) +{ + int rc; + + rc = iommufd_device_attach(vdev->iommufd_device, pt_id); + if (rc) + return rc; + vdev->iommufd_attached = true; + return 0; +} +EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas); + +/* + * The emulated standard ops mean that vfio_device is going to use the + * "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this + * ops set should call vfio_register_emulated_iommu_dev(). 
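Pulling the two comments above together: a physical (VF-backed) variant driver wires in the `vfio_iommufd_physical_*` trio and registers with vfio_register_group_dev(), exactly as the hisi_acc and mlx5 hunks later in this series do. A condensed sketch for a fictional driver (the `my_vfio_pci_*` names are placeholders; only the three iommufd callbacks are from this patch):

```c
#include <linux/vfio.h>

static const struct vfio_device_ops my_vfio_pci_ops = {
	.name		= "my-vfio-pci",
	/* ...the usual open_device/close_device/rw/mmap callbacks elided... */

	/* New in this series: let container-less users go through iommufd. */
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
};
```

An emulated (mdev-style) driver would instead plug in vfio_iommufd_emulated_bind()/_unbind()/_attach_ioas() shown next, and register via vfio_register_emulated_iommu_dev().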
+ */
+
+static void vfio_emulated_unmap(void *data, unsigned long iova,
+				unsigned long length)
+{
+	struct vfio_device *vdev = data;
+
+	vdev->ops->dma_unmap(vdev, iova, length);
+}
+
+static const struct iommufd_access_ops vfio_user_ops = {
+	.needs_pin_pages = 1,
+	.unmap = vfio_emulated_unmap,
+};
+
+int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
+			       struct iommufd_ctx *ictx, u32 *out_device_id)
+{
+	lockdep_assert_held(&vdev->dev_set->lock);
+
+	vdev->iommufd_ictx = ictx;
+	iommufd_ctx_get(ictx);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);
+
+void vfio_iommufd_emulated_unbind(struct vfio_device *vdev)
+{
+	lockdep_assert_held(&vdev->dev_set->lock);
+
+	if (vdev->iommufd_access) {
+		iommufd_access_destroy(vdev->iommufd_access);
+		vdev->iommufd_access = NULL;
+	}
+	iommufd_ctx_put(vdev->iommufd_ictx);
+	vdev->iommufd_ictx = NULL;
+}
+EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind);
+
+int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
+{
+	struct iommufd_access *user;
+
+	lockdep_assert_held(&vdev->dev_set->lock);
+
+	user = iommufd_access_create(vdev->iommufd_ictx, *pt_id, &vfio_user_ops,
+				     vdev);
+	if (IS_ERR(user))
+		return PTR_ERR(user);
+	vdev->iommufd_access = user;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);
diff --git a/drivers/vfio/iova_bitmap.c b/drivers/vfio/iova_bitmap.c
index 6631e8befe1b2711d58fad544eac41dc829bd8ad..0848f920efb7c1c13521cc9d59ae96231cbf4481 100644
--- a/drivers/vfio/iova_bitmap.c
+++ b/drivers/vfio/iova_bitmap.c
@@ -5,6 +5,7 @@
  */
 #include <linux/iova_bitmap.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/slab.h>
 
 #define BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
@@ -295,11 +296,13 @@ void iova_bitmap_free(struct iova_bitmap *bitmap)
  */
 static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
 {
-	unsigned long remaining;
+	unsigned long remaining, bytes;
+
+	bytes = (bitmap->mapped.npages << PAGE_SHIFT) - bitmap->mapped.pgoff;
 
 	remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
 	remaining = min_t(unsigned long, remaining,
-			  (bitmap->mapped.npages << PAGE_SHIFT) / sizeof(*bitmap->bitmap));
+			  bytes / sizeof(*bitmap->bitmap));
 
 	return remaining;
 }
@@ -394,29 +397,27 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
  * Set the bits corresponding to the range [iova .. iova+length-1] in
  * the user bitmap.
  *
- * Return: The number of bits set.
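The rewritten iova_bitmap_set() below folds the byte offset of the mapping (mapped.pgoff) into the bit index once, up front, instead of carrying a separate page_offset through the loop. A throwaway userspace model of just that index math, with arbitrary example values (4 KiB pages, a 2 KiB pgoff, and a range long enough to cross a bitmap page boundary):

```c
/* Standalone model of the new iova_bitmap_set() bookkeeping; values are
 * illustrative only.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define BITS_PER_BYTE	8UL
#define BITS_PER_PAGE	(PAGE_SIZE * BITS_PER_BYTE)

int main(void)
{
	unsigned long mapped_iova = 0, pgshift = 12, pgoff = 2048;
	unsigned long iova = 0x1000, length = 0x8000000; /* 128 MiB dirty */
	unsigned long cur_bit = ((iova - mapped_iova) >> pgshift) +
				pgoff * BITS_PER_BYTE;
	unsigned long last_bit = (((iova + length - 1) - mapped_iova) >>
				  pgshift) + pgoff * BITS_PER_BYTE;

	do {
		unsigned long page_idx = cur_bit / BITS_PER_PAGE;
		unsigned long offset = cur_bit % BITS_PER_PAGE;
		unsigned long nbits = BITS_PER_PAGE - offset;

		if (nbits > last_bit - cur_bit + 1)
			nbits = last_bit - cur_bit + 1;
		/* The kernel does bitmap_set() on the kmap'd page here. */
		printf("set %lu bits at bit %lu of page %lu\n",
		       nbits, offset, page_idx);
		cur_bit += nbits;
	} while (cur_bit <= last_bit);
	return 0;
}
```

Running this prints two chunks, showing the range spilling from bitmap page 0 into page 1, which is exactly the boundary case the rewrite handles.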
*/ void iova_bitmap_set(struct iova_bitmap *bitmap, unsigned long iova, size_t length) { struct iova_bitmap_map *mapped = &bitmap->mapped; - unsigned long offset = (iova - mapped->iova) >> mapped->pgshift; - unsigned long nbits = max_t(unsigned long, 1, length >> mapped->pgshift); - unsigned long page_idx = offset / BITS_PER_PAGE; - unsigned long page_offset = mapped->pgoff; - void *kaddr; - - offset = offset % BITS_PER_PAGE; + unsigned long cur_bit = ((iova - mapped->iova) >> + mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; + unsigned long last_bit = (((iova + length - 1) - mapped->iova) >> + mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; do { - unsigned long size = min(BITS_PER_PAGE - offset, nbits); + unsigned int page_idx = cur_bit / BITS_PER_PAGE; + unsigned int offset = cur_bit % BITS_PER_PAGE; + unsigned int nbits = min(BITS_PER_PAGE - offset, + last_bit - cur_bit + 1); + void *kaddr; kaddr = kmap_local_page(mapped->pages[page_idx]); - bitmap_set(kaddr + page_offset, offset, size); + bitmap_set(kaddr, offset, nbits); kunmap_local(kaddr); - page_offset = offset = 0; - nbits -= size; - page_idx++; - } while (nbits > 0); + cur_bit += nbits; + } while (cur_bit <= last_bit); } EXPORT_SYMBOL_GPL(iova_bitmap_set); diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c index 39eeca18a0f7c884a18827fbe6d4ccf2584920b9..0bba3b05c6c780aa081f2031b5ae38b05d9aefc0 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c @@ -360,8 +360,8 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev, u32 que_iso_state; int ret; - if (migf->total_length < QM_MATCH_SIZE) - return -EINVAL; + if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done) + return 0; if (vf_data->acc_magic != ACC_DEV_MAGIC) { dev_err(dev, "failed to match ACC_DEV_MAGIC\n"); @@ -406,6 +406,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev, } hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; + hisi_acc_vdev->match_done = true; return 0; } @@ -493,10 +494,6 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, struct device *dev = &vf_qm->pdev->dev; int ret; - ret = vf_qm_get_match_data(hisi_acc_vdev, vf_data); - if (ret) - return ret; - if (unlikely(qm_wait_dev_not_ready(vf_qm))) { /* Update state and return with match data */ vf_data->vf_qm_state = QM_NOT_READY; @@ -673,12 +670,6 @@ static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev) struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf; int ret; - /* Check dev compatibility */ - ret = vf_qm_check_match(hisi_acc_vdev, migf); - if (ret) { - dev_err(dev, "failed to match the VF!\n"); - return ret; - } /* Recover data to VF */ ret = vf_qm_load_data(hisi_acc_vdev, migf); if (ret) { @@ -732,6 +723,10 @@ static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *bu *pos += len; done = len; migf->total_length += len; + + ret = vf_qm_check_match(migf->hisi_acc_vdev, migf); + if (ret) + done = -EFAULT; out_unlock: mutex_unlock(&migf->lock); return done; @@ -764,9 +759,58 @@ hisi_acc_vf_pci_resume(struct hisi_acc_vf_core_device *hisi_acc_vdev) stream_open(migf->filp->f_inode, migf->filp); mutex_init(&migf->lock); + migf->hisi_acc_vdev = hisi_acc_vdev; return migf; } +static long hisi_acc_vf_precopy_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg) +{ + struct hisi_acc_vf_migration_file *migf = filp->private_data; + 
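For reference while reading this handler: the userspace side of the call it services. The sketch assumes `data_fd` is the migration data fd returned when the device entered PRE_COPY; everything else is the real 6.2 uAPI:

```c
/* Query how many precopy bytes are ready on a VFIO migration data fd. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int query_precopy(int data_fd)
{
	struct vfio_precopy_info info;

	memset(&info, 0, sizeof(info));
	info.argsz = sizeof(info);	/* kernel checks argsz >= minsz */
	if (ioctl(data_fd, VFIO_MIG_GET_PRECOPY_INFO, &info))
		return -1;
	printf("initial_bytes=%llu dirty_bytes=%llu\n",
	       (unsigned long long)info.initial_bytes,
	       (unsigned long long)info.dirty_bytes);
	return 0;
}
```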
struct hisi_acc_vf_core_device *hisi_acc_vdev = migf->hisi_acc_vdev; + loff_t *pos = &filp->f_pos; + struct vfio_precopy_info info; + unsigned long minsz; + int ret; + + if (cmd != VFIO_MIG_GET_PRECOPY_INFO) + return -ENOTTY; + + minsz = offsetofend(struct vfio_precopy_info, dirty_bytes); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + if (info.argsz < minsz) + return -EINVAL; + + mutex_lock(&hisi_acc_vdev->state_mutex); + if (hisi_acc_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY) { + mutex_unlock(&hisi_acc_vdev->state_mutex); + return -EINVAL; + } + + mutex_lock(&migf->lock); + + if (migf->disabled) { + ret = -ENODEV; + goto out; + } + + if (*pos > migf->total_length) { + ret = -EINVAL; + goto out; + } + + info.dirty_bytes = 0; + info.initial_bytes = migf->total_length - *pos; + + ret = copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; +out: + mutex_unlock(&migf->lock); + mutex_unlock(&hisi_acc_vdev->state_mutex); + return ret; +} + static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t len, loff_t *pos) { @@ -807,12 +851,14 @@ out_unlock: static const struct file_operations hisi_acc_vf_save_fops = { .owner = THIS_MODULE, .read = hisi_acc_vf_save_read, + .unlocked_ioctl = hisi_acc_vf_precopy_ioctl, + .compat_ioctl = compat_ptr_ioctl, .release = hisi_acc_vf_release_file, .llseek = no_llseek, }; static struct hisi_acc_vf_migration_file * -hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev) +hisi_acc_open_saving_migf(struct hisi_acc_vf_core_device *hisi_acc_vdev) { struct hisi_acc_vf_migration_file *migf; int ret; @@ -832,8 +878,9 @@ hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev) stream_open(migf->filp->f_inode, migf->filp); mutex_init(&migf->lock); + migf->hisi_acc_vdev = hisi_acc_vdev; - ret = vf_qm_state_save(hisi_acc_vdev, migf); + ret = vf_qm_get_match_data(hisi_acc_vdev, &migf->vf_data); if (ret) { fput(migf->filp); return ERR_PTR(ret); @@ -842,6 +889,44 @@ hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev) return migf; } +static struct hisi_acc_vf_migration_file * +hisi_acc_vf_pre_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + struct hisi_acc_vf_migration_file *migf; + + migf = hisi_acc_open_saving_migf(hisi_acc_vdev); + if (IS_ERR(migf)) + return migf; + + migf->total_length = QM_MATCH_SIZE; + return migf; +} + +static struct hisi_acc_vf_migration_file * +hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev, bool open) +{ + int ret; + struct hisi_acc_vf_migration_file *migf = NULL; + + if (open) { + /* + * Userspace didn't use PRECOPY support. Hence saving_migf + * is not opened yet. + */ + migf = hisi_acc_open_saving_migf(hisi_acc_vdev); + if (IS_ERR(migf)) + return migf; + } else { + migf = hisi_acc_vdev->saving_migf; + } + + ret = vf_qm_state_save(hisi_acc_vdev, migf); + if (ret) + return ERR_PTR(ret); + + return open ? 
migf : NULL; +} + static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev) { struct device *dev = &hisi_acc_vdev->vf_dev->dev; @@ -869,6 +954,31 @@ hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev, u32 cur = hisi_acc_vdev->mig_state; int ret; + if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) { + struct hisi_acc_vf_migration_file *migf; + + migf = hisi_acc_vf_pre_copy(hisi_acc_vdev); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + hisi_acc_vdev->saving_migf = migf; + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_STOP_COPY) { + struct hisi_acc_vf_migration_file *migf; + + ret = hisi_acc_vf_stop_device(hisi_acc_vdev); + if (ret) + return ERR_PTR(ret); + + migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, false); + if (IS_ERR(migf)) + return ERR_CAST(migf); + + return NULL; + } + if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) { ret = hisi_acc_vf_stop_device(hisi_acc_vdev); if (ret) @@ -879,7 +989,7 @@ hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev, if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) { struct hisi_acc_vf_migration_file *migf; - migf = hisi_acc_vf_stop_copy(hisi_acc_vdev); + migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, true); if (IS_ERR(migf)) return ERR_CAST(migf); get_file(migf->filp); @@ -911,6 +1021,11 @@ hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev, return NULL; } + if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) { + hisi_acc_vf_disable_fds(hisi_acc_vdev); + return NULL; + } + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) { hisi_acc_vf_start_device(hisi_acc_vdev); return NULL; @@ -957,6 +1072,14 @@ hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev, return res; } +static int +hisi_acc_vfio_pci_get_data_size(struct vfio_device *vdev, + unsigned long *stop_copy_length) +{ + *stop_copy_length = sizeof(struct acc_vf_data); + return 0; +} + static int hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev, enum vfio_device_mig_state *curr_state) @@ -1213,6 +1336,7 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev) static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = { .migration_set_state = hisi_acc_vfio_pci_set_device_state, .migration_get_state = hisi_acc_vfio_pci_get_device_state, + .migration_get_data_size = hisi_acc_vfio_pci_get_data_size, }; static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev) @@ -1227,7 +1351,7 @@ static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev) hisi_acc_vdev->vf_dev = pdev; mutex_init(&hisi_acc_vdev->state_mutex); - core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY; + core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY; core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops; return vfio_pci_core_init_dev(core_vdev); @@ -1246,6 +1370,9 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = { .mmap = hisi_acc_vfio_pci_mmap, .request = vfio_pci_core_request, .match = vfio_pci_core_match, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, }; static const struct vfio_device_ops hisi_acc_vfio_pci_ops = { @@ -1261,6 +1388,9 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_ops = { .mmap = vfio_pci_core_mmap, 
 .request = vfio_pci_core_request,
 .match = vfio_pci_core_match,
+	.bind_iommufd = vfio_iommufd_physical_bind,
+	.unbind_iommufd = vfio_iommufd_physical_unbind,
+	.attach_ioas = vfio_iommufd_physical_attach_ioas,
 };
 
 static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
index 67343325b320165499a6e17bea7e6d9cc9b7e7c3..dcabfeec6ca19d4334e746536c5e15a3572972d5 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
@@ -91,12 +91,14 @@ struct hisi_acc_vf_migration_file {
 	struct mutex lock;
 	bool disabled;
 
+	struct hisi_acc_vf_core_device *hisi_acc_vdev;
 	struct acc_vf_data vf_data;
 	size_t total_length;
 };
 
 struct hisi_acc_vf_core_device {
 	struct vfio_pci_core_device core_device;
+	u8 match_done:1;
 	u8 deferred_reset:1;
 	/* For migration state */
 	struct mutex state_mutex;
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index c604b70437a5d5e56f6f8d71808a8c61c1aa0490..64e68d13cb98fc1812b8f096cc23c80ef2919bc2 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -14,18 +14,36 @@ _mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev);
 
 int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
 {
+	struct mlx5_vf_migration_file *migf = mvdev->saving_migf;
 	u32 out[MLX5_ST_SZ_DW(suspend_vhca_out)] = {};
 	u32 in[MLX5_ST_SZ_DW(suspend_vhca_in)] = {};
+	int err;
 
 	lockdep_assert_held(&mvdev->state_mutex);
 	if (mvdev->mdev_detach)
 		return -ENOTCONN;
 
+	/*
+	 * In case PRE_COPY is used, saving_migf is exposed while the device is
+	 * running. Make sure to run only once there is no active save command.
+	 * Running both in parallel might end up failing the save command once
+	 * it tries to turn on 'tracking' on a suspended device.
+	 */
+	if (migf) {
+		err = wait_for_completion_interruptible(&migf->save_comp);
+		if (err)
+			return err;
+	}
+
 	MLX5_SET(suspend_vhca_in, in, opcode, MLX5_CMD_OP_SUSPEND_VHCA);
 	MLX5_SET(suspend_vhca_in, in, vhca_id, mvdev->vhca_id);
 	MLX5_SET(suspend_vhca_in, in, op_mod, op_mod);
 
-	return mlx5_cmd_exec_inout(mvdev->mdev, suspend_vhca, in, out);
+	err = mlx5_cmd_exec_inout(mvdev->mdev, suspend_vhca, in, out);
+	if (migf)
+		complete(&migf->save_comp);
+
+	return err;
 }
 
 int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
@@ -45,23 +63,54 @@ int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
 }
 
 int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
-					  size_t *state_size)
+					  size_t *state_size, u8 query_flags)
 {
 	u32 out[MLX5_ST_SZ_DW(query_vhca_migration_state_out)] = {};
 	u32 in[MLX5_ST_SZ_DW(query_vhca_migration_state_in)] = {};
+	bool inc = query_flags & MLX5VF_QUERY_INC;
 	int ret;
 
 	lockdep_assert_held(&mvdev->state_mutex);
 	if (mvdev->mdev_detach)
 		return -ENOTCONN;
 
+	/*
+	 * In case PRE_COPY is used, saving_migf is exposed while the device is
+	 * running. Make sure to run only once there is no active save command.
+	 * Running both in parallel might end up failing the incremental query
+	 * command on an un-tracked vhca.
+ */ + if (inc) { + ret = wait_for_completion_interruptible(&mvdev->saving_migf->save_comp); + if (ret) + return ret; + if (mvdev->saving_migf->state == + MLX5_MIGF_STATE_PRE_COPY_ERROR) { + /* + * In case we had a PRE_COPY error, only query full + * image for final image + */ + if (!(query_flags & MLX5VF_QUERY_FINAL)) { + *state_size = 0; + complete(&mvdev->saving_migf->save_comp); + return 0; + } + query_flags &= ~MLX5VF_QUERY_INC; + } + } + MLX5_SET(query_vhca_migration_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE); MLX5_SET(query_vhca_migration_state_in, in, vhca_id, mvdev->vhca_id); MLX5_SET(query_vhca_migration_state_in, in, op_mod, 0); + MLX5_SET(query_vhca_migration_state_in, in, incremental, + query_flags & MLX5VF_QUERY_INC); ret = mlx5_cmd_exec_inout(mvdev->mdev, query_vhca_migration_state, in, out); + if (inc) + complete(&mvdev->saving_migf->save_comp); + if (ret) return ret; @@ -173,6 +222,11 @@ void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev, if (MLX5_CAP_GEN(mvdev->mdev, adv_virtualization)) mvdev->core_device.vdev.log_ops = log_ops; + if (MLX5_CAP_GEN_2(mvdev->mdev, migration_multi_load) && + MLX5_CAP_GEN_2(mvdev->mdev, migration_tracking_state)) + mvdev->core_device.vdev.migration_flags |= + VFIO_MIGRATION_PRE_COPY; + end: mlx5_vf_put_core_dev(mvdev->mdev); } @@ -210,11 +264,11 @@ err_exec: } static int _create_mkey(struct mlx5_core_dev *mdev, u32 pdn, - struct mlx5_vf_migration_file *migf, + struct mlx5_vhca_data_buffer *buf, struct mlx5_vhca_recv_buf *recv_buf, u32 *mkey) { - size_t npages = migf ? DIV_ROUND_UP(migf->total_length, PAGE_SIZE) : + size_t npages = buf ? DIV_ROUND_UP(buf->allocated_length, PAGE_SIZE) : recv_buf->npages; int err = 0, inlen; __be64 *mtt; @@ -232,10 +286,10 @@ static int _create_mkey(struct mlx5_core_dev *mdev, u32 pdn, DIV_ROUND_UP(npages, 2)); mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); - if (migf) { + if (buf) { struct sg_dma_page_iter dma_iter; - for_each_sgtable_dma_page(&migf->table.sgt, &dma_iter, 0) + for_each_sgtable_dma_page(&buf->table.sgt, &dma_iter, 0) *mtt++ = cpu_to_be64(sg_page_iter_dma_address(&dma_iter)); } else { int i; @@ -255,35 +309,195 @@ static int _create_mkey(struct mlx5_core_dev *mdev, u32 pdn, MLX5_SET(mkc, mkc, qpn, 0xffffff); MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); MLX5_SET(mkc, mkc, translations_octword_size, DIV_ROUND_UP(npages, 2)); - MLX5_SET64(mkc, mkc, len, - migf ? 
migf->total_length : (npages * PAGE_SIZE)); + MLX5_SET64(mkc, mkc, len, npages * PAGE_SIZE); err = mlx5_core_create_mkey(mdev, mkey, in, inlen); kvfree(in); return err; } +static int mlx5vf_dma_data_buffer(struct mlx5_vhca_data_buffer *buf) +{ + struct mlx5vf_pci_core_device *mvdev = buf->migf->mvdev; + struct mlx5_core_dev *mdev = mvdev->mdev; + int ret; + + lockdep_assert_held(&mvdev->state_mutex); + if (mvdev->mdev_detach) + return -ENOTCONN; + + if (buf->dmaed || !buf->allocated_length) + return -EINVAL; + + ret = dma_map_sgtable(mdev->device, &buf->table.sgt, buf->dma_dir, 0); + if (ret) + return ret; + + ret = _create_mkey(mdev, buf->migf->pdn, buf, NULL, &buf->mkey); + if (ret) + goto err; + + buf->dmaed = true; + + return 0; +err: + dma_unmap_sgtable(mdev->device, &buf->table.sgt, buf->dma_dir, 0); + return ret; +} + +void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf) +{ + struct mlx5_vf_migration_file *migf = buf->migf; + struct sg_page_iter sg_iter; + + lockdep_assert_held(&migf->mvdev->state_mutex); + WARN_ON(migf->mvdev->mdev_detach); + + if (buf->dmaed) { + mlx5_core_destroy_mkey(migf->mvdev->mdev, buf->mkey); + dma_unmap_sgtable(migf->mvdev->mdev->device, &buf->table.sgt, + buf->dma_dir, 0); + } + + /* Undo alloc_pages_bulk_array() */ + for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0) + __free_page(sg_page_iter_page(&sg_iter)); + sg_free_append_table(&buf->table); + kfree(buf); +} + +struct mlx5_vhca_data_buffer * +mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf, + size_t length, + enum dma_data_direction dma_dir) +{ + struct mlx5_vhca_data_buffer *buf; + int ret; + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + buf->dma_dir = dma_dir; + buf->migf = migf; + if (length) { + ret = mlx5vf_add_migration_pages(buf, + DIV_ROUND_UP_ULL(length, PAGE_SIZE)); + if (ret) + goto end; + + if (dma_dir != DMA_NONE) { + ret = mlx5vf_dma_data_buffer(buf); + if (ret) + goto end; + } + } + + return buf; +end: + mlx5vf_free_data_buffer(buf); + return ERR_PTR(ret); +} + +void mlx5vf_put_data_buffer(struct mlx5_vhca_data_buffer *buf) +{ + spin_lock_irq(&buf->migf->list_lock); + list_add_tail(&buf->buf_elm, &buf->migf->avail_list); + spin_unlock_irq(&buf->migf->list_lock); +} + +struct mlx5_vhca_data_buffer * +mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf, + size_t length, enum dma_data_direction dma_dir) +{ + struct mlx5_vhca_data_buffer *buf, *temp_buf; + struct list_head free_list; + + lockdep_assert_held(&migf->mvdev->state_mutex); + if (migf->mvdev->mdev_detach) + return ERR_PTR(-ENOTCONN); + + INIT_LIST_HEAD(&free_list); + + spin_lock_irq(&migf->list_lock); + list_for_each_entry_safe(buf, temp_buf, &migf->avail_list, buf_elm) { + if (buf->dma_dir == dma_dir) { + list_del_init(&buf->buf_elm); + if (buf->allocated_length >= length) { + spin_unlock_irq(&migf->list_lock); + goto found; + } + /* + * Prevent holding redundant buffers. Put in a free + * list and call at the end not under the spin lock + * (&migf->list_lock) to mlx5vf_free_data_buffer which + * might sleep. 
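The comment just above describes a common kernel idiom: unlink under the spinlock onto a private list, free after the lock is dropped because the destructor may sleep. A generic, self-contained sketch of the pattern (the names here are illustrative, not from this patch):

```c
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct node {
	struct list_head elm;
};

static void drain_nodes(spinlock_t *lock, struct list_head *pool)
{
	struct node *n, *tmp;
	LIST_HEAD(free_list);

	spin_lock_irq(lock);
	list_for_each_entry_safe(n, tmp, pool, elm)
		list_move(&n->elm, &free_list);	/* no sleeping allowed here */
	spin_unlock_irq(lock);

	/* Safe to sleep now, e.g. in dma_unmap_sgtable() or vfree(). */
	while ((n = list_first_entry_or_null(&free_list, struct node, elm))) {
		list_del(&n->elm);
		kfree(n);
	}
}
```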
+ */ + list_add(&buf->buf_elm, &free_list); + } + } + spin_unlock_irq(&migf->list_lock); + buf = mlx5vf_alloc_data_buffer(migf, length, dma_dir); + +found: + while ((temp_buf = list_first_entry_or_null(&free_list, + struct mlx5_vhca_data_buffer, buf_elm))) { + list_del(&temp_buf->buf_elm); + mlx5vf_free_data_buffer(temp_buf); + } + + return buf; +} + void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work) { struct mlx5vf_async_data *async_data = container_of(_work, struct mlx5vf_async_data, work); struct mlx5_vf_migration_file *migf = container_of(async_data, struct mlx5_vf_migration_file, async_data); - struct mlx5_core_dev *mdev = migf->mvdev->mdev; mutex_lock(&migf->lock); if (async_data->status) { - migf->is_err = true; + mlx5vf_put_data_buffer(async_data->buf); + if (async_data->header_buf) + mlx5vf_put_data_buffer(async_data->header_buf); + if (async_data->status == MLX5_CMD_STAT_BAD_RES_STATE_ERR) + migf->state = MLX5_MIGF_STATE_PRE_COPY_ERROR; + else + migf->state = MLX5_MIGF_STATE_ERROR; wake_up_interruptible(&migf->poll_wait); } mutex_unlock(&migf->lock); - - mlx5_core_destroy_mkey(mdev, async_data->mkey); - dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0); - mlx5_core_dealloc_pd(mdev, async_data->pdn); kvfree(async_data->out); + complete(&migf->save_comp); fput(migf->filp); } +static int add_buf_header(struct mlx5_vhca_data_buffer *header_buf, + size_t image_size) +{ + struct mlx5_vf_migration_file *migf = header_buf->migf; + struct mlx5_vf_migration_header header = {}; + unsigned long flags; + struct page *page; + u8 *to_buff; + + header.image_size = cpu_to_le64(image_size); + page = mlx5vf_get_migration_page(header_buf, 0); + if (!page) + return -EINVAL; + to_buff = kmap_local_page(page); + memcpy(to_buff, &header, sizeof(header)); + kunmap_local(to_buff); + header_buf->length = sizeof(header); + header_buf->header_image_size = image_size; + header_buf->start_pos = header_buf->migf->max_pos; + migf->max_pos += header_buf->length; + spin_lock_irqsave(&migf->list_lock, flags); + list_add_tail(&header_buf->buf_elm, &migf->buf_list); + spin_unlock_irqrestore(&migf->list_lock, flags); + return 0; +} + static void mlx5vf_save_callback(int status, struct mlx5_async_work *context) { struct mlx5vf_async_data *async_data = container_of(context, @@ -292,67 +506,96 @@ static void mlx5vf_save_callback(int status, struct mlx5_async_work *context) struct mlx5_vf_migration_file, async_data); if (!status) { - WRITE_ONCE(migf->total_length, - MLX5_GET(save_vhca_state_out, async_data->out, - actual_image_size)); + size_t image_size; + unsigned long flags; + + image_size = MLX5_GET(save_vhca_state_out, async_data->out, + actual_image_size); + if (async_data->header_buf) { + status = add_buf_header(async_data->header_buf, image_size); + if (status) + goto err; + } + async_data->buf->length = image_size; + async_data->buf->start_pos = migf->max_pos; + migf->max_pos += async_data->buf->length; + spin_lock_irqsave(&migf->list_lock, flags); + list_add_tail(&async_data->buf->buf_elm, &migf->buf_list); + spin_unlock_irqrestore(&migf->list_lock, flags); + migf->state = async_data->last_chunk ? 
+ MLX5_MIGF_STATE_COMPLETE : MLX5_MIGF_STATE_PRE_COPY; wake_up_interruptible(&migf->poll_wait); } +err: /* * The error and the cleanup flows can't run from an * interrupt context */ + if (status == -EREMOTEIO) + status = MLX5_GET(save_vhca_state_out, async_data->out, status); async_data->status = status; queue_work(migf->mvdev->cb_wq, &async_data->work); } int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev, - struct mlx5_vf_migration_file *migf) + struct mlx5_vf_migration_file *migf, + struct mlx5_vhca_data_buffer *buf, bool inc, + bool track) { u32 out_size = MLX5_ST_SZ_BYTES(save_vhca_state_out); u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {}; + struct mlx5_vhca_data_buffer *header_buf = NULL; struct mlx5vf_async_data *async_data; - struct mlx5_core_dev *mdev; - u32 pdn, mkey; int err; lockdep_assert_held(&mvdev->state_mutex); if (mvdev->mdev_detach) return -ENOTCONN; - mdev = mvdev->mdev; - err = mlx5_core_alloc_pd(mdev, &pdn); + err = wait_for_completion_interruptible(&migf->save_comp); if (err) return err; - err = dma_map_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, - 0); - if (err) - goto err_dma_map; - - err = _create_mkey(mdev, pdn, migf, NULL, &mkey); - if (err) - goto err_create_mkey; + if (migf->state == MLX5_MIGF_STATE_PRE_COPY_ERROR) + /* + * In case we had a PRE_COPY error, SAVE is triggered only for + * the final image, read device full image. + */ + inc = false; MLX5_SET(save_vhca_state_in, in, opcode, MLX5_CMD_OP_SAVE_VHCA_STATE); MLX5_SET(save_vhca_state_in, in, op_mod, 0); MLX5_SET(save_vhca_state_in, in, vhca_id, mvdev->vhca_id); - MLX5_SET(save_vhca_state_in, in, mkey, mkey); - MLX5_SET(save_vhca_state_in, in, size, migf->total_length); + MLX5_SET(save_vhca_state_in, in, mkey, buf->mkey); + MLX5_SET(save_vhca_state_in, in, size, buf->allocated_length); + MLX5_SET(save_vhca_state_in, in, incremental, inc); + MLX5_SET(save_vhca_state_in, in, set_track, track); async_data = &migf->async_data; + async_data->buf = buf; + async_data->last_chunk = !track; async_data->out = kvzalloc(out_size, GFP_KERNEL); if (!async_data->out) { err = -ENOMEM; goto err_out; } - /* no data exists till the callback comes back */ - migf->total_length = 0; + if (MLX5VF_PRE_COPY_SUPP(mvdev)) { + header_buf = mlx5vf_get_data_buffer(migf, + sizeof(struct mlx5_vf_migration_header), DMA_NONE); + if (IS_ERR(header_buf)) { + err = PTR_ERR(header_buf); + goto err_free; + } + } + + if (async_data->last_chunk) + migf->state = MLX5_MIGF_STATE_SAVE_LAST; + + async_data->header_buf = header_buf; get_file(migf->filp); - async_data->mkey = mkey; - async_data->pdn = pdn; err = mlx5_cmd_exec_cb(&migf->async_ctx, in, sizeof(in), async_data->out, out_size, mlx5vf_save_callback, @@ -363,68 +606,92 @@ int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev, return 0; err_exec: + if (header_buf) + mlx5vf_put_data_buffer(header_buf); fput(migf->filp); +err_free: kvfree(async_data->out); err_out: - mlx5_core_destroy_mkey(mdev, mkey); -err_create_mkey: - dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0); -err_dma_map: - mlx5_core_dealloc_pd(mdev, pdn); + complete(&migf->save_comp); return err; } int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev, - struct mlx5_vf_migration_file *migf) + struct mlx5_vf_migration_file *migf, + struct mlx5_vhca_data_buffer *buf) { - struct mlx5_core_dev *mdev; - u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)] = {}; - u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {}; - u32 pdn, mkey; + u32 
out[MLX5_ST_SZ_DW(load_vhca_state_out)] = {}; + u32 in[MLX5_ST_SZ_DW(load_vhca_state_in)] = {}; int err; lockdep_assert_held(&mvdev->state_mutex); if (mvdev->mdev_detach) return -ENOTCONN; - mutex_lock(&migf->lock); - if (!migf->total_length) { - err = -EINVAL; - goto end; + if (!buf->dmaed) { + err = mlx5vf_dma_data_buffer(buf); + if (err) + return err; } - mdev = mvdev->mdev; - err = mlx5_core_alloc_pd(mdev, &pdn); - if (err) - goto end; - - err = dma_map_sgtable(mdev->device, &migf->table.sgt, DMA_TO_DEVICE, 0); - if (err) - goto err_reg; - - err = _create_mkey(mdev, pdn, migf, NULL, &mkey); - if (err) - goto err_mkey; - MLX5_SET(load_vhca_state_in, in, opcode, MLX5_CMD_OP_LOAD_VHCA_STATE); MLX5_SET(load_vhca_state_in, in, op_mod, 0); MLX5_SET(load_vhca_state_in, in, vhca_id, mvdev->vhca_id); - MLX5_SET(load_vhca_state_in, in, mkey, mkey); - MLX5_SET(load_vhca_state_in, in, size, migf->total_length); + MLX5_SET(load_vhca_state_in, in, mkey, buf->mkey); + MLX5_SET(load_vhca_state_in, in, size, buf->length); + return mlx5_cmd_exec_inout(mvdev->mdev, load_vhca_state, in, out); +} - err = mlx5_cmd_exec_inout(mdev, load_vhca_state, in, out); +int mlx5vf_cmd_alloc_pd(struct mlx5_vf_migration_file *migf) +{ + int err; - mlx5_core_destroy_mkey(mdev, mkey); -err_mkey: - dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_TO_DEVICE, 0); -err_reg: - mlx5_core_dealloc_pd(mdev, pdn); -end: - mutex_unlock(&migf->lock); + lockdep_assert_held(&migf->mvdev->state_mutex); + if (migf->mvdev->mdev_detach) + return -ENOTCONN; + + err = mlx5_core_alloc_pd(migf->mvdev->mdev, &migf->pdn); return err; } +void mlx5vf_cmd_dealloc_pd(struct mlx5_vf_migration_file *migf) +{ + lockdep_assert_held(&migf->mvdev->state_mutex); + if (migf->mvdev->mdev_detach) + return; + + mlx5_core_dealloc_pd(migf->mvdev->mdev, migf->pdn); +} + +void mlx5fv_cmd_clean_migf_resources(struct mlx5_vf_migration_file *migf) +{ + struct mlx5_vhca_data_buffer *entry; + + lockdep_assert_held(&migf->mvdev->state_mutex); + WARN_ON(migf->mvdev->mdev_detach); + + if (migf->buf) { + mlx5vf_free_data_buffer(migf->buf); + migf->buf = NULL; + } + + if (migf->buf_header) { + mlx5vf_free_data_buffer(migf->buf_header); + migf->buf_header = NULL; + } + + list_splice(&migf->avail_list, &migf->buf_list); + + while ((entry = list_first_entry_or_null(&migf->buf_list, + struct mlx5_vhca_data_buffer, buf_elm))) { + list_del(&entry->buf_elm); + mlx5vf_free_data_buffer(entry); + } + + mlx5vf_cmd_dealloc_pd(migf); +} + static void combine_ranges(struct rb_root_cached *root, u32 cur_nodes, u32 req_nodes) { diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h index 921d5720a1e57b87a901b8374ac6f4870bb1bafb..5483171d57ad35c01b0010d2ce9e5d2a11169545 100644 --- a/drivers/vfio/pci/mlx5/cmd.h +++ b/drivers/vfio/pci/mlx5/cmd.h @@ -12,31 +12,74 @@ #include #include +#define MLX5VF_PRE_COPY_SUPP(mvdev) \ + ((mvdev)->core_device.vdev.migration_flags & VFIO_MIGRATION_PRE_COPY) + +enum mlx5_vf_migf_state { + MLX5_MIGF_STATE_ERROR = 1, + MLX5_MIGF_STATE_PRE_COPY_ERROR, + MLX5_MIGF_STATE_PRE_COPY, + MLX5_MIGF_STATE_SAVE_LAST, + MLX5_MIGF_STATE_COMPLETE, +}; + +enum mlx5_vf_load_state { + MLX5_VF_LOAD_STATE_READ_IMAGE_NO_HEADER, + MLX5_VF_LOAD_STATE_READ_HEADER, + MLX5_VF_LOAD_STATE_PREP_IMAGE, + MLX5_VF_LOAD_STATE_READ_IMAGE, + MLX5_VF_LOAD_STATE_LOAD_IMAGE, +}; + +struct mlx5_vf_migration_header { + __le64 image_size; + /* For future use in case we may need to change the kernel protocol */ + __le64 flags; +}; + +struct mlx5_vhca_data_buffer { + struct 
sg_append_table table; + loff_t start_pos; + u64 length; + u64 allocated_length; + u64 header_image_size; + u32 mkey; + enum dma_data_direction dma_dir; + u8 dmaed:1; + struct list_head buf_elm; + struct mlx5_vf_migration_file *migf; + /* Optimize mlx5vf_get_migration_page() for sequential access */ + struct scatterlist *last_offset_sg; + unsigned int sg_last_entry; + unsigned long last_offset; +}; + struct mlx5vf_async_data { struct mlx5_async_work cb_work; struct work_struct work; + struct mlx5_vhca_data_buffer *buf; + struct mlx5_vhca_data_buffer *header_buf; int status; - u32 pdn; - u32 mkey; + u8 last_chunk:1; void *out; }; struct mlx5_vf_migration_file { struct file *filp; struct mutex lock; - u8 disabled:1; - u8 is_err:1; + enum mlx5_vf_migf_state state; - struct sg_append_table table; - size_t total_length; - size_t allocated_length; - - /* Optimize mlx5vf_get_migration_page() for sequential access */ - struct scatterlist *last_offset_sg; - unsigned int sg_last_entry; - unsigned long last_offset; + enum mlx5_vf_load_state load_state; + u32 pdn; + loff_t max_pos; + struct mlx5_vhca_data_buffer *buf; + struct mlx5_vhca_data_buffer *buf_header; + spinlock_t list_lock; + struct list_head buf_list; + struct list_head avail_list; struct mlx5vf_pci_core_device *mvdev; wait_queue_head_t poll_wait; + struct completion save_comp; struct mlx5_async_ctx async_ctx; struct mlx5vf_async_data async_data; }; @@ -113,19 +156,42 @@ struct mlx5vf_pci_core_device { struct mlx5_core_dev *mdev; }; +enum { + MLX5VF_QUERY_INC = (1UL << 0), + MLX5VF_QUERY_FINAL = (1UL << 1), +}; + int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod); int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod); int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev, - size_t *state_size); + size_t *state_size, u8 query_flags); void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev, const struct vfio_migration_ops *mig_ops, const struct vfio_log_ops *log_ops); void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev); void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev); int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev, - struct mlx5_vf_migration_file *migf); + struct mlx5_vf_migration_file *migf, + struct mlx5_vhca_data_buffer *buf, bool inc, + bool track); int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev, - struct mlx5_vf_migration_file *migf); + struct mlx5_vf_migration_file *migf, + struct mlx5_vhca_data_buffer *buf); +int mlx5vf_cmd_alloc_pd(struct mlx5_vf_migration_file *migf); +void mlx5vf_cmd_dealloc_pd(struct mlx5_vf_migration_file *migf); +void mlx5fv_cmd_clean_migf_resources(struct mlx5_vf_migration_file *migf); +struct mlx5_vhca_data_buffer * +mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf, + size_t length, enum dma_data_direction dma_dir); +void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf); +struct mlx5_vhca_data_buffer * +mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf, + size_t length, enum dma_data_direction dma_dir); +void mlx5vf_put_data_buffer(struct mlx5_vhca_data_buffer *buf); +int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf, + unsigned int npages); +struct page *mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf, + unsigned long offset); void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev); void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev); void 
mlx5vf_mig_file_cleanup_cb(struct work_struct *_work); diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c index fd6ccb8454a24a496c351fb415f96ee2fd9361a5..9feb89c6d939daccbaabfcb76a147d15603db4f0 100644 --- a/drivers/vfio/pci/mlx5/main.c +++ b/drivers/vfio/pci/mlx5/main.c @@ -32,8 +32,8 @@ static struct mlx5vf_pci_core_device *mlx5vf_drvdata(struct pci_dev *pdev) core_device); } -static struct page * -mlx5vf_get_migration_page(struct mlx5_vf_migration_file *migf, +struct page * +mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf, unsigned long offset) { unsigned long cur_offset = 0; @@ -41,20 +41,20 @@ mlx5vf_get_migration_page(struct mlx5_vf_migration_file *migf, unsigned int i; /* All accesses are sequential */ - if (offset < migf->last_offset || !migf->last_offset_sg) { - migf->last_offset = 0; - migf->last_offset_sg = migf->table.sgt.sgl; - migf->sg_last_entry = 0; + if (offset < buf->last_offset || !buf->last_offset_sg) { + buf->last_offset = 0; + buf->last_offset_sg = buf->table.sgt.sgl; + buf->sg_last_entry = 0; } - cur_offset = migf->last_offset; + cur_offset = buf->last_offset; - for_each_sg(migf->last_offset_sg, sg, - migf->table.sgt.orig_nents - migf->sg_last_entry, i) { + for_each_sg(buf->last_offset_sg, sg, + buf->table.sgt.orig_nents - buf->sg_last_entry, i) { if (offset < sg->length + cur_offset) { - migf->last_offset_sg = sg; - migf->sg_last_entry += i; - migf->last_offset = cur_offset; + buf->last_offset_sg = sg; + buf->sg_last_entry += i; + buf->last_offset = cur_offset; return nth_page(sg_page(sg), (offset - cur_offset) / PAGE_SIZE); } @@ -63,8 +63,8 @@ mlx5vf_get_migration_page(struct mlx5_vf_migration_file *migf, return NULL; } -static int mlx5vf_add_migration_pages(struct mlx5_vf_migration_file *migf, - unsigned int npages) +int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf, + unsigned int npages) { unsigned int to_alloc = npages; struct page **page_list; @@ -85,13 +85,13 @@ static int mlx5vf_add_migration_pages(struct mlx5_vf_migration_file *migf, } to_alloc -= filled; ret = sg_alloc_append_table_from_pages( - &migf->table, page_list, filled, 0, + &buf->table, page_list, filled, 0, filled << PAGE_SHIFT, UINT_MAX, SG_MAX_SINGLE_ALLOC, GFP_KERNEL); if (ret) goto err; - migf->allocated_length += filled * PAGE_SIZE; + buf->allocated_length += filled * PAGE_SIZE; /* clean input for another bulk allocation */ memset(page_list, 0, filled * sizeof(*page_list)); to_fill = min_t(unsigned int, to_alloc, @@ -108,16 +108,8 @@ err: static void mlx5vf_disable_fd(struct mlx5_vf_migration_file *migf) { - struct sg_page_iter sg_iter; - mutex_lock(&migf->lock); - /* Undo alloc_pages_bulk_array() */ - for_each_sgtable_page(&migf->table.sgt, &sg_iter, 0) - __free_page(sg_page_iter_page(&sg_iter)); - sg_free_append_table(&migf->table); - migf->disabled = true; - migf->total_length = 0; - migf->allocated_length = 0; + migf->state = MLX5_MIGF_STATE_ERROR; migf->filp->f_pos = 0; mutex_unlock(&migf->lock); } @@ -132,10 +124,91 @@ static int mlx5vf_release_file(struct inode *inode, struct file *filp) return 0; } +static struct mlx5_vhca_data_buffer * +mlx5vf_get_data_buff_from_pos(struct mlx5_vf_migration_file *migf, loff_t pos, + bool *end_of_data) +{ + struct mlx5_vhca_data_buffer *buf; + bool found = false; + + *end_of_data = false; + spin_lock_irq(&migf->list_lock); + if (list_empty(&migf->buf_list)) { + *end_of_data = true; + goto end; + } + + buf = list_first_entry(&migf->buf_list, struct mlx5_vhca_data_buffer, + buf_elm); + if (pos >= 
buf->start_pos && + pos < buf->start_pos + buf->length) { + found = true; + goto end; + } + + /* + * As we use a stream based FD we may expect having the data always + * on first chunk + */ + migf->state = MLX5_MIGF_STATE_ERROR; + +end: + spin_unlock_irq(&migf->list_lock); + return found ? buf : NULL; +} + +static ssize_t mlx5vf_buf_read(struct mlx5_vhca_data_buffer *vhca_buf, + char __user **buf, size_t *len, loff_t *pos) +{ + unsigned long offset; + ssize_t done = 0; + size_t copy_len; + + copy_len = min_t(size_t, + vhca_buf->start_pos + vhca_buf->length - *pos, *len); + while (copy_len) { + size_t page_offset; + struct page *page; + size_t page_len; + u8 *from_buff; + int ret; + + offset = *pos - vhca_buf->start_pos; + page_offset = offset % PAGE_SIZE; + offset -= page_offset; + page = mlx5vf_get_migration_page(vhca_buf, offset); + if (!page) + return -EINVAL; + page_len = min_t(size_t, copy_len, PAGE_SIZE - page_offset); + from_buff = kmap_local_page(page); + ret = copy_to_user(*buf, from_buff + page_offset, page_len); + kunmap_local(from_buff); + if (ret) + return -EFAULT; + *pos += page_len; + *len -= page_len; + *buf += page_len; + done += page_len; + copy_len -= page_len; + } + + if (*pos >= vhca_buf->start_pos + vhca_buf->length) { + spin_lock_irq(&vhca_buf->migf->list_lock); + list_del_init(&vhca_buf->buf_elm); + list_add_tail(&vhca_buf->buf_elm, &vhca_buf->migf->avail_list); + spin_unlock_irq(&vhca_buf->migf->list_lock); + } + + return done; +} + static ssize_t mlx5vf_save_read(struct file *filp, char __user *buf, size_t len, loff_t *pos) { struct mlx5_vf_migration_file *migf = filp->private_data; + struct mlx5_vhca_data_buffer *vhca_buf; + bool first_loop_call = true; + bool end_of_data; ssize_t done = 0; if (pos) @@ -144,52 +217,56 @@ static ssize_t mlx5vf_save_read(struct file *filp, char __user *buf, size_t len, if (!(filp->f_flags & O_NONBLOCK)) { if (wait_event_interruptible(migf->poll_wait, - READ_ONCE(migf->total_length) || migf->is_err)) + !list_empty(&migf->buf_list) || + migf->state == MLX5_MIGF_STATE_ERROR || + migf->state == MLX5_MIGF_STATE_PRE_COPY_ERROR || + migf->state == MLX5_MIGF_STATE_PRE_COPY || + migf->state == MLX5_MIGF_STATE_COMPLETE)) return -ERESTARTSYS; } mutex_lock(&migf->lock); - if ((filp->f_flags & O_NONBLOCK) && !READ_ONCE(migf->total_length)) { - done = -EAGAIN; - goto out_unlock; - } - if (*pos > migf->total_length) { - done = -EINVAL; - goto out_unlock; - } - if (migf->disabled || migf->is_err) { + if (migf->state == MLX5_MIGF_STATE_ERROR) { done = -ENODEV; goto out_unlock; } - len = min_t(size_t, migf->total_length - *pos, len); while (len) { - size_t page_offset; - struct page *page; - size_t page_len; - u8 *from_buff; - int ret; + ssize_t count; + + vhca_buf = mlx5vf_get_data_buff_from_pos(migf, *pos, + &end_of_data); + if (first_loop_call) { + first_loop_call = false; + /* Temporary end of file as part of PRE_COPY */ + if (end_of_data && (migf->state == MLX5_MIGF_STATE_PRE_COPY || + migf->state == MLX5_MIGF_STATE_PRE_COPY_ERROR)) { + done = -ENOMSG; + goto out_unlock; + } + + if (end_of_data && migf->state != MLX5_MIGF_STATE_COMPLETE) { + if (filp->f_flags & O_NONBLOCK) { + done = -EAGAIN; + goto out_unlock; + } + } + } + + if (end_of_data) + goto out_unlock; - page_offset = (*pos) % PAGE_SIZE; - page = mlx5vf_get_migration_page(migf, *pos - page_offset); - if (!page) { - if (done == 0) - done = -EINVAL; + if (!vhca_buf) { + done = -EINVAL; goto out_unlock; } - page_len = min_t(size_t, len, PAGE_SIZE - page_offset); - from_buff = 
kmap_local_page(page); - ret = copy_to_user(buf, from_buff + page_offset, page_len); - kunmap_local(from_buff); - if (ret) { - done = -EFAULT; + count = mlx5vf_buf_read(vhca_buf, &buf, &len, pos); + if (count < 0) { + done = count; goto out_unlock; } - *pos += page_len; - len -= page_len; - done += page_len; - buf += page_len; + done += count; } out_unlock: @@ -206,27 +283,188 @@ static __poll_t mlx5vf_save_poll(struct file *filp, poll_wait(filp, &migf->poll_wait, wait); mutex_lock(&migf->lock); - if (migf->disabled || migf->is_err) + if (migf->state == MLX5_MIGF_STATE_ERROR) pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; - else if (READ_ONCE(migf->total_length)) + else if (!list_empty(&migf->buf_list) || + migf->state == MLX5_MIGF_STATE_COMPLETE) pollflags = EPOLLIN | EPOLLRDNORM; mutex_unlock(&migf->lock); return pollflags; } +/* + * FD is exposed and user can use it after receiving an error. + * Mark migf in error, and wake the user. + */ +static void mlx5vf_mark_err(struct mlx5_vf_migration_file *migf) +{ + migf->state = MLX5_MIGF_STATE_ERROR; + wake_up_interruptible(&migf->poll_wait); +} + +static long mlx5vf_precopy_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct mlx5_vf_migration_file *migf = filp->private_data; + struct mlx5vf_pci_core_device *mvdev = migf->mvdev; + struct mlx5_vhca_data_buffer *buf; + struct vfio_precopy_info info = {}; + loff_t *pos = &filp->f_pos; + unsigned long minsz; + size_t inc_length = 0; + bool end_of_data; + int ret; + + if (cmd != VFIO_MIG_GET_PRECOPY_INFO) + return -ENOTTY; + + minsz = offsetofend(struct vfio_precopy_info, dirty_bytes); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + mutex_lock(&mvdev->state_mutex); + if (mvdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY && + mvdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) { + ret = -EINVAL; + goto err_state_unlock; + } + + /* + * We can't issue a SAVE command when the device is suspended, so as + * part of VFIO_DEVICE_STATE_PRE_COPY_P2P no reason to query for extra + * bytes that can't be read. + */ + if (mvdev->mig_state == VFIO_DEVICE_STATE_PRE_COPY) { + /* + * Once the query returns it's guaranteed that there is no + * active SAVE command. + * As so, the other code below is safe with the proper locks. + */ + ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &inc_length, + MLX5VF_QUERY_INC); + if (ret) + goto err_state_unlock; + } + + mutex_lock(&migf->lock); + if (migf->state == MLX5_MIGF_STATE_ERROR) { + ret = -ENODEV; + goto err_migf_unlock; + } + + buf = mlx5vf_get_data_buff_from_pos(migf, *pos, &end_of_data); + if (buf) { + if (buf->start_pos == 0) { + info.initial_bytes = buf->header_image_size - *pos; + } else if (buf->start_pos == + sizeof(struct mlx5_vf_migration_header)) { + /* First data buffer following the header */ + info.initial_bytes = buf->start_pos + + buf->length - *pos; + } else { + info.dirty_bytes = buf->start_pos + buf->length - *pos; + } + } else { + if (!end_of_data) { + ret = -EINVAL; + goto err_migf_unlock; + } + + info.dirty_bytes = inc_length; + } + + if (!end_of_data || !inc_length) { + mutex_unlock(&migf->lock); + goto done; + } + + mutex_unlock(&migf->lock); + /* + * We finished transferring the current state and the device has a + * dirty state, save a new state to be ready for. 
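A userspace sketch of how a migration client drains this precopy stream: read() the data fd until the driver signals a temporary end of stream, then use VFIO_MIG_GET_PRECOPY_INFO (shown earlier) to decide when to move to STOP_COPY. The -ENOMSG convention follows the mlx5 read handler above; buffer handling and error paths are abbreviated:

```c
#include <errno.h>
#include <unistd.h>

static long drain_precopy(int data_fd, void *buf, size_t buflen)
{
	long total = 0;

	for (;;) {
		ssize_t n = read(data_fd, buf, buflen);

		if (n > 0) {
			total += n;	/* forward these bytes to the target */
			continue;
		}
		if (n < 0 && errno == ENOMSG)
			break;		/* temporary EOF while in PRE_COPY */
		if (n < 0 && errno == EINTR)
			continue;
		break;			/* 0 == final EOF, else a real error */
	}
	return total;
}
```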
+ */ + buf = mlx5vf_get_data_buffer(migf, inc_length, DMA_FROM_DEVICE); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + mlx5vf_mark_err(migf); + goto err_state_unlock; + } + + ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, true, true); + if (ret) { + mlx5vf_mark_err(migf); + mlx5vf_put_data_buffer(buf); + goto err_state_unlock; + } + +done: + mlx5vf_state_mutex_unlock(mvdev); + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + return 0; + +err_migf_unlock: + mutex_unlock(&migf->lock); +err_state_unlock: + mlx5vf_state_mutex_unlock(mvdev); + return ret; +} + static const struct file_operations mlx5vf_save_fops = { .owner = THIS_MODULE, .read = mlx5vf_save_read, .poll = mlx5vf_save_poll, + .unlocked_ioctl = mlx5vf_precopy_ioctl, + .compat_ioctl = compat_ptr_ioctl, .release = mlx5vf_release_file, .llseek = no_llseek, }; +static int mlx5vf_pci_save_device_inc_data(struct mlx5vf_pci_core_device *mvdev) +{ + struct mlx5_vf_migration_file *migf = mvdev->saving_migf; + struct mlx5_vhca_data_buffer *buf; + size_t length; + int ret; + + if (migf->state == MLX5_MIGF_STATE_ERROR) + return -ENODEV; + + ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &length, + MLX5VF_QUERY_INC | MLX5VF_QUERY_FINAL); + if (ret) + goto err; + + buf = mlx5vf_get_data_buffer(migf, length, DMA_FROM_DEVICE); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + goto err; + } + + ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, true, false); + if (ret) + goto err_save; + + return 0; + +err_save: + mlx5vf_put_data_buffer(buf); +err: + mlx5vf_mark_err(migf); + return ret; +} + static struct mlx5_vf_migration_file * -mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev) +mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev, bool track) { struct mlx5_vf_migration_file *migf; + struct mlx5_vhca_data_buffer *buf; + size_t length; int ret; migf = kzalloc(sizeof(*migf), GFP_KERNEL); @@ -236,43 +474,211 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev) migf->filp = anon_inode_getfile("mlx5vf_mig", &mlx5vf_save_fops, migf, O_RDONLY); if (IS_ERR(migf->filp)) { - int err = PTR_ERR(migf->filp); - - kfree(migf); - return ERR_PTR(err); + ret = PTR_ERR(migf->filp); + goto end; } + migf->mvdev = mvdev; + ret = mlx5vf_cmd_alloc_pd(migf); + if (ret) + goto out_free; + stream_open(migf->filp->f_inode, migf->filp); mutex_init(&migf->lock); init_waitqueue_head(&migf->poll_wait); + init_completion(&migf->save_comp); + /* + * save_comp is being used as a binary semaphore built from + * a completion. A normal mutex cannot be used because the lock is + * passed between kernel threads and lockdep can't model this. 
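A minimal model of the pattern that comment describes, using only the completion API. The structure mirrors save_comp (initialized "unlocked" by completing once, exactly as the code below does); the helper functions are hypothetical:

```c
#include <linux/completion.h>

static struct completion save_comp;

static void save_comp_init(void)
{
	init_completion(&save_comp);
	complete(&save_comp);		/* start in the "unlocked" state */
}

static int do_serialized_op(int (*op)(void))
{
	int err = wait_for_completion_interruptible(&save_comp);

	if (err)
		return err;		/* interrupted while "locking" */
	err = op();
	complete(&save_comp);		/* legal from another thread, which is
					 * why a mutex can't be used here */
	return err;
}
```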
+ */ + complete(&migf->save_comp); mlx5_cmd_init_async_ctx(mvdev->mdev, &migf->async_ctx); INIT_WORK(&migf->async_data.work, mlx5vf_mig_file_cleanup_cb); - ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, - &migf->total_length); + INIT_LIST_HEAD(&migf->buf_list); + INIT_LIST_HEAD(&migf->avail_list); + spin_lock_init(&migf->list_lock); + ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &length, 0); if (ret) - goto out_free; + goto out_pd; - ret = mlx5vf_add_migration_pages( - migf, DIV_ROUND_UP_ULL(migf->total_length, PAGE_SIZE)); - if (ret) - goto out_free; + buf = mlx5vf_alloc_data_buffer(migf, length, DMA_FROM_DEVICE); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + goto out_pd; + } - migf->mvdev = mvdev; - ret = mlx5vf_cmd_save_vhca_state(mvdev, migf); + ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, false, track); if (ret) - goto out_free; + goto out_save; return migf; +out_save: + mlx5vf_free_data_buffer(buf); +out_pd: + mlx5vf_cmd_dealloc_pd(migf); out_free: fput(migf->filp); +end: + kfree(migf); return ERR_PTR(ret); } +static int +mlx5vf_append_page_to_mig_buf(struct mlx5_vhca_data_buffer *vhca_buf, + const char __user **buf, size_t *len, + loff_t *pos, ssize_t *done) +{ + unsigned long offset; + size_t page_offset; + struct page *page; + size_t page_len; + u8 *to_buff; + int ret; + + offset = *pos - vhca_buf->start_pos; + page_offset = offset % PAGE_SIZE; + + page = mlx5vf_get_migration_page(vhca_buf, offset - page_offset); + if (!page) + return -EINVAL; + page_len = min_t(size_t, *len, PAGE_SIZE - page_offset); + to_buff = kmap_local_page(page); + ret = copy_from_user(to_buff + page_offset, *buf, page_len); + kunmap_local(to_buff); + if (ret) + return -EFAULT; + + *pos += page_len; + *done += page_len; + *buf += page_len; + *len -= page_len; + vhca_buf->length += page_len; + return 0; +} + +static int +mlx5vf_resume_read_image_no_header(struct mlx5_vhca_data_buffer *vhca_buf, + loff_t requested_length, + const char __user **buf, size_t *len, + loff_t *pos, ssize_t *done) +{ + int ret; + + if (requested_length > MAX_MIGRATION_SIZE) + return -ENOMEM; + + if (vhca_buf->allocated_length < requested_length) { + ret = mlx5vf_add_migration_pages( + vhca_buf, + DIV_ROUND_UP(requested_length - vhca_buf->allocated_length, + PAGE_SIZE)); + if (ret) + return ret; + } + + while (*len) { + ret = mlx5vf_append_page_to_mig_buf(vhca_buf, buf, len, pos, + done); + if (ret) + return ret; + } + + return 0; +} + +static ssize_t +mlx5vf_resume_read_image(struct mlx5_vf_migration_file *migf, + struct mlx5_vhca_data_buffer *vhca_buf, + size_t image_size, const char __user **buf, + size_t *len, loff_t *pos, ssize_t *done, + bool *has_work) +{ + size_t copy_len, to_copy; + int ret; + + to_copy = min_t(size_t, *len, image_size - vhca_buf->length); + copy_len = to_copy; + while (to_copy) { + ret = mlx5vf_append_page_to_mig_buf(vhca_buf, buf, &to_copy, pos, + done); + if (ret) + return ret; + } + + *len -= copy_len; + if (vhca_buf->length == image_size) { + migf->load_state = MLX5_VF_LOAD_STATE_LOAD_IMAGE; + migf->max_pos += image_size; + *has_work = true; + } + + return 0; +} + +static int +mlx5vf_resume_read_header(struct mlx5_vf_migration_file *migf, + struct mlx5_vhca_data_buffer *vhca_buf, + const char __user **buf, + size_t *len, loff_t *pos, + ssize_t *done, bool *has_work) +{ + struct page *page; + size_t copy_len; + u8 *to_buff; + int ret; + + copy_len = min_t(size_t, *len, + sizeof(struct mlx5_vf_migration_header) - vhca_buf->length); + page = mlx5vf_get_migration_page(vhca_buf, 0); + if 
(!page) + return -EINVAL; + to_buff = kmap_local_page(page); + ret = copy_from_user(to_buff + vhca_buf->length, *buf, copy_len); + if (ret) { + ret = -EFAULT; + goto end; + } + + *buf += copy_len; + *pos += copy_len; + *done += copy_len; + *len -= copy_len; + vhca_buf->length += copy_len; + if (vhca_buf->length == sizeof(struct mlx5_vf_migration_header)) { + u64 flags; + + vhca_buf->header_image_size = le64_to_cpup((__le64 *)to_buff); + if (vhca_buf->header_image_size > MAX_MIGRATION_SIZE) { + ret = -ENOMEM; + goto end; + } + + flags = le64_to_cpup((__le64 *)(to_buff + + offsetof(struct mlx5_vf_migration_header, flags))); + if (flags) { + ret = -EOPNOTSUPP; + goto end; + } + + migf->load_state = MLX5_VF_LOAD_STATE_PREP_IMAGE; + migf->max_pos += vhca_buf->length; + *has_work = true; + } +end: + kunmap_local(to_buff); + return ret; +} + static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos) { struct mlx5_vf_migration_file *migf = filp->private_data; + struct mlx5_vhca_data_buffer *vhca_buf = migf->buf; + struct mlx5_vhca_data_buffer *vhca_buf_header = migf->buf_header; loff_t requested_length; + bool has_work = false; ssize_t done = 0; + int ret = 0; if (pos) return -ESPIPE; @@ -282,56 +688,83 @@ static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf, check_add_overflow((loff_t)len, *pos, &requested_length)) return -EINVAL; - if (requested_length > MAX_MIGRATION_SIZE) - return -ENOMEM; - + mutex_lock(&migf->mvdev->state_mutex); mutex_lock(&migf->lock); - if (migf->disabled) { - done = -ENODEV; + if (migf->state == MLX5_MIGF_STATE_ERROR) { + ret = -ENODEV; goto out_unlock; } - if (migf->allocated_length < requested_length) { - done = mlx5vf_add_migration_pages( - migf, - DIV_ROUND_UP(requested_length - migf->allocated_length, - PAGE_SIZE)); - if (done) - goto out_unlock; - } - - while (len) { - size_t page_offset; - struct page *page; - size_t page_len; - u8 *to_buff; - int ret; - - page_offset = (*pos) % PAGE_SIZE; - page = mlx5vf_get_migration_page(migf, *pos - page_offset); - if (!page) { - if (done == 0) - done = -EINVAL; - goto out_unlock; + while (len || has_work) { + has_work = false; + switch (migf->load_state) { + case MLX5_VF_LOAD_STATE_READ_HEADER: + ret = mlx5vf_resume_read_header(migf, vhca_buf_header, + &buf, &len, pos, + &done, &has_work); + if (ret) + goto out_unlock; + break; + case MLX5_VF_LOAD_STATE_PREP_IMAGE: + { + u64 size = vhca_buf_header->header_image_size; + + if (vhca_buf->allocated_length < size) { + mlx5vf_free_data_buffer(vhca_buf); + + migf->buf = mlx5vf_alloc_data_buffer(migf, + size, DMA_TO_DEVICE); + if (IS_ERR(migf->buf)) { + ret = PTR_ERR(migf->buf); + migf->buf = NULL; + goto out_unlock; + } + + vhca_buf = migf->buf; + } + + vhca_buf->start_pos = migf->max_pos; + migf->load_state = MLX5_VF_LOAD_STATE_READ_IMAGE; + break; } + case MLX5_VF_LOAD_STATE_READ_IMAGE_NO_HEADER: + ret = mlx5vf_resume_read_image_no_header(vhca_buf, + requested_length, + &buf, &len, pos, &done); + if (ret) + goto out_unlock; + break; + case MLX5_VF_LOAD_STATE_READ_IMAGE: + ret = mlx5vf_resume_read_image(migf, vhca_buf, + vhca_buf_header->header_image_size, + &buf, &len, pos, &done, &has_work); + if (ret) + goto out_unlock; + break; + case MLX5_VF_LOAD_STATE_LOAD_IMAGE: + ret = mlx5vf_cmd_load_vhca_state(migf->mvdev, migf, vhca_buf); + if (ret) + goto out_unlock; + migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER; + + /* prep header buf for next image */ + vhca_buf_header->length = 0; + 
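As an aside on the stream this load state machine parses: when PRE_COPY is supported, the fd carries a sequence of [16-byte little-endian header][image] records; without PRE_COPY there is no header at all (the MLX5_VF_LOAD_STATE_READ_IMAGE_NO_HEADER path). A hypothetical source-side writer, assuming a little-endian host for brevity:

```c
#include <stdint.h>
#include <unistd.h>

struct mig_header {		/* mirrors struct mlx5_vf_migration_header */
	uint64_t image_size;	/* little-endian on the wire */
	uint64_t flags;		/* must be 0 today, else load gets -EOPNOTSUPP */
};

static int send_image(int resume_fd, const void *image, uint64_t size)
{
	struct mig_header hdr = { .image_size = size, .flags = 0 };

	if (write(resume_fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return -1;
	if (write(resume_fd, image, size) != (ssize_t)size)
		return -1;
	return 0;
}
```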
vhca_buf_header->header_image_size = 0; + /* prep data buf for next image */ + vhca_buf->length = 0; - page_len = min_t(size_t, len, PAGE_SIZE - page_offset); - to_buff = kmap_local_page(page); - ret = copy_from_user(to_buff + page_offset, buf, page_len); - kunmap_local(to_buff); - if (ret) { - done = -EFAULT; - goto out_unlock; + break; + default: + break; } - *pos += page_len; - len -= page_len; - done += page_len; - buf += page_len; - migf->total_length += page_len; } + out_unlock: + if (ret) + migf->state = MLX5_MIGF_STATE_ERROR; mutex_unlock(&migf->lock); - return done; + mlx5vf_state_mutex_unlock(migf->mvdev); + return ret ? ret : done; } static const struct file_operations mlx5vf_resume_fops = { @@ -345,6 +778,8 @@ static struct mlx5_vf_migration_file * mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev) { struct mlx5_vf_migration_file *migf; + struct mlx5_vhca_data_buffer *buf; + int ret; migf = kzalloc(sizeof(*migf), GFP_KERNEL); if (!migf) @@ -353,20 +788,59 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev) migf->filp = anon_inode_getfile("mlx5vf_mig", &mlx5vf_resume_fops, migf, O_WRONLY); if (IS_ERR(migf->filp)) { - int err = PTR_ERR(migf->filp); + ret = PTR_ERR(migf->filp); + goto end; + } - kfree(migf); - return ERR_PTR(err); + migf->mvdev = mvdev; + ret = mlx5vf_cmd_alloc_pd(migf); + if (ret) + goto out_free; + + buf = mlx5vf_alloc_data_buffer(migf, 0, DMA_TO_DEVICE); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + goto out_pd; + } + + migf->buf = buf; + if (MLX5VF_PRE_COPY_SUPP(mvdev)) { + buf = mlx5vf_alloc_data_buffer(migf, + sizeof(struct mlx5_vf_migration_header), DMA_NONE); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + goto out_buf; + } + + migf->buf_header = buf; + migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER; + } else { + /* Initial state will be to read the image */ + migf->load_state = MLX5_VF_LOAD_STATE_READ_IMAGE_NO_HEADER; } + stream_open(migf->filp->f_inode, migf->filp); mutex_init(&migf->lock); + INIT_LIST_HEAD(&migf->buf_list); + INIT_LIST_HEAD(&migf->avail_list); + spin_lock_init(&migf->list_lock); return migf; +out_buf: + mlx5vf_free_data_buffer(migf->buf); +out_pd: + mlx5vf_cmd_dealloc_pd(migf); +out_free: + fput(migf->filp); +end: + kfree(migf); + return ERR_PTR(ret); } void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev) { if (mvdev->resuming_migf) { mlx5vf_disable_fd(mvdev->resuming_migf); + mlx5fv_cmd_clean_migf_resources(mvdev->resuming_migf); fput(mvdev->resuming_migf->filp); mvdev->resuming_migf = NULL; } @@ -374,6 +848,7 @@ void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev) mlx5_cmd_cleanup_async_ctx(&mvdev->saving_migf->async_ctx); cancel_work_sync(&mvdev->saving_migf->async_data.work); mlx5vf_disable_fd(mvdev->saving_migf); + mlx5fv_cmd_clean_migf_resources(mvdev->saving_migf); fput(mvdev->saving_migf->filp); mvdev->saving_migf = NULL; } @@ -402,7 +877,8 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev, return NULL; } - if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) { + if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) || + (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) { ret = mlx5vf_cmd_suspend_vhca(mvdev, MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR); if (ret) @@ -410,7 +886,8 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev, return NULL; } - if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) { + if ((cur == 
VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) || + (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) { ret = mlx5vf_cmd_resume_vhca(mvdev, MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR); if (ret) @@ -421,7 +898,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev, if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) { struct mlx5_vf_migration_file *migf; - migf = mlx5vf_pci_save_device_data(mvdev); + migf = mlx5vf_pci_save_device_data(mvdev, false); if (IS_ERR(migf)) return ERR_CAST(migf); get_file(migf->filp); @@ -429,7 +906,10 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev, return migf->filp; } - if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP)) { + if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) || + (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) || + (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && + new == VFIO_DEVICE_STATE_RUNNING_P2P)) { mlx5vf_disable_fds(mvdev); return NULL; } @@ -446,14 +926,39 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev, } if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) { - ret = mlx5vf_cmd_load_vhca_state(mvdev, - mvdev->resuming_migf); - if (ret) - return ERR_PTR(ret); + if (!MLX5VF_PRE_COPY_SUPP(mvdev)) { + ret = mlx5vf_cmd_load_vhca_state(mvdev, + mvdev->resuming_migf, + mvdev->resuming_migf->buf); + if (ret) + return ERR_PTR(ret); + } mlx5vf_disable_fds(mvdev); return NULL; } + if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) || + (cur == VFIO_DEVICE_STATE_RUNNING_P2P && + new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) { + struct mlx5_vf_migration_file *migf; + + migf = mlx5vf_pci_save_device_data(mvdev, true); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + mvdev->saving_migf = migf; + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_STOP_COPY) { + ret = mlx5vf_cmd_suspend_vhca(mvdev, + MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER); + if (ret) + return ERR_PTR(ret); + ret = mlx5vf_pci_save_device_inc_data(mvdev); + return ret ? 
ERR_PTR(ret) : NULL; + } + /* * vfio_mig_get_next_state() does not use arcs other than the above */ @@ -512,6 +1017,23 @@ mlx5vf_pci_set_device_state(struct vfio_device *vdev, return res; } +static int mlx5vf_pci_get_data_size(struct vfio_device *vdev, + unsigned long *stop_copy_length) +{ + struct mlx5vf_pci_core_device *mvdev = container_of( + vdev, struct mlx5vf_pci_core_device, core_device.vdev); + size_t state_size; + int ret; + + mutex_lock(&mvdev->state_mutex); + ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, + &state_size, 0); + if (!ret) + *stop_copy_length = state_size; + mlx5vf_state_mutex_unlock(mvdev); + return ret; +} + static int mlx5vf_pci_get_device_state(struct vfio_device *vdev, enum vfio_device_mig_state *curr_state) { @@ -577,6 +1099,7 @@ static void mlx5vf_pci_close_device(struct vfio_device *core_vdev) static const struct vfio_migration_ops mlx5vf_pci_mig_ops = { .migration_set_state = mlx5vf_pci_set_device_state, .migration_get_state = mlx5vf_pci_get_device_state, + .migration_get_data_size = mlx5vf_pci_get_data_size, }; static const struct vfio_log_ops mlx5vf_pci_log_ops = { @@ -623,6 +1146,9 @@ static const struct vfio_device_ops mlx5vf_pci_ops = { .mmap = vfio_pci_core_mmap, .request = vfio_pci_core_request, .match = vfio_pci_core_match, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, }; static int mlx5vf_pci_probe(struct pci_dev *pdev, @@ -676,18 +1202,7 @@ static struct pci_driver mlx5vf_pci_driver = { .driver_managed_dma = true, }; -static void __exit mlx5vf_pci_cleanup(void) -{ - pci_unregister_driver(&mlx5vf_pci_driver); -} - -static int __init mlx5vf_pci_init(void) -{ - return pci_register_driver(&mlx5vf_pci_driver); -} - -module_init(mlx5vf_pci_init); -module_exit(mlx5vf_pci_cleanup); +module_pci_driver(mlx5vf_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Max Gurtovoy "); diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 1d4919edfbde488918327ca894529c291310f7cf..29091ee2e9849bafc044a70559b7db1f2f260116 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -138,6 +138,9 @@ static const struct vfio_device_ops vfio_pci_ops = { .mmap = vfio_pci_core_mmap, .request = vfio_pci_core_request, .match = vfio_pci_core_match, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, }; static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index e030c2120183ef5c6bd4019ad50e5e6ac4f40a6c..26a541cc64d114a22e4f9688c71f3a163683ebdc 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -27,6 +27,9 @@ #include #include #include +#if IS_ENABLED(CONFIG_EEH) +#include +#endif #include "vfio_pci_priv.h" @@ -686,7 +689,9 @@ void vfio_pci_core_close_device(struct vfio_device *core_vdev) vdev->sriov_pf_core_dev->vf_token->users--; mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock); } - vfio_spapr_pci_eeh_release(vdev->pdev); +#if IS_ENABLED(CONFIG_EEH) + eeh_dev_release(vdev->pdev); +#endif vfio_pci_core_disable(vdev); mutex_lock(&vdev->igate); @@ -705,7 +710,9 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_close_device); void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev) { vfio_pci_probe_mmaps(vdev); - vfio_spapr_pci_eeh_open(vdev->pdev); +#if IS_ENABLED(CONFIG_EEH) + 
eeh_dev_open(vdev->pdev); +#endif if (vdev->sriov_pf_core_dev) { mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock); @@ -2109,7 +2116,6 @@ void vfio_pci_core_release_dev(struct vfio_device *core_vdev) mutex_destroy(&vdev->vma_lock); kfree(vdev->region); kfree(vdev->pm_save); - vfio_free_device(core_vdev); } EXPORT_SYMBOL_GPL(vfio_pci_core_release_dev); @@ -2128,7 +2134,8 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev) if (vdev->vdev.mig_ops) { if (!(vdev->vdev.mig_ops->migration_get_state && - vdev->vdev.mig_ops->migration_set_state) || + vdev->vdev.mig_ops->migration_set_state && + vdev->vdev.mig_ops->migration_get_data_size) || !(vdev->vdev.migration_flags & VFIO_MIGRATION_STOP_COPY)) return -EINVAL; } diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c index eaea63e5294c58d9874d5526d3921d0bfc88874d..83fe5401559585d241146258b9630d1670ed8600 100644 --- a/drivers/vfio/platform/vfio_amba.c +++ b/drivers/vfio/platform/vfio_amba.c @@ -95,7 +95,6 @@ static void vfio_amba_release_dev(struct vfio_device *core_vdev) vfio_platform_release_common(vdev); kfree(vdev->name); - vfio_free_device(core_vdev); } static void vfio_amba_remove(struct amba_device *adev) @@ -117,6 +116,9 @@ static const struct vfio_device_ops vfio_amba_ops = { .read = vfio_platform_read, .write = vfio_platform_write, .mmap = vfio_platform_mmap, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, }; static const struct amba_id pl330_ids[] = { diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c index 82cedcebfd9022ca7e08fe920b01ebc5e3cfa62e..22a1efca32a8a86ae24ae683d2035d7cece8e016 100644 --- a/drivers/vfio/platform/vfio_platform.c +++ b/drivers/vfio/platform/vfio_platform.c @@ -83,7 +83,6 @@ static void vfio_platform_release_dev(struct vfio_device *core_vdev) container_of(core_vdev, struct vfio_platform_device, vdev); vfio_platform_release_common(vdev); - vfio_free_device(core_vdev); } static int vfio_platform_remove(struct platform_device *pdev) @@ -106,6 +105,9 @@ static const struct vfio_device_ops vfio_platform_ops = { .read = vfio_platform_read, .write = vfio_platform_write, .mmap = vfio_platform_mmap, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, }; static struct platform_driver vfio_platform_driver = { diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index 55dc4f43c31e3eccbeada369c6de76ed89e5fc61..1a0a238ffa3543d7999c42b4d219ccfe7897fdb7 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -72,12 +72,11 @@ static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev, const char **extra_dbg) { #ifdef CONFIG_ACPI - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct device *dev = vdev->device; acpi_handle handle = ACPI_HANDLE(dev); acpi_status acpi_ret; - acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, &buffer); + acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, NULL); if (ACPI_FAILURE(acpi_ret)) { if (extra_dbg) *extra_dbg = acpi_format_exception(acpi_ret); diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h index bcad54bbab08c4b8497e5aead5d5576fc45a01e4..f8219a438bfbf58d3f7c08e907a3dc4f660dfed5 100644 --- a/drivers/vfio/vfio.h +++ b/drivers/vfio/vfio.h @@ -6,14 +6,25 @@ #ifndef 
__VFIO_VFIO_H__ #define __VFIO_VFIO_H__ +#include #include #include #include +struct iommufd_ctx; struct iommu_group; struct vfio_device; struct vfio_container; +void vfio_device_put_registration(struct vfio_device *device); +bool vfio_device_try_get_registration(struct vfio_device *device); +int vfio_device_open(struct vfio_device *device, + struct iommufd_ctx *iommufd, struct kvm *kvm); +void vfio_device_close(struct vfio_device *device, + struct iommufd_ctx *iommufd); + +extern const struct file_operations vfio_device_fops; + enum vfio_group_type { /* * Physical device with IOMMU backing. @@ -54,14 +65,30 @@ struct vfio_group { struct list_head device_list; struct mutex device_lock; struct list_head vfio_next; +#if IS_ENABLED(CONFIG_VFIO_CONTAINER) struct list_head container_next; +#endif enum vfio_group_type type; struct mutex group_lock; struct kvm *kvm; struct file *opened_file; struct blocking_notifier_head notifier; + struct iommufd_ctx *iommufd; }; +int vfio_device_set_group(struct vfio_device *device, + enum vfio_group_type type); +void vfio_device_remove_group(struct vfio_device *device); +void vfio_device_group_register(struct vfio_device *device); +void vfio_device_group_unregister(struct vfio_device *device); +int vfio_device_group_use_iommu(struct vfio_device *device); +void vfio_device_group_unuse_iommu(struct vfio_device *device); +void vfio_device_group_close(struct vfio_device *device); +bool vfio_device_has_container(struct vfio_device *device); +int __init vfio_group_init(void); +void vfio_group_cleanup(void); + +#if IS_ENABLED(CONFIG_VFIO_CONTAINER) /* events for the backend driver notify callback */ enum vfio_iommu_notify_type { VFIO_IOMMU_CONTAINER_CLOSE = 0, @@ -109,20 +136,114 @@ struct vfio_iommu_driver { int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops); -bool vfio_assert_device_open(struct vfio_device *device); - struct vfio_container *vfio_container_from_file(struct file *filep); -int vfio_device_assign_container(struct vfio_device *device); -void vfio_device_unassign_container(struct vfio_device *device); +int vfio_group_use_container(struct vfio_group *group); +void vfio_group_unuse_container(struct vfio_group *group); int vfio_container_attach_group(struct vfio_container *container, struct vfio_group *group); void vfio_group_detach_container(struct vfio_group *group); void vfio_device_container_register(struct vfio_device *device); void vfio_device_container_unregister(struct vfio_device *device); -long vfio_container_ioctl_check_extension(struct vfio_container *container, - unsigned long arg); +int vfio_device_container_pin_pages(struct vfio_device *device, + dma_addr_t iova, int npage, + int prot, struct page **pages); +void vfio_device_container_unpin_pages(struct vfio_device *device, + dma_addr_t iova, int npage); +int vfio_device_container_dma_rw(struct vfio_device *device, + dma_addr_t iova, void *data, + size_t len, bool write); + int __init vfio_container_init(void); void vfio_container_cleanup(void); +#else +static inline struct vfio_container * +vfio_container_from_file(struct file *filep) +{ + return NULL; +} + +static inline int vfio_group_use_container(struct vfio_group *group) +{ + return -EOPNOTSUPP; +} + +static inline void vfio_group_unuse_container(struct vfio_group *group) +{ +} + +static inline int vfio_container_attach_group(struct vfio_container *container, + struct vfio_group *group) +{ + return -EOPNOTSUPP; +} + +static inline void 
vfio_group_detach_container(struct vfio_group *group) +{ +} + +static inline void vfio_device_container_register(struct vfio_device *device) +{ +} + +static inline void vfio_device_container_unregister(struct vfio_device *device) +{ +} + +static inline int vfio_device_container_pin_pages(struct vfio_device *device, + dma_addr_t iova, int npage, + int prot, struct page **pages) +{ + return -EOPNOTSUPP; +} + +static inline void vfio_device_container_unpin_pages(struct vfio_device *device, + dma_addr_t iova, int npage) +{ +} + +static inline int vfio_device_container_dma_rw(struct vfio_device *device, + dma_addr_t iova, void *data, + size_t len, bool write) +{ + return -EOPNOTSUPP; +} + +static inline int vfio_container_init(void) +{ + return 0; +} +static inline void vfio_container_cleanup(void) +{ +} +#endif + +#if IS_ENABLED(CONFIG_IOMMUFD) +int vfio_iommufd_bind(struct vfio_device *device, struct iommufd_ctx *ictx); +void vfio_iommufd_unbind(struct vfio_device *device); +#else +static inline int vfio_iommufd_bind(struct vfio_device *device, + struct iommufd_ctx *ictx) +{ + return -EOPNOTSUPP; +} + +static inline void vfio_iommufd_unbind(struct vfio_device *device) +{ +} +#endif + +#if IS_ENABLED(CONFIG_VFIO_VIRQFD) +int __init vfio_virqfd_init(void); +void vfio_virqfd_exit(void); +#else +static inline int __init vfio_virqfd_init(void) +{ + return 0; +} +static inline void vfio_virqfd_exit(void) +{ +} +#endif #ifdef CONFIG_VFIO_NOIOMMU extern bool vfio_noiommu __read_mostly; diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 169f07ac162d9c9520eefa47674591a820ca97eb..60a50ce8701e5c6292e46f398563f2ccb9aec866 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -4,6 +4,7 @@ * * Copyright (C) 2013 IBM Corp. All rights reserved. * Author: Alexey Kardashevskiy + * Copyright Gavin Shan, IBM Corporation 2014. * * Derived from original vfio_iommu_type1.c: * Copyright (C) 2012 Red Hat, Inc. All rights reserved. 
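/*
 * Not part of the patch: the vfio.h hunk above follows the kernel's standard
 * compile-time stub idiom.  When the Kconfig symbol is enabled the header
 * declares the real functions; when it is disabled a static inline no-op
 * stands in, so callers never need their own #ifdefs and the optimizer drops
 * the call entirely.  A minimal sketch of the pattern, where CONFIG_FOO,
 * foo_init() and foo_exit() are hypothetical names used only for
 * illustration:
 */
#include <linux/init.h>

#if IS_ENABLED(CONFIG_FOO)
int __init foo_init(void);	/* real implementation lives in foo.c */
void foo_exit(void);
#else
static inline int __init foo_init(void)
{
	return 0;	/* feature compiled out: succeed as a no-op */
}
static inline void foo_exit(void)
{
}
#endif
/*
 * This is why vfio_init() further down can call vfio_virqfd_init() and the
 * container helpers unconditionally, whatever the configuration.
 */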
@@ -773,6 +774,56 @@ static long tce_iommu_create_default_window(struct tce_container *container) return ret; } +static long vfio_spapr_ioctl_eeh_pe_op(struct iommu_group *group, + unsigned long arg) +{ + struct eeh_pe *pe; + struct vfio_eeh_pe_op op; + unsigned long minsz; + + pe = eeh_iommu_group_to_pe(group); + if (!pe) + return -ENODEV; + + minsz = offsetofend(struct vfio_eeh_pe_op, op); + if (copy_from_user(&op, (void __user *)arg, minsz)) + return -EFAULT; + if (op.argsz < minsz || op.flags) + return -EINVAL; + + switch (op.op) { + case VFIO_EEH_PE_DISABLE: + return eeh_pe_set_option(pe, EEH_OPT_DISABLE); + case VFIO_EEH_PE_ENABLE: + return eeh_pe_set_option(pe, EEH_OPT_ENABLE); + case VFIO_EEH_PE_UNFREEZE_IO: + return eeh_pe_set_option(pe, EEH_OPT_THAW_MMIO); + case VFIO_EEH_PE_UNFREEZE_DMA: + return eeh_pe_set_option(pe, EEH_OPT_THAW_DMA); + case VFIO_EEH_PE_GET_STATE: + return eeh_pe_get_state(pe); + case VFIO_EEH_PE_RESET_DEACTIVATE: + return eeh_pe_reset(pe, EEH_RESET_DEACTIVATE, true); + case VFIO_EEH_PE_RESET_HOT: + return eeh_pe_reset(pe, EEH_RESET_HOT, true); + case VFIO_EEH_PE_RESET_FUNDAMENTAL: + return eeh_pe_reset(pe, EEH_RESET_FUNDAMENTAL, true); + case VFIO_EEH_PE_CONFIGURE: + return eeh_pe_configure(pe); + case VFIO_EEH_PE_INJECT_ERR: + minsz = offsetofend(struct vfio_eeh_pe_op, err.mask); + if (op.argsz < minsz) + return -EINVAL; + if (copy_from_user(&op, (void __user *)arg, minsz)) + return -EFAULT; + + return eeh_pe_inject_err(pe, op.err.type, op.err.func, + op.err.addr, op.err.mask); + default: + return -EINVAL; + } +} + static long tce_iommu_ioctl(void *iommu_data, unsigned int cmd, unsigned long arg) { @@ -785,14 +836,12 @@ static long tce_iommu_ioctl(void *iommu_data, switch (arg) { case VFIO_SPAPR_TCE_IOMMU: case VFIO_SPAPR_TCE_v2_IOMMU: - ret = 1; - break; + return 1; + case VFIO_EEH: + return eeh_enabled(); default: - ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg); - break; + return 0; } - - return (ret < 0) ? 
0 : ret; } /* @@ -1046,8 +1096,7 @@ static long tce_iommu_ioctl(void *iommu_data, ret = 0; list_for_each_entry(tcegrp, &container->group_list, next) { - ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp, - cmd, arg); + ret = vfio_spapr_ioctl_eeh_pe_op(tcegrp->grp, arg); if (ret) return ret; } diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c index 6e8804fe00953d4fa7beaf1b503751302cefcafc..5177bb061b17b51b9e8e9030d9bdf13c0b6a2b4a 100644 --- a/drivers/vfio/vfio_main.c +++ b/drivers/vfio/vfio_main.c @@ -13,8 +13,6 @@ #include #include #include -#include -#include #include #include #include @@ -35,6 +33,7 @@ #include #include #include +#include #include "vfio.h" #define DRIVER_VERSION "0.3" @@ -42,17 +41,11 @@ #define DRIVER_DESC "VFIO - User Level meta-driver" static struct vfio { - struct class *class; - struct list_head group_list; - struct mutex group_lock; /* locks group_list */ - struct ida group_ida; - dev_t group_devt; struct class *device_class; struct ida device_ida; } vfio; static DEFINE_XARRAY(vfio_device_set_xa); -static const struct file_operations vfio_group_fops; int vfio_assign_device_set(struct vfio_device *device, void *set_id) { @@ -138,208 +131,21 @@ unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set) } EXPORT_SYMBOL_GPL(vfio_device_set_open_count); -/* - * Group objects - create, release, get, put, search - */ -static struct vfio_group * -__vfio_group_get_from_iommu(struct iommu_group *iommu_group) -{ - struct vfio_group *group; - - /* - * group->iommu_group from the vfio.group_list cannot be NULL - * under the vfio.group_lock. - */ - list_for_each_entry(group, &vfio.group_list, vfio_next) { - if (group->iommu_group == iommu_group) { - refcount_inc(&group->drivers); - return group; - } - } - return NULL; -} - -static struct vfio_group * -vfio_group_get_from_iommu(struct iommu_group *iommu_group) -{ - struct vfio_group *group; - - mutex_lock(&vfio.group_lock); - group = __vfio_group_get_from_iommu(iommu_group); - mutex_unlock(&vfio.group_lock); - return group; -} - -static void vfio_group_release(struct device *dev) -{ - struct vfio_group *group = container_of(dev, struct vfio_group, dev); - - mutex_destroy(&group->device_lock); - mutex_destroy(&group->group_lock); - WARN_ON(group->iommu_group); - ida_free(&vfio.group_ida, MINOR(group->dev.devt)); - kfree(group); -} - -static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group, - enum vfio_group_type type) -{ - struct vfio_group *group; - int minor; - - group = kzalloc(sizeof(*group), GFP_KERNEL); - if (!group) - return ERR_PTR(-ENOMEM); - - minor = ida_alloc_max(&vfio.group_ida, MINORMASK, GFP_KERNEL); - if (minor < 0) { - kfree(group); - return ERR_PTR(minor); - } - - device_initialize(&group->dev); - group->dev.devt = MKDEV(MAJOR(vfio.group_devt), minor); - group->dev.class = vfio.class; - group->dev.release = vfio_group_release; - cdev_init(&group->cdev, &vfio_group_fops); - group->cdev.owner = THIS_MODULE; - - refcount_set(&group->drivers, 1); - mutex_init(&group->group_lock); - INIT_LIST_HEAD(&group->device_list); - mutex_init(&group->device_lock); - group->iommu_group = iommu_group; - /* put in vfio_group_release() */ - iommu_group_ref_get(iommu_group); - group->type = type; - BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); - - return group; -} - -static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group, - enum vfio_group_type type) -{ - struct vfio_group *group; - struct vfio_group *ret; - int err; - - group = vfio_group_alloc(iommu_group, type); - if 
(IS_ERR(group)) - return group; - - err = dev_set_name(&group->dev, "%s%d", - group->type == VFIO_NO_IOMMU ? "noiommu-" : "", - iommu_group_id(iommu_group)); - if (err) { - ret = ERR_PTR(err); - goto err_put; - } - - mutex_lock(&vfio.group_lock); - - /* Did we race creating this group? */ - ret = __vfio_group_get_from_iommu(iommu_group); - if (ret) - goto err_unlock; - - err = cdev_device_add(&group->cdev, &group->dev); - if (err) { - ret = ERR_PTR(err); - goto err_unlock; - } - - list_add(&group->vfio_next, &vfio.group_list); - - mutex_unlock(&vfio.group_lock); - return group; - -err_unlock: - mutex_unlock(&vfio.group_lock); -err_put: - put_device(&group->dev); - return ret; -} - -static void vfio_device_remove_group(struct vfio_device *device) -{ - struct vfio_group *group = device->group; - struct iommu_group *iommu_group; - - if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU) - iommu_group_remove_device(device->dev); - - /* Pairs with vfio_create_group() / vfio_group_get_from_iommu() */ - if (!refcount_dec_and_mutex_lock(&group->drivers, &vfio.group_lock)) - return; - list_del(&group->vfio_next); - - /* - * We could concurrently probe another driver in the group that might - * race vfio_device_remove_group() with vfio_get_group(), so we have to - * ensure that the sysfs is all cleaned up under lock otherwise the - * cdev_device_add() will fail due to the name aready existing. - */ - cdev_device_del(&group->cdev, &group->dev); - - mutex_lock(&group->group_lock); - /* - * These data structures all have paired operations that can only be - * undone when the caller holds a live reference on the device. Since - * all pairs must be undone these WARN_ON's indicate some caller did not - * properly hold the group reference. - */ - WARN_ON(!list_empty(&group->device_list)); - WARN_ON(group->notifier.head); - - /* - * Revoke all users of group->iommu_group. At this point we know there - * are no devices active because we are unplugging the last one. Setting - * iommu_group to NULL blocks all new users. - */ - if (group->container) - vfio_group_detach_container(group); - iommu_group = group->iommu_group; - group->iommu_group = NULL; - mutex_unlock(&group->group_lock); - mutex_unlock(&vfio.group_lock); - - iommu_group_put(iommu_group); - put_device(&group->dev); -} - /* * Device objects - create, release, get, put, search */ /* Device reference always implies a group reference */ -static void vfio_device_put_registration(struct vfio_device *device) +void vfio_device_put_registration(struct vfio_device *device) { if (refcount_dec_and_test(&device->refcount)) complete(&device->comp); } -static bool vfio_device_try_get_registration(struct vfio_device *device) +bool vfio_device_try_get_registration(struct vfio_device *device) { return refcount_inc_not_zero(&device->refcount); } -static struct vfio_device *vfio_group_get_device(struct vfio_group *group, - struct device *dev) -{ - struct vfio_device *device; - - mutex_lock(&group->device_lock); - list_for_each_entry(device, &group->device_list, group_next) { - if (device->dev == dev && - vfio_device_try_get_registration(device)) { - mutex_unlock(&group->device_lock); - return device; - } - } - mutex_unlock(&group->device_lock); - return NULL; -} - /* * VFIO driver API */ @@ -352,15 +158,15 @@ static void vfio_device_release(struct device *dev) vfio_release_device_set(device); ida_free(&vfio.device_ida, device->index); - /* - * kvfree() cannot be done here due to a life cycle mess in - * vfio-ccw. 
Before the ccw part is fixed all drivers are - * required to support @release and call vfio_free_device() - * from there. - */ - device->ops->release(device); + if (device->ops->release) + device->ops->release(device); + + kvfree(device); } +static int vfio_init_device(struct vfio_device *device, struct device *dev, + const struct vfio_device_ops *ops); + /* * Allocate and initialize vfio_device so it can be registered to vfio * core. @@ -399,11 +205,9 @@ EXPORT_SYMBOL_GPL(_vfio_alloc_device); /* * Initialize a vfio_device so it can be registered to vfio core. - * - * Only vfio-ccw driver should call this interface. */ -int vfio_init_device(struct vfio_device *device, struct device *dev, - const struct vfio_device_ops *ops) +static int vfio_init_device(struct vfio_device *device, struct device *dev, + const struct vfio_device_ops *ops) { int ret; @@ -435,107 +239,16 @@ out_uninit: ida_free(&vfio.device_ida, device->index); return ret; } -EXPORT_SYMBOL_GPL(vfio_init_device); - -/* - * The helper called by driver @release callback to free the device - * structure. Drivers which don't have private data to clean can - * simply use this helper as its @release. - */ -void vfio_free_device(struct vfio_device *device) -{ - kvfree(device); -} -EXPORT_SYMBOL_GPL(vfio_free_device); - -static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev, - enum vfio_group_type type) -{ - struct iommu_group *iommu_group; - struct vfio_group *group; - int ret; - - iommu_group = iommu_group_alloc(); - if (IS_ERR(iommu_group)) - return ERR_CAST(iommu_group); - - ret = iommu_group_set_name(iommu_group, "vfio-noiommu"); - if (ret) - goto out_put_group; - ret = iommu_group_add_device(iommu_group, dev); - if (ret) - goto out_put_group; - - group = vfio_create_group(iommu_group, type); - if (IS_ERR(group)) { - ret = PTR_ERR(group); - goto out_remove_device; - } - iommu_group_put(iommu_group); - return group; - -out_remove_device: - iommu_group_remove_device(dev); -out_put_group: - iommu_group_put(iommu_group); - return ERR_PTR(ret); -} - -static struct vfio_group *vfio_group_find_or_alloc(struct device *dev) -{ - struct iommu_group *iommu_group; - struct vfio_group *group; - - iommu_group = iommu_group_get(dev); - if (!iommu_group && vfio_noiommu) { - /* - * With noiommu enabled, create an IOMMU group for devices that - * don't already have one, implying no IOMMU hardware/driver - * exists. Taint the kernel because we're about to give a DMA - * capable device to a user without IOMMU protection. - */ - group = vfio_noiommu_group_alloc(dev, VFIO_NO_IOMMU); - if (!IS_ERR(group)) { - add_taint(TAINT_USER, LOCKDEP_STILL_OK); - dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n"); - } - return group; - } - - if (!iommu_group) - return ERR_PTR(-EINVAL); - - /* - * VFIO always sets IOMMU_CACHE because we offer no way for userspace to - * restore cache coherency. It has to be checked here because it is only - * valid for cases where we are using iommu groups. 
- */ - if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) { - iommu_group_put(iommu_group); - return ERR_PTR(-EINVAL); - } - - group = vfio_group_get_from_iommu(iommu_group); - if (!group) - group = vfio_create_group(iommu_group, VFIO_IOMMU); - - /* The vfio_group holds a reference to the iommu_group */ - iommu_group_put(iommu_group); - return group; -} static int __vfio_register_dev(struct vfio_device *device, - struct vfio_group *group) + enum vfio_group_type type) { - struct vfio_device *existing_device; int ret; - /* - * In all cases group is the output of one of the group allocation - * functions and we have group->drivers incremented for us. - */ - if (IS_ERR(group)) - return PTR_ERR(group); + if (WARN_ON(device->ops->bind_iommufd && + (!device->ops->unbind_iommufd || + !device->ops->attach_ioas))) + return -EINVAL; /* * If the driver doesn't specify a set then the device is added to a @@ -544,25 +257,13 @@ static int __vfio_register_dev(struct vfio_device *device, if (!device->dev_set) vfio_assign_device_set(device, device); - existing_device = vfio_group_get_device(group, device->dev); - if (existing_device) { - /* - * group->iommu_group is non-NULL because we hold the drivers - * refcount. - */ - dev_WARN(device->dev, "Device already exists on group %d\n", - iommu_group_id(group->iommu_group)); - vfio_device_put_registration(existing_device); - ret = -EBUSY; - goto err_out; - } - - /* Our reference on group is moved to the device */ - device->group = group; - ret = dev_set_name(&device->device, "vfio%d", device->index); if (ret) - goto err_out; + return ret; + + ret = vfio_device_set_group(device, type); + if (ret) + return ret; ret = device_add(&device->device); if (ret) @@ -571,9 +272,7 @@ static int __vfio_register_dev(struct vfio_device *device, /* Refcounting can't start until the driver calls register */ refcount_set(&device->refcount, 1); - mutex_lock(&group->device_lock); - list_add(&device->group_next, &group->device_list); - mutex_unlock(&group->device_lock); + vfio_device_group_register(device); return 0; err_out: @@ -583,8 +282,7 @@ err_out: int vfio_register_group_dev(struct vfio_device *device) { - return __vfio_register_dev(device, - vfio_group_find_or_alloc(device->dev)); + return __vfio_register_dev(device, VFIO_IOMMU); } EXPORT_SYMBOL_GPL(vfio_register_group_dev); @@ -594,46 +292,15 @@ EXPORT_SYMBOL_GPL(vfio_register_group_dev); */ int vfio_register_emulated_iommu_dev(struct vfio_device *device) { - return __vfio_register_dev(device, - vfio_noiommu_group_alloc(device->dev, VFIO_EMULATED_IOMMU)); + return __vfio_register_dev(device, VFIO_EMULATED_IOMMU); } EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev); -static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, - char *buf) -{ - struct vfio_device *it, *device = ERR_PTR(-ENODEV); - - mutex_lock(&group->device_lock); - list_for_each_entry(it, &group->device_list, group_next) { - int ret; - - if (it->ops->match) { - ret = it->ops->match(it, buf); - if (ret < 0) { - device = ERR_PTR(ret); - break; - } - } else { - ret = !strcmp(dev_name(it->dev), buf); - } - - if (ret && vfio_device_try_get_registration(it)) { - device = it; - break; - } - } - mutex_unlock(&group->device_lock); - - return device; -} - /* * Decrement the device reference count and wait for the device to be * removed. Open file descriptors for the device... 
*/ void vfio_unregister_group_dev(struct vfio_device *device) { - struct vfio_group *group = device->group; unsigned int i = 0; bool interrupted = false; long rc; @@ -661,333 +328,101 @@ void vfio_unregister_group_dev(struct vfio_device *device) } } - mutex_lock(&group->device_lock); - list_del(&device->group_next); - mutex_unlock(&group->device_lock); + vfio_device_group_unregister(device); /* Balances device_add in register path */ device_del(&device->device); + /* Balances vfio_device_set_group in register path */ vfio_device_remove_group(device); } EXPORT_SYMBOL_GPL(vfio_unregister_group_dev); -/* - * VFIO Group fd, /dev/vfio/$GROUP - */ -/* - * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or - * if there was no container to unset. Since the ioctl is called on - * the group, we know that still exists, therefore the only valid - * transition here is 1->0. - */ -static int vfio_group_ioctl_unset_container(struct vfio_group *group) +/* true if the vfio_device has open_device() called but not close_device() */ +static bool vfio_assert_device_open(struct vfio_device *device) { - int ret = 0; - - mutex_lock(&group->group_lock); - if (!group->container) { - ret = -EINVAL; - goto out_unlock; - } - if (group->container_users != 1) { - ret = -EBUSY; - goto out_unlock; - } - vfio_group_detach_container(group); - -out_unlock: - mutex_unlock(&group->group_lock); - return ret; + return !WARN_ON_ONCE(!READ_ONCE(device->open_count)); } -static int vfio_group_ioctl_set_container(struct vfio_group *group, - int __user *arg) +static int vfio_device_first_open(struct vfio_device *device, + struct iommufd_ctx *iommufd, struct kvm *kvm) { - struct vfio_container *container; - struct fd f; int ret; - int fd; - if (get_user(fd, arg)) - return -EFAULT; + lockdep_assert_held(&device->dev_set->lock); - f = fdget(fd); - if (!f.file) - return -EBADF; + if (!try_module_get(device->dev->driver->owner)) + return -ENODEV; - mutex_lock(&group->group_lock); - if (group->container || WARN_ON(group->container_users)) { - ret = -EINVAL; - goto out_unlock; - } - if (!group->iommu_group) { - ret = -ENODEV; - goto out_unlock; - } + if (iommufd) + ret = vfio_iommufd_bind(device, iommufd); + else + ret = vfio_device_group_use_iommu(device); + if (ret) + goto err_module_put; - container = vfio_container_from_file(f.file); - ret = -EINVAL; - if (container) { - ret = vfio_container_attach_group(container, group); - goto out_unlock; + device->kvm = kvm; + if (device->ops->open_device) { + ret = device->ops->open_device(device); + if (ret) + goto err_unuse_iommu; } + return 0; -out_unlock: - mutex_unlock(&group->group_lock); - fdput(f); +err_unuse_iommu: + device->kvm = NULL; + if (iommufd) + vfio_iommufd_unbind(device); + else + vfio_device_group_unuse_iommu(device); +err_module_put: + module_put(device->dev->driver->owner); return ret; } -static const struct file_operations vfio_device_fops; - -/* true if the vfio_device has open_device() called but not close_device() */ -bool vfio_assert_device_open(struct vfio_device *device) +static void vfio_device_last_close(struct vfio_device *device, + struct iommufd_ctx *iommufd) { - return !WARN_ON_ONCE(!READ_ONCE(device->open_count)); + lockdep_assert_held(&device->dev_set->lock); + + if (device->ops->close_device) + device->ops->close_device(device); + device->kvm = NULL; + if (iommufd) + vfio_iommufd_unbind(device); + else + vfio_device_group_unuse_iommu(device); + module_put(device->dev->driver->owner); } -static struct file *vfio_device_open(struct vfio_device 
*device) +int vfio_device_open(struct vfio_device *device, + struct iommufd_ctx *iommufd, struct kvm *kvm) { - struct file *filep; - int ret; - - mutex_lock(&device->group->group_lock); - ret = vfio_device_assign_container(device); - mutex_unlock(&device->group->group_lock); - if (ret) - return ERR_PTR(ret); - - if (!try_module_get(device->dev->driver->owner)) { - ret = -ENODEV; - goto err_unassign_container; - } + int ret = 0; mutex_lock(&device->dev_set->lock); device->open_count++; if (device->open_count == 1) { - /* - * Here we pass the KVM pointer with the group under the read - * lock. If the device driver will use it, it must obtain a - * reference and release it during close_device. - */ - mutex_lock(&device->group->group_lock); - device->kvm = device->group->kvm; - - if (device->ops->open_device) { - ret = device->ops->open_device(device); - if (ret) - goto err_undo_count; - } - vfio_device_container_register(device); - mutex_unlock(&device->group->group_lock); - } - mutex_unlock(&device->dev_set->lock); - - /* - * We can't use anon_inode_getfd() because we need to modify - * the f_mode flags directly to allow more than just ioctls - */ - filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops, - device, O_RDWR); - if (IS_ERR(filep)) { - ret = PTR_ERR(filep); - goto err_close_device; - } - - /* - * TODO: add an anon_inode interface to do this. - * Appears to be missing by lack of need rather than - * explicitly prevented. Now there's need. - */ - filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE); - - if (device->group->type == VFIO_NO_IOMMU) - dev_warn(device->dev, "vfio-noiommu device opened by user " - "(%s:%d)\n", current->comm, task_pid_nr(current)); - /* - * On success the ref of device is moved to the file and - * put in vfio_device_fops_release() - */ - return filep; - -err_close_device: - mutex_lock(&device->dev_set->lock); - mutex_lock(&device->group->group_lock); - if (device->open_count == 1) { - if (device->ops->close_device) - device->ops->close_device(device); - - vfio_device_container_unregister(device); + ret = vfio_device_first_open(device, iommufd, kvm); + if (ret) + device->open_count--; } -err_undo_count: - mutex_unlock(&device->group->group_lock); - device->open_count--; - if (device->open_count == 0 && device->kvm) - device->kvm = NULL; mutex_unlock(&device->dev_set->lock); - module_put(device->dev->driver->owner); -err_unassign_container: - vfio_device_unassign_container(device); - return ERR_PTR(ret); -} - -static int vfio_group_ioctl_get_device_fd(struct vfio_group *group, - char __user *arg) -{ - struct vfio_device *device; - struct file *filep; - char *buf; - int fdno; - int ret; - - buf = strndup_user(arg, PAGE_SIZE); - if (IS_ERR(buf)) - return PTR_ERR(buf); - device = vfio_device_get_from_name(group, buf); - kfree(buf); - if (IS_ERR(device)) - return PTR_ERR(device); - - fdno = get_unused_fd_flags(O_CLOEXEC); - if (fdno < 0) { - ret = fdno; - goto err_put_device; - } - - filep = vfio_device_open(device); - if (IS_ERR(filep)) { - ret = PTR_ERR(filep); - goto err_put_fdno; - } - - fd_install(fdno, filep); - return fdno; - -err_put_fdno: - put_unused_fd(fdno); -err_put_device: - vfio_device_put_registration(device); - return ret; -} - -static int vfio_group_ioctl_get_status(struct vfio_group *group, - struct vfio_group_status __user *arg) -{ - unsigned long minsz = offsetofend(struct vfio_group_status, flags); - struct vfio_group_status status; - - if (copy_from_user(&status, arg, minsz)) - return -EFAULT; - - if (status.argsz < minsz) - return 
-EINVAL; - - status.flags = 0; - - mutex_lock(&group->group_lock); - if (!group->iommu_group) { - mutex_unlock(&group->group_lock); - return -ENODEV; - } - - if (group->container) - status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET | - VFIO_GROUP_FLAGS_VIABLE; - else if (!iommu_group_dma_owner_claimed(group->iommu_group)) - status.flags |= VFIO_GROUP_FLAGS_VIABLE; - mutex_unlock(&group->group_lock); - - if (copy_to_user(arg, &status, minsz)) - return -EFAULT; - return 0; -} - -static long vfio_group_fops_unl_ioctl(struct file *filep, - unsigned int cmd, unsigned long arg) -{ - struct vfio_group *group = filep->private_data; - void __user *uarg = (void __user *)arg; - - switch (cmd) { - case VFIO_GROUP_GET_DEVICE_FD: - return vfio_group_ioctl_get_device_fd(group, uarg); - case VFIO_GROUP_GET_STATUS: - return vfio_group_ioctl_get_status(group, uarg); - case VFIO_GROUP_SET_CONTAINER: - return vfio_group_ioctl_set_container(group, uarg); - case VFIO_GROUP_UNSET_CONTAINER: - return vfio_group_ioctl_unset_container(group); - default: - return -ENOTTY; - } -} - -static int vfio_group_fops_open(struct inode *inode, struct file *filep) -{ - struct vfio_group *group = - container_of(inode->i_cdev, struct vfio_group, cdev); - int ret; - - mutex_lock(&group->group_lock); - - /* - * drivers can be zero if this races with vfio_device_remove_group(), it - * will be stable at 0 under the group rwsem - */ - if (refcount_read(&group->drivers) == 0) { - ret = -ENODEV; - goto out_unlock; - } - - if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) { - ret = -EPERM; - goto out_unlock; - } - - /* - * Do we need multiple instances of the group open? Seems not. - */ - if (group->opened_file) { - ret = -EBUSY; - goto out_unlock; - } - group->opened_file = filep; - filep->private_data = group; - ret = 0; -out_unlock: - mutex_unlock(&group->group_lock); return ret; } -static int vfio_group_fops_release(struct inode *inode, struct file *filep) +void vfio_device_close(struct vfio_device *device, + struct iommufd_ctx *iommufd) { - struct vfio_group *group = filep->private_data; - - filep->private_data = NULL; - - mutex_lock(&group->group_lock); - /* - * Device FDs hold a group file reference, therefore the group release - * is only called when there are no open devices. - */ - WARN_ON(group->notifier.head); - if (group->container) - vfio_group_detach_container(group); - group->opened_file = NULL; - mutex_unlock(&group->group_lock); - return 0; + mutex_lock(&device->dev_set->lock); + vfio_assert_device_open(device); + if (device->open_count == 1) + vfio_device_last_close(device, iommufd); + device->open_count--; + mutex_unlock(&device->dev_set->lock); } -static const struct file_operations vfio_group_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = vfio_group_fops_unl_ioctl, - .compat_ioctl = compat_ptr_ioctl, - .open = vfio_group_fops_open, - .release = vfio_group_fops_release, -}; - /* * Wrapper around pm_runtime_resume_and_get(). * Return error code on failure or 0 on success. 
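/*
 * Not part of the patch: vfio_device_open() and vfio_device_close() above
 * implement a first-open/last-close pattern.  The expensive driver hooks run
 * only on the 0->1 and 1->0 transitions of open_count, and dev_set->lock
 * serializes those transitions across all devices in the set.  A condensed
 * sketch of the shape; the toy_* names are hypothetical stand-ins for the
 * real bind/open_device and close_device/unbind steps:
 */
#include <linux/mutex.h>

struct toy_dev {
	struct mutex lock;	/* plays the role of dev_set->lock */
	unsigned int open_count;
};

static int toy_first_open(struct toy_dev *d) { return 0; }	/* bind + open_device() */
static void toy_last_close(struct toy_dev *d) { }		/* close_device() + unbind */

static int toy_open(struct toy_dev *d)
{
	int ret = 0;

	mutex_lock(&d->lock);
	d->open_count++;
	if (d->open_count == 1) {
		ret = toy_first_open(d);
		if (ret)
			d->open_count--;	/* roll back on failure, as vfio does */
	}
	mutex_unlock(&d->lock);
	return ret;
}

static void toy_close(struct toy_dev *d)
{
	mutex_lock(&d->lock);
	if (d->open_count == 1)
		toy_last_close(d);
	d->open_count--;
	mutex_unlock(&d->lock);
}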
@@ -1028,24 +463,7 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep) { struct vfio_device *device = filep->private_data; - mutex_lock(&device->dev_set->lock); - vfio_assert_device_open(device); - mutex_lock(&device->group->group_lock); - if (device->open_count == 1) { - if (device->ops->close_device) - device->ops->close_device(device); - - vfio_device_container_unregister(device); - } - mutex_unlock(&device->group->group_lock); - device->open_count--; - if (device->open_count == 0) - device->kvm = NULL; - mutex_unlock(&device->dev_set->lock); - - module_put(device->dev->driver->owner); - - vfio_device_unassign_container(device); + vfio_device_group_close(device); vfio_device_put_registration(device); @@ -1072,7 +490,7 @@ int vfio_mig_get_next_state(struct vfio_device *device, enum vfio_device_mig_state new_fsm, enum vfio_device_mig_state *next_fsm) { - enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_RUNNING_P2P + 1 }; + enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_PRE_COPY_P2P + 1 }; /* * The coding in this table requires the driver to implement the * following FSM arcs: @@ -1087,30 +505,65 @@ int vfio_mig_get_next_state(struct vfio_device *device, * RUNNING_P2P -> RUNNING * RUNNING_P2P -> STOP * STOP -> RUNNING_P2P - * Without P2P the driver must implement: + * + * If precopy is supported then the driver must support these additional + * FSM arcs: + * RUNNING -> PRE_COPY + * PRE_COPY -> RUNNING + * PRE_COPY -> STOP_COPY + * However, if precopy and P2P are supported together then the driver + * must support these additional arcs beyond the P2P arcs above: + * PRE_COPY -> RUNNING + * PRE_COPY -> PRE_COPY_P2P + * PRE_COPY_P2P -> PRE_COPY + * PRE_COPY_P2P -> RUNNING_P2P + * PRE_COPY_P2P -> STOP_COPY + * RUNNING -> PRE_COPY + * RUNNING_P2P -> PRE_COPY_P2P + * + * Without P2P and precopy the driver must implement: * RUNNING -> STOP * STOP -> RUNNING * * The coding will step through multiple states for some combination * transitions; if all optional features are supported, this means the * following ones: + * PRE_COPY -> PRE_COPY_P2P -> STOP_COPY + * PRE_COPY -> RUNNING -> RUNNING_P2P + * PRE_COPY -> RUNNING -> RUNNING_P2P -> STOP + * PRE_COPY -> RUNNING -> RUNNING_P2P -> STOP -> RESUMING + * PRE_COPY_P2P -> RUNNING_P2P -> RUNNING + * PRE_COPY_P2P -> RUNNING_P2P -> STOP + * PRE_COPY_P2P -> RUNNING_P2P -> STOP -> RESUMING * RESUMING -> STOP -> RUNNING_P2P + * RESUMING -> STOP -> RUNNING_P2P -> PRE_COPY_P2P * RESUMING -> STOP -> RUNNING_P2P -> RUNNING + * RESUMING -> STOP -> RUNNING_P2P -> RUNNING -> PRE_COPY * RESUMING -> STOP -> STOP_COPY + * RUNNING -> RUNNING_P2P -> PRE_COPY_P2P * RUNNING -> RUNNING_P2P -> STOP * RUNNING -> RUNNING_P2P -> STOP -> RESUMING * RUNNING -> RUNNING_P2P -> STOP -> STOP_COPY + * RUNNING_P2P -> RUNNING -> PRE_COPY * RUNNING_P2P -> STOP -> RESUMING * RUNNING_P2P -> STOP -> STOP_COPY + * STOP -> RUNNING_P2P -> PRE_COPY_P2P * STOP -> RUNNING_P2P -> RUNNING + * STOP -> RUNNING_P2P -> RUNNING -> PRE_COPY * STOP_COPY -> STOP -> RESUMING * STOP_COPY -> STOP -> RUNNING_P2P * STOP_COPY -> STOP -> RUNNING_P2P -> RUNNING + * + * The following transitions are blocked: + * STOP_COPY -> PRE_COPY + * STOP_COPY -> PRE_COPY_P2P */ static const u8 vfio_from_fsm_table[VFIO_DEVICE_NUM_STATES][VFIO_DEVICE_NUM_STATES] = { [VFIO_DEVICE_STATE_STOP] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P, + 
[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, @@ -1119,14 +572,38 @@ int vfio_mig_get_next_state(struct vfio_device *device, [VFIO_DEVICE_STATE_RUNNING] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY, + [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, + [VFIO_DEVICE_STATE_PRE_COPY] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY, + [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_PRE_COPY_P2P, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_PRE_COPY_P2P] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY, + [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, [VFIO_DEVICE_STATE_STOP_COPY] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP, @@ -1135,6 +612,8 @@ int vfio_mig_get_next_state(struct vfio_device *device, [VFIO_DEVICE_STATE_RESUMING] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP, @@ -1143,6 +622,8 @@ int vfio_mig_get_next_state(struct vfio_device *device, [VFIO_DEVICE_STATE_RUNNING_P2P] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, @@ -1151,6 +632,8 @@ int vfio_mig_get_next_state(struct vfio_device *device, [VFIO_DEVICE_STATE_ERROR] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_ERROR, 
[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_ERROR, @@ -1161,6 +644,11 @@ int vfio_mig_get_next_state(struct vfio_device *device, static const unsigned int state_flags_table[VFIO_DEVICE_NUM_STATES] = { [VFIO_DEVICE_STATE_STOP] = VFIO_MIGRATION_STOP_COPY, [VFIO_DEVICE_STATE_RUNNING] = VFIO_MIGRATION_STOP_COPY, + [VFIO_DEVICE_STATE_PRE_COPY] = + VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY, + [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_MIGRATION_STOP_COPY | + VFIO_MIGRATION_P2P | + VFIO_MIGRATION_PRE_COPY, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_MIGRATION_STOP_COPY, [VFIO_DEVICE_STATE_RESUMING] = VFIO_MIGRATION_STOP_COPY, [VFIO_DEVICE_STATE_RUNNING_P2P] = @@ -1272,6 +760,34 @@ out_copy: return 0; } +static int +vfio_ioctl_device_feature_migration_data_size(struct vfio_device *device, + u32 flags, void __user *arg, + size_t argsz) +{ + struct vfio_device_feature_mig_data_size data_size = {}; + unsigned long stop_copy_length; + int ret; + + if (!device->mig_ops) + return -ENOTTY; + + ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET, + sizeof(data_size)); + if (ret != 1) + return ret; + + ret = device->mig_ops->migration_get_data_size(device, &stop_copy_length); + if (ret) + return ret; + + data_size.stop_copy_length = stop_copy_length; + if (copy_to_user(arg, &data_size, sizeof(data_size))) + return -EFAULT; + + return 0; +} + static int vfio_ioctl_device_feature_migration(struct vfio_device *device, u32 flags, void __user *arg, size_t argsz) @@ -1499,6 +1015,10 @@ static int vfio_ioctl_device_feature(struct vfio_device *device, return vfio_ioctl_device_feature_logging_report( device, feature.flags, arg->data, feature.argsz - minsz); + case VFIO_DEVICE_FEATURE_MIG_DATA_SIZE: + return vfio_ioctl_device_feature_migration_data_size( + device, feature.flags, arg->data, + feature.argsz - minsz); default: if (unlikely(!device->ops->device_feature)) return -EINVAL; @@ -1568,7 +1088,7 @@ static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma) return device->ops->mmap(device, vma); } -static const struct file_operations vfio_device_fops = { +const struct file_operations vfio_device_fops = { .owner = THIS_MODULE, .release = vfio_device_fops_release, .read = vfio_device_fops_read, @@ -1578,118 +1098,6 @@ static const struct file_operations vfio_device_fops = { .mmap = vfio_device_fops_mmap, }; -/** - * vfio_file_iommu_group - Return the struct iommu_group for the vfio group file - * @file: VFIO group file - * - * The returned iommu_group is valid as long as a ref is held on the file. This - * returns a reference on the group. This function is deprecated, only the SPAPR - * path in kvm should call it. 
- */ -struct iommu_group *vfio_file_iommu_group(struct file *file) -{ - struct vfio_group *group = file->private_data; - struct iommu_group *iommu_group = NULL; - - if (!IS_ENABLED(CONFIG_SPAPR_TCE_IOMMU)) - return NULL; - - if (!vfio_file_is_group(file)) - return NULL; - - mutex_lock(&group->group_lock); - if (group->iommu_group) { - iommu_group = group->iommu_group; - iommu_group_ref_get(iommu_group); - } - mutex_unlock(&group->group_lock); - return iommu_group; -} -EXPORT_SYMBOL_GPL(vfio_file_iommu_group); - -/** - * vfio_file_is_group - True if the file is usable with VFIO aPIS - * @file: VFIO group file - */ -bool vfio_file_is_group(struct file *file) -{ - return file->f_op == &vfio_group_fops; -} -EXPORT_SYMBOL_GPL(vfio_file_is_group); - -/** - * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file - * is always CPU cache coherent - * @file: VFIO group file - * - * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop - * bit in DMA transactions. A return of false indicates that the user has - * rights to access additional instructions such as wbinvd on x86. - */ -bool vfio_file_enforced_coherent(struct file *file) -{ - struct vfio_group *group = file->private_data; - bool ret; - - if (!vfio_file_is_group(file)) - return true; - - mutex_lock(&group->group_lock); - if (group->container) { - ret = vfio_container_ioctl_check_extension(group->container, - VFIO_DMA_CC_IOMMU); - } else { - /* - * Since the coherency state is determined only once a container - * is attached the user must do so before they can prove they - * have permission. - */ - ret = true; - } - mutex_unlock(&group->group_lock); - return ret; -} -EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent); - -/** - * vfio_file_set_kvm - Link a kvm with VFIO drivers - * @file: VFIO group file - * @kvm: KVM to link - * - * When a VFIO device is first opened the KVM will be available in - * device->kvm if one was associated with the group. - */ -void vfio_file_set_kvm(struct file *file, struct kvm *kvm) -{ - struct vfio_group *group = file->private_data; - - if (!vfio_file_is_group(file)) - return; - - mutex_lock(&group->group_lock); - group->kvm = kvm; - mutex_unlock(&group->group_lock); -} -EXPORT_SYMBOL_GPL(vfio_file_set_kvm); - -/** - * vfio_file_has_dev - True if the VFIO file is a handle for device - * @file: VFIO file to check - * @device: Device that must be part of the file - * - * Returns true if given file has permission to manipulate the given device. - */ -bool vfio_file_has_dev(struct file *file, struct vfio_device *device) -{ - struct vfio_group *group = file->private_data; - - if (!vfio_file_is_group(file)) - return false; - - return group == device->group; -} -EXPORT_SYMBOL_GPL(vfio_file_has_dev); - /* * Sub-module support */ @@ -1810,34 +1218,139 @@ int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs, EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare); /* - * Module/class support + * Pin contiguous user pages and return their associated host pages for local + * domain only. + * @device [in] : device + * @iova [in] : starting IOVA of user pages to be pinned. + * @npage [in] : count of pages to be pinned. This count should not + * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. + * @prot [in] : protection flags + * @pages[out] : array of host pages + * Return error or number of pages pinned. + * + * A driver may only call this function if the vfio_device was created + * by vfio_register_emulated_iommu_dev() due to vfio_device_container_pin_pages(). 
*/ -static char *vfio_devnode(struct device *dev, umode_t *mode) +int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova, + int npage, int prot, struct page **pages) { - return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev)); + /* group->container cannot change while a vfio device is open */ + if (!pages || !npage || WARN_ON(!vfio_assert_device_open(device))) + return -EINVAL; + if (vfio_device_has_container(device)) + return vfio_device_container_pin_pages(device, iova, + npage, prot, pages); + if (device->iommufd_access) { + int ret; + + if (iova > ULONG_MAX) + return -EINVAL; + /* + * VFIO ignores the sub page offset, npages is from the start of + * a PAGE_SIZE chunk of IOVA. The caller is expected to recover + * the sub page offset by doing: + * pages[0] + (iova % PAGE_SIZE) + */ + ret = iommufd_access_pin_pages( + device->iommufd_access, ALIGN_DOWN(iova, PAGE_SIZE), + npage * PAGE_SIZE, pages, + (prot & IOMMU_WRITE) ? IOMMUFD_ACCESS_RW_WRITE : 0); + if (ret) + return ret; + return npage; + } + return -EINVAL; +} +EXPORT_SYMBOL(vfio_pin_pages); + +/* + * Unpin contiguous host pages for local domain only. + * @device [in] : device + * @iova [in] : starting address of user pages to be unpinned. + * @npage [in] : count of pages to be unpinned. This count should not + * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. + */ +void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage) +{ + if (WARN_ON(!vfio_assert_device_open(device))) + return; + + if (vfio_device_has_container(device)) { + vfio_device_container_unpin_pages(device, iova, npage); + return; + } + if (device->iommufd_access) { + if (WARN_ON(iova > ULONG_MAX)) + return; + iommufd_access_unpin_pages(device->iommufd_access, + ALIGN_DOWN(iova, PAGE_SIZE), + npage * PAGE_SIZE); + return; + } } +EXPORT_SYMBOL(vfio_unpin_pages); +/* + * This interface allows the CPUs to perform some sort of virtual DMA on + * behalf of the device. + * + * CPUs read/write from/into a range of IOVAs pointing to user space memory + * into/from a kernel buffer. + * + * As the read/write of user space memory is conducted via the CPUs and is + * not a real device DMA, it is not necessary to pin the user space memory. + * + * @device [in] : VFIO device + * @iova [in] : base IOVA of a user space buffer + * @data [in] : pointer to kernel buffer + * @len [in] : kernel buffer length + * @write : indicate read or write + * Return error code on failure or 0 on success. 
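+ *
+ * Illustrative sketch (hypothetical caller, not taken from this patch;
+ * "vdev" and "iova" are assumed): read a 4-byte value the user placed at
+ * @iova into a kernel buffer, with no page pinning required:
+ *
+ *	__le32 val;
+ *	int ret;
+ *
+ *	ret = vfio_dma_rw(vdev, iova, &val, sizeof(val), false);
+ *	if (ret)
+ *		return ret;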
+ */ +int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data, + size_t len, bool write) +{ + if (!data || len <= 0 || !vfio_assert_device_open(device)) + return -EINVAL; + + if (vfio_device_has_container(device)) + return vfio_device_container_dma_rw(device, iova, + data, len, write); + + if (device->iommufd_access) { + unsigned int flags = 0; + + if (iova > ULONG_MAX) + return -EINVAL; + + /* VFIO historically tries to auto-detect a kthread */ + if (!current->mm) + flags |= IOMMUFD_ACCESS_RW_KTHREAD; + if (write) + flags |= IOMMUFD_ACCESS_RW_WRITE; + return iommufd_access_rw(device->iommufd_access, iova, data, + len, flags); + } + return -EINVAL; +} +EXPORT_SYMBOL(vfio_dma_rw); + +/* + * Module/class support + */ static int __init vfio_init(void) { int ret; - ida_init(&vfio.group_ida); ida_init(&vfio.device_ida); - mutex_init(&vfio.group_lock); - INIT_LIST_HEAD(&vfio.group_list); - ret = vfio_container_init(); + ret = vfio_group_init(); if (ret) return ret; - /* /dev/vfio/$GROUP */ - vfio.class = class_create(THIS_MODULE, "vfio"); - if (IS_ERR(vfio.class)) { - ret = PTR_ERR(vfio.class); - goto err_group_class; - } - - vfio.class->devnode = vfio_devnode; + ret = vfio_virqfd_init(); + if (ret) + goto err_virqfd; /* /sys/class/vfio-dev/vfioX */ vfio.device_class = class_create(THIS_MODULE, "vfio-dev"); @@ -1846,36 +1359,23 @@ static int __init vfio_init(void) goto err_dev_class; } - ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio"); - if (ret) - goto err_alloc_chrdev; - pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); return 0; -err_alloc_chrdev: - class_destroy(vfio.device_class); - vfio.device_class = NULL; err_dev_class: - class_destroy(vfio.class); - vfio.class = NULL; -err_group_class: - vfio_container_cleanup(); + vfio_virqfd_exit(); +err_virqfd: + vfio_group_cleanup(); return ret; } static void __exit vfio_cleanup(void) { - WARN_ON(!list_empty(&vfio.group_list)); - ida_destroy(&vfio.device_ida); - ida_destroy(&vfio.group_ida); - unregister_chrdev_region(vfio.group_devt, MINORMASK + 1); class_destroy(vfio.device_class); vfio.device_class = NULL; - class_destroy(vfio.class); - vfio_container_cleanup(); - vfio.class = NULL; + vfio_virqfd_exit(); + vfio_group_cleanup(); xa_destroy(&vfio_device_set_xa); } @@ -1886,6 +1386,4 @@ MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_ALIAS_MISCDEV(VFIO_MINOR); -MODULE_ALIAS("devname:vfio/vfio"); MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce"); diff --git a/drivers/vfio/vfio_spapr_eeh.c b/drivers/vfio/vfio_spapr_eeh.c deleted file mode 100644 index 67f55ac1d459cc18a192fa580d9e5e5df2962914..0000000000000000000000000000000000000000 --- a/drivers/vfio/vfio_spapr_eeh.c +++ /dev/null @@ -1,107 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * EEH functionality support for VFIO devices. The feature is only - * available on sPAPR compatible platforms. - * - * Copyright Gavin Shan, IBM Corporation 2014. 
- */ - -#include <linux/module.h> -#include <linux/uaccess.h> -#include <linux/vfio.h> -#include <asm/eeh.h> - -#define DRIVER_VERSION "0.1" -#define DRIVER_AUTHOR "Gavin Shan, IBM Corporation" -#define DRIVER_DESC "VFIO IOMMU SPAPR EEH" - -/* We might build address mapping here for "fast" path later */ -void vfio_spapr_pci_eeh_open(struct pci_dev *pdev) -{ - eeh_dev_open(pdev); -} -EXPORT_SYMBOL_GPL(vfio_spapr_pci_eeh_open); - -void vfio_spapr_pci_eeh_release(struct pci_dev *pdev) -{ - eeh_dev_release(pdev); -} -EXPORT_SYMBOL_GPL(vfio_spapr_pci_eeh_release); - -long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, - unsigned int cmd, unsigned long arg) -{ - struct eeh_pe *pe; - struct vfio_eeh_pe_op op; - unsigned long minsz; - long ret = -EINVAL; - - switch (cmd) { - case VFIO_CHECK_EXTENSION: - if (arg == VFIO_EEH) - ret = eeh_enabled() ? 1 : 0; - else - ret = 0; - break; - case VFIO_EEH_PE_OP: - pe = eeh_iommu_group_to_pe(group); - if (!pe) - return -ENODEV; - - minsz = offsetofend(struct vfio_eeh_pe_op, op); - if (copy_from_user(&op, (void __user *)arg, minsz)) - return -EFAULT; - if (op.argsz < minsz || op.flags) - return -EINVAL; - - switch (op.op) { - case VFIO_EEH_PE_DISABLE: - ret = eeh_pe_set_option(pe, EEH_OPT_DISABLE); - break; - case VFIO_EEH_PE_ENABLE: - ret = eeh_pe_set_option(pe, EEH_OPT_ENABLE); - break; - case VFIO_EEH_PE_UNFREEZE_IO: - ret = eeh_pe_set_option(pe, EEH_OPT_THAW_MMIO); - break; - case VFIO_EEH_PE_UNFREEZE_DMA: - ret = eeh_pe_set_option(pe, EEH_OPT_THAW_DMA); - break; - case VFIO_EEH_PE_GET_STATE: - ret = eeh_pe_get_state(pe); - break; - case VFIO_EEH_PE_RESET_DEACTIVATE: - ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE, true); - break; - case VFIO_EEH_PE_RESET_HOT: - ret = eeh_pe_reset(pe, EEH_RESET_HOT, true); - break; - case VFIO_EEH_PE_RESET_FUNDAMENTAL: - ret = eeh_pe_reset(pe, EEH_RESET_FUNDAMENTAL, true); - break; - case VFIO_EEH_PE_CONFIGURE: - ret = eeh_pe_configure(pe); - break; - case VFIO_EEH_PE_INJECT_ERR: - minsz = offsetofend(struct vfio_eeh_pe_op, err.mask); - if (op.argsz < minsz) - return -EINVAL; - if (copy_from_user(&op, (void __user *)arg, minsz)) - return -EFAULT; - - ret = eeh_pe_inject_err(pe, op.err.type, op.err.func, - op.err.addr, op.err.mask); - break; - default: - ret = -EINVAL; - } - } - - return ret; -} -EXPORT_SYMBOL_GPL(vfio_spapr_iommu_eeh_ioctl); - -MODULE_VERSION(DRIVER_VERSION); -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR(DRIVER_AUTHOR); -MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c index 414e98d82b02e561d5423910d0cb572b7c4ea7d3..497a17b378656888dcc1b27ef2e3fbfb61450e79 100644 --- a/drivers/vfio/virqfd.c +++ b/drivers/vfio/virqfd.c @@ -12,15 +12,12 @@ #include <linux/file.h> #include <linux/module.h> #include <linux/slab.h> - -#define DRIVER_VERSION "0.1" -#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>" -#define DRIVER_DESC "IRQFD support for VFIO bus drivers" +#include "vfio.h" static struct workqueue_struct *vfio_irqfd_cleanup_wq; static DEFINE_SPINLOCK(virqfd_lock); -static int __init vfio_virqfd_init(void) +int __init vfio_virqfd_init(void) { vfio_irqfd_cleanup_wq = create_singlethread_workqueue("vfio-irqfd-cleanup"); @@ -30,7 +27,7 @@ static int __init vfio_virqfd_init(void) return 0; } -static void __exit vfio_virqfd_exit(void) +void vfio_virqfd_exit(void) { destroy_workqueue(vfio_irqfd_cleanup_wq); } @@ -216,11 +213,3 @@ void vfio_virqfd_disable(struct virqfd **pvirqfd) flush_workqueue(vfio_irqfd_cleanup_wq); } EXPORT_SYMBOL_GPL(vfio_virqfd_disable); - -module_init(vfio_virqfd_init); -module_exit(vfio_virqfd_exit); - -MODULE_VERSION(DRIVER_VERSION); -MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR(DRIVER_AUTHOR); -MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 71019b167f8b0db6c49d1eb1048d9ebbb9430182..df6e09f7d242226a229b605b3623aad8a0d7fbdf 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -609,6 +609,7 @@ config FB_TGA config FB_UVESA tristate "Userspace VESA VGA graphics support" depends on FB && CONNECTOR + depends on !UML select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT @@ -2254,7 +2255,6 @@ config FB_SSD1307 select FB_SYS_COPYAREA select FB_SYS_IMAGEBLIT select FB_DEFERRED_IO - select PWM select FB_BACKLIGHT help This driver implements support for the Solomon SSD1307 diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c index 6bbcd9fc864e9eba589dcb2a08aab0e127e51e8c..77dbf94aae5f4fdacf5e340fb89453b6b71dba82 100644 --- a/drivers/video/fbdev/controlfb.c +++ b/drivers/video/fbdev/controlfb.c @@ -376,7 +376,7 @@ static int read_control_sense(struct fb_info_control *p) #define CONTROL_PIXCLOCK_MIN 5000 /* ~ 200 MHz dot clock */ /* - * calculate the clock paramaters to be sent to CUDA according to given + * calculate the clock parameters to be sent to CUDA according to given * pixclock in pico second. */ static int calc_clock_params(unsigned long clk, unsigned char *param) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index c0143d38df83a09946aa38bee9a019ebc7fcd4be..14a7d404062c38e006933da5d42a729b8f9fbf07 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2450,7 +2450,8 @@ err_out: if (userfont) { p->userfont = old_userfont; - REFCOUNT(data)--; + if (--REFCOUNT(data) == 0) + kfree(data - FONT_EXTRA_WORDS * sizeof(int)); } vc->vc_font.width = old_width; diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c index 11922b009ed7cefc34bdcf9f5d9824276c093e59..cd07e401b32685992a4bce14b5a7f15908431e89 100644 --- a/drivers/video/fbdev/da8xx-fb.c +++ b/drivers/video/fbdev/da8xx-fb.c @@ -1431,7 +1431,7 @@ static int fb_probe(struct platform_device *device) dev_err(&device->dev, "GLCD: kmalloc for frame buffer failed\n"); ret = -EINVAL; - goto err_release_fb; + goto err_disable_reg; } da8xx_fb_info->screen_base = (char __iomem *) par->vram_virt; @@ -1475,7 +1475,7 @@ static int fb_probe(struct platform_device *device) ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0); if (ret) - goto err_release_fb; + goto err_disable_reg; da8xx_fb_info->cmap.len = par->palette_sz; /* initialize var_screeninfo */ @@ -1529,6 +1529,9 @@ err_cpu_freq: err_dealloc_cmap: fb_dealloc_cmap(&da8xx_fb_info->cmap); +err_disable_reg: + if (par->lcd_supply) + regulator_disable(par->lcd_supply); err_release_fb: framebuffer_release(da8xx_fb_info); diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c index 2398b3d48fedf8f67074192dd19cd537430a0fe7..305f1587bd8988676c9327351dc0cfc08979adfc 100644 --- a/drivers/video/fbdev/ep93xx-fb.c +++ b/drivers/video/fbdev/ep93xx-fb.c @@ -552,12 +552,14 @@ static int ep93xxfb_probe(struct platform_device *pdev) err = register_framebuffer(info); if (err) - goto failed_check; + goto failed_framebuffer; dev_info(info->dev, "registered. 
Mode = %dx%d-%d\n", info->var.xres, info->var.yres, info->var.bits_per_pixel); return 0; +failed_framebuffer: + clk_disable_unprepare(fbi->clk); failed_check: if (fbi->mach_info->teardown) fbi->mach_info->teardown(pdev); diff --git a/drivers/video/fbdev/geode/Kconfig b/drivers/video/fbdev/geode/Kconfig index 2f8f0fb1dae2fec454f6e27e51119bfd4f4bd8c6..b184085a78c243636dca31bee29feb37ef569c4c 100644 --- a/drivers/video/fbdev/geode/Kconfig +++ b/drivers/video/fbdev/geode/Kconfig @@ -5,6 +5,7 @@ config FB_GEODE bool "AMD Geode family framebuffer support" depends on FB && PCI && (X86_32 || (X86 && COMPILE_TEST)) + depends on !UML help Say 'Y' here to allow you to select framebuffer drivers for the AMD Geode family of processors. diff --git a/drivers/video/fbdev/matrox/matroxfb_maven.c b/drivers/video/fbdev/matrox/matroxfb_maven.c index f2e02958673dfd608f5c54230a74088870b4e978..727a10a59811b38d701f12159ce5201ee8d18bea 100644 --- a/drivers/video/fbdev/matrox/matroxfb_maven.c +++ b/drivers/video/fbdev/matrox/matroxfb_maven.c @@ -1249,8 +1249,7 @@ static int maven_shutdown_client(struct i2c_client* clnt) { return 0; } -static int maven_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int maven_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; int err = -ENODEV; @@ -1292,7 +1291,7 @@ static struct i2c_driver maven_driver={ .driver = { .name = "maven", }, - .probe = maven_probe, + .probe_new = maven_probe, .remove = maven_remove, .id_table = maven_id, }; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c index a9fd732f81030056607ccb468f41f1241e300384..0daaf9f89bab5cf638aab7594c2854a05558e8e3 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c @@ -12,7 +12,6 @@ #include #include